-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm.py
More file actions
34 lines (26 loc) · 1.06 KB
/
llm.py
File metadata and controls
34 lines (26 loc) · 1.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import os
# Third-party OpenAI SDK (v1.x client interface).
from openai import OpenAI
# Running totals of tokens accumulated across all create_completion() calls;
# read by print_token_usage() for the final report.
input_token_count = 0
output_token_count = 0
# Shared client; the API key is read from the OPENAI_API_KEY environment
# variable (None if unset — the SDK will raise on first use in that case).
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
def count_tokens(text):
    """Crudely estimate token count as the number of whitespace-separated
    words; falsy input (None, empty string) counts as 0."""
    if not text:
        return 0
    return len(text.split())
def create_completion(*, model, messages, **kwargs):
    """Call the Chat Completions API and accumulate token usage totals.

    Prefers the exact counts reported by the API in ``response.usage``
    (prompt_tokens / completion_tokens); only falls back to the crude
    whitespace estimate from count_tokens() when usage is unavailable
    (e.g. some streaming responses). The original code always used the
    estimate, which made the usage report and cost figure inaccurate.

    Args:
        model: Model name passed through to the API.
        messages: List of chat message dicts ({"role": ..., "content": ...}).
        **kwargs: Extra keyword arguments forwarded to the API call.

    Returns:
        The raw API response object, unchanged.
    """
    global input_token_count, output_token_count
    response = client.chat.completions.create(model=model, messages=messages, **kwargs)
    usage = getattr(response, "usage", None)
    if usage is not None:
        # Exact counts straight from the API.
        input_token_count += usage.prompt_tokens
        output_token_count += usage.completion_tokens
    else:
        # Fallback: whitespace-based estimate (count_tokens tolerates None).
        input_token_count += sum(
            count_tokens(msg.get("content", "")) for msg in messages
        )
        output_token_count += count_tokens(response.choices[0].message.content)
    return response
def print_token_usage():
    """Print a summary of the accumulated token totals and estimated cost.

    Cost is computed at $0.10 per 1M input tokens and $0.40 per 1M output
    tokens, matching the module's original pricing assumptions.
    """
    inp = input_token_count
    out = output_token_count
    estimated_cost = (inp * 0.10 + out * 0.40) / 1_000_000
    print("\n--- Token Usage Report ---")
    print("Input tokens:", inp)
    print("Output tokens:", out)
    print("Total tokens:", inp + out)
    print("Estimated cost: ${:.6f}".format(estimated_cost))