# from kalm.agent import KalmAgent
from openai import OpenAI
import yaml

# Load the API key and endpoint URL from the YAML config file at import time.
config = "config.yaml"
with open(config, 'r', encoding='utf-8') as f:
    cfg = yaml.safe_load(f)
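
# Expected shape of config.yaml, inferred from the two keys read above
# (placeholder values shown, not real credentials):
#
#   llm_api_key: "sk-..."
#   base_url: "https://api.openai.com/v1"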


class OpenAIAgent:
    """Chat agent backed by an OpenAI-compatible chat completions endpoint."""

    def __init__(self, model_name, system_prompt, temperature, max_tokens):
        self.api_key = cfg['llm_api_key']
        self.base_url = cfg['base_url']
        self.model_name = model_name
        self.system_prompt = system_prompt
        self.temperature = temperature
        self.max_tokens = max_tokens

    def generate(self, query, history=None):
        """Return the complete response to `query`.

        `history` is an optional list of (user_msg, assistant_msg) pairs
        replayed before the current query.
        """
        client = OpenAI(base_url=self.base_url, api_key=self.api_key)
        messages = [{"role": "system", "content": self.system_prompt}]
        if history:
            for user_msg, assistant_msg in history:
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": query})
        response = client.chat.completions.create(
            model=self.model_name,
            messages=messages,
            temperature=self.temperature,
            max_tokens=self.max_tokens
        )
        return response.choices[0].message.content
    def generate_stream(self, query, history=None):
        """Generate a response with streaming support, yielding
        (reasoning, content) tuples; either element may be None.
        """
        client = OpenAI(base_url=self.base_url, api_key=self.api_key)
        messages = [{"role": "system", "content": self.system_prompt}]
        if history:
            for user_msg, assistant_msg in history:
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": query})
        stream = client.chat.completions.create(
            model=self.model_name,
            messages=messages,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            stream=True
        )
        for chunk in stream:
            # Some OpenAI-compatible backends emit keep-alive chunks with an
            # empty choices list; skip them to avoid an IndexError.
            if not chunk.choices:
                continue
            delta = chunk.choices[0].delta
            reasoning = None
            content = None
            # `reasoning_content` is a non-standard delta field exposed by
            # some reasoning models, so probe for it defensively.
            if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
                reasoning = delta.reasoning_content
            if delta.content is not None:
                content = delta.content
            if reasoning is not None or content is not None:
                yield (reasoning, content)

# def kalm_agent_init(service_url, system_prompt, temperature, max_new_tokens):
#     agent = KalmAgent(
#         adams_business_name="xxxxxx",
#         adams_platform_user="xxxxxx",
#         adams_user_token="xxxxxx",
#         service_url=service_url,
#         temperature=temperature,
#         top_p=0.8,
#         top_k=20,
#         num_beams=1,
#         max_new_tokens=max_new_tokens,
#         repetition_penalty=1,
#         no_repeat_ngrams=0,
#         retries=3,
#         single_retry_timeout=300,
#         system_prompt=system_prompt
#     )
#     return agent
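

# A minimal usage sketch, assuming config.yaml holds valid credentials and the
# model name below is actually served at `base_url` (both are placeholders,
# not part of the original module):
if __name__ == "__main__":
    agent = OpenAIAgent(
        model_name="gpt-4o-mini",  # placeholder model name
        system_prompt="You are a helpful assistant.",
        temperature=0.7,
        max_tokens=512,
    )
    # Blocking call: the full answer is returned at once.
    print(agent.generate("What is a large language model?"))
    # Streaming call: print content deltas as they arrive, ignoring reasoning.
    for reasoning, content in agent.generate_stream("Say hello in one sentence."):
        if content:
            print(content, end="", flush=True)
    print()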