forked from i-am-bee/beeai-framework
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllms.py
More file actions
46 lines (36 loc) · 1.23 KB
/
llms.py
File metadata and controls
46 lines (36 loc) · 1.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import asyncio
import sys
from dotenv import load_dotenv
from beeai_framework.agents.react import ReActAgent
from beeai_framework.backend import ChatModel
from beeai_framework.memory import UnconstrainedMemory
# Maps the provider name accepted on the command line to the fully-qualified
# model identifier understood by ChatModel.from_name().
LLMS = {
    "ollama": "ollama:llama3.1",
    "watsonx": "watsonx:ibm/granite-3-8b-instruct",
}

# CLI usage text. Lists only the providers actually present in LLMS —
# the previous text advertised `openai`, which has no entry and would be
# rejected with "Unknown provider".
HELP = """
Usage:
  examples.beeai_framework.llms <ollama|watsonx>

Arguments
  `ollama` - requires local ollama service running (i.e., http://127.0.0.1:11434)
  `watsonx` - requires environment variable
    - WATSONX_URL - base URL of your WatsonX instance
    and one of the following
    - WATSONX_API_KEY: API key
    - WATSONX_TOKEN: auth token
"""
async def main(name: str) -> None:
    """Run a tool-less ReAct agent on the given chat model and print its answer.

    Args:
        name: Fully-qualified model identifier, e.g. "ollama:llama3.1",
              as accepted by ChatModel.from_name().
    """
    llm = ChatModel.from_name(name)
    agent = ReActAgent(llm=llm, tools=[], memory=UnconstrainedMemory())
    response = await agent.run("What is the smallest of the Cabo Verde islands?")
    print("answer:", response.result.text)
if __name__ == "__main__":
    # With no argument (or an explicit --help) just print usage and exit.
    if len(sys.argv) < 2 or sys.argv[1] == "--help":
        print(HELP)
    else:
        # Pull provider credentials (e.g. WATSONX_*) from a local .env file.
        load_dotenv()
        provider = sys.argv[1]
        selected = LLMS.get(provider)
        if selected is None:
            print(f"Unknown provider: {sys.argv[1]}\n{HELP}")
        else:
            asyncio.run(main(selected))