-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
45 lines (37 loc) · 1.5 KB
/
main.py
File metadata and controls
45 lines (37 loc) · 1.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
from pydantic import BaseModel
from dotenv import load_dotenv
from langchain_ollama import ChatOllama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import PydanticOutputParser
from langchain.agents import create_tool_calling_agent, AgentExecutor
from tools.tools import save_tool, search_tool, wiki_tool
# Load environment variables (API keys, etc.) before the rest of the
# module-level setup runs.
load_dotenv()

# Tool belt handed to the agent: save-to-disk, web search, and Wikipedia
# lookup, all defined in tools/tools.py.
tools = [save_tool, search_tool, wiki_tool]
class ResearchResponse(BaseModel):
    """Structured schema the agent's final answer must conform to.

    The PydanticOutputParser built from this model supplies the
    format instructions injected into the system prompt.
    """

    # Topic the user asked to research.
    topic: str
    # Generated summary of the findings.
    summary: str
    # Sources consulted for the summary.
    sources: list[str]
    # Names of the tools the agent invoked while answering.
    tools_used: list[str]
# Local LLM served by Ollama. num_gpu=-1 requests full GPU offload and
# num_ctx sets a 20k-token context window.
llm = ChatOllama(model="gpt-oss:20b", temperature=0.6, num_gpu=-1, num_ctx=20000)

# Parser whose generated format instructions tell the model to emit output
# matching the ResearchResponse schema.
research_response_parser = PydanticOutputParser(pydantic_object=ResearchResponse)

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """
            You are a research assistant that will help generate a research paper.
            Answer the user query and use necessary tools.
            Wrap the output in this format and provide no other text\n{format_instructions}
            """,
        ),
        # Filled in at invoke time by the agent framework.
        ("placeholder", "{chat_history}"),
        ("human", "{query}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
).partial(format_instructions=research_response_parser.get_format_instructions())

agent = create_tool_calling_agent(llm=llm, tools=tools, prompt=prompt)
agent_executor = AgentExecutor(agent=agent, verbose=True, tools=tools)

if __name__ == "__main__":
    # Guarded so that importing this module does not trigger an LLM call;
    # the demo query only runs when the file is executed as a script.
    raw_response = agent_executor.invoke(
        {"query": "What is the most dangerous mountain to climb in the world?"}
    )
    print(raw_response)