main.py

from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    PromptTemplate,
)
from langchain_ollama import OllamaLLM

from chroma_manager import ChromaManager
from config import OLLAMA_URL
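
# Langfuse tracing handler used by ask() below. A minimal sketch, assuming
# the Langfuse v2 LangChain callback with credentials supplied via the
# LANGFUSE_* environment variables:
from langfuse.callback import CallbackHandler

langfuse_handler = CallbackHandler()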
  11. print("Инициализация LLM...")
  12. llm = OllamaLLM(
  13. model="llama3.1:8b",
  14. base_url=f"{OLLAMA_URL}",
  15. temperature=0.15,
  16. num_predict=1024,
  17. reasoning=False,
  18. )
  19. print("Инициализация Chroma...")
  20. retriever = ChromaManager().retriever()
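
# Step 1: contextualization. The history-aware retriever asks the LLM to
# rewrite a follow-up question into a standalone one before querying Chroma.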
contextualize_q_system_prompt = (
    "Given a chat history and the latest user question "
    "which might reference context in the chat history, "
    "formulate a standalone question which can be understood "
    "without the chat history. Do NOT answer the question, just "
    "reformulate it if needed and otherwise return it as is."
)
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)
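
# Step 2: answering. The stuff-documents chain pastes every retrieved
# document into {context} and answers with the chat history in view.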
qa_system_prompt = (
    "You are an assistant for question-answering tasks. Use "
    "the following pieces of retrieved context to answer the "
    "question. If you don't know the answer, just say that you "
    "don't know. Use three sentences maximum and keep the answer "
    "concise."
    "\n\n{context}"
)
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", qa_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
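
# document_prompt controls how each retrieved Document is rendered into
# {context}. "{answer}" assumes the indexed documents expose an "answer"
# metadata field; the library default is "{page_content}".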
question_answer_chain = create_stuff_documents_chain(
    llm, qa_prompt, document_prompt=PromptTemplate.from_template("{answer}")
)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
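
# chat_history accumulates (role, content) tuples across calls to ask(), so
# follow-up questions can be resolved against earlier turns.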
chat_history = []


def ask(question: str):
    print("=" * 100)
    print("User question:", question)
    result = {"question": question, "answer": ""}
    print("=" * 100)
    print("Model answer:")
    # create_retrieval_chain streams dict chunks; only chunks carrying an
    # "answer" key hold generated tokens.
    for chunk in rag_chain.stream(
        {"input": question, "chat_history": chat_history},
        config={"callbacks": [langfuse_handler]},
    ):
        if "answer" in chunk:
            print(chunk["answer"], end="", flush=True)
            result["answer"] += chunk["answer"]
    print()
    chat_history.append(("human", result["question"]))
    chat_history.append(("ai", result["answer"]))


def main():
    questions = [
        # Russian variants of the same questions:
        # 'Какие есть боссы в Террарии?',
        # 'Какой финальный босс?',
        # 'И как его победить?',
        # 'Какую броню на него использовать?',
        "What bosses are there in Terraria?",
        "What is the final boss?",
        "And how to defeat it?",
        "What armor should be used against it?",
    ]
    for question in questions:
        ask(question)


if __name__ == "__main__":
    main()