import json

from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

from global_langchain import global_model
from utils.utils import convert_choice


def format_langchain(text, output_type):
    """Ask the chat model to repair `text` into the requested `output_type`,
    then parse the reply as JSON, retrying when parsing fails."""
    print('[repair]-------------------------- repairing format --------------------------')

    # temperature=0 keeps the repair output deterministic.
    chat = ChatOpenAI(temperature=0)
    model = global_model()

    # Build the two-part chat prompt from the globally configured templates.
    system_message_prompt = SystemMessagePromptTemplate.from_template(
        model.format_system_template)
    human_message_prompt = HumanMessagePromptTemplate.from_template(
        model.format_human_template)
    chat_prompt = ChatPromptTemplate.from_messages(
        [system_message_prompt, human_message_prompt])

    rsp = chat(
        chat_prompt.format_prompt(output_type=output_type, text=text).to_messages())

    print('[repair]-------------------------- GPT returned --------------------------')
    print(rsp.content)

    try:
        # convert_choice post-processes each decoded JSON object.
        result = json.loads(rsp.content, object_hook=convert_choice)
    except (json.JSONDecodeError, TypeError):
        # The reply was not valid JSON: ask the model to repair the text again.
        # Note: this retries recursively with no upper bound.
        result = format_langchain(text, output_type)

    print('[repair]-------------------------- json.loads returned --------------------------')
    print(result)
    return result
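

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal example of calling format_langchain, assuming that:
#   * OPENAI_API_KEY is set in the environment for ChatOpenAI,
#   * global_model() returns an object whose format_system_template and
#     format_human_template contain {output_type} and {text} placeholders,
#   * convert_choice (from utils.utils) maps each decoded JSON dict to the
#     project-specific object used downstream.
# The raw_text and output_type values below are made up for illustration.
if __name__ == '__main__':
    raw_text = '{"question": "1+1=?", "choices": ["1", "2", "3"], "answer": "2"'  # malformed JSON
    repaired = format_langchain(raw_text, output_type='single_choice')
    print(repaired)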