@@ -37,7 +37,7 @@ def main(
     lora_weights_path: str = "",
     lora_config_path: str= "", # provide only the file path, excluding the file name 'adapter_config.json'
     prompt_template: str = "", # The prompt template to use, will default to alpaca.
-    server_name: str = "127.0.0.1",
+    server_name: str = "0.0.0.0",
     share_gradio: bool = False,
 ):
     # 从命令行参数或环境变量获取基础模型名称
@@ -213,16 +213,13 @@ def main(
             gr.components.Checkbox(label="Stream output"),
         ],
         outputs=[
-            gr.inputs.Textbox(
-                lines=5,
-                label="Output",
-            )
+            gr.components.Textbox(lines=5, label="Output")
         ],
         title="FederatedGPT-shepherd",
         description="Shepherd is a LLM that has been fine-tuned in a federated manner ",
     ).queue()




-    sherpherd_UI.launch(share=True)
+    sherpherd_UI.launch(server_name=server_name, share=share_gradio)