use_registry: false
use_deserialize_safe_module: false
dependent_distribution: false
encrypt_password: false
encrypt_module: fate_arch.common.encrypt_utils#pwdecrypt
private_key:
party_id:
hook_module:
  client_authentication: fate_flow.hook.flow.client_authentication
  site_authentication: fate_flow.hook.flow.site_authentication
  permission: fate_flow.hook.flow.permission
hook_server_name:
authentication:
  client:
    switch: false
    http_app_key:
    http_secret_key:
  site:
    switch: false
permission:
  switch: false
  component: false
  dataset: false
fateflow:
  # you must set a real IP address; 127.0.0.1 and 0.0.0.0 are not supported
  host: 127.0.0.1
  http_port: 9380
  grpc_port: 9360
  # when you have multiple fateflow servers on one party,
  # we suggest using nginx for load balancing (see the example at the end of this file)
  nginx:
    host:
    http_port:
    grpc_port:
  # use a random instance_id instead of {host}:{http_port}
  random_instance_id: false
  # rollsite/nginx/fateflow are supported as the coordination proxy:
  # rollsite supports FATE on EggRoll and uses the gRPC protocol
  # nginx supports FATE on EggRoll and FATE on Spark, using the HTTP or gRPC protocol (default is HTTP)
  # fateflow supports FATE on EggRoll and FATE on Spark over HTTP, but does not support the exchange network mode
  # the shorthand (proxy: rollsite) means rollsite uses the rollsite configuration of fate_on_eggroll
  # and nginx uses the nginx configuration of fate_on_spark
  # you can also customize the proxy like this (setting the fateflow of the opposite party as the proxy):
  # proxy:
  #   name: fateflow
  #   host: xx
  #   http_port: xx
  #   grpc_port: xx
  proxy: rollsite
  # supported values: default/http/grpc
  protocol: default
database:
  name: fate_flow
  user: fate
  passwd: fate
  host: 127.0.0.1
  port: 3306
  max_connections: 100
  stale_timeout: 30
zookeeper:
  hosts:
    - 127.0.0.1:2181
  use_acl: false
  user: fate
  password: fate
# engine services
default_engines:
  computing: standalone
  federation: standalone
  storage: standalone
fate_on_standalone:
  standalone:
    cores_per_node: 20
    nodes: 1
fate_on_eggroll:
  clustermanager:
    cores_per_node: 16
    nodes: 1
  rollsite:
    host: 127.0.0.1
    port: 9370
fate_on_spark:
  spark:
    # defaults to the SPARK_HOME environment variable
    home:
    cores_per_node: 20
    nodes: 2
  linkis_spark:
    cores_per_node: 20
    nodes: 2
    host: 127.0.0.1
    port: 9001
    token_code: MLSS
    python_path: /data/projects/fate/python
  hive:
    host: 127.0.0.1
    port: 10000
    auth_mechanism:
    username:
    password:
  linkis_hive:
    host: 127.0.0.1
    port: 9001
  hdfs:
    name_node: hdfs://fate-cluster
    # default: /
    path_prefix:
  rabbitmq:
    host: 192.168.0.4
    mng_port: 12345
    port: 5672
    user: fate
    password: fate
    # default: conf/rabbitmq_route_table.yaml
    route_table:
    # mode: replication / client, default: replication
    mode: replication
    max_message_size: 1048576
  pulsar:
    host: 192.168.0.5
    port: 6650
    mng_port: 8080
    cluster: standalone
    # all parties should use the same tenant
    tenant: fl-tenant
    # message TTL in minutes
    topic_ttl: 30
    # default: conf/pulsar_route_table.yaml
    route_table:
    # mode: replication / client, default: replication
    mode: replication
    max_message_size: 1048576
  nginx:
    host: 127.0.0.1
    http_port: 9300
    grpc_port: 9310
# external services
fateboard:
  host: 127.0.0.1
  port: 8080
enable_model_store: false
model_store_address:
  # to use MySQL as the model store engine:
  # storage: mysql
  # database: fate_model
  # user: fate
  # password: fate
  # host: 127.0.0.1
  # port: 3306
  # other optional configs are passed through to the engine:
  # max_connections: 10
  # stale_timeout: 10
  # to use Tencent COS as the model store engine:
  storage: tencent_cos
  Region:
  SecretId:
  SecretKey:
  Bucket:
servings:
  hosts:
    - 127.0.0.1:8000
fatemanager:
  host: 127.0.0.1
  port: 8001
  federatedId: 0
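
# ---------------------------------------------------------------------------
# Illustrative examples (not part of the shipped defaults). All concrete
# values below are hypothetical placeholders; verify them against your own
# deployment before use.
# ---------------------------------------------------------------------------

# Enabling client authentication: a minimal sketch that flips `switch` under
# `authentication.client` and supplies the key pair consumed by the
# client_authentication hook configured in `hook_module` above.
# authentication:
#   client:
#     switch: true
#     http_app_key: "example_app_key"        # hypothetical value
#     http_secret_key: "example_secret_key"  # hypothetical value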
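
# Load balancing multiple fateflow servers: a sketch assuming an nginx
# instance at 192.168.0.10 (hypothetical address) fronting this party's
# fateflow instances, filled into the `fateflow.nginx` fields above.
# fateflow:
#   nginx:
#     host: 192.168.0.10
#     http_port: 9380
#     grpc_port: 9360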
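
# Switching engines: a sketch for running FATE on Spark with Pulsar as the
# federation engine and HDFS as the storage engine, matching the services
# configured under `fate_on_spark` above; confirm the accepted engine names
# for your FATE release.
# default_engines:
#   computing: spark
#   federation: pulsar
#   storage: hdfs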