pipeline_homo_nn_train_binary.py 3.3 KB

  1. #
  2. # Copyright 2019 The FATE Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. #
  16. import argparse
  17. # torch
  18. import torch as t
  19. from torch import nn
  20. from pipeline import fate_torch_hook
  21. # pipeline
  22. from pipeline.backend.pipeline import PipeLine
  23. from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
  24. from pipeline.component.nn import TrainerParam
  25. from pipeline.interface import Data
  26. from pipeline.utils.tools import load_job_config
  27. fate_torch_hook(t)
  28. def main(config="../../config.yaml", namespace=""):
  29. # obtain config
  30. if isinstance(config, str):
  31. config = load_job_config(config)
  32. parties = config.parties
  33. guest = parties.guest[0]
  34. host = parties.host[0]
  35. arbiter = parties.arbiter[0]
  36. pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
  37. train_data_0 = {"name": "breast_homo_guest", "namespace": "experiment"}
  38. train_data_1 = {"name": "breast_homo_host", "namespace": "experiment"}
  39. reader_0 = Reader(name="reader_0")
  40. reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
  41. reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
  42. data_transform_0 = DataTransform(name='data_transform_0')
  43. data_transform_0.get_party_instance(
  44. role='guest', party_id=guest).component_param(
  45. with_label=True, output_format="dense")
  46. data_transform_0.get_party_instance(
  47. role='host', party_id=host).component_param(
  48. with_label=True, output_format="dense")
  49. model = nn.Sequential(
  50. nn.Linear(30, 1),
  51. nn.Sigmoid()
  52. )
  53. loss = nn.BCELoss()
  54. optimizer = t.optim.Adam(model.parameters(), lr=0.01)
  55. nn_component = HomoNN(name='nn_0',
  56. model=model,
  57. loss=loss,
  58. optimizer=optimizer,
  59. trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
  60. validation_freqs=1),
  61. torch_seed=100
  62. )
  63. pipeline.add_component(reader_0)
  64. pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
  65. pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
  66. pipeline.add_component(Evaluation(name='eval_0'), data=Data(data=nn_component.output.data))
  67. pipeline.compile()
  68. pipeline.fit()
  69. if __name__ == "__main__":
  70. parser = argparse.ArgumentParser("PIPELINE DEMO")
  71. parser.add_argument("-config", type=str,
  72. help="config file")
  73. args = parser.parse_args()
  74. if args.config is not None:
  75. main(args.config)
  76. else:
  77. main()