pipeline_homo_nn_train_regression.py

#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse

# torch
import torch as t
from torch import nn

from pipeline import fate_torch_hook
# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
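
# fate_torch_hook patches torch so that the model, loss and optimizer defined
# below can be recorded and serialized by the FATE pipeline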
fate_torch_hook(t)


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host,
                                                                                arbiter=arbiter)
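
    # both tables must be uploaded to FATE storage beforehand; guest and host each read their own local table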
    train_data_0 = {"name": "student_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "student_homo_host", "namespace": "experiment"}

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
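
    # DataTransform parses the raw tables into FATE's dense instance format;
    # in homo (horizontal) federation every party holds its own labels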
    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")
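
    # a single linear layer over the 13 input features, i.e. plain linear regression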
    model = nn.Sequential(
        nn.Linear(13, 1)
    )
    loss = nn.MSELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
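
    # fedavg_trainer performs federated averaging: each party trains locally and
    # the arbiter aggregates the updates; validation_freqs=1 validates every epoch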
    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1),
                          torch_seed=100
                          )
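
    # wire up the job DAG: reader -> data_transform -> nn -> evaluation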
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0', eval_type='regression'), data=Data(data=nn_component.output.data))

    pipeline.compile()
    pipeline.fit()
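
# submit from the command line, e.g.:
#   python pipeline_homo_nn_train_regression.py -config ../../config.yaml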

if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()