# pipeline_homo_nn_train_multi.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse

# torch
import torch as t
from torch import nn

from pipeline import fate_torch_hook
# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam, DatasetParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config

# NOTE(review): fate_torch_hook patches the torch module in place; presumably
# this is what lets the nn model/loss/optimizer built in main() be serialized
# into the FATE job configuration — confirm against the FATE pipeline docs.
# It must run before any torch objects are constructed below.
fate_torch_hook(t)
  28. def main(config="../../config.yaml", namespace=""):
  29. # obtain config
  30. if isinstance(config, str):
  31. config = load_job_config(config)
  32. parties = config.parties
  33. guest = parties.guest[0]
  34. host = parties.host[0]
  35. arbiter = parties.arbiter[0]
  36. pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
  37. train_data_0 = {"name": "vehicle_scale_homo_guest", "namespace": "experiment"}
  38. train_data_1 = {"name": "vehicle_scale_homo_host", "namespace": "experiment"}
  39. reader_0 = Reader(name="reader_0")
  40. reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
  41. reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
  42. data_transform_0 = DataTransform(name='data_transform_0')
  43. data_transform_0.get_party_instance(
  44. role='guest', party_id=guest).component_param(
  45. with_label=True, output_format="dense")
  46. data_transform_0.get_party_instance(
  47. role='host', party_id=host).component_param(
  48. with_label=True, output_format="dense")
  49. model = nn.Sequential(
  50. nn.Linear(18, 4),
  51. nn.Softmax(dim=1) # actually cross-entropy loss does the softmax
  52. )
  53. loss = nn.CrossEntropyLoss()
  54. optimizer = t.optim.Adam(model.parameters(), lr=0.01)
  55. nn_component = HomoNN(name='nn_0',
  56. model=model,
  57. loss=loss,
  58. optimizer=optimizer,
  59. trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=50, batch_size=128,
  60. validation_freqs=1),
  61. # reshape and set label to long for CrossEntropyLoss
  62. dataset=DatasetParam(dataset_name='table', flatten_label=True, label_dtype='long'),
  63. torch_seed=100
  64. )
  65. pipeline.add_component(reader_0)
  66. pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
  67. pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
  68. pipeline.add_component(Evaluation(name='eval_0', eval_type='multi'), data=Data(data=nn_component.output.data))
  69. pipeline.compile()
  70. pipeline.fit()
  71. if __name__ == "__main__":
  72. parser = argparse.ArgumentParser("PIPELINE DEMO")
  73. parser.add_argument("-config", type=str,
  74. help="config file")
  75. args = parser.parse_args()
  76. if args.config is not None:
  77. main(args.config)
  78. else:
  79. main()