pipeline-upload-spark.py

#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os

from pipeline.backend.pipeline import PipeLine

# default FATE installation path; data file paths are resolved relative to it
DATA_BASE = "/data/projects/fate"


def main():
    # parties config
    guest = 9999

    # partition count for data storage
    partition = 4

    dense_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    tag_data = {"name": "tag_value_1", "namespace": "experiment"}

    pipeline_upload = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest)

    # add upload data info
    # csv file path is joined from DATA_BASE and the relative file name
    pipeline_upload.add_upload_data(file=os.path.join(DATA_BASE, "examples/data/breast_hetero_guest.csv"),
                                    table_name=dense_data["name"],      # table name
                                    namespace=dense_data["namespace"],  # namespace
                                    head=1, partition=partition,        # first row is a header
                                    id_delimiter=",")                   # id delimiter, needed for spark
    pipeline_upload.add_upload_data(file=os.path.join(DATA_BASE, "examples/data/tag_value_1000_140.csv"),
                                    table_name=tag_data["name"],
                                    namespace=tag_data["namespace"],
                                    head=0, partition=partition,        # no header row in the tag file
                                    id_delimiter=",")

    # upload all data; drop=1 overwrites an existing table with the same name/namespace
    pipeline_upload.upload(drop=1)
    print(json.dumps(pipeline_upload._upload_conf(), indent=4))


if __name__ == "__main__":
    main()
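
As a usage note, not part of the original file: once the upload finishes, a downstream training pipeline refers to the data by the same table name and namespace rather than by file path. A minimal sketch of that consuming side, assuming the standard FATE pipeline Reader component and reusing the guest party id from above (the component name reader_0 is illustrative):

from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader

guest = 9999

# build a pipeline that starts from the previously uploaded table
pipeline = PipeLine().set_initiator(role="guest", party_id=guest).set_roles(guest=guest)

# Reader fetches an already-uploaded table instead of reading a local file
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role="guest", party_id=guest).component_param(
    table={"name": "breast_hetero_guest", "namespace": "experiment"})
pipeline.add_component(reader_0)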