_csession.py

#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from typing import Iterable

from fate_arch.abc import AddressABC
from fate_arch.abc import CSessionABC
from fate_arch.common.address import LocalFSAddress
from fate_arch.computing.spark._table import from_hdfs, from_rdd, from_hive, from_localfs
from fate_arch.common import log

LOGGER = log.getLogger()

class CSession(CSessionABC):
    """
    Manage RDDTable instances created on the Spark backend.
    """

    def __init__(self, session_id):
        self._session_id = session_id

    def load(self, address: AddressABC, partitions, schema, **kwargs):
        # Dispatch on the concrete address type to build the backing table.
        from fate_arch.common.address import HDFSAddress

        if isinstance(address, HDFSAddress):
            table = from_hdfs(
                paths=f"{address.name_node}/{address.path}",
                partitions=partitions,
                in_serialized=kwargs.get("in_serialized", True),
                id_delimiter=kwargs.get("id_delimiter", ","),
            )
            table.schema = schema
            return table

        from fate_arch.common.address import PathAddress

        if isinstance(address, PathAddress):
            # A plain path is wrapped as non-distributed local data.
            from fate_arch.computing.non_distributed import LocalData
            from fate_arch.computing import ComputingEngine

            return LocalData(address.path, engine=ComputingEngine.SPARK)

        from fate_arch.common.address import HiveAddress, LinkisHiveAddress

        if isinstance(address, (HiveAddress, LinkisHiveAddress)):
            table = from_hive(
                tb_name=address.name,
                db_name=address.database,
                partitions=partitions,
            )
            table.schema = schema
            return table

        if isinstance(address, LocalFSAddress):
            table = from_localfs(
                paths=address.path,
                partitions=partitions,
                in_serialized=kwargs.get("in_serialized", True),
                id_delimiter=kwargs.get("id_delimiter", ","),
            )
            table.schema = schema
            return table

        raise NotImplementedError(
            f"address type {type(address)} not supported with spark backend"
        )

    def parallelize(self, data: Iterable, partition: int, include_key: bool, **kwargs):
        # noinspection PyPackageRequirements
        from pyspark import SparkContext

        # Key-less data is enumerated so every record gets an index as its key.
        _iter = data if include_key else enumerate(data)
        rdd = SparkContext.getOrCreate().parallelize(_iter, partition)
        return from_rdd(rdd)

    @property
    def session_id(self):
        return self._session_id

    def cleanup(self, name, namespace):
        pass

    def stop(self):
        pass

    def kill(self):
        pass

    def destroy(self):
        pass
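

# Usage sketch: a minimal illustration of how this session might be driven
# directly. It assumes a local Spark context can be created, that HDFSAddress
# accepts name_node and path keyword arguments, and that the paths below are
# hypothetical placeholders rather than real cluster locations.
if __name__ == "__main__":
    from fate_arch.common.address import HDFSAddress

    session = CSession(session_id="example-session")

    # parallelize() turns an in-memory iterable into a Spark-backed table;
    # with include_key=False the values are enumerated to produce keys.
    small_table = session.parallelize(
        data=["a", "b", "c"], partition=2, include_key=False
    )
    LOGGER.info("created in-memory table %s", small_table)

    # load() dispatches on the address type; an HDFSAddress routes to from_hdfs().
    hdfs_table = session.load(
        address=HDFSAddress(name_node="hdfs://localhost:9000", path="/data/example.csv"),
        partitions=4,
        schema={},
        id_delimiter=",",
    )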