# inference_request.py — send a sample inference request to a FATE serving gRPC endpoint.
import json
import sys
import threading
import time
import uuid

import grpc

from fate_arch.protobuf.python import inference_service_pb2
from fate_arch.protobuf.python import inference_service_pb2_grpc
  9. def run(address):
  10. ths = []
  11. with grpc.insecure_channel(address) as channel:
  12. for i in range(1):
  13. th = threading.Thread(target=send, args=(channel,))
  14. ths.append(th)
  15. st = int(time.time())
  16. for th in ths:
  17. th.start()
  18. for th in ths:
  19. th.join()
  20. et = int(time.time())
  21. def process_response(call_future):
  22. print(call_future.result())
  23. def send(channel):
  24. stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
  25. request = inference_service_pb2.InferenceMessage()
  26. request_data = dict()
  27. request_data['serviceId'] = 'xxxxxxxxx'
  28. request_data['applyId'] = ''
  29. # request_data['modelId'] = 'arbiter-10000#guest-10000#host-10000#model' # You can specify the model id this way
  30. # request_data['modelVersion'] = 'acd3e1807a1211e9969aacde48001122' # You can specify the model version this way
  31. request_data['caseid'] = uuid.uuid1().hex
  32. feature_data = dict()
  33. feature_data['fid1'] = 5.1
  34. feature_data['fid2'] = 6.2
  35. feature_data['fid3'] = 7.6
  36. request_data['featureData'] = feature_data
  37. request_data['sendToRemoteFeatureData'] = feature_data
  38. print(json.dumps(request_data, indent=4))
  39. request.body = json.dumps(request_data).encode(encoding='utf-8')
  40. print(stub.inference(request))
  41. if __name__ == '__main__':
  42. run(sys.argv[1])