# optimizer_test.py
import math
import unittest

import numpy as np

from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.optim.optimizer import _SgdOptimizer
  6. class TestInitialize(unittest.TestCase):
  7. def test_optimizer(self):
  8. model_weights = LinearModelWeights(np.array([0.10145129, 0.39987222, -0.96630206, -0.41208423, -0.24609715,
  9. -0.70518652, 0.71478064, 0.57973894, 0.5703622, -0.45482125,
  10. 0.32676194, -0.00648212, 0.35542874, -0.26412695, -0.07964603,
  11. 1.2158522, -0.41255564, -0.01686044, -0.99897542, 1.56407211,
  12. 0.52040711, 0.24568055, 0.4880494, 0.52269909, -0.14431923,
  13. 0.03282471, 0.09437969, 0.21407206, -0.270922]), True)
  14. prev_model_weights = LinearModelWeights(np.array([0.10194331, 0.40062114, -0.96597859, -0.41202348, -0.24587005,
  15. -0.7047801, 0.71515712, 0.58045583, 0.57079086, -0.45473676,
  16. 0.32775863, -0.00633238, 0.35567219, -0.26343469, -0.07964763,
  17. 1.2165642, -0.41244749, -0.01589344, -0.99862982, 1.56498698,
  18. 0.52058152, 0.24572171, 0.48809946, 0.52272993, -0.14330367,
  19. 0.03283002, 0.09439601, 0.21433497, -0.27011673]), True)
  20. prev_model_weights_null = None
  21. eps = 0.00001
  22. # 1: alpha = 0, no regularization
  23. learning_rate = 0.2
  24. alpha = 0
  25. penalty = "L2"
  26. decay = "0.2"
  27. decay_sqrt = "true"
  28. mu = 0.01
  29. init_params = [learning_rate, alpha, penalty, decay, decay_sqrt, mu]
  30. optimizer = _SgdOptimizer(*init_params)
  31. loss_norm = optimizer.loss_norm(model_weights, prev_model_weights_null)
  32. self.assertTrue(math.fabs(loss_norm) <= eps) # == 0
  33. # 2
  34. alpha = 0.1
  35. init_params = [learning_rate, alpha, penalty, decay, decay_sqrt, mu]
  36. optimizer = _SgdOptimizer(*init_params)
  37. loss_norm = optimizer.loss_norm(model_weights, prev_model_weights_null)
  38. print("loss_norm = {}".format(loss_norm))
  39. self.assertTrue(math.fabs(loss_norm - 0.47661579875266186) <= eps)
  40. # 3
  41. loss_norm = optimizer.loss_norm(model_weights, prev_model_weights)
  42. print("loss_norm = {}".format(loss_norm))
  43. self.assertTrue(math.fabs(loss_norm - 0.47661583737200075) <= eps)
  44. if __name__ == '__main__':
  45. unittest.main()