from torch import optim
from federatedml.nn.backend.torch.base import FateTorchLayer, Sequential
from federatedml.nn.backend.torch.base import FateTorchOptimizer
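
# Each optimizer class below inherits from both the torch.optim optimizer of the
# same name and FateTorchOptimizer. The constructor records every hyper-parameter
# in self.param_dict and only initializes the underlying torch optimizer when
# `params` is supplied; with params=None the instance acts as a pure
# configuration holder that keeps the hyper-parameters for later use.
# check_params, inherited from FateTorchOptimizer, is assumed to validate or
# normalize the `params` argument before it is forwarded to the torch
# constructor.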

class ASGD(optim.ASGD, FateTorchOptimizer):

    def __init__(
            self,
            params=None,
            lr=0.01,
            lambd=0.0001,
            alpha=0.75,
            t0=1000000.0,
            weight_decay=0,
            foreach=None,
            maximize=False,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['lambd'] = lambd
        self.param_dict['alpha'] = alpha
        self.param_dict['t0'] = t0
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.ASGD.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class Adadelta(optim.Adadelta, FateTorchOptimizer):

    def __init__(self, params=None, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0, foreach=None):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['rho'] = rho
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.Adadelta.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class Adagrad(optim.Adagrad, FateTorchOptimizer):

    def __init__(
            self,
            params=None,
            lr=0.01,
            lr_decay=0,
            weight_decay=0,
            initial_accumulator_value=0,
            eps=1e-10,
            foreach=None,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['lr_decay'] = lr_decay
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['initial_accumulator_value'] = initial_accumulator_value
        self.param_dict['eps'] = eps
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.Adagrad.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class Adam(optim.Adam, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['amsgrad'] = amsgrad
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.Adam.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class AdamW(optim.AdamW, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['amsgrad'] = amsgrad
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.AdamW.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class Adamax(optim.Adamax, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.Adamax.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class LBFGS(optim.LBFGS, FateTorchOptimizer):

    def __init__(
            self,
            params=None,
            lr=1,
            max_iter=20,
            max_eval=None,
            tolerance_grad=1e-07,
            tolerance_change=1e-09,
            history_size=100,
            line_search_fn=None,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['max_iter'] = max_iter
        self.param_dict['max_eval'] = max_eval
        self.param_dict['tolerance_grad'] = tolerance_grad
        self.param_dict['tolerance_change'] = tolerance_change
        self.param_dict['history_size'] = history_size
        self.param_dict['line_search_fn'] = line_search_fn
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.LBFGS.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
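
# Note: unlike the other optimizers wrapped here, torch.optim.LBFGS.step()
# expects a closure that re-evaluates the model and returns the loss, so a
# parameter-initialized LBFGS instance has to be driven with
# `optimizer.step(closure)` in the training loop.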

class NAdam(optim.NAdam, FateTorchOptimizer):

    def __init__(
            self,
            params=None,
            lr=0.002,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=0,
            momentum_decay=0.004,
            foreach=None,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['momentum_decay'] = momentum_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.NAdam.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class RAdam(optim.RAdam, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.RAdam.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class RMSprop(optim.RMSprop, FateTorchOptimizer):

    def __init__(
            self,
            params=None,
            lr=0.01,
            alpha=0.99,
            eps=1e-08,
            weight_decay=0,
            momentum=0,
            centered=False,
            foreach=None,
            maximize=False,
            differentiable=False,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['alpha'] = alpha
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['momentum'] = momentum
        self.param_dict['centered'] = centered
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.param_dict['differentiable'] = differentiable
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.RMSprop.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class Rprop(optim.Rprop, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.01, etas=(0.5, 1.2), step_sizes=(1e-06, 50), foreach=None, maximize=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['etas'] = etas
        self.param_dict['step_sizes'] = step_sizes
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.Rprop.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class SGD(optim.SGD, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['momentum'] = momentum
        self.param_dict['dampening'] = dampening
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['nesterov'] = nesterov
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.SGD.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)

class SparseAdam(optim.SparseAdam, FateTorchOptimizer):

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, maximize=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.SparseAdam.__init__(self, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            return 'Optimizer {} without initialized parameters'.format(type(self).__name__)
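
# Minimal usage sketch. `model` and `inputs` are hypothetical objects used only
# for illustration, and it is assumed that check_params accepts a standard torch
# parameter iterable:
#
#     # 1) Configuration-only: no parameters yet, the hyper-parameters are kept
#     #    in param_dict and the torch optimizer is not initialized.
#     opt_conf = Adam(lr=0.001, weight_decay=0.01)
#
#     # 2) Regular torch optimizer: passing parameters triggers
#     #    optim.Adam.__init__, so the usual training-loop calls work.
#     opt = Adam(params=model.parameters(), lr=0.001)
#     opt.zero_grad()
#     loss = model(inputs).sum()
#     loss.backward()
#     opt.step()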