# clientlgfedavg.py
  1. import copy
  2. import torch
  3. import torch.nn as nn
  4. import numpy as np
  5. from flcore.clients.clientbase import Client
  6. class clientLGFedAvg(Client):
  7. def __init__(self, args, id, train_samples, test_samples, **kwargs):
  8. super().__init__(args, id, train_samples, test_samples, **kwargs)
  9. self.criterion = nn.CrossEntropyLoss()
  10. self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9)
  11. def train(self):
  12. trainloader = self.load_train_data()
  13. self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9)
  14. # self.model.to(self.device)
  15. self.model.train()
  16. max_local_steps = self.local_steps
  17. for step in range(max_local_steps):
  18. for i, (x, y) in enumerate(trainloader):
  19. if type(x) == type([]):
  20. x[0] = x[0].to(self.device)
  21. else:
  22. x = x.to(self.device)
  23. y = y.to(self.device)
  24. for param in self.model.base.parameters():
  25. param.requires_grad = True
  26. for param in self.model.predictor.parameters():
  27. param.requires_grad = False
  28. self.optimizer.zero_grad()
  29. output = self.model(x)
  30. loss = self.criterion(output, y)
  31. loss.backward()
  32. self.optimizer.step()
  33. for param in self.model.base.parameters():
  34. param.requires_grad = False
  35. for param in self.model.predictor.parameters():
  36. param.requires_grad = True
  37. self.optimizer.zero_grad()
  38. output = self.model(x)
  39. loss = self.criterion(output, y)
  40. loss.backward()
  41. self.optimizer.step()
  42. # self.model.cpu()
  43. def set_parameters(self, model):
  44. for new_param, old_param in zip(model.parameters(), self.model.predictor.parameters()):
  45. old_param.data = new_param.data.clone()