Main.py

import datetime
import random
import time

from lxml import etree  # imported but not referenced in this file

from Crawler.Page import get_one_page
from Processor.CommentNum import get_comment_count
from Processor.Content import get_content
from Processor.LikeNum import get_like_count
from Processor.Mid import get_mid
from Processor.Time import get_time
from Storage.Excel import get_one_page_excel, save_to_excel
from Storage.Json import save_2_json
from Storage.Utils import Merge
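
# Module layout, as implied by the imports above: Crawler/ downloads raw
# search-result pages, Processor/ extracts individual fields (content, mid,
# like/comment counts, post time) from the HTML, and Storage/ persists the
# accumulated results to Excel and JSON.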

def run_from_time_a_2_time_b(keyword_temp, date_begin_temp, date_end_temp,
                             proxy_temp, cookie_temp, user_agent_temp):
    """Crawl every search-result page for keyword_temp posted between
    date_begin_temp and date_end_temp; return (excel_rows, json_dict)."""
    page_count = 1
    all_data_excel = []
    all_data_json = {}
    while True:
        print('[-](' + date_begin_temp + '——' + date_end_temp +
              ')-page_' + str(page_count) + ': crawling...')
        html = get_one_page(keyword_temp, page_count, date_begin_temp,
                            date_end_temp, proxy_temp, cookie_temp, user_agent_temp)
        wei_bo_content_str, ifEnd = get_content(html)
        if ifEnd:
            # No results on this page: the time window is exhausted.
            break
        wei_bo_mid_str = get_mid(html)
        wei_bo_comment_count = get_comment_count(html)
        wei_bo_like_count = get_like_count(html)
        wei_bo_time_str = get_time(html, date_begin_temp)
        length = len(wei_bo_content_str)
        # Key each post by its mid (Weibo's unique message id), so merging
        # page dicts deduplicates posts that appear on more than one page.
        result_one_page = {}
        for i in range(length):
            result_one_page[wei_bo_mid_str[i]] = {
                'content': wei_bo_content_str[i],
                'comment_count': wei_bo_comment_count[i],
                'like_count': wei_bo_like_count[i],
                'time': wei_bo_time_str[i]
            }
        Merge(result_one_page, all_data_json)
        print('[-](' + date_begin_temp + '——' + date_end_temp + ')-page_' +
              str(page_count) + ': fetched ' + str(length) + ' posts')
        one_page_data = get_one_page_excel(wei_bo_content_str, wei_bo_mid_str,
                                           wei_bo_time_str, wei_bo_like_count,
                                           wei_bo_comment_count, length)
        all_data_excel += one_page_data
        time.sleep(random.randint(3, 6))  # random pause between pages to avoid bans
        page_count += 1
    print('[-](' + date_begin_temp + '——' + date_end_temp +
          ')-page_' + str(page_count) + ': finished')
    return all_data_excel, all_data_json
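
# `Merge` (Storage.Utils) is not shown in this file. Its call sites fold the
# entries of the first dict into the second, accumulating dict, so a minimal
# stand-in -- an assumption, not the project's actual implementation -- would
# be a shallow update:
#
#     def Merge(source_dict, target_dict):
#         target_dict.update(source_dict)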

if __name__ == '__main__':
    keyword = input('[-]Enter the search topic: ')
    date_str = input('[-]Enter the date to query (format: 2021-07-01): ')
    date = datetime.datetime.strptime(date_str, "%Y-%m-%d")
    # cookie = input('[-]Enter the cookie: ')
    cookie = 'SINAGLOBAL=5651725432098.134.1642487258936; UOR=,,www.google.com.hk; SSOLoginState=1654606657; SUB=_2A25PmzsRDeRhGeBO4lsY9y_Pyz-IHXVtZEVZrDV8PUJbkNAKLUOkkW1NRYEkPlI6BeV0nEOardLZmWDV2bJuQAkj; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWLS0lcQryz4UlBfKyjai.L5NHD95Qceh.41KMpe050Ws4Dqcjz-cyLdspDqgYt; _s_tentry=weibo.com; Apache=8874316633747.783.1654656854407; ULV=1654656854423:4:1:1:8874316633747.783.1654656854407:1646621305826'
    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0'
    proxy = '127.0.0.1:80'
    data_excel = []
    data_json = {}
    # Crawl one-hour windows; range(9, 10) covers 09:00-10:00 only.
    # Widen to range(0, 24) to crawl the whole day.
    for i in range(9, 10):
        # Window bounds use the hour-suffixed form 'YYYY-MM-DD-H',
        # e.g. '2021-07-01-9' to '2021-07-01-10'.
        date_begin = date_str + '-' + str(i % 24)
        if i == 23:
            # The 23:00 window ends at hour 0 of the following day.
            date_temp_str = datetime.datetime.strftime(
                date + datetime.timedelta(days=1), "%Y-%m-%d")
            date_end = date_temp_str + '-' + str((i + 1) % 24)
        else:
            date_end = date_str + '-' + str((i + 1) % 24)
        if not cookie:
            print('Please fill in the cookie in the script!')
            break
        a_2_b_data_excel, a_2_b_data_json = run_from_time_a_2_time_b(
            keyword, date_begin, date_end, proxy, cookie, user_agent)
        data_excel += a_2_b_data_excel
        Merge(a_2_b_data_json, data_json)
    save_to_excel(data_excel, keyword, date_str)
    save_2_json(data_json, keyword, date_str)
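
For context, get_one_page (imported from Crawler.Page) is not part of this
listing. Below is a minimal sketch of what such a fetcher might look like,
assuming Weibo's s.weibo.com search endpoint with its
timescope=custom:<begin>:<end> parameter, which matches the hour-suffixed
date strings built above; the URL, query-parameter names, and error handling
are assumptions, not the project's actual code:

import requests

def get_one_page(keyword, page, date_begin, date_end, proxy, cookie, user_agent):
    # Hour-granular window, e.g. 'custom:2021-07-01-9:2021-07-01-10' (assumed).
    params = {
        'q': keyword,
        'typeall': '1',
        'suball': '1',
        'timescope': 'custom:' + date_begin + ':' + date_end,
        'page': page,
    }
    headers = {'Cookie': cookie, 'User-Agent': user_agent}
    proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
    response = requests.get('https://s.weibo.com/weibo', params=params,
                            headers=headers, proxies=proxies, timeout=10)
    response.raise_for_status()  # surface HTTP errors instead of parsing them
    return response.text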