python抓取搜狗微信公众号文章

作者:萌力突破 时间:2021-10-25 17:56:08 

初学python,抓取搜狗微信公众号文章存入mysql

mysql表:

(原文此处为两张 mysql 表结构截图,转载时图片丢失)

代码:


# --- Standard-library imports ---
import json
import re
import socket  # used below for the global connect timeout
import time    # used by the crawl loop for throttling sleeps

# --- Third-party imports ---
import pymysql
import requests
from bs4 import BeautifulSoup

# Open the MySQL connection shared by the whole script.
# NOTE: the host/port/user/passwd/db values below are placeholders and
# must be filled in before running (`端口` is not a defined name).
conn = pymysql.connect(host='你的数据库地址', port=端口, user='用户名', passwd='密码', db='数据库名称', charset='utf8')
cursor = conn.cursor()

# hd_gzh holds the list of official accounts to crawl (name in column 1).
cursor.execute("select * from hd_gzh")
effect_row = cursor.fetchall()

# Global socket timeout so a hung HTTP request cannot stall the crawl forever.
socket.setdefaulttimeout(60)
count = 1
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
#阿布云ip代理暂时不用
# proxyHost = "http-cla.abuyun.com"
# proxyPort = "9030"
# # 代理隧道验证信息
# proxyUser = "H56761606429T7UC"
# proxyPass = "9168EB00C4167176"

# proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
#  "host" : proxyHost,
#  "port" : proxyPort,
#  "user" : proxyUser,
#  "pass" : proxyPass,
# }

# proxies = {
#   "http" : proxyMeta,
#   "https" : proxyMeta,
# }

# Check whether an article with this title is already stored.
def checkData(name):
    """Return True when no gzh_article row with title *name* exists
    (i.e. it is safe to insert), False otherwise.

    The title is passed as a query parameter instead of being
    %-formatted into the SQL string, so titles containing quotes
    cannot break the statement (SQL-injection / quoting fix).
    """
    sql = "select * from gzh_article where title = %s"
    matched = cursor.execute(sql, (name,))
    conn.commit()
    return matched == 0
# Insert one crawled article into the gzh_article table.
def insertData(title, picture, author, content):
    """Insert a (title, picture, author, content) row.

    Values are bound as query parameters so the driver handles all
    quoting/escaping — the previous %-formatted SQL broke (and was
    injectable) whenever a value contained a quote character.
    """
    sql = "insert into gzh_article (title,picture,author,content) values (%s, %s, %s, %s)"
    cursor.execute(sql, (title, picture, author, content))
    conn.commit()
    print("插入一条数据")

# For every account in hd_gzh: search Sogou WeChat for the account name,
# follow the hit to the account's profile page, parse the embedded
# article list (the "var msgList = {...}" JSON) and store each article.
for row in effect_row:
    # Search Sogou WeChat for the account name stored in column 1.
    newsurl = 'https://weixin.sogou.com/weixin?type=1&s_from=input&query=' + row[1] + '&ie=utf8&_sug_=n&_sug_type_='
    res = requests.get(newsurl, headers=headers)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # The first search hit (.tit a) links to the account's profile page.
    url = 'https://weixin.sogou.com' + soup.select('.tit a')[0]['href']
    res2 = requests.get(url, headers=headers)
    res2.encoding = 'utf-8'
    soup2 = BeautifulSoup(res2.text, 'html.parser')
    # Sogou assembles the real profile URL in an inline script via
    # repeated "url += '...';" statements; grab the first fragment.
    pattern = re.compile(r"url \+= '(.*?)';", re.MULTILINE | re.DOTALL)
    script = soup2.find("script")
    url2 = pattern.search(script.text).group(1)
    res3 = requests.get(url2, headers=headers)
    res3.encoding = 'utf-8'
    soup3 = BeautifulSoup(res3.text, 'html.parser')
    # The article list is embedded as "var msgList = {...};" JSON.
    pattern2 = re.compile(r"var msgList = (.*?);$", re.MULTILINE | re.DOTALL)
    script2 = soup3.find("script", text=pattern2)
    s2 = json.loads(pattern2.search(script2.text).group(1))
    # Throttle: wait 10 s between profile-page fetches.
    time.sleep(10)

    # NOTE(review): in the published source this loop had lost its
    # indentation and ran at module level, so only the LAST account's
    # `s2` was ever processed; it belongs inside the per-account loop.
    for news in s2["list"]:
        articleurl = "https://mp.weixin.qq.com" + news["app_msg_ext_info"]["content_url"]
        # content_url comes from HTML-embedded JSON, so decode &amp;
        # entities (the original `replace('&','&')` was a no-op).
        articleurl = articleurl.replace('&amp;', '&')
        res4 = requests.get(articleurl, headers=headers)
        res4.encoding = 'utf-8'
        soup4 = BeautifulSoup(res4.text, 'html.parser')
        # Skip articles whose title is already stored.
        if checkData(news["app_msg_ext_info"]["title"]):
            insertData(news["app_msg_ext_info"]["title"], news["app_msg_ext_info"]["cover"], news["app_msg_ext_info"]["author"], pymysql.escape_string(str(soup4)))
        count += 1
        # Throttle: wait 10 s between article fetches.
        time.sleep(10)
        # Each list entry may carry additional grouped articles.
        for news2 in news["app_msg_ext_info"]["multi_app_msg_item_list"]:
            articleurl2 = "https://mp.weixin.qq.com" + news2["content_url"]
            articleurl2 = articleurl2.replace('&amp;', '&')
            res5 = requests.get(articleurl2, headers=headers)
            res5.encoding = 'utf-8'
            soup5 = BeautifulSoup(res5.text, 'html.parser')
            if checkData(news2["title"]):
                insertData(news2["title"], news2["cover"], news2["author"], pymysql.escape_string(str(soup5)))
            count += 1
            time.sleep(10)
cursor.close()
conn.close()
print("操作完成")

来源:https://blog.csdn.net/a2398936046/article/details/88814078

标签:python,抓取,微信
0
投稿

猜你喜欢

  • 如何将 Access 的 Memo 型态字段汇入到 SQL2005 的 nvarchar 型态字段

    2008-12-26 18:13:00
  • python实现简单五子棋游戏

    2021-04-04 16:15:57
  • 三大措施设置数据库安全 保障网站安全运营

    2008-11-28 14:41:00
  • Django 拆分model和view的实现方法

    2022-03-28 02:35:34
  • 关于《回访确认》的几个问题

    2009-08-24 12:43:00
  • Mootools 1.2教程(17)——手风琴插件

    2008-12-11 13:39:00
  • sqlserver合并DataTable并排除重复数据的通用方法分享

    2012-01-05 18:59:56
  • 改进评论提交表单

    2009-03-25 20:37:00
  • 不得不看的JS基础知识(事件触发篇)

    2008-12-04 16:38:00
  • python使用nibabel和sitk读取保存nii.gz文件实例

    2021-03-11 16:12:53
  • ORACLE 数据库RMAN备份恢复

    2009-04-24 12:23:00
  • ASP教程:制作登陆验证页面程序

    2008-10-23 15:00:00
  • jquery 常用操作

    2010-01-12 16:00:00
  • XML教程—编写结构完整的XML文档

    2008-10-11 13:43:00
  • 使用JScript遍历Request表单参数集合

    2011-02-26 11:08:00
  • 基于Python制作一个汇率换算程序

    2022-05-25 20:33:25
  • 浅析网页Transitional和Strict的文档声明的区别

    2009-02-17 12:45:00
  • python3.6使用urllib完成下载的实例

    2023-08-03 21:21:13
  • 浅谈SQL与PLSQL开发实战

    2011-05-05 08:15:00
  • sqlserver 不重复的随机数

    2012-02-12 15:29:29
  • asp之家 网络编程 m.aspxhome.com