Scraping Data with Multiple Threads and Storing It in a Database with Python
Author: zn505119020  Posted: 2024-01-22 12:36:34
This article walks through a working example of scraping data with multiple threads in Python and storing the results in a database. It is shared for your reference; the details are as follows:
1. Database class
"""
使用须知:
代码中数据表名 aces ,需要更改该数据表名称的注意更改
"""
import pymysql
class Database():
# 设置本地数据库用户名和密码
host = "localhost"
user = "root"
password = ""
database = "test"
port = 3306
charset = "utf8"
cursor=''
connet =''
def __init__(self):
#连接到数据库
self.connet = pymysql.connect(host = self.host , user = self.user,password = self.password , database = self.database, charset = self.charset)
self.cursor = self.connet.cursor()
# #删表
def dropTables(self):
self.cursor.execute('''''drop table if exists aces''')
print("删表")
#建表
def createTables(self):
self.cursor.execute('''''create table if not exists aces
(
asin varchar(11) primary key not null,
checked varchar(200));''')
print("建表")
#保存数据
def save(self,aceslist):
self.cursor.execute("insert into aces ( asin, checked) values(%s,%s)", (aceslist[0],aceslist[1]))
self.connet.commit()
#判断元素是否已经在数据库里,在就返回true ,不在就返回false
def is_exists_asin(self,asin):
self.cursor.execute('select * from aces where asin = %s',asin)
if self.cursor.fetchone() is None:
return False
return True
# db =Database()
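The class can be exercised on its own before wiring it into the crawler. The lines below are a minimal sketch, assuming a local MySQL server reachable with the settings above; the ASIN values and the title text are hypothetical placeholders.
from local_data import Database

db = Database()
db.dropTables()
db.createTables()
db.save(["B00TEST123", "Example product title"])  # hypothetical test record
print(db.is_exists_asin("B00TEST123"))  # expected: True
print(db.is_exists_asin("B00OTHER456"))  # expected: False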
2. Multithreaded task classes
import urllib.parse
import urllib.request
from queue import Queue
import time
import random
import threading
import logging
import pymysql
from bs4 import BeautifulSoup
from local_data import Database

# This module contains several classes: AmazonSpider, ThreadCrawl(threading.Thread) and AmazonSpiderJob
class AmazonSpider():
    def __init__(self):
        self.db = Database()
    def randHeader(self):
        # Build a request header with a randomly chosen User-Agent
        head_connection = ['Keep-Alive', 'close']
        head_accept = ['text/html, application/xhtml+xml, */*']
        head_accept_language = ['zh-CN,fr-FR;q=0.5', 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3']
        head_user_agent = ['Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
                           'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko)',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12',
                           'Opera/9.27 (Windows NT 5.2; U; zh-cn)',
                           'Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0',
                           'Opera/8.0 (Macintosh; PPC Mac OS X; U; en)',
                           'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6',
                           'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)',
                           'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1 ',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0 ',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER',
                           'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)',
                           'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11']
        header = {
            'Connection': head_connection[0],
            'Accept': head_accept[0],
            'Accept-Language': head_accept_language[1],
            'User-Agent': head_user_agent[random.randrange(0, len(head_user_agent))]
        }
        return header
    def getDataById(self, queryId):
        # If this asin is already in the database, return without doing anything
        if self.db.is_exists_asin(queryId):
            return
        req = urllib.request.Request(url="https://www.amazon.com/dp/" + str(queryId), headers=self.randHeader())
        webpage = urllib.request.urlopen(req)
        html = webpage.read()
        soup = BeautifulSoup(html, 'html.parser')
        content = soup.find_all("span", id="asTitle")
        # Some ASINs do not have this element, so check before reading it
        if len(content):
            # Element found
            state = content[0].string
        else:
            # Empty list: the element was not located
            state = "other"
        print(queryId)
        print(state)
        self.db.save([queryId, state])
class ThreadCrawl(threading.Thread):  # ThreadCrawl inherits from threading.Thread
    def __init__(self, queue):  # queue is an attribute specific to this subclass
        FORMAT = time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime()) + "[AmazonSpider]-----%(message)s------"
        logging.basicConfig(level=logging.INFO, format=FORMAT)
        threading.Thread.__init__(self)
        self.queue = queue
        self.spider = AmazonSpider()  # each worker thread holds its own spider instance

    def run(self):
        while True:
            success = True
            item = self.queue.get()  # get() removes and returns one item from the head of the queue
            try:
                self.spider.getDataById(item)  # process this item with the spider
            except:
                # print("failed")
                success = False
            if not success:
                self.queue.put(item)  # put the item back so it will be retried
            logging.info("now queue size is: %d" % self.queue.qsize())  # qsize() returns the current queue size
            self.queue.task_done()  # signal that this item has been processed
class AmazonSpiderJob():
    def __init__(self, size, qs):
        self.size = size  # number of worker threads
        self.qs = qs      # iterable of ASINs to crawl

    def work(self):
        toSpiderQueue = Queue()  # create a Queue object
        for q in self.qs:
            toSpiderQueue.put(q)  # put() inserts an item at the tail of the queue
        for i in range(self.size):
            t = ThreadCrawl(toSpiderQueue)  # every worker thread shares the same queue
            t.daemon = True
            t.start()
        toSpiderQueue.join()  # block until every item in the queue has been processed
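Before wiring in the Excel file in the next step, the job class can be smoke-tested with a small hard-coded list. This is a minimal sketch; the two ASIN values are hypothetical placeholders and the thread count of 2 is arbitrary.
from amazon_s import AmazonSpiderJob

test_asins = ["B00TEST123", "B00OTHER456"]  # hypothetical ASINs
job = AmazonSpiderJob(2, test_asins)  # 2 worker threads
job.work()  # blocks until the queue has been drained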
3. Main program
from amazon_s import AmazonSpiderJob  # import the job class from its module
import pymysql
import pandas as pd
from local_data import Database

if __name__ == '__main__':
    # On the first run the old table must be dropped and a new one created;
    # comment these lines out when the program is restarted later.
    # ----------------------
    db = Database()
    db.dropTables()
    db.createTables()
    # ---------------------------
    df = pd.read_excel("ASIN检查_viogico_1108.xlsx")
    # print(df.info())
    qs = df["asin1"].values
    print(qs)
    print(len(qs))
    amazonJob = AmazonSpiderJob(8, qs)
    amazonJob.work()
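One practical detail: if the asin1 column of the spreadsheet contains blank cells, df["asin1"].values will include NaN entries, which produce meaningless request URLs and can trip up the insert. A hedged variant of the reading step that filters them out might look like this (the file name and column name are taken from the code above):
import pandas as pd
from amazon_s import AmazonSpiderJob

df = pd.read_excel("ASIN检查_viogico_1108.xlsx")
# Drop empty cells and force plain strings before queuing the ASINs
qs = df["asin1"].dropna().astype(str).tolist()
AmazonSpiderJob(8, qs).work()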
Hopefully this article is helpful to readers working on Python programming.
Source: https://blog.csdn.net/zn505119020/article/details/78590416
Tags: Python, multithreading, database