Example: multi-threaded scraping of a Tianya forum post with Python

Date: 2021-03-18 11:56:42

This example uses re, urllib, and threading to download the content of a Tianya forum post with multiple threads. Set url to the first page of the post you want to scrape and file_name to the name of the output file, then save the script below as down_tianya.py.
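
For reference, the script derives every other page's address from that first-page URL by slicing off the trailing '1.shtml' and appending the page number; this happens in main() below (page_url and n here are just illustrative names):

url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
# url[:-7] drops the trailing '1.shtml', so page n becomes:
page_url = '%s%s.shtml' % (url[:-7], n)   # e.g. n = 3 -> .../post-16-996521-3.shtml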


#coding:utf-8

import urllib
import re
import threading
import os


class Down_Tianya(threading.Thread):
    """Worker thread that downloads one page of the post."""
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Fetch the page and store its posts in the shared dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        # Each post lives in a <div class="atl-item"> block; capture its timestamp and body.
        text_pattern = re.compile('<div class="atl-item".*?<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join


def page(url):
    """Read the first page and extract the total page count from the pager."""
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num


def write_text(page_dict, fn):
    """Write the pages to the output file in page order; each value is the list of posts for one page."""
    tx_file = open(fn, 'w+')
    pn = len(page_dict)
    for i in range(1, pn + 1):
        tx_list = page_dict[i]
        for tx in tx_list:
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
            tx_file.write(tx.strip() + '\r\n' * 4)
    tx_file.close()


def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}

    print 'page num is : %s' % my_page

    threads = []

    # Build one URL per page and download the pages concurrently.
    for num in range(1, my_page + 1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)

    # Wait for every download to finish before writing the file.
    for t in threads:
        t.join()

    write_text(my_dict, file_name)

    print 'All downloads finished. Output saved in directory: %s' % os.getcwd()


if __name__ == '__main__':
    main()
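
The script above targets Python 2 (urllib.urlopen and the print statement). Under Python 3 the fetch step would look roughly like the sketch below; this is only an outline, with fetch_page as a hypothetical helper name and utf-8 as an assumed page encoding (pages of that era may be served as gbk, so adjust the decode if needed), and with the rest of the script switched to print() accordingly:

# Python 3 sketch of the fetch step (fetch_page is a hypothetical name;
# the utf-8 decode below is an assumption -- check the page's actual charset)
import urllib.request

def fetch_page(url):
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode('utf-8', errors='replace')

The thread coordination itself carries over unchanged: each worker writes only to its own page-number key in the shared dict, and main() joins every thread before write_text reads the dict, so no extra locking is needed.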


Tags: python, multithreading, Tianya