

The Python Programming Language

Python is an object-oriented, interpreted programming language created by Guido van Rossum in late 1989, with its first public release in 1991. Its syntax is concise and clear, and it comes with a rich and powerful set of libraries. It is often nicknamed a "glue language" because it can easily link together modules written in other languages, especially C/C++.


This article presents example code for a multithreaded scraper, written in Python 2, that downloads the contents of a Tianya forum thread using the re, urllib, and threading modules. Set url to the first page of the Tianya thread you want to scrape and file_name to the name of the output file; interested readers can use it as a reference.
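The script relies on Tianya's page URL scheme: page N of a thread lives at a URL ending in -N.shtml, so stripping the trailing '1.shtml' (seven characters) from the first-page URL leaves a prefix from which every page URL can be built. That is all the url[:-7] expression in the listing below does, for example:

url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
prefix = url[:-7]                    # drops the trailing '1.shtml'
page_3 = '%s%s.shtml' % (prefix, 3)  # 'http://bbs.tianya.cn/post-16-996521-3.shtml'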

 


The complete script, down_tianya.py, is as follows:

#coding:utf-8

 

import urllib
import re
import threading
import os

class Down_Tianya(threading.Thread):
    """Download thread pages in parallel, one worker per page."""
    def __init__(self, url, num, dt):
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downloading from %s' % self.url
        self.down_text()

    def down_text(self):
        """Scrape each post from the page at self.url and store the results
        in the shared dict, keyed by page number."""
        html_content = urllib.urlopen(self.url).read()
        # Capture each post's timestamp and body; "时间" means "time" in the page markup.
        text_pattern = re.compile(r'<div class="atl-item".*?<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join

 



def page(url):
    """Scrape the total page count from the first page of the thread."""
    html_page = urllib.urlopen(url).read()
    # The last page number appears just before the "下页" ("next page") link.
    page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        page_num = int(page_result.group(1))
        return page_num

 

def write_text(dt, fn):
    """Write the dict's contents to a text file in page order; each value
    is the list of posts scraped from one page."""
    tx_file = open(fn, 'w+')
    pn = len(dt)
    for i in range(1, pn+1):
        tx_list = dt[i]
        for tx in tx_list:
            # Turn <br> tags back into line breaks and drop &nbsp; entities.
            tx = tx.replace('<br>', '\r\n').replace('<br />', '\r\n').replace('&nbsp;', '')
            tx_file.write(tx.strip()+'\r\n'*4)
    tx_file.close()


def main():
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}

    print 'page num is : %s' % my_page

    threads = []

    # Build the URL for each page and start one download thread per page.
    for num in range(1, my_page+1):
        myurl = '%s%s.shtml' % (url[:-7], num)
        downlist = Down_Tianya(myurl, num, my_dict)
        downlist.start()
        threads.append(downlist)

    # Wait until every download has finished before writing the file.
    for t in threads:
        t.join()

    write_text(my_dict, file_name)

    print 'All downloads finished. File saved in directory: %s' % os.getcwd()

if __name__ == '__main__':
    main()
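
The listing above targets Python 2 (print statements, urllib.urlopen). For readers on Python 3, here is a minimal sketch of the same approach (assuming Tianya's page markup and the regular expression above still apply, and that the pages decode as UTF-8), using urllib.request and a concurrent.futures thread pool in place of the hand-rolled Thread subclass:

import re
import urllib.request
from concurrent.futures import ThreadPoolExecutor

# Same post-matching pattern as the Python 2 listing above.
TEXT_PATTERN = re.compile(r'<div class="atl-item".*?<span>时间:(.*?)</span>.*?'
                          r'<!-- <div class="host-ico">楼主</div> -->.*?'
                          r'<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)

def down_text(url):
    """Fetch one page and return its scraped posts as joined strings."""
    # Assumes UTF-8; adjust the decode if the site serves another charset.
    html = urllib.request.urlopen(url).read().decode('utf-8', 'replace')
    return ['\r\n\r\n\r\n\r\n'.join(item) for item in TEXT_PATTERN.findall(html)]

def main():
    first_page = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    page_count = 5  # the full script scrapes this from the first page's pager
    urls = ['%s%s.shtml' % (first_page[:-7], n) for n in range(1, page_count + 1)]
    with ThreadPoolExecutor(max_workers=8) as pool:
        for posts in pool.map(down_text, urls):
            for tx in posts:
                print(tx.strip())

if __name__ == '__main__':
    main()

Because pool.map returns results in submission order, the page-number dictionary and the final join loop of the Python 2 version are not needed here.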

 


