Scraping RenRen Dictionary Word Data with Python Multiprocessing


A RenRen Dictionary (人人词典) data crawler built on Python requests, targeting http://www.91dict.com.

Scraped content: the word, its parts of speech and translations, its pronunciation, example-sentence stills, example sentences with translations, and example-sentence audio.
Total data: 53,189 words; the example-sentence audio and image files come to roughly 10 GB. With 20 Mbps of bandwidth the whole crawl finishes in under an hour, at least it did in my test...
Word pronunciation audio is left for you to add yourself; a sketch of one way follows.
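
A minimal sketch of what that could look like. The XPath for the audio element is my assumption, not verified against the live page, so treat it as a starting point rather than the site's actual layout:

#!/usr/bin/env python3
# Hypothetical sketch: fetch one word's pronunciation audio from its 91dict page.
# The XPath below is an ASSUMPTION about where the <audio> element lives;
# check the real HTML before relying on it.
import requests
from lxml import etree

def download_word_audio(word, path):
    res = requests.get('http://www.91dict.com/words?w=' + word)
    res.encoding = 'utf-8'
    data = etree.HTML(res.text)
    srcs = data.xpath("//*[@class='vos']//audio/@src")  # assumed location
    if not srcs:
        return False
    with open(path, 'wb') as f:
        f.write(requests.get(srcs[0]).content)
    return True

# e.g. download_word_audio('sir', './audio/sir.mp3')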

Scraped content 1: [screenshot]

Scraped content 2: [screenshot]

Example-sentence audio: [screenshot]

Example-sentence stills: [screenshot]

Python version

Python 3+; Python 3.6 recommended.

requirements.txt

requests==2.21.0
lxml==4.3.3

How to run

After cloning, run run.bat on Windows or run.sh on Linux.

Directory structure

|---------------------------------------------------------------------------
|--audio              word audio files; only a sample is kept in the repo, for reference
|--pic                word image files; only a sample is kept in the repo, for reference
|--words              the split raw word data, 5,000 words per file; the crawler spawns one process per file
|--result_demo        a demo of the resulting word data, not the complete set
|--allWords.json      all the words, one per line, 53,189 in total; the crawler works from this file (a few illustrative lines below)
|--combain.py         merges the final results, i.e. the .json files under the words directory
|--requirements.txt   Python dependencies
|--run.bat            Windows launch script
|--run.sh             Linux launch script
|--scrapy.py          the crawler script
|--split.py           splits the raw word data, i.e. the words in allWords.json
|---------------------------------------------------------------------------
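
Note that allWords.json, despite its extension, is plain text with one word per line, as described above; its first lines look something like this (illustrative words, not the file's actual head):

abandon
ability
able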

 

Core code

split.py (data splitting)

            
#!/usr/bin/env python3
def read_file(file_name):
    # yield allWords.json line by line
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            yield line


def save(file_name, data):
    # append one line to the target chunk file
    with open(file_name, 'a', encoding='utf-8') as f:
        f.write(data)


# split allWords.json into chunks of 5,000 words under ./words/
i, j = 0, 0
for line in read_file('allWords.json'):
    if i % 5000 == 0:
        j += 1
        print('starting chunk %d at line %d' % (j, i))
    save('./words/' + str(j) + '.txt', line)
    i += 1
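
With 53,189 words at 5,000 per chunk, this produces eleven files, words/1.txt through words/11.txt, which is why scrapy.py and combain.py both iterate over range(1, 12). A quick sanity check after splitting (a sketch, assuming the default layout):

import math
import os

print(math.ceil(53189 / 5000))     # 11 chunks expected
print(len(os.listdir('./words')))  # should report 11 .txt files right after splitting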
            
          

scrapy.py (data scraping)

            
#!/usr/bin/env python3
import requests
import json
import os
import uuid
from lxml import etree
from multiprocessing import Process


class ScrapyProcess(Process):

    def __init__(self, file_name):
        super(ScrapyProcess, self).__init__()
        self.file_name = file_name

    def read_file(self):
        # yield one word per line, stripping the trailing newline
        with open(self.file_name + '.txt', 'r', encoding='utf-8') as f:
            for line in f:
                yield line.rstrip('\n')

    def download_file(self, url, path):
        # download a binary resource (image or audio) to the given path
        res = requests.get(url)
        with open(path, 'wb') as f:
            f.write(res.content)

    def connect_file(self, file_name1, file_name2, file_name3):
        # concatenate two audio files into a third, then remove the parts
        with open(file_name3, 'wb') as out:
            with open(file_name1, 'rb') as f1:
                out.write(f1.read())
            with open(file_name2, 'rb') as f2:
                out.write(f2.read())
        os.remove(file_name1)
        os.remove(file_name2)

    def is_in(self, key, dict_list):
        # whether any dict in dict_list already uses this part of speech as a key
        for item in dict_list:
            if key in item:
                return True
        return False

    def scrapy(self, word):
        word_info = {}
        url = 'http://www.91dict.com/words?w=' + word
        res = requests.get(url)
        res.encoding = 'utf-8'
        data = etree.HTML(res.text)
        if data.xpath('/html/body/div[2]/section[2]/div/div/div/div[1]/div[1]/p/text()'):

            # the word itself
            word_info['word'] = data.xpath(
                '/html/body/div[2]/section[2]/div/div/div/div[1]/div[1]/p/text()')[0]
            # empty /.../ placeholders in case the page lists no phonetics
            word_info['am_phonetic'] = '//'
            word_info['en_phonetic'] = '//'
            # UK phonetic sits in span[1], US phonetic in span[2]; normalise [...] to /.../
            if list(filter(lambda x: x != '\n', data.xpath("//*[@class='vos']/span[1]/text()"))):
                word_info['en_phonetic'] = list(filter(lambda x: x != '\n', data.xpath(
                    "//*[@class='vos']/span[1]/text()")))[0].replace('\n', '')[1:].replace('[', "/").replace(']', '/')
            if list(filter(lambda x: x != '\n', data.xpath("//*[@class='vos']/span[2]/text()"))):
                word_info['am_phonetic'] = list(filter(lambda x: x != '\n', data.xpath(
                    "//*[@class='vos']/span[2]/text()")))[0].replace('\n', '')[1:].replace('[', "/").replace(']', '/')
            # parts of speech and translations
            trans = []
            for item in filter(lambda x: x != '', map(lambda x: x.replace('\n', ''),
                                                      data.xpath("//*[@class='listBox']/text()"))):
                if len(item.split('. ')) == 1:
                    trans.append({'': item.split('. ')[0]})
                elif len(item.split('. ')) == 2 and not item.startswith('=') and not self.is_in(item.split('. ')[0], trans):
                    trans.append({item.split('. ')[0]: item.split('. ')[1]})
            word_info['tran'] = trans

            # example sentences
            example = []
            example_len = len(data.xpath(
                "//*[@class='flexslider flexslider_2']/ul/li/div[@class='imgMainbox']"))
            # sentence nodes
            sens = data.xpath("//*[@class='mBottom']")
            # sentence translations
            sen_trans = data.xpath("//*[@class='mFoot']/text()")
            # sources ("from <title> ...")
            origins = list(filter(lambda x: x != '\n', data.xpath(
                "//*[@class='mTop']/text()")))
            # follow-up sentences and their translations
            next_sens = data.xpath(
                "//*[@class='mTextend']/div[2]/div[2]/p[1]/text()")
            next_sen_trans = data.xpath(
                "//*[@class='mTextend']/div[2]/div[2]/p[2]/text()")
            pic_urls = data.xpath(
                "//*[@class='flexslider flexslider_2']/ul/li/div[@class='imgMainbox']/img/@src")
            pron_urls = data.xpath(
                "//*[@class='flexslider flexslider_2']/ul/li/div[@class='imgMainbox']/div/div/audio/@src")
            next_pron_urls = data.xpath("//*[@class='viewdetail']/@href")
            for i in range(example_len):
                # serialise the node and slice off the wrapping <div class="mBottom">...</div>
                sen = etree.tostring(
                    sens[i], encoding='utf-8')[22:-7].decode('utf-8')
                sen_tran = sen_trans[i][1:]
                # still image and audio paths for this example
                pic_url = './pic/%s-%d.jpg' % (word_info['word'], i)
                pron_url = './audio/%s-%d.mp3' % (word_info['word'], i)
                self.download_file(pic_urls[i], pic_url)
                # if the sentence is unfinished, splice on the follow-up sentence
                # and merge the two audio clips into one file
                if not sen.endswith(('.', ';', '?', '!')):
                    if sen[-1] != ',':
                        sen += ','
                    sen_tran += ','
                    if i < len(next_sens) and i < len(next_sen_trans):
                        # follow-up sentence text
                        sen += next_sens[i]
                        # follow-up translation
                        sen_tran += next_sen_trans[i]
                        # first audio clip
                        pron_url_1 = './audio/%s-%d-1.mp3' % (
                            word_info['word'], i)
                        # second audio clip
                        pron_url_2 = './audio/%s-%d-2.mp3' % (
                            word_info['word'], i)
                        temp = requests.get(
                            'http://www.91dict.com' + next_pron_urls[i]).text
                        temp_data = etree.HTML(temp)
                        self.download_file(pron_urls[i], pron_url_1)
                        # find the follow-up sentence on the detail page and grab its audio
                        for li in temp_data.xpath("//*[@class='item']/li"):
                            if li.xpath("./div[@class='mBottom']/text()")[0].replace('\n', '') == next_sens[i]:
                                self.download_file(
                                    li.xpath("./div[@class='mTop']/audio/@src")[0], pron_url_2)
                                break
                        self.connect_file(pron_url_1, pron_url_2, pron_url)
                    else:
                        # no follow-up available: download just this clip
                        self.download_file(pron_urls[i], pron_url)
                else:
                    # complete sentence: download its audio directly
                    self.download_file(pron_urls[i], pron_url)
                example.append({
                    'origin': origins[i][1:-1],
                    'sen': sen,
                    'sen_tran': sen_tran,
                    'pic_url': pic_url,
                    'pron_url': pron_url
                })
            word_info['example'] = example
            return word_info

    def main(self):
        # scrape every word in this chunk and append its data to the result file
        for word in self.read_file():
            print(word)
            self.save(self.scrapy(word))

    def save(self, word_info):
        # append this word's JSON object, terminated by ',\n'
        with open(self.file_name + '.json', 'a', encoding='utf-8') as f:
            if word_info:
                json.dump(word_info, fp=f, indent=4, ensure_ascii=False)
                f.write(',\n')

    def run(self):
        self.main()


if __name__ == "__main__":
    # one worker process per chunk file: ./words/1.txt ... ./words/11.txt
    processes = []
    for i in range(1, 12):
        p = ScrapyProcess('./words/' + str(i))
        # start the child process
        p.start()
        processes.append(p)
    # wait for every worker to finish
    for p in processes:
        p.join()
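
One fragility worth noting: every requests.get in the class runs with no timeout, so a single stalled connection can hang a worker indefinitely. A small wrapper that the class could use in place of bare requests.get is sketched below; it is my addition, not part of the repo:

import time
import requests

def get_with_retry(url, tries=3, timeout=10):
    # GET with a timeout and simple exponential backoff between attempts
    for attempt in range(tries):
        try:
            res = requests.get(url, timeout=timeout)
            res.raise_for_status()
            return res
        except requests.RequestException:
            if attempt == tries - 1:
                raise
            time.sleep(2 ** attempt)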
            
          

combain.py (data merging)

            
#!/usr/bin/env python3
def read_file(file_name):
    # yield the per-chunk result file line by line
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            yield line


def save(file_name, data):
    # append a line to the merged output
    with open(file_name, 'a', encoding='utf-8') as f:
        f.write(data)


# merge the eleven per-chunk .json files into a single file
for i in range(1, 12):
    print('merging chunk ' + str(i))
    for line in read_file('./words/' + str(i) + '.json'):
        save('单词数据.json', line)
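
Because scrapy.py writes each word object followed by ',\n', the merged 单词数据.json is a comma-separated sequence of objects rather than one valid JSON document. If you want a loadable JSON array instead, a minimal alternative merge could look like this (my sketch, assuming the same eleven chunk files; the output name allData.json is hypothetical):

#!/usr/bin/env python3
# Hypothetical variant of combain.py that emits one valid JSON array.
import json

words = []
for i in range(1, 12):
    with open('./words/%d.json' % i, encoding='utf-8') as f:
        text = f.read().rstrip().rstrip(',')        # drop the trailing ',\n'
        words.extend(json.loads('[' + text + ']'))  # objects are comma-separated

with open('allData.json', 'w', encoding='utf-8') as f:  # hypothetical output name
    json.dump(words, f, indent=4, ensure_ascii=False)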
            
          

Word data result demo

            
{
    // the word
    "word": "sir",
    // US phonetic
    "am_phonetic": "/sɝ/",
    // UK phonetic
    "en_phonetic": "/sɜː/",
    // parts of speech and translations
    "tran": [
        {
            "n": "先生;(用于姓名前)爵士;阁下;(中小学生对男教师的称呼)先生;老师"
        }
    ],
    // example sentences
    "example": [
        {
            // source of the example sentence
            "origin": "来自《一位年轻医生的笔记 第1季 第2集》",
            // the sentence; if it does not end in . ; ? or !, a comma and the follow-up sentence are appended
            "sen": "It was me, sir and no one else, sir.",
            // its translation, spliced the same way
            "sen_tran": "我一个人喝掉了 医生",
            // still image for the sentence
            "pic_url": "./pic/sir-0.jpg",
            // audio file; for spliced sentences the two clips are merged into one file
            "pron_url": "./audio/sir-0.mp3"
        },
        {
            "origin": "来自《拆弹部队》",
            "sen": "No, sir, sir, that's sergeant James. He's right here.",
            "sen_tran": "不 长官 是詹姆斯中士 他就在那里",
            "pic_url": "./pic/sir-1.jpg",
            "pron_url": "./audio/sir-1.mp3"
        },
        {
            "origin": "来自《雷斯特雷波》",
            "sen": "Sir. How you doing, sir? Good to see you again.",
            "sen_tran": "长官 还好吗 很高兴再见到您",
            "pic_url": "./pic/sir-2.jpg",
            "pron_url": "./audio/sir-2.mp3"
        },
        {
            "origin": "来自《太空堡垒卡拉狄加 第4季 第12集》",
            "sen": "Yes, sir. I'm sorry, sir, but what can I do?",
            "sen_tran": "是 长官 我很抱歉 可我能怎么办?",
            "pic_url": "./pic/sir-3.jpg",
            "pron_url": "./audio/sir-3.mp3"
        },
        {
            "origin": "来自《太空堡垒卡拉狄加 第2季 第12集》",
            "sen": "Don't worry, sir. I'll take it real slow, sir.",
            "sen_tran": "别担心 长官 我们会慢慢来的 长官!",
            "pic_url": "./pic/sir-4.jpg",
            "pron_url": "./audio/sir-4.mp3"
        },
        {
            "origin": "来自《耶鲁大学开放课程:欧洲文明》",
            "sen": "And he replied, Sir, I pedal so quickly,they'll never catch me.",
            "sen_tran": "他回答道 先生 我踩踏板很快,他们永远也追不上我",
            "pic_url": "./pic/sir-5.jpg",
            "pron_url": "./audio/sir-5.mp3"
        }
    ]
}

            
          

The code is rough, please bear with it. Git repo: https://github.com/RickyHal/91dict_scrapy

