A Python Web Crawler


Just a simple Python crawler.

I just wanted to crawl some pictures and have a look.
Source site: http://www.setuw.com
Written in Python; it uses BeautifulSoup, threadpool and a few other libraries, which you will need to install yourself.
Environment: Python 3; tested on Windows 10 and on a Raspberry Pi.

Website element structure


[Figure 1: screenshot of the website's element structure]
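Since the screenshot is not reproduced here: each gallery on a category page is an a element with class "a1" carrying the gallery's href and title, and each gallery page holds the full-size image URLs inside the datas attribute of its img tags. Below is a minimal sketch of grabbing the gallery links; the selector is taken from the full script further down, and the page structure itself is the site's and may change at any time.

# Minimal sketch, assuming the page structure described above.
import urllib.request
from bs4 import BeautifulSoup

url = "http://www.setuw.com/tag/rosi/"
req = urllib.request.Request(url=url, headers={"User-Agent": "Mozilla/5.0"})
html = urllib.request.urlopen(req).read().decode("utf-8", "ignore")
soup = BeautifulSoup(html, "html.parser")

# Every gallery entry is an <a class="a1"> element with the gallery URL and title.
for link in soup.find_all("a", attrs={"class": "a1"}):
    print(link.get("title"), link.get("href"))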

Code

            
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
import urllib.request
import _thread
import json
import threadpool  
from time import sleep
from bs4 import BeautifulSoup 
import os
import random


maxThreadCount = 8

available_thread = 8

baseDomain="http://www.setuw.com"
intrance = "http://www.setuw.com/tag/rosi/"

#Site paths corresponding to each category
tags = [ "/tag/rosi/", "/tag/tuigirl/" , "/tag/ugirls/" ,
"/tag/xiuren/" , "/tag/disi/" , "/tag/dongman/" , "/tag/xinggan/" , 
"/tag/qingchun/" , "/tag/youhuo/" , "/tag/mote/" , "/tag/chemo/" ,
"/tag/tiyu/" , "/tag/zuqiubaobei/" , "/meinv/liuyan/"
]

types = ["ROSI", "推女郎", "尤果", "秀人",
"DISI", "动漫", "性感", "清纯", "诱惑", "模特", "车模", "体育", "足球", "柳岩"]

typeSize = len(types)
path =  ""
header = {
    "User-Agent":'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
 
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive'
}
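# Note: the browser-like headers above are sent with every request below;
# many sites reject bare urllib requests that carry no User-Agent.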



def Download(argv1):
    url = argv1.split("#")[0]
    title = argv1.split("#")[1]
    name = argv1.split("#")[2]
    #print("URl is " , url, " , title is " , title , " , name is "  , name)
    print("Download processing:" , argv1.split("#")[3])
    apath = path+"/" + title  + "/"
    #print(apath)
    if not os.path.exists(apath): # create the gallery directory if it does not exist yet
        os.makedirs(apath)
    urllib.request.urlretrieve( url, '{0}{1}.jpg'.format(apath, name)) # download the image and save it locally
    return
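# Note: urlretrieve is urllib's legacy download helper; it writes the target file
# directly (overwriting any existing copy), so re-running the script re-downloads
# every image instead of skipping ones that are already on disk.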

    

def run(targetUrl,title):
    global available_thread
    print("downloading " + title)
    req = urllib.request.Request(url=targetUrl,headers=header)
    response = urllib.request.urlopen(req) # req is a Request object (URL plus headers), a richer form of a bare URL string
    html = response.read().decode('utf-8','ignore')
    soup = BeautifulSoup(html, 'html.parser')


    imgs = soup.find_all('img') 
    size = len(imgs)
    results = 1
    with ThreadPoolExecutor(maxThreadCount) as pool:
        for i in range(2,size-16):
            # Determined by inspecting the page: the <img> tags in this index range
            # are the gallery images; the ones outside it are site chrome (logo, ads).
            data =  imgs[i]["datas"]
            # the datas attribute holds several single-quoted fields; the second-to-last
            # field is the full-size image URL (inferred from the split below)
            all = data.split("'")
            '''
            Task argument, packed into one "#"-separated string:
            download URL # gallery title # image name # download progress
            '''
            argv ={ all[len(all) - 2] + "#" + title  + "#" + all[len(all) - 2].split(".")[1]+str(i) + "#" + str(i-1) + "/" + str(size-18) }
            results = pool.map(Download,(argv)) # map submits the task in argv to the thread pool
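            # Note on the map call above: argv is a one-element set, so each loop
            # iteration submits exactly one Download task; leaving the "with" block
            # waits for all submitted tasks to finish. Because the results iterator
            # is never consumed, exceptions raised inside Download are silently discarded.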
    print(title  , " downloaded successfully")
    return



if __name__ == '__main__':
    '''Choose the download path: enter "." to download into the current directory,
    or press Enter to use /home/hdd/picDl/ (my own hard-drive mount point; change it as needed).'''
    input1 = input("input a folder(. as ./ , none as /home/hdd/picDl/):")
    if input1==".":
        path = "./"
    elif input1=="":
        path = "/home/hdd/picDl/"
    else:
        path = input1
    print("Path seted to " + path)
    #Choose a download category. The list was copied manually from the top of the site, so it may go out of date.
    for i in range(0,len(types)):
        print("| " + str(i)+ " | " + types[i] + " | ")
    print("select a type to download , ")
    index = input(" or input nothing to download index page:")
    if index == "":
        intrance = intrance
    else:
        index1 = int(index)
        if 0 <= index1 < len(tags) :
            intrance = baseDomain + tags[index1]
        else:
            print("something wrong , setting download tartget as default")
            intrance = intrance
    print( intrance +  " is going to download.")
    '''
    Choose the number of download threads. Note that each thread downloads a single
    image, so this only makes images within a gallery download in parallel.
    '''
    maxThreadCount_ = input("input a number if you want to modify default thread number:")
    if maxThreadCount_ == "" :
        print("using default number:" , maxThreadCount)
    else :
        print("Modified number to:" , maxThreadCount_)
        maxThreadCount = int(maxThreadCount_)
    req = urllib.request.Request(url=intrance,headers=header)
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8','ignore')
    #decode the response to get the HTML source of this page
    soup = BeautifulSoup(html, 'html.parser')
    Divs = soup.find_all('a',attrs={'class':'a1' }) 
    for div in Divs:
        if div["href"] is None:
            print("没有图集了")
#            return
        elif div['href'] is None or div['href']=="": #有链接,但是是 空链接
            print("图集没有连接")
#            return
        else:
            targetUrl= baseDomain + div['href']
            title=div["title"]
            print("正在下载套图:" + title)
            run(targetUrl,title)


            
          

Drawbacks (directions for improvement):

  1. Only the galleries on the first page of a category are downloaded (see the pagination sketch after this list).
  2. Download progress is not saved, so an interrupted run starts over from scratch.
  3. That's about it. Criticism and corrections are welcome.
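On the first point, here is a minimal, untested sketch of how pagination could be added. It reuses header, baseDomain and run() from the script above; the "index_N.html" URL pattern for later pages and the HTTPError stop condition are assumptions about the site, not verified behavior.

import urllib.error  # in addition to the script's existing imports

def crawl_category(base_url, max_pages=50):
    # Hypothetical helper: walk a category's pages until one has no gallery links.
    for page_no in range(1, max_pages + 1):
        # Assumed URL pattern for page 2 and beyond; check the real site before relying on it.
        page_url = base_url if page_no == 1 else base_url + "index_" + str(page_no) + ".html"
        req = urllib.request.Request(url=page_url, headers=header)
        try:
            html = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
        except urllib.error.HTTPError:
            break  # the page does not exist: assume we walked past the last one
        soup = BeautifulSoup(html, 'html.parser')
        links = soup.find_all('a', attrs={'class': 'a1'})
        if not links:
            break  # no gallery links on this page: stop
        for a in links:
            if a.get('href'):
                run(baseDomain + a['href'], a.get('title', ''))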
