I. Requests basics: see https://blog.csdn.net/pittpakk/article/details/81218566
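
All of the examples below share one fetch-and-check pattern: issue the request, call raise_for_status() so HTTP errors raise exceptions, and let apparent_encoding correct the text encoding. A minimal sketch of that pattern (the URL is a placeholder):

import requests

def get_html_text(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()              # raise HTTPError for 4xx/5xx responses
        r.encoding = r.apparent_encoding  # guess the real encoding from the content
        return r.text
    except requests.RequestException:
        return ""

print(get_html_text("https://www.example.com")[:200])
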
Requests examples:
1. Scraping a JD product page

import requests
url = "https://item.jd.com/35820996778.html"
try:
    r = requests.get(url)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    print(r.text[:1000])
except:
    print("爬取失败")

2. Scraping an Amazon product page (Amazon rejects the default python-requests User-Agent, so the request sends a browser-style header instead)

import requests
url = "https://www.amazon.cn/dp/B01N9EATAN/ref=lp_1536479071_1_1?s=apparel&ie=UTF8&qid=1581314721&sr=1-1"
try:
    kv = {'user-agent':'Mozilla/5.0'}
    r = requests.get(url,headers=kv)
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    print(r.text[1000:2000])
except:
    print("爬取失败")

3. Submitting a search keyword to Baidu

import requests
keyword = "Python"
try:
    kv = {'wd': keyword}   # 'wd' is the query parameter Baidu's search endpoint expects
    r = requests.get("http://www.baidu.com/s", params=kv)
    print(r.request.url)   # the final URL, with the keyword encoded into the query string
    r.raise_for_status()
    print(len(r.text))
except:
    print("爬取失败")

4. Scraping and saving an image from the web

import requests
import os
root = "D:\\users\\赵靖通\\桌面\\"   # the author's desktop; note the trailing separator
url = "http://tupian.baike.com/ipad/a2_36_48_19300001357258133412489354717_jpg.html"
path = root + url.split('/')[-1]    # file name taken from the last segment of the URL
try:
    if not os.path.exists(root):
        os.mkdir(root)
    if not os.path.exists(path):
        r = requests.get(url)
        with open(path, 'wb') as f:
            f.write(r.content)      # r.content is the raw response body as bytes
        print("File saved successfully")
    else:
        print("File already exists")
except:
    print("Scraping failed")

5. Automatic lookup of an IP address's registered location

import requests
url = "http://m.ip138.com/ip.asp?ip="
try:
    r = requests.get(url + '202.204.80.112')
    r.raise_for_status()
    r.encoding = r.apparent_encoding
    print(r.text[-500:])
except:
    print("Scraping failed")

II. BeautifulSoup
Key concepts: https://blog.csdn.net/weixin_34127717/article/details/90583410
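
As a warm-up for the example below, here is a minimal sketch of the BeautifulSoup calls it relies on, run against a made-up HTML fragment:

import bs4
from bs4 import BeautifulSoup

html = "<table><tbody><tr><td>1</td><td>Tsinghua</td><td>Beijing</td></tr></tbody></table>"
soup = BeautifulSoup(html, "html.parser")

for tr in soup.find('tbody').children:    # direct children of the first <tbody>
    if isinstance(tr, bs4.element.Tag):   # skip bare text nodes such as '\n'
        tds = tr('td')                    # shorthand for tr.find_all('td')
        print([td.string for td in tds])  # ['1', 'Tsinghua', 'Beijing']
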
Example: a targeted crawler for Chinese university rankings

import requests
import bs4
from bs4 import BeautifulSoup

def getHTMLText(url):
    try:
        r = requests.get(url,timeout = 30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""

def fillUnivList(ulist, html):
    soup = BeautifulSoup(html, "html.parser")
    for tr in soup.find('tbody').children:
        if isinstance(tr, bs4.element.Tag):   # skip text nodes such as '\n' between rows
            tds = tr('td')                    # shorthand for tr.find_all('td')
            ulist.append([tds[0].string, tds[1].string, tds[2].string])

def printUnivList(ulist, num):
    tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}"   # format field 3 supplies the fill character
    print(tplt.format("Rank", "University", "Region", chr(12288)))   # chr(12288) is the full-width CJK space, which keeps columns of Chinese text aligned
    for i in range(num):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], chr(12288)))

def main():
    uinfo = []
    url = 'http://www.zuihaodaxue.com/zuihaodaxuepaiming2019.html'
    html = getHTMLText(url)
    fillUnivList(uinfo, html)
    printUnivList(uinfo, 40)

main()

III. Regular expressions
re library basics: https://blog.csdn.net/qq_26591517/article/details/89333851
https://blog.csdn.net/weixin_40136018/article/details/81183504
https://www.cnblogs.com/mehome/p/9513492.html
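
As a warm-up for the crawler below, a minimal sketch of the two re.findall() patterns it depends on, run on a made-up fragment of the JSON that Taobao embeds in its result pages:

import re

text = '"view_price":"128.00","raw_title":"canvas backpack"'

print(re.findall(r'"view_price":"[\d.]*"', text))   # greedy: ['"view_price":"128.00"']
print(re.findall(r'"raw_title":".*?"', text))       # non-greedy: ['"raw_title":"canvas backpack"']
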
Application: a targeted crawler for Taobao product listings

import requests
import re
def getHTMLText(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ""
def parsePage(ilt, html):
    try:
        plt = re.findall(r'"view_price":"[\d.]*"', html)   # price fields in the page's embedded JSON
        tlt = re.findall(r'"raw_title":".*?"', html)       # title fields, matched non-greedily
        for i in range(len(plt)):
            price = eval(plt[i].split(':')[1])   # eval() strips the surrounding quotes
            title = eval(tlt[i].split(':')[1])
            ilt.append([price, title])
    except:
        print("")

def printGoodsList(ilt):
    tplt = "{:4}\t{:8}\t{:16}"
    print(tplt.format("No.", "Price", "Product name"))
    count = 0
    for g in ilt:
        count = count + 1
        print(tplt.format(count, g[0], g[1]))

def main():
    goods = "书包"   # search keyword ("schoolbag")
    depth = 2        # number of result pages to crawl
    start_url = 'https://s.taobao.com/search?q=' + goods
    infoList = []
    for i in range(depth):
        try:
            url = start_url + '&s=' + str(44*i)   # each result page holds 44 items
            html = getHTMLText(url)
            parsePage(infoList, html)
        except:
            continue
    printGoodsList(infoList)

main()

IV. Scrapy basics
https://blog.csdn.net/qq_41500222/article/details/82850582
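
The linked article covers the framework itself; as a minimal, hedged sketch of what a Scrapy spider looks like (quotes.toscrape.com is a public practice site, used purely for illustration; save this as quotes_spider.py and run scrapy runspider quotes_spider.py):

import scrapy

class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = ["http://quotes.toscrape.com/"]

    def parse(self, response):
        # CSS selectors pull each quote's text and author out of the page
        for quote in response.css("div.quote"):
            yield {
                "text": quote.css("span.text::text").get(),
                "author": quote.css("small.author::text").get(),
            }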