# -*- coding: utf-8 -*-
import urllib2
import re
import chardet
class Blog_Spider:

    def __init__(self):
        # How many result pages to fetch: the current page and the next one
        self.pages = 2
        # Read the search keyword, URL-encode it, and build the full URL
        print u"Enter a search term:"
        s = raw_input()
        s1 = "?q=" + urllib2.quote(s)
        self.myUrl = "http://so.csdn.net/so/search/s.do" + s1 + "&t=blog&o=&s="
    # Fetch one page of search results
    def GetPage(self):
        myUrl = self.myUrl
        user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12'
        headers = {'User-Agent': user_agent}
        request = urllib2.Request(myUrl, headers=headers)
        myResponse = urllib2.urlopen(request)
        myPage = myResponse.read()
        # Detect the page's character encoding first, then normalize to utf-8
        charset = chardet.detect(myPage)['encoding']
        if charset is None or charset.lower() != 'utf-8':
            # Non-utf-8 pages on older Chinese sites are usually gb2312/gbk
            myPage = myPage.decode('gb2312', 'ignore').encode('utf-8')
        unicodePage = myPage.decode('utf-8')
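        # chardet.detect returns a dict such as {'encoding': 'utf-8',
        # 'confidence': 0.99, ...}; normalizing everything to utf-8 here
        # means the regexes below only ever see a single encoding.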
        items = re.findall('<dl class=.*?>(.*?)</dl>', unicodePage, re.S)
        for eachitem in items:
            # Extract the title
            my_title = re.search('<dt><a href=.*? target=.*?>(.*?)</a></dt>', eachitem, re.S)
            my_title = my_title.group(1)
            my_title = my_title.replace("<em>", "").replace("</em>", "")
            # Extract the link to the original post
            my_url = re.search('<dd class="search-link"><a href=.*?target=.*?>(.*?)</a></dd>', eachitem, re.S)
            my_url = my_url.group(1)
            # Extract the author/date block
            my_author_time = re.search('<dd class="author-time">(.*?)</dd>', eachitem, re.S)
            my_author_time = my_author_time.group(1)
            my_author_time = my_author_time.replace(" ", "").replace(";", "")
            # ------ pull out the author
            my_author = re.search('<a href=.*?>(.*?)</a>', my_author_time, re.S)
            my_author = my_author.group(1)
            # ------ pull out the date
            my_time = re.search(r'\d{4}-\d{2}-\d{2}', my_author_time, re.S)
            my_time = my_time.group()
            # Extract the result summary
            my_content = re.search('<dd class="search-detail">(.*?)</dd>', eachitem, re.S)
            my_content = my_content.group(1)
            # Strip leftover HTML markup from the summary
            my_content = my_content.replace("<em>", "").replace("</em>", "")
            my_content = my_content.replace("'", "").replace('"', "")
            # Store one result's fields in a dictionary
            onePage = {'title': my_title, 'content': my_content,
                       'author': my_author, 'time': my_time, 'url': my_url}
            print u'Title:   ' + onePage['title']
            print u'Source:  ' + onePage['url']
            print u'Author:  ' + onePage['author']
            print u'Date:    ' + onePage['time']
            print u'Summary: ' + onePage['content']
            print "\n\n"
        # Gather the pagination block for the next page
        about_next = re.search(' <div class="csdn-pagination hide-set" >(.*?)</div>', unicodePage, re.S)
        about_next = about_next.group(1)
        # ------ total number of results
        total_num = re.search('<span class="text">(.*?)</span>', about_next, re.S)
        total_num = total_num.group(1)
        # print total_num
        # ------ find the last <a> block that carries a URL
        url_list = re.findall(r'<a href=.*?class=.*?>.*?</a>', about_next, re.S)
        url_m = url_list[-1]
        # ------ check that the last block really is the "next page" link and
        # not the currently active page; then strip the quotes around the href
        url = re.search('<a href=(.*?)class="btn btn-xs btn-default btn-next">.*?</a>', url_m, re.S)
        # print url
        if url is None:
            self.pages = 0
        else:
            url = url.group(1)
            url = url.replace('"', "")
            # Re-point self.myUrl at the next results page
            self.myUrl = "http://so.csdn.net/so/search/s.do" + url
            # print self.myUrl
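        # The btn-next anchor is expected to look roughly like this
        # (illustrative, reconstructed from the regex above, not captured
        # from a live page):
        #   <a href="?q=...&t=blog&o=&s=..." class="btn btn-xs btn-default btn-next">
        # so group(1) yields the quoted query string of the next page.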
    # Drive the fetch loop over self.pages result pages
    def LoadPage(self):
        while self.pages > 0:
            try:
                # Fetch the next page of results
                self.GetPage()
                self.pages = self.pages - 1
            except urllib2.URLError:
                print 'Could not connect to the server!'
                self.pages = 0
    def Start(self):
        # Load the result pages
        self.LoadPage()
# ----------- program entry point -----------
print u"""
Program:  拼博 Search Engine
Version:  0.1
Language: Python 2.7
--------------------------------------
"""
# print u'Press Enter to start:'
# raw_input()
myBlog = Blog_Spider()
myBlog.Start()
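# ---------------------------------------------------------------------
# A minimal, self-contained sketch of the two-level regex extraction that
# GetPage performs, run against an inline snippet instead of a live page.
# The snippet and the helper name _demo_extraction are illustrative only;
# they are not part of the spider above.
def _demo_extraction():
    sample = ('<dl class="search-list">'
              '<dt><a href="http://example.com/post" target="_blank">'
              'A <em>sample</em> title</a></dt>'
              '</dl>')
    # Outer pass: isolate each <dl> result block
    for block in re.findall('<dl class=.*?>(.*?)</dl>', sample, re.S):
        # Inner pass: pull the title text out of the block
        m = re.search('<dt><a href=.*? target=.*?>(.*?)</a></dt>', block, re.S)
        if m:
            print m.group(1).replace("<em>", "").replace("</em>", "")
# _demo_extraction()   # prints: A sample title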