This article shares working code for scraping Tmall product details and transaction records with Python, for your reference. The details are as follows.
1. Setting up the Python environment
This post uses Python 2.7.
Modules involved: spynner, scrapy, bs4, pymssql.
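If these are not installed yet, they can usually be obtained with pip (BeautifulSoup is published on PyPI as beautifulsoup4, and spynner additionally needs a working PyQt/QtWebKit). The snippet below is a minimal sketch, assuming the four modules are already installed, to confirm they import under Python 2.7:

# Minimal environment check (assumption: modules installed, e.g. via
# "pip install spynner scrapy beautifulsoup4 pymssql").
import sys
print sys.version  # should report 2.7.x

import spynner
import scrapy
import bs4
import pymssql
print "All required modules imported successfully."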
2. The Tmall data to collect
The script gathers the detail fields visible on the item page (promotional price, list price, title, postage, stock and so on), the product property list, and the transaction records (buyer, style, quantity, deal date and deal time).
3. Data scraping workflow
In outline: product URLs are read from the ProductURLs table, each item page is loaded with spynner, the detail fields are extracted with XPath, the description and deal-record tabs are clicked open, the transaction table is paged through, and everything is written back to the SQL Server database.
4. Source code
#coding:utf-8
import spynner
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import random
import pymssql

#---------------------- Connect to the database ----------------------#
server = "localhost"
user = "sa"
password = "123456"
conn = pymssql.connect(server, user, password, "TmallData")
if conn:
    print "DataBase connecting successfully!"
else:
    print "DataBase connecting error!"
cursor = conn.cursor()
#---------------------- Web page operation helpers ----------------------#
def py_click_element(browser, pos):
    # Click an element on the page.
    # pos example: 'a[href="#description"]'
    browser.click(pos)
    browser.wait(random.randint(3, 10))
    return browser

def py_click_xpath(browser, xpath):
    # Click the anchor whose href is found by the given XPath.
    xpath = xpath + '/@href'
    inner_href = Selector(text=browser.html).xpath(xpath).extract()
    pos = 'a[href="' + str(inner_href[0]) + '"]'
    browser = py_click_element(browser, pos)
    return browser

def py_webpage_load(browser, url):
    # Load a URL with a 60 s timeout, then wait for the page to settle.
    browser.load(url, load_timeout=60)
    browser.wait(10)
    return browser

def py_check_element(browser, xpath):
    # Look up an element by XPath; return True if it exists, otherwise False.
    if Selector(text=browser.html).xpath(xpath).extract() != []:
        return True
    else:
        return False

def py_extract_xpath(browser, xpath):
    # Return the first match of the XPath, or "none" if it is missing.
    if py_check_element(browser, xpath):
        return Selector(text=browser.html).xpath(xpath).extract()[0]
    else:
        return "none"

def py_extract_xpaths(browser, xpaths):
    # Extract several page fields in one pass.
    length = len(xpaths)
    results = [0] * length
    for i in range(length):
        results[i] = py_extract_xpath(browser, xpaths[i])
    return results
#---------------------- Database helper functions ----------------------#

#---------------------- Data extraction functions ----------------------#
def py_getDealReord(doc):
    # Parse the deal-record table HTML and return one
    # [buyer, style, quantity, date, time] row per <tr>.
    soup = BeautifulSoup(doc, 'lxml')
    tr = soup.find_all('tr')
    total_dealRecord = [([0] * 5) for i in range(len(tr))]
    i = -1
    for this_tr in tr:
        i = i + 1
        td_user = this_tr.find_all('td', attrs={'class': "cell-align-l buyer"})
        for this_td in td_user:
            total_dealRecord[i][0] = this_td.getText().strip(' ')
        # print username
        td_style = this_tr.find_all('td', attrs={'class': "cell-align-l style"})
        for this_td in td_style:
            total_dealRecord[i][1] = this_td.getText(',').strip(' ')
        # print style
        td_quantity = this_tr.find_all('td', attrs={'class': "quantity"})
        for this_td in td_quantity:
            total_dealRecord[i][2] = this_td.getText().strip(' ')
        # print quantity
        td_dealtime = this_tr.find_all('td', attrs={'class': "dealtime"})
        for this_td in td_dealtime:
            total_dealRecord[i][3] = this_td.find('p', attrs={'class': "date"}).getText()
            total_dealRecord[i][4] = this_td.find('p', attrs={'class': "time"}).getText()
    return total_dealRecord
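# Example (hypothetical values, for illustration only): a deal-record <tr> whose cells
# contain buyer "j***8", style "颜色分类: 黑色", quantity "1", date "2017-05-01" and
# time "10:25:38" would produce the entry
# ['j***8', '颜色分类: 黑色', '1', '2017-05-01', '10:25:38']
# in the list returned by py_getDealReord.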
#---------------------- Fetch all product links to crawl ----------------------#
cursor.execute("""
select * from ProductURLs where BrandName='NB'
""")

# Error log for pages that fail to load; opened in append mode so it can be written to.
file = open("H:\\Eclipse\\TmallCrawling\\HTMLParse\\errLog.txt", 'a')

InProductInfo = cursor.fetchall()
browser = spynner.Browser()
for temp_InProductInfo in InProductInfo:
    url = 'https:' + temp_InProductInfo[2]
    BrandName = temp_InProductInfo[0]
    ProductType = temp_InProductInfo[1]
    print BrandName, '\t', ProductType, '\t', url

    #url = 'https://detail.tmall.com/item.htm?...'
    try:
        browser = py_webpage_load(browser, url)
    except:
        print "Loading webpage failed."
        file.write(url)
        file.write('\n')
        continue
    # XPaths for the product detail fields to extract (promo price, list price, title, postage, stock, etc.).
    xpaths = ['//*[@id="J_PromoPrice"]/dd/div/span/text()',
              '//*[@id="J_StrPriceModBox"]/dd/span/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[1]/h1/text()',
              '//*[@id="J_PostageToggleCont"]/p/span/text()',
              '//*[@id="J_EmStock"]/text()',
              '//*[@id="J_CollectCount"]/text()',
              '//*[@id="J_ItemRates"]/div/span[2]/text()',
              '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()']
    out_ProductInfo = py_extract_xpaths(browser, xpaths)

    # Open the "description" tab and collect the product property list.
    browser = py_click_element(browser, 'a[href="#description"]')
    ProductProperty = py_extract_xpath(browser, '//*[@id="J_AttrUL"]')
    soup = BeautifulSoup(ProductProperty, 'lxml')
    li = soup.find_all('li')
    prop = ''
    for this_li in li:
        prop = prop + this_li.getText() + '\\'
    prop = prop[0:len(prop) - 1]
    out_ProductProperty = prop
    print out_ProductProperty

    cursor.execute("""
    Insert into py_ProductInfo values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
    """, (BrandName, ProductType, url, out_ProductInfo[2], out_ProductInfo[1],
          out_ProductInfo[0], out_ProductInfo[7], out_ProductInfo[1], out_ProductInfo[3],
          out_ProductInfo[4], out_ProductInfo[5], out_ProductProperty))
    conn.commit()
    # Switch to the deal-record tab and parse the first page of transactions.
    Deal_PageCount = 0
    browser = py_click_element(browser, 'a[href="#J_DealRecord"]')
    #browser.browse(True)
    DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
    out_DealRecord = py_getDealReord(DealRecord)
    for temp_DealRecord in out_DealRecord:
        if str(temp_DealRecord[4]) == '0':
            continue
        cursor.execute("""
        Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
        """, (url, temp_DealRecord[0], temp_DealRecord[1],
              temp_DealRecord[2], temp_DealRecord[3], temp_DealRecord[4]))
        conn.commit()
    Deal_PageCount = Deal_PageCount + 1
    print "Page ", Deal_PageCount
    for i in range(6):
        if (i == 0) or (i == 2):
            continue
        xpath = '//*[@id="J_showBuyerList"]/div/div/a[' + str(i) + ']'
        if py_check_element(browser, xpath):
            browser = py_click_xpath(browser, xpath)
            DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
            out_DealRecord = py_getDealReord(DealRecord)
            for temp_DealRecord in out_DealRecord:
                if str(temp_DealRecord[4]) == '0':
                    continue
                cursor.execute("""
                Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
                """, (url, temp_DealRecord[0], temp_DealRecord[1],
                      temp_DealRecord[2], temp_DealRecord[3], temp_DealRecord[4]))
                conn.commit()
            Deal_PageCount = Deal_PageCount + 1
            print "Page ", Deal_PageCount
    while py_check_element(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]'):
        browser = py_click_xpath(browser, '//*[@id="J_showBuyerList"]/div/div/a[6]')
        DealRecord = py_extract_xpath(browser, '//*[@id="J_showBuyerList"]/table/tbody')
        out_DealRecord = py_getDealReord(DealRecord)
        for temp_DealRecord in out_DealRecord:
            if str(temp_DealRecord[4]) == '0':
                continue
            cursor.execute("""
            Insert into DealRecord values(%s,%s,%s,%s,%s,%s)
            """, (url, temp_DealRecord[0], temp_DealRecord[1],
                  temp_DealRecord[2], temp_DealRecord[3], temp_DealRecord[4]))
            conn.commit()
        Deal_PageCount = Deal_PageCount + 1
        print "Page ", Deal_PageCount
That's all for this article. I hope it is helpful for your study, and thank you for your support.