爬虫生成dict后,想将其写入csv文件,却出错
使用jupyter notebook,window环境。
具体代码如下
import requests
from multiprocessing.dummy import Pool as ThreadPool
from lxml import etree
import sys
import time
import random
import csv
def spider(url):
header={
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
timeout=random.choice(range(31,50))
html = requests.get(url,header,timeout=timeout)
time.sleep(random.choice(range(8,16)))
selector = etree.HTML(html.text)
content_field = selector.xpath('//*[@class="inner"]/p[3]/p[2]/ul/li')
item ={}
for each in content_field:
g = each.xpath('a/p[1]/p[1]/h3/span/text()')
go = each.xpath('a/p[1]/p[2]/p/h3/text()')
h = each.xpath('a/p[1]/p[2]/p/p/text()[1]')
j= each.xpath('a/p[1]/p[1]/p/text()[2]')
ge = each.xpath('a/p[1]/p[2]/p/p/text()[3]')
x = each.xpath('a/p[1]/p[1]/p/text()[3]')
city = each.xpath('a/p[1]/p[1]/p/text()[1]')
gg = each.xpath('a/p[2]/span/text()')
item['city']="".join(city)
item['hangye']="".join(hangye)
item['guimo']="".join(guimo)
item['gongsi']="".join(gongsi)
item['gongzi']="".join(gongzi)
item['jingyan']="".join(jingyan)
item['xueli']="".join(xueli)
item['gongzuoneirong']="".join(gongzuoneirong)
fieldnames =['city','hangye','guimo','gongsi','gongzi','jingyan','xueli','gongzuoneirong']
with open('bj.csv','a',newline='',errors='ignore')as f:
f_csv=csv.DictWriter(f,fieldnames=fieldnames)
f_csv.writeheader()
f_csv.writerow(item)
if __name__ == '__main__':
pool = ThreadPool(4)
f=open('bj.csv','w')
page = []
for i in range(1,100):
newpage = 'https://www.zhipin.com/c101010100/h_101010100/?query=%E6%95%B0%E6%8D%AE%E8%BF%90%E8%90%A5&page='+str(i) + '&ka=page-' + str(i)
page.append(newpage)
results = pool.map(spider,page)
pool.close()
pool.join()
f.close()
运行上面代码,提示错误为
ValueError: too many values to unpack (expected 2)
通过查询原因是要将dict遍历,需要dict.items()的形式。但在上述代码中如何实现,一直没有理顺,求教各位
習慣沉默2017-05-18 10:51:20
不好意思哈,现在才有时间来回答你的问题,看到你根据我的建议把代码改过来了,下面我把改过的代码贴出来,我运行过,是没问题的
import requests
from multiprocessing.dummy import Pool
from lxml import etree
import time
import random
import csv
def spider(url):
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
timeout = random.choice(range(31, 50))
html = requests.get(url, headers=header, timeout=timeout)
time.sleep(random.choice(range(8, 16)))
selector = etree.HTML(html.text)
content_field = selector.xpath('//*[@class="inner"]/p[3]/p[2]/ul/li')
item = {}
for each in content_field:
g = each.xpath('a/p[1]/p[1]/h3/span/text()')
go = each.xpath('a/p[1]/p[2]/p/h3/text()')
h = each.xpath('a/p[1]/p[2]/p/p/text()[1]')
j = each.xpath('a/p[1]/p[1]/p/text()[2]')
ge = each.xpath('a/p[1]/p[2]/p/p/text()[3]')
x = each.xpath('a/p[1]/p[1]/p/text()[3]')
city = each.xpath('a/p[1]/p[1]/p/text()[1]')
gg = each.xpath('a/p[2]/span/text()')
item['city'] = "".join(city)
item['hangye'] = "".join(g)
item['guimo'] = "".join(go)
item['gongsi'] = "".join(h)
item['gongzi'] = "".join(j)
item['jingyan'] = "".join(ge)
item['xueli'] = "".join(x)
item['gongzuoneirong'] = "".join(gg)
fieldnames = ['city', 'hangye', 'guimo', 'gongsi', 'gongzi', 'jingyan', 'xueli', 'gongzuoneirong']
with open('bj.csv', 'a', newline='', errors='ignore')as f:
f_csv = csv.DictWriter(f, fieldnames=fieldnames)
f_csv.writeheader()
f_csv.writerow(item)
if __name__ == '__main__':
f = open('bj.csv', 'w')
page = []
for i in range(1, 100):
newpage = 'https://www.zhipin.com/c101010100/h_101010100/?query=%E6%95%B0%E6%8D%AE%E8%BF%90%E8%90%A5&page=' + str(
i) + '&ka=page-' + str(i)
page.append(newpage)
print(page)
pool = Pool(4)
results = pool.map(spider, page)
pool.close()
pool.join()
f.close()
这里主要是header,你原来是set
类型,我修改后是dict
类型
这里还需要给你一些建议
你的代码是放到ide还是文本编辑器中运行的?有的东西在ide下明显会报错啊
建议新手从开始学的时候就遵守PEP8规范,别养成了坏习惯,你看看你的命名
过去多啦不再A梦2017-05-18 10:51:20
item = {'a':1, 'b':2}
fieldnames = ['a', 'b']
with open('test.csv', 'a') as f:
f_csv = DictWriter(f, fieldnames=fieldnames)
f_csv.writeheader()
f_csv.writerow(item)
我这样写并没报错喔
writerow就是直接接收dict的吧,你这个问题,我感觉是因为item的key与你表头不对应