# -*- coding: utf-8 -*-
import time
from bs4 import BeautifulSoup
import urllib2
import pymongo
import re
import datetime
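# Polls http://www.ipython.me/qingbao/ every 30 seconds, parses each news item
# with BeautifulSoup and stores it in MongoDB. `soup` is a module-level global:
# it is assigned in the main loop and read by both get_data() and update().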
def update():
    datas = {}
    connection = pymongo.Connection('192.168.1.2', 27017)
    # connect to MongoDB
    db = connection.test_hq
    # create (or connect to) the test_hq database
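    # Note: pymongo.Connection is the legacy client class; it was removed in
    # pymongo 3.x in favour of pymongo.MongoClient, which takes the same host/port.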
    for i in soup.find_all("div", class_="item"):
        datas['_id'] = str(i.h2.a['href']).split('/')[-1].split('.')[0]
        # use the html page name as the _id
        datas['title'] = i.h2.get_text()
        # article title
        url2 = i.h2.a['href']
        # url of the full article behind the title
        html2 = urllib2.urlopen(url2)
        html_doc2 = html2.read()
        soup2 = BeautifulSoup(html_doc2)
        datas['content'] = soup2.find(attrs={"name": "description"})['content']
        # article content, taken from the meta description tag
        stock_name = []
        stock_id = []
        for name in re.findall(u"[\u4e00-\u9fa5]+", i.find(class_="stocks").get_text()):
            stock_name.append(name)
            # collect the names of the affected stocks; names and the matching
            # stock ids are stored as arrays, which mongo supports natively
        datas['stock_name'] = stock_name
        for id in re.findall(r"\d+", i.find(class_="stocks").get_text()):
            stock_id.append(id)
            # collect the ids of the affected stocks
        datas['stock_id'] = stock_id
        datas['update_time'] = datetime.datetime.strptime(
            re.search(r"\w+.*\w+", i.find(class_="fl date").span.get_text()).group(),
            '%Y-%m-%d %H:%M') - datetime.timedelta(hours=8)
        # publish time, shifted from local time (UTC+8) to UTC for mongo
        datas['onlooker'] = int(re.search(r"\d+", i.find(class_="icons ic-wg").get_text()).group())
        # onlooker (view) count
        db.test.save(datas)
        # save() upserts by _id, so re-crawling an existing item just overwrites it
def get_data():
    title = str(soup.h2.a['href']).split('/')[-1].split('.')[0]
    # the html page name of the newest item, used to decide whether there is an update
    # update.txt must already exist, otherwise the 'r' open below raises IOError
    with open('update.txt', 'r') as f:
        last_title = f.readline()
    if title == last_title:
        print 'currently no update', title
    else:
        with open('update.txt', 'w') as f:
            f.write(title)
        update()
if __name__ == '__main__':
    while True:
        url = 'http://www.ipython.me/qingbao/'
        html = urllib2.urlopen(url)
        html_doc = html.read()
        soup = BeautifulSoup(html_doc)
        get_data()
        time.sleep(30)
        # refresh every 30 seconds