本文主要為大家帶來一篇Node.js+jade抓取部落格所有文章產生靜態html檔案的實例。小編覺得蠻不錯的,現在就分享給大家,也給大家做個參考。一起跟著小編過來看看吧,希望能幫助大家。
專案結構:
好了,接下來,我們就來講解下,這篇文章主要實現的功能:
1,抓取文章,主要抓取文章的標題,內容,超鏈接,文章id(用於生成靜態html文件)
2,根據jade模板產生html檔
一、抓取文章如何實現?
非常簡單,跟上文抓取文章清單的實作差不多
// Fetch one article page, render it through the jade layout, and write it
// out as ./html/<id>.html. When the write completes (success or not), the
// next URL is popped off the shared aUrl queue so articles are crawled
// one at a time, in sequence.
//
// url - absolute URL of a single blog article.
// Side effects: writes a file under ./html/, consumes aUrl.
function crawlerArc( url ){
    http.get(url, function (res) {
        var html = '';
        res.on('data', function (chunk) {
            html += chunk;
        });
        res.on('end', function () {
            // filterArticle extracts { id, title, href, body } from the raw page.
            var arcDetail = filterArticle( html );
            var str = jade.renderFile('./views/layout.jade', arcDetail );
            fs.writeFile( './html/' + arcDetail['id'] + '.html', str, function( err ){
                if ( err ) {
                    // Fix: the original logged 'success' even when the write failed.
                    console.log( err );
                } else {
                    console.log( 'success:' + url );
                }
                // Keep draining the queue regardless of this article's outcome.
                if ( aUrl.length ) crawlerArc( aUrl.shift() );
            } );
        });
    }).on('error', function ( err ) {
        // Fix: without this handler a single network error crashes the
        // process AND silently stops the crawl chain.
        console.log( 'request failed: ' + url, err );
        if ( aUrl.length ) crawlerArc( aUrl.shift() );
    });
}
參數url就是文章的位址,把文章的內容抓取完畢之後,呼叫filterArticle ( html ) 過濾出需要的文章資訊(id, 標題,超鏈接,內容),然後用jade的renderFile這個api,實現模板內容的替換,
模板內容替換完之後,肯定就需要生成html檔了, 所以用writeFile寫入文件,寫入文件時候,用id當html檔名。這就是產生一篇靜態html檔的實現,
接下來就是循環產生靜態html檔了, 就是下面這行:
if ( aUrl.length ) crawlerArc( aUrl.shift() );
aUrl保存的是我的部落格所有文章的url, 每次採集完一篇文章之後,就把目前文章的url刪除,讓下一篇文章的url出來,繼續採集
完整的實作程式碼server.js:
// server.js — crawl every article of the blog list pages, then render each
// article to a static HTML file via the jade layout template.
var fs = require( 'fs' );
var http = require( 'http' );
var cheerio = require( 'cheerio' );
var jade = require( 'jade' );

var aList = [];  // one entry per list page: an array of article summaries
var aUrl = [];   // flat queue of article URLs still to crawl

// Extract { id, title, href, body } from a single article page.
// Fix: the original did href.match(re)[1] unguarded — a page without the
// expected link would throw TypeError and kill the process.
function filterArticle( html ) {
    var $ = cheerio.load( html );
    var title = $( "#cb_post_title_url" ).text();
    var href = $( "#cb_post_title_url" ).attr( "href" );
    var m = href ? href.match( /\/(\d+)\.html/ ) : null;
    var id = m ? m[1] : null;  // numeric article id, used as the output filename
    var body = $( "#cnblogs_post_body" ).html();
    return { id : id, title : title, href : href, body : body };
}

// Fetch one article, render it with ./views/layout.jade, and write
// ./html/<id>.html; then pop the next URL off aUrl and recurse so the
// articles are processed strictly one at a time.
function crawlerArc( url ){
    http.get(url, function (res) {
        var html = '';
        res.on('data', function (chunk) {
            html += chunk;
        });
        res.on('end', function () {
            var arcDetail = filterArticle( html );
            if ( arcDetail.id === null ) {
                // Page didn't look like an article — skip it, keep the chain alive.
                console.log( 'skip (no article id): ' + url );
                if ( aUrl.length ) crawlerArc( aUrl.shift() );
                return;
            }
            var str = jade.renderFile('./views/layout.jade', arcDetail );
            fs.writeFile( './html/' + arcDetail.id + '.html', str, function( err ){
                if ( err ) {
                    // Fix: original logged 'success' even on write failure.
                    console.log( err );
                } else {
                    console.log( 'success:' + url );
                }
                if ( aUrl.length ) crawlerArc( aUrl.shift() );
            } );
        });
    }).on('error', function ( err ) {
        // Fix: unhandled 'error' events on http.get crash the process.
        console.log( 'request failed: ' + url, err );
        if ( aUrl.length ) crawlerArc( aUrl.shift() );
    });
}

// Parse one list page into an array of { title, url, entry, listTime }.
function filterHtml( html ) {
    var $ = cheerio.load( html );
    var arcList = [];
    var aPost = $( "#content" ).find( ".post-list-item" );
    aPost.each(function () {
        var ele = $( this );
        var title = ele.find( "h2 a" ).text();
        var url = ele.find( "h2 a" ).attr( "href" );
        ele.find( ".c_b_p_desc a" ).remove();      // drop the "read more" link
        var entry = ele.find( ".c_b_p_desc" ).text();
        ele.find( "small a" ).remove();            // drop inline links in the meta line
        var listTime = ele.find( "small" ).text();
        // Fix: match() can return null; the original indexed [0] unguarded.
        var m = listTime.match( /\d{4}-\d{2}-\d{2}\s*\d{2}[:]\d{2}/ );
        listTime = m ? m[0] : listTime;
        arcList.push({ title: title, url: url, entry: entry, listTime: listTime });
    });
    return arcList;
}

// Decide whether another list page exists; if so crawl it, otherwise
// flatten the collected summaries into the article-URL queue.
function nextPage( html ){
    var $ = cheerio.load( html );
    var nextUrl = $( "#pager a:last-child" ).attr( 'href' );
    if ( !nextUrl ) return getArcUrl( aList );
    // Fix: the original compared page numbers as STRINGS, so "9" < "10"
    // was false and pagination could stop at the 9 -> 10 boundary.
    var curPage = parseInt( $( "#pager .current" ).text(), 10 );
    if ( isNaN( curPage ) ) curPage = 1;
    var nextNum = parseInt( nextUrl.substring( nextUrl.indexOf( '=' ) + 1 ), 10 );
    if ( curPage < nextNum ) crawler( nextUrl );
}

// Fetch one list page, collect its article summaries, then follow pagination.
function crawler( url ) {
    http.get(url, function (res) {
        var html = '';
        res.on('data', function (chunk) {
            html += chunk;
        });
        res.on('end', function () {
            aList.push( filterHtml( html ) );
            nextPage( html );
        });
    }).on('error', function ( err ) {
        // Fix: unhandled 'error' events on http.get crash the process.
        console.log( 'request failed: ' + url, err );
    });
}

// Flatten the per-page summary arrays into aUrl and start the article chain.
function getArcUrl( arcList ){
    for ( var key in arcList ) {
        for ( var k in arcList[key] ) {
            aUrl.push( arcList[key][k]['url'] );
        }
    }
    if ( aUrl.length ) crawlerArc( aUrl.shift() );
}

var url = 'http://www.cnblogs.com/ghostwu/';
crawler( url );
layout.jade檔:
//- layout.jade — page template for one article.
//- Jade is indentation-sensitive: the template printed on a single line is
//- not valid Jade and cannot render. Structure reconstructed below;
//- #{href}, !{title} and !{body} are filled from the object passed to
//- jade.renderFile (title/body unescaped because body is raw article HTML).
doctype html
html
  head
    meta(charset='utf-8')
    title jade+node.js express
    link(rel="stylesheet", href='./css/bower_components/bootstrap/dist/css/bootstrap.min.css')
  body
    block header
      p.container
        p.well.well-lg
          h3 ghostwu的博客
          p js高手之路
    block container
      p.container
        h3
          a(href="#{href}" rel="external nofollow") !{title}
        p !{body}
    block footer
      p.container
        footer 版权所有 - by ghostwu
後續的打算:
1,採用mongodb入庫
2,支援斷點採集
3,採集圖片
4,採集小說
等等....
相關推薦:
以上是Node.js、jade產生靜態html檔實例的詳細內容。更多資訊請關注PHP中文網其他相關文章!