I want to get a website's sitemap using Node.js. Can anyone point me in the right direction? How do I crawl a site with Node.js and build a sitemap from it?
I'm currently looking at https://github.com/cgiffard/node-simplecrawler, but I'm not sure how to stop it from actually downloading the pages. I only need the links, ideally in a structured object...
I hope that's clear!
Cheers, H.
I found a very useful command-line tool, written in Node, that does exactly this; its source code helped me a lot with this task. Here is a link to the package's repository: https://github.com/lgraubner/node-sitemap-generator-cli
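If that tool's crawler is all you need, the underlying sitemap-generator library can also be driven from Node directly. A minimal sketch, assuming the API documented in that package's README (option and event names may differ between versions):

var SitemapGenerator = require('sitemap-generator');

// Create a generator; 'filepath' is where the sitemap XML gets written.
var generator = SitemapGenerator('https://www.website.com', {
    filepath: './sitemap.xml'
});

// 'done' fires once the crawl has finished and the file has been written.
generator.on('done', function () {
    console.log('Sitemap written to ./sitemap.xml');
});

generator.start();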
Here is the code I ended up using:
var Crawler = require('simplecrawler');

var port = 80;
var exclude = ['gif', 'jpg', 'jpeg', 'png', 'ico', 'bmp', 'ogg', 'webp',
'mp4', 'webm', 'mp3', 'ttf', 'woff', 'json', 'rss', 'atom', 'gz', 'zip',
'rar', '7z', 'css', 'js', 'gzip', 'exe'];
var exts = exclude.join('|');
// Note the double backslash: inside a string literal, '\.' collapses to '.',
// which would match any character instead of a literal dot.
var regex = new RegExp('\\.(' + exts + ')', 'i'); // Used for filtering crawl items
var crawler = new Crawler('www.website.com');
var pages = []; // This array will hold all the URLs

// Crawler configuration
crawler.initialPort = port;
crawler.initialPath = '/';

// Reject any resource whose path matches one of the excluded file
// extensions, so only HTML pages get fetched.
crawler.addFetchCondition(function (parsedURL) {
    return !parsedURL.path.match(regex);
});

// Run the crawler
crawler.start();

crawler.on('fetchcomplete', function (item, responseBuffer, response) {
    pages.push(item.url); // Add URL to the array of pages
});
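To actually produce the sitemap from this, you can wait for simplecrawler's complete event (fired once the queue is exhausted) and serialize the pages array. A minimal sketch building on the code above:

var fs = require('fs');

crawler.on('complete', function () {
    var xml = '<?xml version="1.0" encoding="UTF-8"?>\n' +
        '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n';
    pages.forEach(function (page) {
        xml += '  <url><loc>' + page + '</loc></url>\n';
    });
    xml += '</urlset>\n';
    fs.writeFileSync('./sitemap.xml', xml);
    console.log('Wrote ' + pages.length + ' URLs to sitemap.xml');
});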
I'm not sure why, but I wasn't happy with the existing tools, so I wrote some code of my own on top of the crawler package. It builds a sitemap fully automatically:
var Crawler = require("crawler");
var url = require('url');
var fs = require('fs');
var writeStream = fs.createWriteStream('./output');
writeStream.write('<?xml version="1.0" encoding="UTF-8"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation=" http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">');
var strBuff = '<?xml version="1.0" encoding="UTF-8"?><urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:mobile="http://www.google.com/schemas/sitemap-mobile/1.0" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1">';
var Router = require('routes');
var router = Router();
var noop = function(){};
var peroid = {
'/':'hourly',
'/results':'hourly',
'/tips': 'hourly',
'/tips/:country':'hourly',
'/tips/:country/:venue':'hourly',
'/support':'hourly',
}
function addToMap(url) {
    // Match the URL path against the registered routes to pick a change frequency.
    var key = router.match(url.replace('https://www.yourwebsite.com', ''));
    if (!key) {
        key = { route: '/' };
    } else {
        console.log('match ', url);
    }
    var freq = period[key.route] || 'hourly'; // Fall back for routes with no entry
    writeStream.write('<url>\n<loc>' + url + '</loc>\n<changefreq>' + freq + '</changefreq>\n<priority>0.5</priority>\n</url>');
}
// Close the root element and flush the file once the crawl is done.
function saveTofile() {
    console.log('end');
    writeStream.write('\n</urlset>');
    writeStream.end();
}
router.addRoute('/', noop);
router.addRoute('/results', noop);
router.addRoute('/tips', noop);
router.addRoute('/tips/:country', noop);
router.addRoute('/tips/:country/:venue', noop);
router.addRoute('/support', noop);
router.addRoute('/algorithm', noop);
var cache = {}; // Tracks URLs already queued, so each one is visited only once

var c = new Crawler({
    maxConnections: 25,
    skipDuplicates: true,
    // Called once the queue runs empty, i.e. the whole site has been crawled.
    onDrain: function () {
        console.log('ondrain');
        saveTofile();
    },
    // This will be called for each crawled page; $ is a cheerio handle on the body.
    callback: function (error, result, $) {
        if (error || !$) {
            console.log(error, result.uri);
            return;
        }
        $('a').each(function (index, a) {
            var toQueueUrl = $(a).attr('href');
            if (!toQueueUrl) {
                return;
            }
            // Only follow same-site, root-relative links; skip API routes and PDFs.
            if (toQueueUrl[0] !== '/' || toQueueUrl.indexOf('/api/') !== -1 || toQueueUrl.indexOf('.pdf') !== -1) {
                return;
            }
            if (cache.hasOwnProperty(toQueueUrl)) {
                return;
            }
            c.queue('https://www.yourwebsite.com' + toQueueUrl);
            addToMap('https://www.yourwebsite.com' + toQueueUrl);
            cache[toQueueUrl] = 1;
            // Log progress every 100 unique URLs.
            var keys = Object.keys(cache);
            if (!(keys.length % 100)) {
                console.log('total', keys.length);
            }
        });
    }
});
c.queue('https://www.yourwebsite.com');
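One caveat: URLs are written into the XML verbatim, so a link containing an ampersand would produce an invalid sitemap. A small escaping helper (hypothetical, not part of the original code) could be applied to the URL inside addToMap:

// Hypothetical helper: escape the five XML special characters before
// embedding a URL in <loc>, e.g. xmlEscape(url) inside addToMap.
function xmlEscape(s) {
    return s.replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&apos;');
}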
Hope it helps!