from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.item import Item
from scrapy.spider import BaseSpider
from scrapy import log


class ExampleSpider(CrawlSpider):
    """Crawl domain.co.za and save each followed page's body to a local file.

    Pages are fetched starting from ``start_urls``; every link on the site is
    followed and handed to :meth:`parse_item`.
    """

    name = "dmoz"
    allowed_domains = ["domain.co.za"]
    start_urls = ["http://domain.co.za/"]

    # BUG FIX: the original spider had two rules, both with an extractor that
    # matches every link (allow=()).  CrawlSpider assigns each extracted link
    # to the FIRST rule whose extractor matched it, so the follow-only rule
    # swallowed every link and the callback rule never fired — parse_item was
    # never called.  A single rule with both callback and follow=True gives
    # the intended behavior: follow all links AND process each page.
    rules = [
        Rule(SgmlLinkExtractor(allow=()), callback='parse_item', follow=True),
    ]

    def parse_item(self, response):
        """Write the raw response body to a file named after the URL.

        The filename is the second-to-last path segment of the URL (for a URL
        ending in '/', that is the last non-empty segment).

        :param response: the scrapy Response for a crawled page.
        """
        filename = response.url.split("/")[-2]
        # Use a context manager so the file handle is always closed, even if
        # the write raises — the original leaked the handle.
        with open(filename, 'wb') as f:
            f.write(response.body)
Run
Reset
Share
Import
Link
Embed
Language▼
English
中文
Python Fiddle
Python Cloud IDE
Follow @python_fiddle
Browser Version Not Supported
Due to Python Fiddle's reliance on advanced JavaScript techniques, older browsers might have problems running it correctly. Please download the latest version of your favourite browser.
Chrome 10+
Firefox 4+
Safari 5+
IE 10+
Let me try anyway!
url:
Go
Python Snippet
Stackoverflow Question