class BigFileSpider(SimpleSpider):
    """
    This class makes it easy to collect data from a source that publishes very large packages. Each package is
    split into smaller packages, each containing 100 releases or records. Users can then process the files without
    using an iterative parser and without having memory issues.

    #. Inherit from ``BigFileSpider``
    #. Write a ``start_requests()`` method to request the archive files

    .. code-block:: python

        from kingfisher_scrapy.base_spiders import BigFileSpider
        from kingfisher_scrapy.util import components

        class MySpider(BigFileSpider):
            name = 'my_spider'

            def start_requests(self):
                yield self.build_request('https://example.com/api/package.json', formatter=components(-1))

    .. note::

        ``concatenated_json = True``, ``line_delimited = True`` and ``root_path`` are not supported, because this
        spider yields items whose ``data`` field has ``package`` and ``data`` keys.
    """

    resize_package = True
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)

        # Fail fast if the subclass is misconfigured: this spider can resize
        # only release packages and record packages.
        if spider.data_type not in ('release_package', 'record_package'):
            raise IncoherentConfigurationError(
                f"data_type must be 'release_package' or 'record_package', not {spider.data_type!r}."
            )

        return spider
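
# Usage sketch (not part of the library): the class name, spider name, and URL
# below are hypothetical. Because from_crawler() validates data_type, any
# value other than 'release_package' or 'record_package' raises
# IncoherentConfigurationError as soon as the crawl starts.
#
#     class ExampleBigFileSpider(BigFileSpider):
#         name = 'example_big_file'
#         data_type = 'release_package'  # one of the two accepted values
#
#         def start_requests(self):
#             yield self.build_request(
#                 'https://example.com/api/releases.json',  # hypothetical endpoint
#                 formatter=components(-1),
#             )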