有没有对 Scrapy 爬虫很精通的朋友,我目前的爬虫有些问题
import scrapy
from xxx.items import WorkItem
class XXXSpider(scrapy.Spider):
name = "xxx"
allowed_domains = ["example.com"]
start_urls = ["https://example.com/xx/xx"]
def parse(self, response):
year_list = response.xpath('//ul[@class="p-accordion"]/li')
for year in year_list[:2]:
release_dates_url_of_year = year.xpath('.//div[@class="genre -s"]/a/@href').extract()
for date_url in release_dates_url_of_year:
yield scrapy.Request (
url = date_url,
callback =self.date_detail_parse
)
def date_detail_parse(self, response):
work_list = response.xpath('.//div[@class="swiper-slide c-low--6"]/div')
for work in work_list:
actress_name = work.xpath('.//a[@class="name c-main-font-hover"]/text()').extract_first()
if actress_name is not None:
item = WorkItem()
item['actress_name'] = actress_name
item['image_hover'] = work.xpath('.//img[@class="c-main-bg lazyload"]/@data-src').extract_first()
work_detail_url = work.xpath('.//a[@class="img hover"]/@href').extract_first()
if work_detail_url is not None:
yield scrapy.Request (
url = work_detail_url,
callback = self.work_detail_pares,
meta = {'workItem' : item}
)
def work_detail_pares(self, response):
item = response.meta['workItem']
pics_list = response.xpath('.//div[@class="swiper-wrapper"]/div')
pre_images = []
for pic in pics_list:
img_url = pic.xpath('./img/@data-src').extract_first()
pre_images.append(img_url)
item['pre_images'] = pre_images
item['name'] = response.xpath('.//div[@class="p-workPage l-wrap"]/h2/text()').extract_first().strip()
item['id'] = response.xpath('.//span[@class="c-tag02 c-main-bg-hover c-main-bg"]/../text()').extract_first()
item['company'] = 'xxx'
item['release_date'] = response.xpath('.//div[@class="p-workPage__table"]/div[2]//div[@class="item"]/a/text()').extract_first()
actress_detail_url = response.xpath('.//div[@class="p-workPage__table"]/div[1]//div[@class="item"]/a/@href').extract_first()
yield scrapy.Request(
url = actress_detail_url,
callback = self.actress_detail_pase,
meta = {'workItem' : item}
)
def actress_detail_pase(self, response):
item = response.meta['workItem']
item['actress_avatar'] = response.xpath('.//div[@class="swiper-slide"]/img/@data-src').extract_first()
yield item
year_list
我目前就只取了前 2 条数据做测试,发现爬取下来的数据跟网站上相比,很漏爬很多很多数据,而且每次爬取的数量都是同一个,网站上应该有至少几百条数据,而爬虫爬取下来,每次只能爬 35 条数据。而且每次爬取的数据内容都是变化的,这次没爬到的数据可能下一次再启动爬虫他就爬到了,但是总数永远是 35 。item
包在deepcopy()
里,也没有区别。for
循环,就只爬单条数据的话,他又确实可以把完整的数据都爬取下来有没有大佬知道我这问题到底出在哪里?问 GPT 的各种解答方案也试了,还是不行。
这是一个专为移动设备优化的页面(即为了让你能够在 Google 搜索结果里秒开这个页面),如果你希望参与 V2EX 社区的讨论,你可以继续到 V2EX 上打开本讨论主题的完整版本。
V2EX 是创意工作者们的社区,是一个分享自己正在做的有趣事物、交流想法,可以遇见新朋友甚至新机会的地方。
V2EX is a community of developers, designers and creative people.