class QingHaiSpider(scrapy.Spider):
    """Crawl procurement/bid announcements from the Qinghai portal
    (http://111.44.251.34) through its full-text-search JSON API.

    One POST request is issued per category in ``results``; each response
    yields one ``QhItem`` per record and schedules the remaining pages.
    """

    name = "qinghai"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36',
        # NOTE(review): the original read 'application/json, text/javascript, /; q=0.01' —
        # the '*/*' wildcard was almost certainly lost to formatting; restored to the
        # standard jQuery-AJAX Accept header. Confirm against a live capture if possible.
        'Accept': 'application/json, text/javascript, */*; q=0.01',
    }
    # categorynum filter value -> content_type code (mirrors the mapping in parse()).
    results = {"001002": "01", "001001": "02", "001005": "03"}

    # Single definition of the search endpoint (was duplicated in both methods).
    SEARCH_URL = 'http://111.44.251.34/inteligentsearch/rest/inteligentSearch/getFullTextData'

    def _build_payload(self, offset, category):
        """Return the JSON-encoded POST body for one search page.

        offset   -- zero-based record offset ("pn"); page size is 10 ("rn").
        category -- "categorynum" value to filter on (a key of self.results).
        """
        return json.dumps({
            "token": "", "pn": offset, "rn": 10,
            "sdt": "", "edt": "", "wd": "", "inc_wd": "", "exc_wd": "",
            "fields": "title",
            "cnum": "001;002;003;004;005;006;007;008;009;010",
            # BUG FIX: the original start_requests payload had the inner quotes
            # unescaped ("sort":"{"showdate":"0"}"), which is a Python SyntaxError.
            # The escaped form below matches the (working) pagination payload.
            "sort": "{\"showdate\":\"0\"}",
            "ssort": "title", "cl": 200, "terminal": "",
            "condition": [{"fieldName": "categorynum", "isLike": True,
                           "likeType": 2, "equal": category}],
            "time": None, "highlights": "title", "statistics": None,
            "unionCondition": None, "accuracy": "100",
            "noParticiple": "0", "searchRange": None, "isBusiness": 1,
        })

    def start_requests(self):
        """Issue the first page (pn=0) of the search for every category."""
        for category in self.results:
            yield scrapy.Request(
                url=self.SEARCH_URL,
                method='POST',
                body=self._build_payload(0, category),
                headers=self.headers,
                callback=self.parse,
                dont_filter=False,
                meta={'key': category},
            )

    def parse(self, response):
        """Parse one result page: yield an item per record, then schedule
        the remaining pages for this category.

        Raises KeyError if the response JSON lacks the expected
        result/totalcount/records structure.
        """
        category = response.meta['key']
        result = json.loads(response.text)["result"]
        max_counts = result["totalcount"]

        for record in result["records"]:
            item = QhItem()
            item["title"] = record["title"]
            item["date"] = record["showdate"]
            item["detail_url"] = "http://111.44.251.34" + record["linkurl"]
            item["area_code"] = "QINGHAI"
            item["publish_id"] = "181818"
            item["thing_id"] = "42"
            # Classify by the category segment embedded in the detail URL.
            if "001002" in item["detail_url"]:
                item["content_type"] = "01"
            elif "001001" in item["detail_url"]:
                item["content_type"] = "02"
            else:
                item["content_type"] = "03"
            yield item

        # Schedule follow-up pages. Every page re-emits the full range, but
        # identical (url, method, body) requests are dropped by the scheduler's
        # dupefilter since dont_filter=False.
        for offset in range(10, max_counts + 10, 10):
            yield scrapy.Request(
                url=self.SEARCH_URL,
                method='POST',
                body=self._build_payload(offset, category),
                headers=self.headers,
                callback=self.parse,
                dont_filter=False,
                # BUG FIX: the original pagination request carried no meta, so
                # response.meta['key'] raised KeyError on every follow-up page.
                meta={'key': category},
            )
这是一个专为移动设备优化的页面(即为了让你能够在 Google 搜索结果里秒开这个页面),如果你希望参与 V2EX 社区的讨论,你可以继续到 V2EX 上打开本讨论主题的完整版本。
V2EX 是创意工作者们的社区,是一个分享自己正在做的有趣事物、交流想法,可以遇见新朋友甚至新机会的地方。
V2EX is a community of developers, designers and creative people.