#!/usr/local/bin/python2.7
# -*- coding:utf-8 -*-
import re, time, random, hashlib, urllib, requests, os, math, json, sys, base64, torndb, uuid, threading, sys, itertools, copy, traceback
import requests;requests.packages.urllib3.disable_warnings()
from common import *
from config import *
mkdir('jobs')  # ensure the html cache folder exists (presumably the helper from `common` — TODO confirm it tolerates an existing dir)
class MyWorker(Worker):
    """Worker thread: downloads one BOSS Zhipin job-detail page, parses the
    job/company fields out of the html and writes the record back to the
    jobs table."""

    table_coms = 'zp_coms'  # company table name
    table_jobs = 'zp_jobs'  # job table name
    htmlfolder = 'jobs'     # folder used to cache raw html pages

    def crawlTask(self):
        """Process the task bound to self.record: fetch, parse, persist.

        flag semantics: 10 = parsed ok, 99 = fetch/parse failed.
        """
        # Pre-bind url: the original referenced it in the finally block,
        # which raised NameError whenever getAddr() itself failed.
        url = ''
        try:
            url = self.getAddr()              # build the detail-page url
            htm = self.getHtml(url, None, 0)  # download (and cache) the html
            # getJob() fills self.record in place and returns None, so it
            # must not be fed to dict.update() — the original
            # self.record.update(self.getJob(htm)) raised TypeError and
            # marked every task as failed.
            self.getJob(htm)
            self.record['flag'] = 10
        except Exception:
            self.record['flag'] = 99
            traceback.print_exc()
        finally:
            # Columns that must exist on every record, even on failure.
            self.record['job_link_href'] = url
            self.record['company_type'] = ''
            self.record['company_tel'] = ''
            self.record['company_email'] = ''
            self.record['job_type'] = ''
            self.record['job_type_code'] = ''
            self.record['company_fax'] = ''
            # Persist the outcome of this task to the database.
            self.update(self.table_jobs, self.record)

    def getJob(self, job):
        """Extract job and company fields from the detail-page html `job`
        into self.record (mutates in place, returns None)."""
        self.record['job_title'] = grep(u'<h1>(.+?)</h1>', job)
        self.record['job_salary'] = grep(u'<span class="red">(.+?)</span>', job)
        self.record['job_date'] = grep(u'<span.*?>发布于(.+?)</span>', job)
        # Primary info line: fields are separated by <em class="vline"> tags.
        infos = grep(u'<div class="info-primary">.+?<p>(.+?)</p>', job, re.S).replace('<em class="vline"></em>', '|').split('|')
        for info in infos:
            if u'城市' in info:
                # Strip the leading 3-character city label prefix.
                self.record['city_text'] = info[3:]
                self.record['job_city_code'] = info[3:]
                break
        # Company info line, split the same way.
        cmp_infos = grep(u'h3 class="name".+?p>(.+?)</p>', job, re.S).replace('<em class="vline"></em>', '|').split('|')
        for info in cmp_infos:
            info = re.sub('<.+?>', '', info)
            if info in paramx['s'].values():  # known company-size labels
                self.record['company_size'] = info
                continue
            if info in paramx['i'].values():  # known industry labels
                self.record['job_industry_code'] = info
                self.record['company_industry'] = info
                continue
        self.record['company_linkman'] = re.sub('<.*?>', '', grep(u'<h2 class="name">(.+?)</h2>', job, re.S)).strip()
        self.record['address'] = grep(u'div class="location-address">(.+?)</div>', job, re.S)
        # NOTE(review): the original source had this url literal (and the one
        # in getAddr) broken across two lines — a syntax error, presumably
        # from a bad paste — rejoined here.
        self.record['company_link_url'] = 'https://www.zhipin.com' + grep(u'ka="job-detail-company" href="(.+?)"', job, re.S)
        self.record['company_name'] = grep(u'<h3 class="name".+?>(.+?)<.*?/h3>', job)
        self.record['cmp_company_id'] = grep(u'"job-detail-company" href="/gongsi/(.+?).html"', job)

    def checkHtml(self, html):
        """Return 1 when the page contains parsable data, 0 when the site
        served a captcha / access-blocked page instead."""
        if html.find(u'<title>BOSS 直聘验证码</title>') >= 0:
            return 0
        if html.find(u'您暂时无法继续访问~') >= 0:
            return 0
        return 1

    def reqHtml(self, addr, data=None):
        """GET `addr` through a randomly chosen proxy and return the body
        decoded as utf-8 (undecodable bytes dropped).

        `data` is accepted for interface compatibility but is unused here.
        """
        headers = {
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
        }
        return requests.get(addr, timeout=5, proxies=random.choice(proxies), headers=headers).content.decode('utf8', errors='ignore')

    def getAddr(self):
        """Build the job-detail url from the task record's `cid`."""
        return 'https://www.zhipin.com/job_detail/%(cid)s.html?' % self.record
class MyLeader(Leader):
    """Leader that feeds pending zhipin job records to the worker threads."""

    table_coms = 'zp_coms'  # company table name
    table_jobs = 'zp_jobs'  # job table name

    def getTaskFromDatabase(self):
        """Fetch up to 10,000 unprocessed rows (flag == 0) from the jobs table.

        Each row carries: cid (used to build the job url), flag (crawl
        status) and id (primary key used when writing results back).
        """
        sql = "SELECT `id`,`flag`,`cid` FROM `%s` WHERE `flag` IN (0) LIMIT 10000;" % self.table_jobs
        return self.dbconn.query(sql)
if __name__ == '__main__':
    # One shared database connection for this run.
    dbconn = torndb.Connection(**dbconf)
    # Leader.runWork(WorkerClass, n) starts n MyWorker threads.
    # getTaskFromDatabase() loads up to 10,000 rows from the database
    # (cid builds the job link, flag tracks crawl progress, id is the
    # primary key used to update the row).
    # In each MyWorker's run(), self.leader.getTask() repeatedly binds one
    # task to self.record, crawlTask() processes it, and
    # self.leader.popTask(self.record) removes the finished task.
    # crawlTask() parses the page, updates self.record with the job data
    # and finally stores it with update().
    MyLeader().runWork(MyWorker, 1)