Scraping High-Beauty-Score Images from Zhihu to Local Disk (Python 3 Crawler, Face Detection, Beauty Scoring)

2020-06-28

**Prerequisites**

1. The code runs successfully both in VS Code and on CentOS.
2. Install Python 3 and pip3.
3. Install the dependencies: `pip3 install requests lxml baidu-aip`
4. Register and log in to a Baidu Cloud account, enable the face detection service (https://cloud.baidu.com/product/face), and fill the APP_ID / API_KEY / SECRET_KEY into the code (a quick credential check is sketched right after this list).
5. The `image` directory must be in the same directory as the script (init_env() creates it automatically if it is missing).
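
Before running the full crawler, it is worth verifying the Baidu credentials in isolation. Below is a minimal sketch that exchanges the key pair for an access token against the same OAuth endpoint the script's fetch_auth_token() uses; the placeholder key values are yours to fill in:

```python
import requests

API_KEY = "your-api-key"        # placeholder: your Baidu Cloud API key
SECRET_KEY = "your-secret-key"  # placeholder: your Baidu Cloud secret key

# Same endpoint and parameters as fetch_auth_token() in the script below.
resp = requests.post(
    "https://aip.baidubce.com/oauth/2.0/token",
    params={
        "grant_type": "client_credentials",
        "client_id": API_KEY,
        "client_secret": SECRET_KEY,
    },
)
# On success the JSON carries an access_token; with bad keys it carries an error.
print(resp.json())
```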



```python
#!/usr/bin/python3
# coding: utf-8

import time
import os
import re
import base64

import requests
# shell: pip3 install requests lxml baidu-aip
from lxml import etree

# AipFace is unused in the HTTP-only path below; kept for the commented-out
# SDK alternative in init_face_detective()
from aip import AipFace

# Baidu Cloud face detection credentials
# These three lines are the only values that must be filled in
APP_ID = ""
API_KEY = ""
SECRET_KEY = ""

# Directory for downloaded images, relative to the current directory
DIR = "image"
# Beauty-score threshold for filtering; feel free to lower it if disk space is plentiful
BEAUTY_THRESHOLD = 45

# Open Zhihu in a browser and copy an authorization header from the developer
# tools; no login is required (how to replace this value is explained below)
AUTHORIZATION = "oauth c3cef7c66a1843f8b3a9e6a1e3160e20"

# Nothing below needs to be changed

# Number of feed entries fetched per request; keep it small and be considerate
LIMIT = 5

# Topic ID of "美女" (beautiful women), the parent topic of "颜值" (beauty, 20013528)
SOURCE = "19552207"
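# To crawl a different topic, swap SOURCE for that topic's ID — the number in
# the topic page URL, e.g. https://www.zhihu.com/topic/<id>/newest (the same
# pattern REFERER below is built from).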

# Pretend to be a regular browser
USER_AGENT = "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.5 Safari/534.55.3"
# Plausible referer for the requests
REFERER = "https://www.zhihu.com/topic/%s/newest" % SOURCE
# Feed URL of a topic's discussion list
BASE_URL = "https://www.zhihu.com/api/v4/topics/%s/feeds/timeline_activity"
# Query parameters attached to the initial request
URL_QUERY = "?include=data%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Danswer%29%5D.target.content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%3Bdata%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Danswer%29%5D.target.is_normal%2Ccomment_count%2Cvoteup_count%2Ccontent%2Crelevant_info%2Cexcerpt.author.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Darticle%29%5D.target.content%2Cvoteup_count%2Ccomment_count%2Cvoting%2Cauthor.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Dtopic_sticky_module%29%5D.target.data%5B%3F%28target.type%3Dpeople%29%5D.target.answer_count%2Carticles_count%2Cgender%2Cfollower_count%2Cis_followed%2Cis_following%2Cbadge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Danswer%29%5D.target.content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%3Bdata%5B%3F%28target.type%3Danswer%29%5D.target.author.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Darticle%29%5D.target.content%2Cauthor.badge%5B%3F%28type%3Dbest_answerer%29%5D.topics%3Bdata%5B%3F%28target.type%3Dquestion%29%5D.target.comment_count&limit=" + str(LIMIT)
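# URL-decoded, the include parameter above asks the API to embed, per feed
# entry, the answer content plus vote/comment counts and author details —
# roughly the fields process_activities() reads below.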

# Fetch the raw bytes at a URL (used for images)
def fetch_image(url):
    try:
        headers = {
                "User-Agent": USER_AGENT,
                "Referer": REFERER,
                "authorization": AUTHORIZATION
                }
        s = requests.get(url, headers=headers)
    except Exception as e:
        print("fetch image fail. " + url)
        raise e

    return s.content

# Fetch the JSON at a URL (used for the topic feed)
def fetch_activities(url):
    try:
        headers = {
                "User-Agent": USER_AGENT,
                "Referer": REFERER,
                "authorization": AUTHORIZATION
                }
        s = requests.get(url, headers=headers)
    except Exception as e:
        print("fetch last activities fail. " + url)
        raise e

    return s.json()

# Process one page of the returned feed
def process_activities(datums, face_detective):
    for data in datums["data"]:

        target = data["target"]
        if "content" not in target or "question" not in target or "author" not in target:
            continue

        # Parse the HTML content of this entry
        html = etree.HTML(target["content"])

        seq = 0

        #question_url = target["question"]["url"]
        question_title = target["question"]["title"]

        author_name = target["author"]["name"]
        #author_id = target["author"]["url_token"]

        print("current answer: " + question_title + " author: " + author_name)

        # Collect all image URLs in the answer
        images = html.xpath("//img/@src")
        for image in images:
            if not image.startswith("http"):
                continue
            s = fetch_image(image)

            # Ask the face detection service for beauty scores
            scores = face_detective(s)

            for score in scores:
                filename = ("%d--" % score) + author_name + "--" + question_title + ("--%d" % seq) + ".jpg"
                # Filename sanitization: illegal characters differ per platform, so this
                # is only a rough cleanup, especially of author_name / question_title
                filename = re.sub(r'(?u)[^-\w.]', '_', filename)
                seq = seq + 1
                with open(os.path.join(DIR, filename), "wb") as fd:
                    fd.write(s)

            # Face detection is free but has a QPS limit
            time.sleep(2)

    if not datums["paging"]["is_end"]:
        # URL of the next page of the feed
        return datums["paging"]["next"]
    else:
        return None

# Filename sanitizer (currently unused; process_activities applies the same
# regex inline)
def get_valid_filename(s):
    s = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '_', s)

# Call Baidu's V3 face detection endpoint directly over HTTP
def detect_face(image, token):
    try:
        URL = "https://aip.baidubce.com/rest/2.0/face/v3/detect"
        params = {
                "access_token": token
                }
        data = {
                "face_field": "age,gender,beauty,qualities",
                "image_type": "BASE64",
                "image": base64.b64encode(image)
                }
        s = requests.post(URL, params=params, data=data)
        return s.json()["result"]
    except Exception as e:
        print("detect face fail. " + URL)
        raise e
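
# Fields of the detect result that this script relies on (abridged):
#   result["face_num"]                          -> number of faces found
#   result["face_list"][i]["face_probability"]  -> confidence that the region is a face
#   result["face_list"][i]["beauty"]            -> beauty score on a 0-100 scale
#   result["face_list"][i]["gender"]["type"]    -> "male" / "female"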

# Exchange the API key / secret key for an access token (OAuth client credentials)
def fetch_auth_token(api_key, secret_key):
    try:
        URL = "https://aip.baidubce.com/oauth/2.0/token"
        params = {
                "grant_type": "client_credentials",
                "client_id": api_key,
                "client_secret": secret_key
                }
        s = requests.post(URL, params=params)
        return s.json()["access_token"]
    except Exception as e:
        print("fetch baidu auth token fail. " + URL)
        raise e

def init_face_detective(app_id, api_key, secret_key):
    # client = AipFace(app_id, api_key, secret_key)
    # The Baidu Cloud V3 interface needs an access token fetched up front
    token = fetch_auth_token(api_key, secret_key)
    def detective(image):
        #r = client.detect(image, options)
        # Use the HTTP endpoint directly
        r = detect_face(image, token)
        # No face detected
        if r is None or r["face_num"] == 0:
            return []

        scores = []
        for face in r["face_list"]:
            # Skip low-confidence face regions
            if face["face_probability"] < 0.6:
                continue
            # Skip faces below the beauty threshold
            if face["beauty"] < BEAUTY_THRESHOLD:
                continue
            # Keep female faces only
            if face["gender"]["type"] != "female":
                continue
            scores.append(face["beauty"])

        return scores

    return detective
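
# Usage: the returned closure takes raw image bytes and yields the beauty
# scores of the qualifying faces, e.g. (hypothetical local file):
#   scores = face_detective(open("test.jpg", "rb").read())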

def init_env():
    if not os.path.exists(DIR):
        os.makedirs(DIR)

init_env()
face_detective = init_face_detective(APP_ID, API_KEY, SECRET_KEY)

url = BASE_URL % SOURCE + URL_QUERY
while url is not None:
    print("current url: " + url)
    datums = fetch_activities(url)
    url = process_activities(datums, face_detective)
    # Be considerate: do not shorten the crawl interval
    time.sleep(5)


# vim: set ts=4 sw=4 sts=4 tw=100 et:
```
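
To sanity-check the detection pipeline without crawling Zhihu, the sketch below runs a single local image through the same token fetch and detect call the script uses. The filename test.jpg and the filled-in keys are assumptions:

```python
import base64
import requests

API_KEY = "your-api-key"        # placeholder
SECRET_KEY = "your-secret-key"  # placeholder

# Token fetch, as in fetch_auth_token() above.
token = requests.post(
    "https://aip.baidubce.com/oauth/2.0/token",
    params={"grant_type": "client_credentials",
            "client_id": API_KEY, "client_secret": SECRET_KEY},
).json()["access_token"]

# Detect call, as in detect_face() above, on a hypothetical local test image.
with open("test.jpg", "rb") as f:
    image = f.read()

result = requests.post(
    "https://aip.baidubce.com/rest/2.0/face/v3/detect",
    params={"access_token": token},
    data={"face_field": "age,gender,beauty",
          "image_type": "BASE64",
          "image": base64.b64encode(image)},
).json()["result"]

if result and result["face_num"]:
    for face in result["face_list"]:
        print(face["beauty"], face["gender"]["type"])
else:
    print("no face detected")
```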

This article is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License; please include a link to the original when republishing.