# 树莓派实现人脸识别和上传云端以及语音播报
# (Raspberry Pi: face recognition, cloud upload to OneNET, and voice announcement)
# Post category: 其他 (misc)

import RPi.GPIO as GPIO

import paho.mqtt.client as mqtt

from urllib.parse import quote

import time

import json

import random

import base64

import hmac

from aip import AipFace

from picamera import PiCamera

import urllib.request

from aip import AipSpeech

import schedule

import os

import datetime

import pyttsx3

# Local text-to-speech engine used for voice announcements.
engine = pyttsx3.init()

# Select the Chinese voice pack by its voice ID.
engine.setProperty('voice', 'zh')

# Baidu face-recognition API account information (fill in your own credentials).
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''

# Client used to reach the Baidu cloud face service.
client1 = AipFace(APP_ID, API_KEY, SECRET_KEY)

IMAGE_TYPE = 'BASE64'   # image encoding sent to the API
camera = PiCamera()     # camera device handle
GROUP = '01'            # face-library user group searched by go_api()

# OneNET cloud platform account information.
HOST = "mqtts.heclouds.com"   # broker address (unencrypted access point)
PORT = "1883"                 # broker port (unencrypted); converted with int() at connect time
PRO_ID = ""                   # product ID
DEV_ID = ""                   # device ID
DEV_NAME = ""                 # device name
DEV_KEY = ""                  # device key (base64) used to sign the token
ACCESS_KET = ""               # product AccessKey (NOTE: name has a typo, kept for compatibility; unused below)

# Drive the LED from an MQTT command payload.
def LED_Control(cmd):
    """Switch the LED on GPIO pin 11 according to an MQTT payload string.

    `cmd` is str(msg.payload), so a bytes payload b'ON' arrives here as the
    literal string "b'ON'" — the comparisons below match that representation.
    """
    if cmd == "b'ON'":              # [ON] command: light the LED
        GPIO.output(11, GPIO.HIGH)
        ts_print("[Command] --> LED ON")
    elif cmd == "b'OFF'":           # [OFF] command: turn the LED off
        GPIO.output(11, GPIO.LOW)
        ts_print("[Command] --> LED OFF")

# Build an OneNET MQTT access token.
def token(_pid, dname, access_key):
    """Generate an OneNET access token for one device.

    Args:
        _pid: product ID.
        dname: device name.
        access_key: base64-encoded device key used to sign the token.

    Returns:
        Token string 'version=...&res=...&et=...&method=...&sign=...'.
    """
    version = '2018-10-31'
    # res = 'mqs/%s' % id           # access via MQ_ID
    # res = 'products/%s' % id      # access the product API via product ID
    # Access via MQTTS product id + device name.
    res = 'products/%s/devices/%s' % (_pid, dname)
    # Token expiry (seconds since epoch); 3 600 000 s is roughly 41 days ahead.
    et = str(int(time.time()) + 3600000)
    # Signature method: OneNET supports md5, sha1, sha256.
    method = 'md5'
    # The access key is base64; decode to raw bytes for HMAC.
    key = base64.b64decode(access_key)
    # Sign the canonical string "<et>\n<method>\n<res>\n<version>".
    org = et + '\n' + method + '\n' + res + '\n' + version
    sign_b = hmac.new(key=key, msg=org.encode(), digestmod=method)
    sign = base64.b64encode(sign_b.digest()).decode()
    # URL-encode values that may contain reserved characters; method/res/version
    # values are simple, but res contains '/' so it is encoded too.
    sign = quote(sign, safe='')
    res = quote(res, safe='')
    # Assemble the token parameters.
    token = 'version=%s&res=%s&et=%s&method=%s&sign=%s' % (version, res, et, method, sign)
    return token

# print() with a timestamp prefix.
def ts_print(*args):
    """Print *args prefixed with a millisecond-resolution timestamp."""
    t = time.strftime("[%Y-%m-%d %H:%M:%S")
    # Fractional seconds from time.time(), truncated to at most 3 digits.
    ms = str(time.time()).split('.')[1][:3]
    # BUG FIX: the original glued milliseconds directly onto the seconds
    # ("...:00123]"), making the timestamp ambiguous; separate with a dot.
    t += '.' + ms + ']:'
    print(t, *args)

# Triggered when the MQTT broker acknowledges the client's connect request.
def on_connect(client, userdata, flags, rc):
    """MQTT CONNACK callback: log the result and set up subscriptions."""
    ts_print("<<<<CONNACK")
    ts_print("connected with result code: " + mqtt.connack_string(rc), rc)
    client.subscribe(topic=topic_cmd, qos=1)    # commands pushed by OneNET
    client.subscribe(topic=topic_dp, qos=1)     # ACKs for uploaded data points

# Triggered when a message published by the MQTT broker arrives.
def on_message(client, userdata, msg):
    """MQTT message callback: drive the LED and acknowledge OneNET commands."""
    ts_print('on_message')
    # Payload is expected to be b'ON'/b'OFF'; LED_Control matches str(payload).
    LED_Control(str(msg.payload))
    if topic_cmds in msg.topic:                 # OneNET command-request topic
        # Reply on the matching response topic so OneNET marks the command done.
        responseTopic = str(msg.topic).replace("request", "response", 1)
        client.publish(responseTopic, 'OK', qos=1)

# Invoked after the client's publish() reaches the MQTT broker.
def on_publish(client, userdata, mid):
    """MQTT PUBACK callback — intentionally a no-op."""
    pass

# Invoked when the MQTT broker acknowledges a subscribe request.
def on_subscribe(client, obj, mid, granted_qos):
    """MQTT SUBACK callback — intentionally a no-op."""
    pass

# Triggered when the client loses its connection to the broker.
def on_disconnect(client, userdata=None, rc=None):
    """MQTT disconnect callback.

    BUG FIX: paho-mqtt invokes this as on_disconnect(client, userdata, rc);
    the original one-argument signature raised TypeError on every disconnect.
    The extra parameters default to None so any direct callers still work.
    """
    ts_print('DISCONNECTED')

# Data payload published from the Raspberry Pi to the server.
def data(ds_id, value, countAccess):
    """Build the JSON data-point payload uploaded to OneNET.

    Args:
        ds_id: numeric message id (coerced with int()).
        value: number of people who passed face recognition.
        countAccess: door status code (1 = opened, 2 = kept closed).

    Returns:
        ASCII-encoded JSON bytes in OneNET data-point format.
    """
    message = {
        "id": int(ds_id),
        "dp": {
            "person": [{      # count of people recognized successfully
                "v": value
            }],
            "door": [{        # door open/close information
                "v": countAccess
            }]
        }
    }
    return json.dumps(message).encode('ascii')

def ligntLed():
    """Pulse the LED on pin 11 for one second, then release the GPIO pins.

    NOTE(review): GPIO.cleanup() here also undoes the pin setup performed in
    __main__; later LED_Control calls may find pin 11 unconfigured — confirm.
    (Function name typo 'ligntLed' is kept because callers use it.)
    """
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(11, GPIO.OUT)
    GPIO.output(11, True)
    time.sleep(1)
    GPIO.output(11, False)
    GPIO.cleanup()

# Photo-capture function.
def getimage():
    """Take one photo with the Pi camera and save it as faceimage.jpg."""
    camera.resolution = (1024, 768)  # capture at 1024x768
    camera.start_preview()           # start the preview so the sensor settles
    time.sleep(2)
    camera.capture('faceimage.jpg')  # take the photo and save it
    time.sleep(2)

# Convert the captured picture into the format the API expects.
def transimage():
    """Read faceimage.jpg and return its contents base64-encoded (bytes).

    BUG FIX: the original opened the file and never closed it; a context
    manager guarantees the handle is released.
    """
    with open('faceimage.jpg', 'rb') as f:
        return base64.b64encode(f.read())

def Speech_recognition(weather):
    """Synthesize `weather` (Chinese text) to speech and save it as auido.mp3.

    Uses the Baidu AipSpeech TTS service. On success the API returns MP3
    bytes; on error it returns a dict with an error code, in which case
    nothing is written.
    """
    # SECURITY: credentials are hardcoded in source; move them to config.
    APP_ID1 = '26222851'
    API_KEY1 = 'jL0hgkZ5GdYyOui1wmz3jWrF'
    SECRET_KEY1 = '9BtfehxqNAhN2Y7pZWlYndYkkW6k0ZeT'
    client = AipSpeech(APP_ID1, API_KEY1, SECRET_KEY1)
    result = client.synthesis(weather, 'zh', 1, {
        'vol': 5,  # volume, 0-15 (default 5, medium)
        'per': 3,  # voice: 0 female, 1 male, 3/4 emotional synthesis voices
        'spd': 4,  # speed, 0-9 (default 5, medium)
    })
    # Success returns raw audio bytes; failures come back as a dict.
    if not isinstance(result, dict):
        # The with-block closes the file; the original's redundant f.close()
        # after the with statement has been removed.
        with open('auido.mp3', 'wb') as f:
            f.write(result)
        time.sleep(1)

# Upload the picture to the Baidu API for face detection.
def go_api(image):
    """Search the Baidu cloud face library for the face in `image`.

    Args:
        image: base64-encoded JPEG bytes (as returned by transimage()).

    Returns:
        1 if a known face was matched (access granted and logged),
        0 otherwise (unknown face, no face detected, or API error).
    """
    # Look for a matching face in the Baidu cloud face library.
    result = client1.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)
    if result['error_msg'] == 'SUCCESS':
        name = result['result']['user_list'][0]['user_id']  # best-match user id
        score = result['result']['user_list'][0]['score']   # similarity score
        if score > 80:  # similarity above 80: treat as a known person
            if name == 'dnx':
                print("欢迎%s !" % name)
                time.sleep(3)
                engine.say('欢迎')
                engine.say('XXX')
                engine.runAndWait()
            if name == 'qc':
                print("欢迎%s !" % name)
                time.sleep(3)
                engine.say('欢迎')
                engine.say('XXX')
                engine.runAndWait()
                ligntLed()
        else:
            # Similarity too low: reject and bail out before logging.
            print("对不起,我不认识你!")
            engine.say('对不起,我不认识你!')
            engine.runAndWait()
            name = 'Unknow'
            return 0
        curren_time = time.asctime(time.localtime(time.time()))  # current time
        # Append the entry record to Log.txt (recognized visitors only).
        f = open('Log.txt', 'a')
        f.write("Person: " + name + "     " + "Time:" + str(curren_time) + '\n')
        f.close()
        return 1
    if result['error_msg'] == 'pic not has face':
        print('检测不到人脸')
        engine.say('检测不到人脸')
        engine.runAndWait()
        time.sleep(2)
        return 0
    else:
        # BUG FIX: the original printed error_code twice and would raise
        # TypeError concatenating an int code with a string; print both
        # the code and the message instead.
        print(result['error_code'], result['error_msg'])
        return 0

if __name__ == '__main__':
    # Configure the Raspberry Pi GPIO pin driving the LED.
    GPIO.setmode(GPIO.BOARD)    # BOARD numbering: physical header pin numbers
    GPIO.setup(11, GPIO.OUT)    # output mode

    # MQTT connection credentials for OneNET.
    client_id = DEV_NAME
    username = PRO_ID
    password = token(PRO_ID, DEV_NAME, DEV_KEY)
    print('username:' + username)
    print('password:' + password)

    client = mqtt.Client(client_id=client_id, clean_session=True, protocol=mqtt.MQTTv311)
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_publish = on_publish
    client.on_subscribe = on_subscribe
    client.on_disconnect = on_disconnect
    client.username_pw_set(username=username, password=password)
    # client.tls_set(ca_certs='MQTTS-certificate.pem')  # TLS needs the CA certificate
    # client.tls_insecure_set(True)                     # disable certificate verification
    client.connect(HOST, int(PORT), keepalive=1200)

    # Publish/subscribe topics in the format OneNET requires.
    topic_dp = '$sys/%s/%s/dp/post/json/+' % (username, DEV_NAME)   # upload ACKs
    topic_cmd = '$sys/%s/%s/cmd/#' % (username, DEV_NAME)           # incoming commands
    topic_cmds = '$sys/%s/%s/cmd/request/' % (username, DEV_NAME)   # command requests
    topic_publish = '$sys/%s/%s/dp/post/json' % (username, DEV_NAME)

    client.loop_start()

    countId = 0        # message sequence id
    countPerson = 0    # number of recognized visitors
    countAccess = 0    # door state code: 1 = open, 2 = closed

    while True:
        countId += 1
        # (The original wrapped this body in a pointless `if True:`; removed.)
        print("准备")
        # Speech_recognition("ready")
        engine.say('准备')
        engine.runAndWait()

        getimage()            # take a photo
        img = transimage()    # base64-encode it
        res = go_api(img)     # upload the converted image to Baidu cloud

        if res == 1:          # face is in the library
            print("开门")
            # Speech_recognition("开门")
            engine.say('开门')
            engine.runAndWait()
            countPerson += 1
            countAccess = 1
        else:
            print("关门")
            # Speech_recognition("关门")
            engine.say('关门')
            engine.runAndWait()
            countAccess = 2

        print('稍等三秒进入下一个')
        # Speech_recognition("稍等三秒进入下一个")
        engine.say('稍等三秒进入下一个')
        engine.runAndWait()
        time.sleep(3)

        # Publish the counters from the Pi to OneNET in a loop.
        client.publish(topic=topic_publish, payload=data(countId, countPerson, countAccess), qos=1)
        print("-------------------------------------------------------------------------------")
        time.sleep(3)



# 版权声明: original article by F_l_i_p_p_e_d_, licensed CC 4.0 BY-SA; include the
# source link and this notice when reposting.