Arduino + 树莓派等于AI汽车
Arduino 和树莓派是两款最知名的开发板,但它们的定位不同:Arduino 是一块微控制器板,目标是让用户用 C++ 轻松编程;树莓派则是一台以合理价格提供完整计算能力的单板计算机,更适合用 Python 编写代码。
从这个方面来看,组合它们起来进行控制会很有趣。 arduino很难读取DNN网络,但很容易配置输出通道和PWM。同样,我将进行一个简单的程序来演示如何从树莓派接收结果并对结果做出反应。树莓派用于Keras图像识别,简单易懂。
本文将分为两部分,第一部分是arduino部分,第二部分是树莓派部分,两部分都将分为软件和硬件部分。
arduino 硬件提示
红色和黑色的电源必须如下,否则树莓派和arduino将被损坏
arduino代码关键点
串行
我们将串口设置为9600并设置Left_motor_go,PIN 8(PWM); Left_motor_back,PIN 9(PWM); Right_motor_go,PIN 10(PWM); Right_motor_back,PIN 11(PWM)作为输出。
// Open the serial link to the Raspberry Pi at 9600 baud and configure
// the four motor-driver control pins as outputs.
Serial.begin(9600);
pinMode(Left_motor_go,OUTPUT); // PIN 8 (PWM)
pinMode(Left_motor_back,OUTPUT); // PIN 9 (PWM)
pinMode(Right_motor_go,OUTPUT);// PIN 10 (PWM)
pinMode(Right_motor_back,OUTPUT);// PIN 11 (PWM)
原地左转
如果需要原地左转,就要让右轮前进、左轮后退。下面的代码将 Right_motor_go 置高、Right_motor_back 置低(右轮前进);左边则相反,把 Left_motor_go 置低、Left_motor_back 置高(左轮后退)。然后右轮前进方向和左轮后退方向各给一个 0-255 范围内的 PWM 值(代码中为 SLOW_SPEED),小车就会原地向左旋转。
// Spin left in place: right wheel drives forward, left wheel drives
// backward, both at SLOW_SPEED.
digitalWrite(Right_motor_go,HIGH); //Set right motor as forward
digitalWrite(Right_motor_back,LOW);
analogWrite(Right_motor_go,SLOW_SPEED); //PWM of right motor [0, 255]
analogWrite(Right_motor_back,0);
digitalWrite(Left_motor_go,LOW); //Set left motor as backward (pivot turn)
digitalWrite(Left_motor_back,HIGH);
analogWrite(Left_motor_go,0); //PWM of left motor [0, 255]
analogWrite(Left_motor_back,SLOW_SPEED);
// delay(time * 100); //time interval
左行
如果需要在行进中左转,就要让右轮前进、左轮停止。下面的代码将 Right_motor_go 置高、Right_motor_back 置低;左边则把 go 和 back 模式都置低。然后给右轮一个 150 的 PWM 值(范围 0-255),左轮停转,小车便会一边前进一边向左转弯。
// Moving left turn: right wheel forward at PWM 150, left wheel fully
// stopped (both direction pins LOW, zero PWM).
digitalWrite(Right_motor_go,HIGH);    // right motor forward
digitalWrite(Right_motor_back,LOW);
analogWrite(Right_motor_go,150);      // right wheel PWM [0, 255]
analogWrite(Right_motor_back,0);
digitalWrite(Left_motor_go,LOW);      // left motor disabled in both directions
digitalWrite(Left_motor_back,LOW);
analogWrite(Left_motor_go,0);
analogWrite(Left_motor_back,0);
// delay(time * 100); //time interval
将arduino引脚2,3,5和6分别连接到树莓派引脚4,17,27和22。
以下代码把四个输入引脚上读到的二进制位组合成一个十进制数。
// Decode the 4-bit command from the Raspberry Pi: readBit0..readBit3 are
// binary weights 1/2/4/8, giving a mode value in [0, 15].
int mode = digitalRead(readBit0)*1+digitalRead(readBit1)*2+digitalRead(readBit2)*4+digitalRead(readBit3)*8;
从树莓派读取gpio引脚并转换为0-9数字编号。
因此,我们可以设置一张表,把 0-9 映射到左转、右转、后退、直行等动作。例如,0 表示直行,2 表示后退,3 表示左转。
树莓派代码关键点
就像在控制台上打印 “Hello world!” 是每个程序员的第一课一样,训练 MNIST 手写数字模型就是深度学习程序员的入门练习。
这是一个Keras模型可以很好地处理几个卷积层,然后是最终输出阶段。 完整的train-mnist.py代码在我的GitHub上,而这里有一个快速的代码片段向您展示这一点。
# Build a small CNN for 28x28x1 MNIST digits: three Conv/MaxPool stages,
# then a dense classifier with dropout and a 10-way softmax output.
from keras import layers, models
model = models.Sequential()
model.add(layers.Conv2D(16, 3, activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPool2D())
model.add(layers.Conv2D(32, 3, activation='relu'))
model.add(layers.MaxPool2D())
model.add(layers.Conv2D(64, 3, activation='relu'))
model.add(layers.MaxPool2D())
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
# One-hot labels, so categorical (not sparse) cross-entropy.
model.compile(optimizer='adam', metrics=['accuracy'], loss='categorical_crossentropy')
# x_train / y_train / test_image come from the full train-mnist.py script
# (not shown here) — presumably the standard keras.datasets.mnist split.
history = model.fit(x_train, y_train, epochs=2, batch_size=128)
output = model.predict(test_image.reshape(1, 28, 28, 1))[0]
print("Keras \r\n", output, '\r\nPredicted:',output.argmax())
训练后,将模型和权重保存到两个单独的文件中。
# Persist the architecture (JSON) and the learned weights (HDF5) as two
# separate files.
with open("model.json", "w") as file:
    file.write(model.to_json())
model.save_weights("weights.h5")
或者,您可以调用model.save('model.h5',include_optimizer = False)将模型保存在一个文件中,注意我们通过将include_optimizer设置为False来排除优化器。
Main program:
我们必须将相机设置为640 * 480并导入raspberry pi GPIO引脚控制库--gpiozero。
#!/usr/bin/env python3
# Raspberry Pi side: run MNIST inference on camera frames and report the
# predicted digit to the Arduino as a 4-bit code on four GPIO lines.
from keras import layers
from keras import models
from keras.models import load_model
from keras.utils import to_categorical
from keras import backend as K
from gpiozero import LED
from time import sleep

# One GPIO line per binary weight of the digit (BCM pins 4/17/27/22,
# wired to Arduino pins 2/3/5/6).
led1 = LED(4)   # bit 0
led2 = LED(17)  # bit 1
led4 = LED(27)  # bit 2
led8 = LED(22)  # bit 3

import numpy as np
import cv2
from ImageProcessor import ImageProcessor

# name of the opencv window
cv_window_name = "MNIST Camera"
CAMERA_INDEX = 0
REQUEST_CAMERA_WIDTH = 640
REQUEST_CAMERA_HEIGHT = 480
为arduino定义led引脚的开启和关闭
def ClintonLED(pre_no):
    """Encode the predicted digit ``pre_no`` onto the four GPIO lines
    read by the Arduino, as a 4-bit binary number.

    led1/led2/led4/led8 carry bits 0-3 respectively, so e.g. 5 turns
    led1 and led4 on and led2 and led8 off.  Values outside 0-9 are
    encoded the same way from their low four bits (the original table
    only covered 0-9).
    """
    # The original 10-branch if/elif table was exactly this binary
    # decomposition, except that it forgot the debug print for
    # pre_no == 2 — fixed by printing unconditionally here.
    for led, bit in ((led1, 1), (led2, 2), (led4, 4), (led8, 8)):
        if pre_no & bit:
            led.on()
        else:
            led.off()
    print('i am', pre_no)
按 Q 或 q 退出程序;按 W/S 和 A/D 分别增减图像处理器的阈值参数 p1 和 p2。
# handles key presses
# raw_key is the return value from cv2.waitKey
# returns False if program should end, or True if should continue
def handle_keys(raw_key):
    """Process one cv2.waitKey code.

    Returns False when the user asked to quit (q/Q), True otherwise.
    w/s raise/lower processor.p1 and a/d raise/lower processor.p2
    (ImageProcessor tuning parameters) in steps of 10.
    """
    global processor
    ascii_code = raw_key & 0xFF  # waitKey packs modifier bits above the low byte
    if ascii_code in (ord('q'), ord('Q')):
        return False
    elif ascii_code == ord('w'):
        processor.p1 += 10
        print('processor.p1:' + str(processor.p1))
    elif ascii_code == ord('s'):
        processor.p1 -= 10
        print('processor.p1:' + str(processor.p1))
    elif ascii_code == ord('a'):
        processor.p2 += 10
        print('processor.p2:' + str(processor.p2))
    elif ascii_code == ord('d'):
        processor.p2 -= 10
        # BUG FIX: the original printed the label "processor.p1:" here
        # while showing the value of p2.
        print('processor.p2:' + str(processor.p2))
    return True
# Test image
# Global ImageProcessor instance, shared by the main loop and handle_keys().
processor = ImageProcessor()
# input_image = cv2.imread(test_image)
# cropped_input = processor.preprocess_image(input_image)
加载模型结构和权重,并启动视频窗口。
# Load the trained MNIST model (architecture + weights in one .h5 file).
model = load_model('model.h5')
#
# Create the preview window and open the camera, requesting 640x480.
cv2.namedWindow(cv_window_name)
cv2.moveWindow(cv_window_name, 10, 10)
cap = cv2.VideoCapture(CAMERA_INDEX)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, REQUEST_CAMERA_WIDTH)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, REQUEST_CAMERA_HEIGHT)
# The driver may not honour the requested size, so read back the actual one.
actual_frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
actual_frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print ('actual video resolution: ' + str(actual_frame_width) + ' x ' + str(actual_frame_height))
检查相机是否成功打开。
# Abort early when the camera could not be opened.  cap.isOpened() is the
# reliable check; `is None` replaces the original `== None` (PEP 8 E711).
if cap is None or not cap.isOpened():
    print('Could not open camera. Make sure it is plugged in.')
    # print('file name:' + input_video_file)
    print('Also, if you installed python opencv via pip or pip3 you')
    print('need to uninstall it and install from source with -D WITH_V4L=ON')
    print('Use the provided script: install-opencv-from_source.sh')
    exit_app = True
    exit()
exit_app = False
while(True):
    # "All bits high" is the idle code: tells the Arduino there is no
    # confident prediction yet.
    led1.on()
    led2.on()
    led4.on()
    led8.on()
    ret, input_image = cap.read()
    # Crop the frame down to the 28x28 input the model expects, then
    # compute the confidence and drive the GPIO lines accordingly.
    if (not ret):
        print("No image from from video device, exiting")
        break
    # check if the window is visible, this means the user hasn't closed
    # the window via the X button
    prop_val = cv2.getWindowProperty(cv_window_name, cv2.WND_PROP_ASPECT_RATIO)
    if (prop_val < 0.0):
        exit_app = True
        break
    cropped_input, cropped = processor.preprocess_image(input_image)
    # Run inference; output is the 10-way softmax vector for this frame.
    output = model.predict(cropped_input.reshape(1, 28, 28, 1))[0]
    predict_label = output.argmax()
    pre_no = output.argmax()
    percentage = int(output[predict_label] * 100)
    label_text = str(predict_label) + " (" + str(percentage) + "%)"
    print('Predicted:',label_text)
    processor.postprocess_image(input_image, percentage, label_text)
    cv2.imshow(cv_window_name, input_image)
    if (percentage >=95):
        # Confident prediction: send the digit to the Arduino and hold
        # it for a second so the car has time to react.
        ClintonLED(pre_no)
        sleep(1)
    elif (percentage <95):
        # Not confident: keep the idle "all high" code.
        led1.on()
        led2.on()
        led4.on()
        led8.on()
    raw_key = cv2.waitKey(1)
    if (raw_key != -1):
        if (handle_keys(raw_key) == False):
            exit_app = True
            break
# Release the camera once the loop exits.
cap.release()
DEMO
示例代码
评论