Women Alerting System

by JRajashekarJ in Circuits > Arduino


Women Alerting System

Many women are afraid of strangers and worry about their safety; around 80% of the women in
our country have fears about their safety. In past decades women rarely stepped out of the
house for work, so they were exposed to less risk. Today women want to be employed and work
outside, but safety is still lacking. In this project we use hand-gesture movements with a
pattern-recognition approach: the user points her fingers or hand in front of a camera, and
when the danger gesture is recognized the system sends her location to a contact by SMS, using
a GPS module and the Fast2SMS service. Most existing systems rely on the mobile phone to
detect unsafe situations, for example using the microphone to detect a scream or the camera to
take pictures. When a woman faces an unsafe situation, an automatic detection system should
send an alert message to the police and to nearby people. This can also be done by sensing
factors such as abnormal sounds and body reactions like shivering, sweating, and an elevated
heartbeat, which can be measured with sensors and combined into an alert message that
indicates whether the woman is safe.
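
The detection logic used later in this project follows a simple idea: classify each camera
frame into a gesture label, require a few consecutive matching detections so that a single
noisy prediction does not trigger anything, and only then raise the alert (buzzer, plus an SMS
carrying the GPS location). A minimal, self-contained sketch of that idea is shown below;
classify() and send_alert() are hypothetical stand-ins, not the project's actual functions.

import random

def classify(frame):
    # Hypothetical stand-in for the TensorFlow gesture classifier used later:
    # returns a gesture label and a confidence score.
    return random.choice(["a", "b", "nothing"]), random.random()

def send_alert():
    # Hypothetical stand-in for the buzzer + GPS + Fast2SMS alerting step.
    print("ALERT: help gesture confirmed, sending SMS with location...")

ALERT_GESTURE = "b"   # the gesture that means "I need help"
REQUIRED_HITS = 3     # consecutive detections needed before alerting

hits = 0
for frame in range(100):              # stands in for a stream of camera frames
    label, score = classify(frame)
    if label == ALERT_GESTURE and score > 0.8:
        hits += 1
    else:
        hits = 0
    if hits >= REQUIRED_HITS:
        send_alert()
        hits = 0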
Literature Survey

Islam et al. proposed “Design and Implementation of Women Auspice System”.

About

These days an individual's safety is at stake, whether due to ill health or due to increasing
crimes such as sexual assault, molestation, and abuse. To help prevent this to some extent,
this project proposes an automated wearable smart device with access to the internet (IoT). A
small camera is used to spot the pre-programmed danger gesture, an Arduino acts on the
detected gesture, a buzzer alerts the people around the victim, and a GPS module supplies the
location, which is sent as an SMS using Fast2SMS. Today there are many such cases reported
involving women.
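
The split of work between the PC and the Arduino is therefore simple: the Python script
recognizes the gesture and writes a one-character command over the serial port, and the
Arduino reacts by sounding the buzzer or replying with the current GPS reading, which the
Python script then forwards through Fast2SMS. The sketch below illustrates that serial
exchange under the same assumptions as the main script (Arduino on COM7 at 9600 baud, 'a' for
the buzzer, 'b' for a one-line GPS reply); it is an illustration, not the final code.

import serial

# Same port and baud rate as the main script; adjust "COM7" to your setup.
port = serial.Serial("COM7", 9600, timeout=5)

port.write(b'a')   # ask the Arduino to sound the buzzer

port.write(b'b')   # ask the Arduino for the current GPS location
location = port.readline().decode("utf-8").strip()
print("GPS reply from Arduino:", location)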

Required Components

Arduino
GPS module
GSM/SMS provider (Fast2SMS)
Camera (USB webcam)
Buzzer
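
Before running the main script, it can help to confirm that the webcam and the Arduino's
serial port are actually reachable from the PC. A small check along these lines will do; the
port name "COM7" and camera index 0 are assumptions taken from the main script, so adjust them
to your setup.

import cv2
import serial

# Check the webcam: try to grab one frame.
cap = cv2.VideoCapture(0)
ok, _ = cap.read()
print("Camera OK" if ok else "Camera not found or not readable")
cap.release()

# Check the serial port: try to open and close it.
try:
    port = serial.Serial("COM7", 9600, timeout=2)
    port.close()
    print("Serial port COM7 OK")
except serial.SerialException as e:
    print("Serial port problem:", e)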

Python Code to Run

import sys
import os
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import copy
import cv2
import serial
import requests
port = serial.Serial("COM7",9600)
# Disable tensorflow compilation warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
def sms(data):
    url = "https://www.fast2sms.com/dev/bulk"
    API_KEY = "tHufxgSevhIbzp56CUARdXLE3DNiYcBnKFZlT70M1JOaWqswVre8lKRqUwW0ZCcQVbJjmpTa9FPrMNv5"
    msg = data
    payload = "sender_id=FSTSMS&message=" + msg + "&language=english&route=p&numbers=7382980203"
    headers = {
        'authorization': API_KEY,
        'Content-Type': "application/x-www-form-urlencoded",
        'Cache-Control': "no-cache",
    }
    response = requests.request("POST", url, data=payload, headers=headers)
    print(response.text)
    print("SMS has been sent...")

def predict(image_data):
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})
    # Sort to show labels of first prediction in order of confidence
    top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
    max_score = 0.0
    res = ''
    for node_id in top_k:
        human_string = label_lines[node_id]
        score = predictions[0][node_id]
        if score > max_score:
            max_score = score
            res = human_string
    return res, max_score

# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
               in tf.gfile.GFile("logs/output_labels.txt")]

# Unpersists graph from file
with tf.gfile.FastGFile("logs/output_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    _ = tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    # Feed the image_data as input to the graph and get first prediction
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

    c = 0
    cap = cv2.VideoCapture(0)

    res, score = '', 0.0
    i = 0
    mem = ''
    consecutive = 0
    sequence = ''

    aval = 0
    bval = 0
    cval = 0
    dval = 0

    while True:
        ret, img = cap.read()

        if ret:
            img = cv2.flip(img, 1)
            x1, y1, x2, y2 = 100, 100, 300, 300
            img_cropped = img[y1:y2, x1:x2]
            c += 1
            image_data = cv2.imencode('.jpg', img_cropped)[1].tobytes()

            a = cv2.waitKey(1)  # waits to see if `esc` is pressed
            ##
            ## rcv = port.readline()
            ## print("rcv:", rcv)

            # Classify the cropped frame every few iterations
            if i == 4:
                res_tmp, score = predict(image_data)
                res = res_tmp
                ## print(res)

                if res == "a":
                    # Gesture "a": after a few consecutive hits, send command 'a'
                    # to the Arduino
                    aval += 1
                    print("enter")
                    if aval > 3:
                        aval = 0
                        port.write(str.encode('a'))

                elif res == "b":
                    # Gesture "b": after a few consecutive hits, send command 'b'
                    # to the Arduino, read back its reply (the GPS reading) and
                    # forward it by SMS
                    bval += 1
                    if bval > 3:
                        bval = 0
                        port.write(str.encode('b'))
                        rcv = port.readline()
                        rcv = rcv.decode("utf-8")
                        print("rcv:", rcv)
                        # rcv = rcv.split(",")
                        # lat = rcv[0]
                        # lon = rcv[1]
                        # print("lat:", lat)
                        # print("lon:", lon)
                        # loc = "lat:" + lat + " lon:" + lon
                        sms("She needs help....!")
                        sms(rcv)

                i = 0
                if mem == res:
                    consecutive += 1
                else:
                    consecutive = 0
                if consecutive == 2 and res not in ['nothing']:
                    if res == 'space':
                        sequence += ' '
                    elif res == 'del':
                        sequence = sequence[:-1]
                    else:
                        sequence += res
                    consecutive = 0

            i += 1
            cv2.putText(img, '%s' % (res.upper()), (100, 400),
                        cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 4)
            cv2.putText(img, '(score = %.5f)' % (float(score)), (100, 450),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
            mem = res
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
            cv2.imshow("img", img)

            img_sequence = np.zeros((200, 1200, 3), np.uint8)
            cv2.putText(img_sequence, '%s' % (sequence.upper()), (30, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.imshow('sequence', img_sequence)

            if a == 27:  # quit when `esc` is pressed
                break

cv2.destroyAllWindows()
cap.release()
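
Note that the script uses the TensorFlow 1.x graph API (tf.gfile, tf.GraphDef, tf.Session) and
expects a retrained graph and label file under logs/ (output_graph.pb and output_labels.txt).
If only TensorFlow 2.x is installed, one possible workaround (an assumption, not something
tested in this project) is to import the v1 compatibility module in place of the plain import
at the top of the script:

# Possible TensorFlow 2.x workaround (assumption): the v1 compatibility API keeps
# the tf.gfile / tf.GraphDef / tf.Session calls in the script working.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()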