import datetime
import math
import cv2
import numpy as np

# global variables
width = 0
height = 0
EntriesCounter = 0
ExitsCounter = 0
MinContourArea = 3000        # empirical value; adjust as required
BinarizationThreshold = 70   # empirical value; adjust as required
RefLinesOffset = 150         # empirical value; adjust as required

# Checks whether the detected body is crossing the entry line of the monitored zone
def CheckEntryLineCrossing(y, EntryLineY, ExitLineY):
    # the centre counts as "on the line" when within 2 px of it (empirical tolerance)
    AbsDifference = abs(y - EntryLineY)

    if (AbsDifference <= 2) and (y < ExitLineY):
        return 1
    else:
        return 0

# Checks whether the detected body is crossing the exit line of the monitored zone
def CheckExitLineCrossing(y, EntryLineY, ExitLineY):
    AbsDifference = abs(y - ExitLineY)

    if (AbsDifference <= 2) and (y > EntryLineY):
        return 1
    else:
        return 0
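
# Example with hypothetical values: with EntryLineY = 90 and ExitLineY = 390,
# CheckEntryLineCrossing(91, 90, 390) returns 1 (the centre is within 2 px of
# the entry line and above the exit line), while CheckEntryLineCrossing(200, 90, 390)
# returns 0.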

camera = cv2.VideoCapture(0)

# force the camera to 640x480 resolution
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
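
# note: some cameras silently ignore resolution requests they do not support,
# which is why the actual frame dimensions are read back from the captured
# frame inside the main loop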

FirstFrame = None

# perform a few frame reads before starting the analysis.
# Reason: some cameras take a while to adapt to the ambient light when they start up,
# capturing consecutive frames with too much brightness. To keep this effect out of
# the image processing, a few throwaway captures are made first, giving the camera
# time to get accustomed to the brightness of the environment.

for i in range(0, 20):
    (grabbed, Frame) = camera.read()

while True:
    # reads the current frame and determines the image resolution
    (grabbed, Frame) = camera.read()

    # if it was not possible to grab a frame, nothing else should be done
    if not grabbed:
        break

    height = np.size(Frame, 0)
    width = np.size(Frame, 1)

    # converts the frame to grayscale and applies a Gaussian blur
    # (smoothing out noise so the relevant contours stand out)
    FrameGray = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    FrameGray = cv2.GaussianBlur(FrameGray, (21, 21), 0)

    # as the comparison is made between two subsequent images, the reference frame
    # is initialized on the first pass through the loop
    if FirstFrame is None:
        FirstFrame = FrameGray
        continue

    # computes the absolute difference between the initial frame and the current
    # frame (background subtraction), then binarizes the subtracted frame
    FrameDelta = cv2.absdiff(FirstFrame, FrameGray)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]
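    # with THRESH_BINARY, pixels whose difference exceeds BinarizationThreshold
    # become 255 (white) and all others become 0 (black)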
    
    # dilates the binarized frame to eliminate "holes" (black zones) inside the
    # detected white regions, so that each detected object is treated as a single
    # solid white mass. The contours are then found on the dilated frame.
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
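    # note: the 3-value unpacking above follows the OpenCV 3.x API. In OpenCV
    # 2.x and 4.x, findContours() returns only (contours, hierarchy); a
    # version-agnostic alternative (an assumption, not part of the original) is:
    #   cnts = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL,
    #                           cv2.CHAIN_APPROX_SIMPLE)
    #   cnts = cnts[0] if len(cnts) == 2 else cnts[1]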

    ContourCount = 0

    # draws the reference lines
    EntryLineY = (height // 2) - RefLinesOffset
    ExitLineY = (height // 2) + RefLinesOffset
    cv2.line(Frame, (0, EntryLineY), (width, EntryLineY), (255, 0, 0), 2)
    cv2.line(Frame, (0, ExitLineY), (width, ExitLineY), (0, 0, 255), 2)
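    # colours are in BGR order: the entry line is drawn in blue, the exit line in red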


    # iterates over all contours found
    for c in cnts:
        # contours with too small an area are ignored
        if cv2.contourArea(c) < MinContourArea:
            continue

        # for debugging purposes, counts the number of contours found
        ContourCount = ContourCount + 1

        # gets the contour coordinates (in fact, of a bounding rectangle that
        # spans the entire contour) and highlights the contour with a rectangle
        (x, y, w, h) = cv2.boundingRect(c)  # x and y: coordinates of the upper-left vertex
                                            # w and h: width and height of the rectangle, respectively

        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # determines the center point of the contour and draws a circle to indicate
        Coordination XCentre Contour = (x + x + w) / 2
	Coordinate Y Center Contour = (y + y + h) / 2
        CenterSetpoint = ( CenterSet Center, CenterSet Center )
        cv2.circle (Frame, ContourCentral, 1 , ( 0 , 0 , 0 ), 5 )
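        # e.g. a bounding box with x = 100 and w = 50 has its centre at
        # X = (100 + 100 + 50) // 2 = 125, equivalent to x + w // 2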
        
        # tests the intersection of the contour centre with the reference lines;
        # this counts which contours crossed which line (i.e. in which direction)
        if CheckEntryLineCrossing(ContourCenterY, EntryLineY, ExitLineY):
            EntriesCounter += 1

        if CheckExitLineCrossing(ContourCenterY, EntryLineY, ExitLineY):
            ExitsCounter += 1

        # if necessary, uncomment the lines below to show the intermediate
        # frames used in the image processing
        # cv2.imshow("Binarized frame", FrameThresh)
        # cv2.waitKey(1)
        # cv2.imshow("Frame with background subtraction", FrameDelta)
        # cv2.waitKey(1)


    print("Contours found: " + str(ContourCount))

    # writes on the image the number of people who entered or left the monitored area
    cv2.putText(Frame, "Entries: {}".format(str(EntriesCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitsCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Original", Frame)
    cv2.waitKey(1)
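    # waitKey(1) is what lets imshow() refresh the window; as written, the loop
    # only ends when the camera stops delivering frames. Optionally, replace the
    # waitKey(1) call above with the snippet below (an addition, not in the
    # original) to allow quitting with the 'q' key:
    #   if cv2.waitKey(1) & 0xFF == ord('q'):
    #       break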


# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()