#!/usr/bin/env python

# This code runs on a Raspberry Pi 3 to control a targeting servo and water
# relay to create a motion-controlled water-blaster.  The primary purpose is
# to discourage deer from eating our roses!  The system uses an IR illuminator
# and NoIR Raspberry Pi camera to make it night capable.

# Uses the cv2 image processing library (based on OpenCV 3) and the pigpio IO control
# library to read images from the picamera, find the location of an object that has
# moved into the scene, then control an aiming servo and a water-valve firing relay.

# Start the code with ./water_blaster.py.  A monitor window will open so you can see
# the targeting video.  An initial reference video frame is captured and subsequent
# captures are compared against it.  If a new object is detected, a green targeting
# rectangle is drawn and the state moves to "Occupied".  If the targeting rectangle
# stops moving for MIN_AQUIRE_TIME seconds, a picture is taken (stored in
# trigger_pictures/trigger_<m_d_y__h_m_s>.jpg) and the water valve is opened and sprays
# for a few seconds.  If too many triggers occur in a given time, the trigger is
# disabled until the next video reference frame update.  This provides some "self-healing"
# to minimize false triggering when something in the reference frame has changed
# (perhaps due to a cloud, a light change, etc.).
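
# The core detection step is standard OpenCV frame differencing.  As a rough sketch
# (illustrative variable names, not the exact ones used below):
#
#    gray  = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
#    delta = cv2.absdiff(referenceFrame, gray)                     # per-pixel difference
#    mask  = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]   # keep only large changes
#    mask  = cv2.dilate(mask, None, iterations=2)                  # fill in small holes
#
# Any remaining white blobs (contours) in the mask are treated as a moving object.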

# Logs all activity to "log_<m_d_y__h_m_s>", where the suffix is the date/time the program was started.

# The code uses the pigpio package as it gives a very stable 1 us resolution for the
# servo control.  It requires that the Pi is running the pigpio daemon (pigpiod), which I've
# added to the /etc/rc.local startup file.  Using the regular RPi.GPIO package instead
# resulted in a shaky servo.  For pigpio documentation see: https://pypi.python.org/pypi/pigpio
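
# As a rough minimal sketch (assuming pigpiod is already running), servo control with
# pigpio looks like this; pulse widths are given in microseconds:
#
#    import pigpio
#    pi = pigpio.pi()                      # connect to the local pigpiod daemon
#    if not pi.connected:                  # connected is False if the daemon isn't up
#        raise SystemExit("pigpiod is not running")
#    pi.set_servo_pulsewidth(18, 1500)     # ~1.5 ms pulse, roughly servo center
#    pi.set_servo_pulsewidth(18, 0)        # 0 stops the servo pulses
#    pi.stop()                             # release the connection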

#  Lines to include in /etc/rc.local
#
#    # Set up /dev/video0 to link to Raspberry Pi built in camera interface
#    modprobe bcm2835-v4l2
#    
#    # Start the pigpio daemon for the Raspberry Pi IO control library
#    pigpiod


# Many thanks to Adrian Rosebrock for his cv2 image processing code samples and OpenCV
# installation instructions at:
# https://www.pyimagesearch.com/2015/06/01/home-surveillance-and-motion-detection-with-the-raspberry-pi-python-and-opencv


# dlf 8/18/2017

# Import the necessary packages
import argparse
import datetime
from datetime import timedelta
import distutils.dir_util
import imutils
import time
import cv2
import pigpio

startTime = datetime.datetime.now()
print("Date: %s" % startTime)
logfile = open("log_" + time.strftime("%m_%d_%Y__%H_%M_%S") , "w")
logfile.write("Start monitoring\n")

# Pin definitions
TRIGGER = 17                # The pin that will drive the trigger relay
IR = 27                     # The pin that will drive the IR illuminator
SERVO = 18                  # The PWM pin that controls the tracking servo
DEBUG_SWITCH = 23           # The pin hooked to a switch to put the shooter into debug mode

# Timing constants
MIN_AQUIRE_TIME = 2         # Target must be acquired and stationary for this many seconds before we take a shot
MAX_SHOTS = 3               # Number of consecutive shots allowed between video reference frame updates

# Video constants
FRAME_WIDTH = 500           # How many pixels we are scaling the video frame to
REF_FRAME_TIME_LIMIT = 120  # Number of seconds before updating the video reference frame
MIN_TIME_FROM_LAST_REF_FRAME_UPDATE = 10 # Number of seconds that must pass after a frame update before allowing a shot
TARGET_MOVEMENT_THRESHOLD = 50 # Number of pixels the target can move and still be considered "still"
MIN_CONTOUR_AREA = 16       # If the object detected has an area smaller than this many sq-pixels, ignore it
THRESHOLD_SENSITIVITY = 25  # Object detection sensitivity.  25-50 seems to work well.  Smaller is more sensitive.
BLUR_SIZE = 21              # Gaussian blur kernel size used to smooth the frame (must be odd)

# Set to adjust the amount of servo travel (1000 = 1ms => min-arc, 2000 = 2ms => max-arc)
SERVO_MAX_RANGE = 1850       # Sets the end points of the servo travel
SERVO_MIN_RANGE = 1250
SERVO_CENTER_ADJ = 10        # To calibrate the servo center to the center of the frame.  Adjust as necessary.
SERVO_TRIGGER_SWEEP = 50     # How far we will sweep the servo back and forth when shooting
SERVO_CENTER = SERVO_MIN_RANGE + (SERVO_MAX_RANGE - SERVO_MIN_RANGE)/2 + SERVO_CENTER_ADJ
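# With the values above, SERVO_CENTER works out to 1250 + (1850 - 1250)/2 + 10 = 1560, i.e. about a 1.56 ms pulse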

# Initialize the servo and firing gpios
pi = pigpio.pi()
pi.set_mode(SERVO, pigpio.OUTPUT)
pi.set_mode(TRIGGER, pigpio.OUTPUT)
pi.set_mode(IR, pigpio.OUTPUT)
pi.set_mode(DEBUG_SWITCH, pigpio.INPUT)
pi.set_pull_up_down(DEBUG_SWITCH, pigpio.PUD_UP)
pi.write(TRIGGER, 0)
pi.write(IR, 1)

# Initialize vars
firstFrame = None                         # The reference frame
refFrameTime = datetime.datetime.now()    # When the reference frame was taken
monitorText = "Unoccupied"                # The text added to the video monitor window
targetFirstAquiredTime = 0                # Time when we first detected the current target
maxShotCount = 1                          # Keep track of how many shots we've taken since the last reference frame update
totalShots = 0                            # Total number of shots taken
lastTriggerX = 0                          # X coordinate of the previous target position
lastTriggerY = 0                          # Y coordinate of the previous target position
forceRefresh = False                      # Flag to force a frame update if needed

# Set up a directory to save pictures to
distutils.dir_util.mkpath("trigger_pictures")

# Connect to the camera
camera = cv2.VideoCapture(0)
time.sleep(0.25)

# Start the servo at the calibrated center position (~1.5 ms pulse)
pi.set_servo_pulsewidth(SERVO, SERVO_CENTER)

# Main loop.  Look for motion, track the target, and if it pauses for a few seconds, blast it
while True:

    # We have a switch on the board that, when pressed, pulls the DEBUG_SWITCH pin low.
    # We use that later in the loop to enable/disable certain functionality.
    debugging = 0
    if pi.read(DEBUG_SWITCH) == 0:
        debugging = 1

    # Grab the current frame from the camera
    (grabbed, frame) = camera.read()

    # If a frame could not be read, stop monitoring (the camera has likely gone away)
    if not grabbed:
        break

    # If this is the first pass, warm up the camera so we get a stable image
    if firstFrame is None:
        print("Warming up the camera")
        time.sleep(3)
        (grabbed, frame) = camera.read()


    # Resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=FRAME_WIDTH)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (BLUR_SIZE, BLUR_SIZE), 0)

    # If we haven't taken a reference frame or it is old, then (re)initialize it
    lastRefFrame = datetime.datetime.now() - refFrameTime
    if firstFrame is None or lastRefFrame.seconds > REF_FRAME_TIME_LIMIT or forceRefresh is True:
        firstFrame = gray
        refFrameTime = datetime.datetime.now()
        txt = "Video reference frame updated at " + time.strftime("%m_%d_%Y__%H_%M_%S")
        logfile.write(txt + "\n")
        print(txt)
        maxShotCount = 1
        forceRefresh = False
        continue

    # Compute the absolute difference between the current frame and first frame
    frameDelta = cv2.absdiff(firstFrame, gray)

    # Get the areas with differences larger than THRESHOLD_SENSITIVITY
    thresh = cv2.threshold(frameDelta, THRESHOLD_SENSITIVITY, 255, cv2.THRESH_BINARY)[1]

    # Dilate the thresholded image to fill in holes, then find contours on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2) 

    # dlf - the commented-out call below is the OpenCV 2.4 form of findContours
    #(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # dlf - this form is for OpenCV 3, which returns (image, contours, hierarchy)
    (_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Loop over the contours and find the largest one
    largestArea = 0
    xl = 0
    yl = 0
    wl = 0
    hl = 0
    for c in cnts:
        # Compute the bounding box and area of this contour
        (x, y, w, h) = cv2.boundingRect(c)
        area = w * h

        # If the contour is too small, ignore it
        if area < MIN_CONTOUR_AREA:
            continue

        if area > largestArea:
            xl = x
            yl = y
            wl = w
            hl = h
            largestArea = area

    # Compute the center of the largest contour's bounding box
    centerX = xl + wl // 2
    centerY = yl + hl // 2

    if lastTriggerX == 0:
        lastTriggerX = centerX
    if lastTriggerY == 0:
        lastTriggerY = centerY

    # Draw a small targeting box at the center of the largest contour
    #cv2.rectangle(frame, (xl, yl), (xl + wl, yl + hl), (0, 255, 0), 2)
    if xl != 0 and yl != 0:
        cv2.rectangle(frame, (centerX-10,centerY-10), (centerX+10,centerY+10), (0, 255, 0), 2)

    # If there is a target and it hasn't moved more than TARGET_MOVEMENT_THRESHOLD pixels
    if largestArea != 0 and abs(lastTriggerX - centerX) < TARGET_MOVEMENT_THRESHOLD \
                        and abs(lastTriggerY - centerY) < TARGET_MOVEMENT_THRESHOLD:
        monitorText = "Occupied"
        if targetFirstAquiredTime == 0:
            targetFirstAquiredTime = datetime.datetime.now()
    else:
        monitorText = "Unoccupied"
        targetFirstAquiredTime = 0
        lastTriggerX = centerX
        lastTriggerY = centerY

    # Aim the servo
    # Map the target's X position in the FRAME_WIDTH-pixel frame onto the servo pulse-width
    # range (SERVO_MIN_RANGE to SERVO_MAX_RANGE), then apply the center calibration offset
    if monitorText == "Unoccupied":
        duty = SERVO_CENTER
    else:
        duty = SERVO_MIN_RANGE + (((centerX * 1.0) / (FRAME_WIDTH * 1.0) + 
               SERVO_CENTER_ADJ*1.0/(SERVO_MAX_RANGE - SERVO_MIN_RANGE)*1.0) * 
               (SERVO_MAX_RANGE - SERVO_MIN_RANGE))
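
    # With the defaults above, a target at the middle of the frame (centerX = 250) maps to
    # 1250 + (250/500)*600 + 10 = 1560 us (i.e. SERVO_CENTER), and the frame edges map to
    # roughly 1260 and 1860 us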

    pi.set_servo_pulsewidth(SERVO,duty)

    if monitorText == "Occupied":
        aquiredDelta = datetime.datetime.now() - targetFirstAquiredTime
        lastRefFrameUpdateDelta = datetime.datetime.now() - refFrameTime

        # Don't try to fire if we acquired the target too soon after a frame update (the scene was not settled)
        if aquiredDelta.seconds >= MIN_AQUIRE_TIME:
            if lastRefFrameUpdateDelta.seconds <= MIN_TIME_FROM_LAST_REF_FRAME_UPDATE and totalShots != 0:
                forceRefresh = True
                txt = "Trying to shoot before frame is settled.  Force a frame refresh..."
                logfile.write(txt + "\n")
                print(txt)
                continue

            # Fire the trigger if the target is acquired (unless in debug mode, in which case never fire)
            if maxShotCount <= MAX_SHOTS and not debugging:

                totalShots += 1
                loc = 'X:{} Y:{}'.format(centerX, centerY)
                txt = "Shot " + str(maxShotCount) + " " + loc + " at " + time.strftime("%m_%d_%Y__%H_%M_%S") + \
                      " (Total Shots: " + str(totalShots) + ")"
                logfile.write(txt + "\n")
                print(txt)
                pix = "trigger_pictures/trigger_" + time.strftime("%m_%d_%Y__%H_%M_%S") + ".jpg"
                cv2.imwrite(pix,frame)
                maxShotCount += 1

                # The power supply I'm using is maxed out with everything turned on...
                # While sweeping the servo and activating the valve relay, turn off the IR illuminator to save power
                pi.write(IR, 0)
                sweep = SERVO_TRIGGER_SWEEP
                # Shoot for a few seconds, swiveling the servo back and forth for a scatter-shot spray
                for shooting_count in range(11):
                    pi.write(TRIGGER, 1)
                    if shooting_count % 2 == 0:
                        sweep_duty = duty + sweep
                    else:
                        sweep_duty = duty - sweep
                    pi.set_servo_pulsewidth(SERVO, sweep_duty)
                    time.sleep(.3)


                # Turn the IR illuminator back on
                pi.write(IR, 1)
                targetFirstAquiredTime = 0
                pi.write(TRIGGER,0)
                if maxShotCount > MAX_SHOTS:
                    txt = "Max shot limit of " + str(MAX_SHOTS) + " reached.  Force a frame refresh..."
                    logfile.write(txt + "\n")
                    print(txt)
                    forceRefresh = True
            else:
                pi.write(TRIGGER,0)

    # Draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(monitorText), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), \
                cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # Display the frame 
    cv2.imshow("Security Feed", frame)

    # Uncomment these if you want to see the threshold and delta video output (mainly for debugging)
    ##cv2.imshow("Thresh", thresh)
    ##cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # If the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# Clean up the camera and close any open windows
logfile.write("Stop monitoring at " + time.strftime("%m_%d_%Y__%H_%M_%S") + "\n")
logfile.close()
camera.release()
cv2.destroyAllWindows()
pi.write(TRIGGER, 0)
pi.write(IR, 0)
pi.stop()

