

#-*- coding:utf-8 -*-
import cv2.cv as cv

class CameraCapture(object):
	def __init__(self, index= -1, width=640, height=480, threshold=0, process_ratio=2):
		"""Initialize the capture camera.

		index: camera index passed to cv.CreateCameraCapture (-1 = any camera).
		width, height: requested capture resolution.
		threshold: percentage of changed pixels above which has_moved() is True.
		process_ratio: downscale factor applied before the frame diff is computed.
		"""
		self.threshold = threshold
		# Initialize the capture and grab a first frame
		self._capture = cv.CreateCameraCapture(index)
		if self._capture is None:
			raise Exception("Aucune caméra détectée")
		cv.SetCaptureProperty(self._capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
		cv.SetCaptureProperty(self._capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)
		self._frame1 = self.get_frame()
		self._moved = False
		# Working dimensions (downscaled by process_ratio; Python 2 integer division)
		self._height = self._frame1.height / process_ratio
		self._width = self._frame1.width / process_ratio
		self._resolution = self._height * self._width
		# Storage for the last full-quality frame
		self._last_frame = cv.CreateImage(cv.GetSize(self._frame1), self._frame1.depth, self._frame1.nChannels)
		# Downscaled working image
		self._frame_resized = cv.CreateImage((self._width, self._height), self._frame1.depth, self._frame1.nChannels)
		# Resize and convert the first frame to grayscale (frame at t-1)
		cv.Resize(self._frame1, self._frame_resized)
		self._frame1_grayscale = cv.CreateMat(self._height, self._width, cv.CV_8U)
		cv.CvtColor(self._frame_resized, self._frame1_grayscale, cv.CV_RGB2GRAY)
		# Second grayscale image (frame at t)
		self._frame2_grayscale = cv.CreateMat(self._height, self._width, cv.CV_8U)
		# Storage for the thresholded difference image
		self._diff = cv.CreateMat(self._height, self._width, cv.CV_8U)

	def get_frame(self):
		"""Return one captured frame (IplImage owned by the capture)."""
		return cv.QueryFrame(self._capture)

	def process(self, frame):
		"""Resize `frame` and compute the difference with the previous frame."""
		# Keep a full-quality copy, then downscale the working copy
		cv.Copy(frame, self._last_frame)
		cv.Resize(frame, self._frame_resized)
		# Convert to grayscale
		cv.CvtColor(self._frame_resized, self._frame2_grayscale, cv.CV_RGB2GRAY)
		# Absolute difference between the two most recent frames,
		# then current frame becomes the reference for the next call
		cv.AbsDiff(self._frame1_grayscale, self._frame2_grayscale, self._diff)
		cv.Copy(self._frame2_grayscale, self._frame1_grayscale)
		# Post-process the difference: blur away noise, then binarize
		# (THRESH_BINARY_INV: changed pixels become black / 0)
		cv.Smooth(self._diff, self._diff, cv.CV_BLUR, 2, 2)
		cv.Threshold(self._diff, self._diff, 5, 255, cv.CV_THRESH_BINARY_INV)

	def has_moved(self):
		"""Return True when the percentage of changed pixels exceeds threshold."""
		# Count black pixels (movement) and derive the movement percentage
		black_pixels = self._resolution - cv.CountNonZero(self._diff)
		avg = (black_pixels * 100) / self._resolution
		
		if avg > self.threshold:
			return True
		else:
			return False

	def run(self):
		"""Capture the latest frame and run the processing pipeline once."""
		self.process(self.get_frame())
		if self.has_moved():
			print 'Bougé !'


if __name__ == '__main__':
	# Open the second camera (index 1) and poll for motion forever
	# (stop with Ctrl-C).
	capture = CameraCapture(index=1)
	while True:
		capture.run()

from cv2.cv import *
# Minimal smoke test for the legacy cv API: grab one frame and display it.
# Initialize the camera
capture = CaptureFromCAM(0)  # 0 -> index of camera
if capture:     # Camera initialized without any errors
   NamedWindow("cam-test",CV_WINDOW_AUTOSIZE)
   f = QueryFrame(capture)     # capture the frame
   if f:
       ShowImage("cam-test",f)
       WaitKey(0)
# NOTE(review): this runs even when the window was never created above;
# harmless with HighGUI but worth confirming.
DestroyWindow("cam-test")

from cv2 import *
# Minimal smoke test for the cv2 API: grab one frame and display it.
# initialize the camera
cam = VideoCapture(0)   # 0 -> index of camera
s, img = cam.read()
if s:    # frame captured without any errors
    # BUG FIX: `from cv2 import *` exposes WINDOW_AUTOSIZE; the old
    # CV_WINDOW_AUTOSIZE name lives in cv2.cv and raised a NameError here.
    namedWindow("cam-test", WINDOW_AUTOSIZE)
    imshow("cam-test",img)
    waitKey(0)
    destroyWindow("cam-test")
	
	
	
	
import cv2

# Camera 0 is the integrated web cam on my netbook
camera_port = 0

# Number of frames to throw away while the camera adjusts to light levels
ramp_frames = 30

# Initialize the camera capture object with the cv2.VideoCapture class.
# All it needs is the index to a camera port.
camera = cv2.VideoCapture(camera_port)

def get_image():
    """Capture and return a single frame from the module-level camera."""
    # read() is the easiest way to get a full image out of a VideoCapture.
    retval, im = camera.read()
    return im

# Ramp the camera - these frames are discarded and only used to allow v4l2
# to adjust light levels, if necessary
for i in xrange(ramp_frames):
    temp = get_image()
print("Taking image...")
# Take the actual image we want to keep
camera_capture = get_image()
# Renamed from `file`, which shadowed the Python 2 builtin.
image_path = "/home/codeplasma/test_image.png"
# imwrite chooses the output format from the file extension.
cv2.imwrite(image_path, camera_capture)

# BUG FIX: release the device explicitly instead of relying on `del` and the
# destructor; otherwise a new capture object cannot be created until exit.
camera.release()





import cv2.cv as cv
import time

# Simple live view: display frames from camera 0 until ESC (key code 27).
cv.NamedWindow("camera", 1)

capture = cv.CaptureFromCAM(0)

while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    if cv.WaitKey(10) == 27:
        break


import cv2

# Live view with a centered crosshair drawn on every frame.
# setup video capture
cap = cv2.VideoCapture(0)

ret, im = cap.read()
height, width = im.shape[:2]

while True:
    ret, im = cap.read()

    # Horizontal red line (BGR): one vectorized row assignment replaces the
    # original per-pixel Python loop. `//` keeps the index an int on Python 3.
    im[height // 2, :] = [0, 0, 255]

    # Vertical blue line (BGR), same vectorized form.
    im[:, width // 2] = [255, 0, 0]

    cv2.imshow('video test', im)
    key = cv2.waitKey(10)



import cv

# Live view that cycles through camera indexes when 'n' is pressed.
cv.NamedWindow("w1", cv.CV_WINDOW_AUTOSIZE)
camera_index = 0
capture = cv.CaptureFromCAM(camera_index)

def repeat():
    """Show one frame; pressing 'n' switches to the next camera index."""
    global capture  # declare as globals since we are assigning to them now
    global camera_index
    frame = cv.QueryFrame(capture)
    cv.ShowImage("w1", frame)
    c = cv.WaitKey(10)
    # BUG FIX: cv.WaitKey returns an int key code (or -1), never a str, so
    # the original `c == "n"` comparison could never be true. Mask to the
    # low byte to ignore modifier bits some HighGUI backends set.
    if (c & 255) == ord("n"):  # 'n' pressed while the window has focus
        camera_index += 1  # try the next camera index
        capture = cv.CaptureFromCAM(camera_index)
        if not capture:  # if the next camera index didn't work, reset to 0
            camera_index = 0
            capture = cv.CaptureFromCAM(camera_index)

while True:
    repeat()
	
	
	
	
	
	
	
	
	
	
	
	
	
import cv2.cv as cv
from datetime import datetime
import time

class MotionDetector():

    def onChange(self, val): #callback when the user change the ceil
        self.ceil = val

    def __init__(self,ceil=8, doRecord=True, showWindows=True):
        self.writer = None
        self.font = None
        self.doRecord=doRecord #Either or not record the moving object
        self.show = showWindows #Either or not show the 2 windows
        self.frame = None

        self.capture=cv.CaptureFromCAM(0)
        self.frame = cv.QueryFrame(self.capture) #Take a frame to init recorder
        if doRecord:
            self.initRecorder()

        self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U) #Gray frame at t-1
        cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)

        #Will hold the thresholded result
        self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)

        self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U) #Gray frame at t

        self.width = self.frame.width
        self.height = self.frame.height
        self.nb_pixels = self.width * self.height
        self.ceil = ceil
        self.isRecording = False
        self.trigger_time = 0 #Hold timestamp of the last detection

        if showWindows:
            cv.NamedWindow("Image")
            cv.CreateTrackbar("Mytrack", "Image", self.ceil, 100, self.onChange)

    def initRecorder(self): #Create the recorder
        codec = cv.CV_FOURCC('D', 'I', 'V', 'X')
        #codec = cv.CV_FOURCC("D", "I", "B", " ")
        self.writer=cv.CreateVideoWriter(datetime.now().strftime("%b-%d_%H:%M:%S")+".avi", codec, 15, cv.GetSize(self.frame), 1)
        #FPS set at 15 because it seems to be the fps of my cam but should be ajusted to your needs
        self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font

    def run(self):
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time() #Get timestamp o the frame

            self.processImage(curframe) #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant #Update the trigger_time
                    if instant > started +5:#Wait 5 second after the webcam start for luminosity adjusting etc..
                        print "Something is moving !"
                        if self.doRecord: #set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time +10: #Record during 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c=cv.WaitKey(1)
            if c==27 or c == 1048603: #Break if user enters 'Esc'.
                break

    def processImage(self, frame):
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

        #Absdiff to get the difference between to the frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)

        #Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5,5)
        element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5,  cv.CV_SHAPE_RECT)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)

    def somethingHasMoved(self):
        nb=0 #Will hold the number of black pixels

        for y in range(self.height): #Iterate the hole image
            for x in range(self.width):
                if self.res[y,x] == 0.0: #If the pixel is black keep it
                    nb += 1
        avg = (nb*100.0)/self.nb_pixels #Calculate the average of black pixel in the image
        #print "Average: ",avg, "%\r",
        if avg > self.ceil:#If over the ceil trigger the alarm
            return True
        else:
            return False

if __name__=="__main__":
    detect = MotionDetector(doRecord=False)
    detect.run()




import cv
import sys
 
class Target:
 
def __init__(self):
# CaptureFromFile or CaptureFromCAM
if sys.argv[1] == '-':
self.capture = cv.CaptureFromCAM(0)
else:
self.capture = cv.CaptureFromFile(sys.argv[1])
cv.NamedWindow("Target", 1)
 
def run(self):
# Capture first frame to get size
frame = cv.QueryFrame(self.capture)
frame_size = cv.GetSize(frame)
color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
 
first = True
 
while True:
closest_to_left = cv.GetSize(frame)[0]
closest_to_right = cv.GetSize(frame)[1]
 
color_image = cv.QueryFrame(self.capture)
 
# Smooth to get rid of false positives
cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
 
if first:
difference = cv.CloneImage(color_image)
temp = cv.CloneImage(color_image)
cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
first = False
else:
cv.RunningAvg(color_image, moving_average, 0.020, None)
 
# Convert the scale of the moving average.
cv.ConvertScale(moving_average, temp, 1.0, 0.0)
 
# Minus the current frame from the moving average.
cv.AbsDiff(color_image, temp, difference)
 
# Convert the image to grayscale.
cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
 
# Convert the image to black and white.
cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)
 
# Dilate and erode to get people blobs
cv.Dilate(grey_image, grey_image, None, 18)
cv.Erode(grey_image, grey_image, None, 10)
 
storage = cv.CreateMemStorage(0)
contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
points = []
movementArea = 0
 
while contour:
bound_rect = cv.BoundingRect(list(contour))
contour = contour.h_next()
# Compute the bounding points to the boxes that will be drawn
# on the screen
pt1 = (bound_rect[0], bound_rect[1])
pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
# Add this latest bounding box to the overall area that is being
# detected as movement
movementArea += ( ( pt2[0] - pt1[0] ) * ( pt2[1] - pt1[1] ) );
points.append(pt1)
points.append(pt2)
cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)
if movementArea > 0:
print 'MA: ' + repr(movementArea) + ' @ ' + repr(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_MSEC))
 
if len(points):
center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)
 
cv.ShowImage("Target", color_image)
 
# Listen for ESC key
c = cv.WaitKey(7) % 0x100
if c == 27:
break
 
if __name__=="__main__":
t = Target()
t.run()
 
 
 
 
 
 
import math, operator
from PIL import Image
def compare(file1, file2):
    """Return the RMS difference between the histograms of two image files.

    0.0 means identical histograms; larger values mean more different
    intensity distributions (note: this ignores pixel positions entirely).
    """
    image1 = Image.open(file1)
    image2 = Image.open(file2)
    h1 = image1.histogram()
    h2 = image2.histogram()
    # Mean of squared per-bin differences, then square root.  zip() replaces
    # the original reduce/map pair; unlike Python 2 map(), it also truncates
    # instead of padding with None when the histogram lengths differ.
    rms = math.sqrt(sum((a - b) ** 2 for a, b in zip(h1, h2)) / len(h1))
    return rms

if __name__=='__main__':
    import sys
    # Expects exactly two image paths on the command line.
    file1, file2 = sys.argv[1:]
    print compare(file1, file2)
	
	
	
import ImageChops
import math, operator

def rmsdiff(im1, im2):
    "Calculate the root-mean-square difference between two images"

    h = ImageChops.difference(im1, im2).histogram()

    # BUG FIX: the original map(lambda h, i: ..., h, range(256)) padded the
    # shorter range(256) with None for multi-band images (RGB histograms
    # have 768 bins), raising a TypeError.  Folding the enumerate index back
    # into 0..255 with `% 256` weights every band's bins identically to the
    # grayscale case and never crashes.
    return math.sqrt(
        sum(count * ((level % 256) ** 2) for level, count in enumerate(h))
        / (float(im1.size[0]) * im1.size[1])
    )






from itertools import izip
import Image
 
i1 = Image.open("image1.jpg")
i2 = Image.open("image2.jpg")
assert i1.mode == i2.mode, "Different kinds of images."
assert i1.size == i2.size, "Different sizes."
 
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
    # for gray-scale jpegs
    dif = sum(abs(p1-p2) for p1,p2 in pairs)
else:
    dif = sum(abs(c1-c2) for p1,p2 in pairs for c1,c2 in zip(p1,p2))
 
ncomponents = i1.size[0] * i1.size[1] * 3
print "Difference (percentage):", (dif / 255.0 * 100) / ncomponents	
	
	
	


from PIL import Image,ImageChops,ImageStat
# Shortest PIL-only image diff: RMS of the per-channel pixel differences.
im2 = Image.open("2.png")
im1 = Image.open("1.png")
diff = ImageChops.difference(im1, im2)
stat = ImageStat.Stat(diff)
# One RMS value per band, e.g. [r, g, b]; all zeros means identical images.
print stat.rms




import ImageChops
import math, operator

def rmsdiff(im1, im2):
    "Calculate the root-mean-square difference between two images"
    # BUG FIX: this paste had lost its indentation and was not valid Python;
    # structure restored.  Also fixed: the original map(..., h, range(256))
    # padded range(256) with None for multi-band images (768-bin histogram),
    # raising a TypeError -- `% 256` folds every band onto 0..255 instead.
    h = ImageChops.difference(im1, im2).histogram()
    # calculate rms
    return math.sqrt(
        sum(count * ((level % 256) ** 2) for level, count in enumerate(h))
        / (float(im1.size[0]) * im1.size[1])
    )
	

	
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* #include "imglib.h" */

#define RED_C 0
#define GREEN_C 1
#define BLUE_C 2
#define GET_PIXEL(IMG, X, Y) ((IMG)->buf[ (Y) * (IMG)->width + (X) ])

/* Print the mean absolute per-channel difference between two same-sized
 * images, as a percentage (0 = identical, 100 = maximally different). */
int main(int argc, char **argv)
{
   image im1, im2;
   double totalDiff = 0.0;
   unsigned int x, y;

   if ( argc < 3 )
   {
      fprintf(stderr, "usage:\n%s FILE1 FILE2\n", argv[0]);
      exit(1);
   }
   im1 = read_image(argv[1]);
   if ( im1 == NULL ) exit(1);
   im2 = read_image(argv[2]);
   if ( im2 == NULL ) { free_img(im1); exit(1); }
   if ( (im1->width != im2->width) || (im1->height != im2->height) )
   {
      fprintf(stderr, "width/height of the images must match!\n");
   } else {
   /* BODY: the "real" part! */
      for(x=0; x < im1->width; x++)
      {
         /* BUG FIX: the inner loop bound was im1->width; it must iterate
          * the rows, i.e. im1->height, or non-square images are read
          * out of bounds / partially skipped. */
         for(y=0; y < im1->height; y++)
         {
           totalDiff += fabs( GET_PIXEL(im1, x, y)[RED_C] - GET_PIXEL(im2, x, y)[RED_C] ) / 255.0;
           totalDiff += fabs( GET_PIXEL(im1, x, y)[GREEN_C] - GET_PIXEL(im2, x, y)[GREEN_C] ) / 255.0;
           totalDiff += fabs( GET_PIXEL(im1, x, y)[BLUE_C] - GET_PIXEL(im2, x, y)[BLUE_C] ) / 255.0;
         }
      }
      printf("%lf\n", 100.0 * totalDiff / (double)(im1->width * im1->height * 3) );
   /* BODY ends */
   }
   free_img(im1);
   free_img(im2);
   return 0;
}





# Fragment: assumes diffImg(), the frames t_minus/t/t_plus, the pixel-count
# threshold x, cv2 and datetime are defined by the surrounding script --
# TODO confirm against the original source.
dimg = diffImg(t_minus, t, t_plus)
if cv2.countNonZero(dimg) > x:
    # BUG FIX: the imwrite line was not indented under the if.
    # Save the motion frame under a microsecond-resolution timestamp name.
    cv2.imwrite(datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.jpg', dimg)



#include<opencv/cv.h>
#include<opencv/highgui.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<fcntl.h>
#include<pthread.h>
#include<unistd.h>	/* sleep() */

int num_frame=0;

/* Read "ipmovie.MPG", detect fast motion inside a fixed 300x300 ROI via
 * Canny edges + running-average differencing + contour counting, save each
 * annotated frame under imgs/imgN.jpg, and report the total run time.
 *
 * Fixes vs. the original: int main (void main is non-standard), correct
 * printf format for time_t, buffer sizes large enough for the values
 * printed into them, removed pthread_join() on a never-created thread
 * (undefined behavior), and the per-frame CvMemStorage is now released. */
int main(void)
{
	time_t start, end, exe_time;
	time(&start);

	int low_t = 25, high_t = 150, gaussian_kern = 5, count = 0, kern_size = 3, frame_n = 0, occur = 0;
	float deviation = 1.41;
	char first = 0;
	int i = 0, ret = 0;
	CvFont font;
	double hScale = 1.0;
	double vScale = 1.0;
	int lineWidth = 1;
	char buf[16];		/* BUG FIX: buf[3] overflowed for contour counts > 99 */
	int high = 0, low = 0;
	CvRect rect;
	int *params = 0;
	int frame_count = 0;
	char name[] = "imgs/img", ext[] = ".jpg";
	char full_name[64];	/* BUG FIX: [17] overflowed beyond 9999 frames */

	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, vScale, 0, lineWidth, 8);
	CvCapture *ip = cvCaptureFromFile("ipmovie.MPG");
	if(ip == NULL)
	{
		printf("Cant catch any frame\n");
		return 1;
	}
	printf("Capture from file successfull\n");
	sleep(1);
	IplImage *frame = cvQueryFrame(ip);
	IplImage *canny_img = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
	IplImage *gray_img = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
	IplImage *moving_ave = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_32F, 1);
	IplImage *tmp, *diff;

	char *outfile = "opmovie_new.avi";
	CvVideoWriter *opmovie = cvCreateVideoWriter(outfile, CV_FOURCC('P', 'I', 'M', '1'), 25, cvSize(frame->width, frame->height), 1);

	while(1)
	{
		frame = cvQueryFrame(ip);
		if(frame == NULL)
		{
			printf("Unable to query frame, breaking...after %d\n", count);
			break;
		}

		/* Restrict all processing to a fixed 300x300 window */
		rect = cvRect(300, 0, 300, 300);
		cvRectangle(frame, cvPoint(300,0), cvPoint(600,300), cvScalar(255,255,0,0), 1, 8, 0);
		cvSetImageROI(frame, rect);
		cvSetImageROI(gray_img, rect);
		cvSetImageROI(canny_img, rect);
		cvSetImageROI(moving_ave, rect);

		cvCvtColor(frame, gray_img, CV_RGB2GRAY);
		cvSmooth(gray_img, gray_img, CV_GAUSSIAN, gaussian_kern, 0, deviation, 0);
		cvCanny(gray_img, canny_img, low_t, high_t, kern_size);

		if(first == 0)
		{
			diff = cvCloneImage(canny_img);
			tmp = cvCloneImage(canny_img);
			cvConvertScale(canny_img, moving_ave, 1, 0);
			printf("First scale conversion done\n");
			first = 1;
		}
		else
			cvRunningAvg(canny_img, moving_ave, 5, NULL);

		cvConvertScale(moving_ave, tmp, 1, 0);
		cvAbsDiff(canny_img, tmp, diff);

		CvMemStorage* storage = cvCreateMemStorage(0);
		CvSeq* contour = 0;

		/* Count and draw the edge contours in the ROI */
		cvFindContours(canny_img, storage, &contour, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
		for(; contour != 0; contour = contour->h_next)
		{
			cvDrawContours(frame, contour, cvScalar(125,15,80,150), cvScalarAll(0), 2, 1, 8, cvPoint(0,0));
			i++;
		}
		sprintf(buf, "%d", i);	/* NOTE(review): buf is never displayed; kept for parity */

		/* Heuristic "fast motion" state machine over the contour counts */
		if(i > 20 && frame_n == 0)
		{
			++high;
			frame_n = count;
		}
		if(high > 0 && count < frame_n + 6)
		{
			if(i < 75)
			{
				++low;
			}
		}
		else
		{
			high = 0;
			frame_n = 0;
		}

		if(low > 3)
		{
			++occur;
		}

		if(occur > 10)
		{
			printf("Fast motion Detected\n");
			low = 0;
			high = 0;
			occur = 0;
			frame_n = 0;
		}
		sprintf(full_name, "%s%d%s", name, frame_count, ext);

		cvResetImageROI(frame);
		cvResetImageROI(gray_img);
		cvResetImageROI(canny_img);
		cvResetImageROI(moving_ave);

		ret = cvSaveImage(full_name, frame, params);

		if(!ret)
		{
			frame_count = 0;
		}
		else
			++frame_count;

		/* BUG FIX: release the per-frame contour storage (was leaked) */
		cvReleaseMemStorage(&storage);

		i = 0;
		count++;
	}
	printf("%d frames \n", count);
	cvReleaseImage(&tmp);
	cvReleaseImage(&diff);
	cvReleaseImage(&gray_img);
	cvReleaseImage(&moving_ave);
	cvReleaseImage(&canny_img);
	cvReleaseCapture(&ip);
	cvReleaseVideoWriter(&opmovie);
	time(&end);
	exe_time = end - start;
	/* BUG FIX: time_t printed with %d; cast to long and use %ld */
	printf("Total time for execution %ld\n", (long)exe_time);
	return 0;
}



import RPi.GPIO as GPIO  
import subprocess
import time

# PIR motion sensor input, BCM numbering (physical pin 37 per the note at
# the bottom of this script).
pir_pin = 26
#capteur_pin = 26
#system_pin = 27

GPIO.setmode(GPIO.BCM)  
GPIO.setup(pir_pin, GPIO.IN)  
#GPIO.setup(capteur_pin, GPIO.IN)  
#GPIO.setup(system_pin, GPIO.OUT)  
#GPIO.output(pir_pin,GPIO.HIGH)

#x = subprocess.Popen(["gpio", "read", "29"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#etat_alarme = x.stdout.read()
#print "Etat de l'alarme: " + etat_alarme 
#val = GPIO.input(pir_pin)
#print  "Info capteur PIR: " + str(GPIO.input(pir_pin)) + "\n"

def log_intrusion():
	"""Append a timestamped intrusion record to the alarm status log."""
	# 'with' guarantees the file handle is closed even if write() raises.
	with open('/var/www/Travail/status_alarm.txt', 'a') as fp:
		temps = time.strftime("%d %m %Y  %H:%M:%S")
		msg = "[" + temps + "]:" + " Intrusion" + "\n"
		fp.write(msg)


try:

	print("Alarme active: Attente evenement..")

	while True:

		# Poll the alarm-enable status bit (wiringPi pin 29, see note below).
		x = subprocess.Popen(["gpio", "read", "29"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
		etat_alarme = x.stdout.read()

		# BUG FIX: `gpio read` prints "0\n" or "1\n", both of which are
		# truthy strings, so the original `and etat_alarme` test always
		# passed and intrusions were logged even with the alarm disabled.
		# NOTE(review): assumes "1" means armed -- confirm with the wiring.
		if GPIO.input(pir_pin) and etat_alarme.strip() == "1":
			print("Detection")
			# BUG FIX: the original line was `log_intrusion():` -- the
			# trailing colon is a syntax error.
			log_intrusion()

		time.sleep(0.5)

except KeyboardInterrupt:
	#GPIO.cleanup()
	print("Sortie du programme Alarme")


#########################################		
###		3.3V en 1 et wpi 26 en 37
###		on utilise wpi 29 pour le bit de status de l'alarme
		
# comment lancer un script python a partir du php
# idem pour le stopper		 
	
	
	
# Reference: http://framboisepi.fr/category/installation/
#
# Time-lapse capture: one still every 30 s for 6 h, detached from the terminal:
# nohup raspistill -o a%04d.jpg -t 21600000 -tl 30000 > /dev/null 2>&1 &





import smtplib

def sendemail(from_addr, to_addr_list, cc_addr_list,
              subject, message,
              login, password,
              smtpserver='smtp.gmail.com:587'):
    """Send `message` via an authenticated STARTTLS SMTP session.

    from_addr: envelope/header sender address.
    to_addr_list, cc_addr_list: lists of recipient addresses.
    subject, message: subject line and plain-text body.
    login, password: SMTP credentials.
    smtpserver: "host:port" string, defaults to Gmail submission.
    Returns the dict of refused recipients from SMTP.sendmail
    (empty dict = everyone accepted).
    """
    header  = 'From: %s\n' % from_addr
    header += 'To: %s\n' % ','.join(to_addr_list)
    header += 'Cc: %s\n' % ','.join(cc_addr_list)
    header += 'Subject: %s\n\n' % subject
    message = header + message

    server = smtplib.SMTP(smtpserver)
    try:
        server.starttls()
        server.login(login, password)
        # BUG FIX: Cc addresses were written into the header but never given
        # to sendmail(), whose second argument is the actual envelope
        # recipient list -- so Cc recipients never received the mail.
        problems = server.sendmail(from_addr, to_addr_list + cc_addr_list, message)
    finally:
        # Always close the session, even if login/send fails.
        server.quit()
    return problems

# Example use (BUG FIX: the bare "Example use:" prose line was a syntax error):
sendemail(from_addr    = 'python@RC.net', 
          to_addr_list = ['RC@gmail.com'],
          cc_addr_list = ['RC@xx.co.uk'], 
          subject      = 'Howdy', 
          message      = 'Howdy from a python function', 
          login        = 'pythonuser', 
          password     = 'XXXXX')
		  
		  
import os.path, time
# NOTE(review): `file` is never defined in this snippet -- in Python 2 it is
# the builtin file *type*, so getmtime(file) fails as written.  Presumably a
# path string set earlier (e.g. the capture script above); confirm.
print "last modified: %s" % time.ctime(os.path.getmtime(file))
print "created: %s" % time.ctime(os.path.getctime(file))


import os
import glob
# Most recent .mp3 in the cwd (case-insensitive extension match).
# NOTE(review): on Unix getctime is the inode-change time, not creation
# time; use getmtime if "newest" should mean last modified.
newest = max(glob.iglob('*.[Mm][Pp]3'), key=os.path.getctime)		  

