This repository was archived by the owner on Nov 1, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathvideostreaming.py
1364 lines (1161 loc) · 55.5 KB
/
videostreaming.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
Main functions and classes
--------------------------
* OpenCV_VideoStream
Uses generic Open CV commands to provide a set of all purpose methods for
video capture with any camera. Easy to use, but may not be able to access
some camera specific functions (e.g. freerun modes for high frame rates).
* uEyeVideoStream
Uses pyueye module which provides Python bindings to the uEye SDK.
Allows running an IDS uEye camera in freerun mode, which will allow access
to the higher frame rates.
* FrameStim
Thin wrapper around PsychoPy ImageStim which allows for providing image as
a uint8 numpy array and interpolating image to size of PsychoPy window.
* calc_imResize / calc_imCrop
Functions for working out how to interpolate a camera image up to the
resolution of a PsychoPy window.
Dependencies
------------
* numpy
* PsychoPy
* OpenCV3 python bindings
* Python Image Library (PIL) or Pillow
* pyueye (available in pip) and the IDS uEye drivers and SDK if wanting to
use the uEye functions / classes.
"""
from __future__ import division
import os, cv2, warnings
import numpy as np
from psychopy import visual
from psychopy.tools.attributetools import setAttribute
import pyglet.gl as GL
try:
from pyueye import ueye
from pyueye.ueye import sizeof
have_pyueye = True
except ImportError:
have_pyueye = False
# Py2 <--> Py3 compatibility fixes
from past.builtins import unicode
# Known camera resolutions
cam_res_lookup = {'uEye1':(752,480),
'uEye2':(1280,1024),
'laptop':(640,480)}
#############################################################################
### Function definitions
#############################################################################
def uEyeCheck(code, action='error', msg=None):
    """
    Handy func for checking ueye return codes for errors.

    Arguments
    ---------
    code - int
        Return code from a ueye SDK call. ueye.IS_SUCCESS indicates success.
    action - str {'error' | 'warn'}, optional
        Whether to raise a RuntimeError ('error') or issue a warning ('warn')
        when the code indicates failure.
    msg - str or None, optional
        Custom message to prepend to the error code.
    """
    if action not in ('error', 'warn'):
        raise ValueError("action must be one of 'error' or 'warn'")
    if code != ueye.IS_SUCCESS:
        # Build the message as a string - raw return codes may be ints or
        # ctypes values, which warnings.warn cannot accept directly
        if msg is not None:
            errstr = msg + ': ' + str(code)
        else:
            errstr = str(code)
        if action == 'error':
            raise RuntimeError(errstr)
        elif action == 'warn':
            with warnings.catch_warnings():
                warnings.simplefilter('always')
                warnings.warn(errstr)
def calc_imResize(imsize, screensize):
    """
    Calculates size image will need to be to fill the screen whilst maintaining
    the original aspect ratio. Note this simply returns the required size -
    the actual interpolation of the image to this size needs to be handled
    separately.

    Differs from calc_imCrop in that here the image is resized to fill the
    screen as much as possible but whilst still maintaining the original
    aspect ratio, ensuring none of the image is lost but potentially leaving
    some blank borders at edges of screen.

    Arguments
    ---------
    imsize - (width, height) tuple
        Size of original image.
    screensize - (width, height) tuple
        Size of screen to interpolate image to.

    Returns
    -------
    new_imsize (width, height) tuple
        Size image needs to be interpolated to.
    """
    # Convert to numpy arrays; this also copies the inputs so we never mutate
    # the caller's values (e.g. a psychopy window size) in-place
    imsize = np.asarray(imsize)
    screensize = np.asarray(screensize)

    # Width:height aspect ratios
    aspect_im = imsize[0] / imsize[1]
    aspect_screen = screensize[0] / screensize[1]

    # Matching aspect ratios - the image simply fills the whole screen
    if aspect_im == aspect_screen:
        return tuple(screensize.astype(int))

    # Otherwise pick the scale factor that fits the limiting dimension:
    # width if the screen is more square than the image, height otherwise
    if aspect_screen < aspect_im:
        scale = screensize[0] / imsize[0]
    else:
        scale = screensize[1] / imsize[1]
    return tuple((imsize * scale).astype(int))
def calc_imCrop(imsize, screensize):
    """
    Calculates amount to crop image by so as to match screen aspect ratio.
    Note this simply returns the required cropping slices - the actual
    application of the cropping and the interpolation of the image to the
    screen size needs to be handled separately.

    Differs from calc_imResize in that here we crop the image to match the
    screen aspect ratio, ensuring the image fills the screen, but potentially
    reducing the field of view of the camera.

    Arguments
    ---------
    imsize - (width, height) tuple
        Size of original image.
    screensize - (width, height) tuple
        Size of screen to interpolate image to.

    Returns
    -------
    crop_slices - (width, height) tuple of slices
        Pair of slice objects that may be applied to image to crop it to the
        correct aspect ratio.
    """
    # Convert to numpy arrays; this also copies the inputs so we never mutate
    # the caller's values (e.g. a psychopy window size) in-place
    imsize = np.asarray(imsize)
    n_cols, n_rows = imsize
    screensize = np.asarray(screensize)

    # Width:height aspect ratios
    aspect_im = imsize[0] / imsize[1]
    aspect_screen = screensize[0] / screensize[1]

    # Matching aspect ratios - return slices covering the full image
    if aspect_im == aspect_screen:
        return (slice(n_rows), slice(n_cols))

    # Work out the cropped size needed to match the screen's aspect ratio:
    # reduce width if the screen is more square than the image, otherwise
    # reduce height
    cropped = imsize.copy()
    if aspect_screen < aspect_im:
        cropped[0] = cropped[1] * aspect_screen
    else:
        cropped[1] = cropped[0] / aspect_screen

    # Centre the crop: offsets are half the difference between old and new
    off_cols, off_rows = ((imsize - cropped) / 2).astype(int)
    return (slice(off_rows, n_rows - off_rows),
            slice(off_cols, n_cols - off_cols))
#############################################################################
### Class definitions
#############################################################################
##### Video streaming classes ######
class BaseVideoStream(object):
    """
    Base Class Arguments
    --------------------
    vertical_reverse : bool, optional
        If True, will vertically flip image (default = False).
    horizontal_reverse : bool, optional
        If True, will horizontally flip image (default = False).
    postproc : function, optional
        Function for applying post-processing to frame. This must accept a
        numpy array with uint8 dtype as its first argument, and return a numpy
        array with uint8 dtype as its only output. Can be used to apply
        custom manipulations to video stream, such as a delay or a filter.
        Postproc function is only applied as long as the status indicates to;
        this defaults to ON when the class is first initialised and can be
        changed with the .switchApplyPostproc function. Note that
        post-processing is applied after all other processing steps (including
        image reversals, colour conversions, etc.), but before writing the
        image out to file (if applicable).
    postproc_kwargs : dict, optional
        Dictionary of further keyword arguments to be passed to the postproc
        function. Should be of form {'arg':val1, 'arg2':val2, etc}.
    warnMissedFrames : bool, optional
        If True (default), a warning will be raised whenever frame acquisition
        fails.

    Base Class Methods
    ------------------
    .get_frame
        Returns a frame from the camera as a numpy array. If image acquisition
        fails, a warning will be printed (if warnMissedFrames == True) and
        None will be returned instead.
    .openVideoWriter
        Open a video writer object to an output file. Whilst the recording
        status is ON, further calls to .get_frame will also write the frame
        out to the file. Note - recording status defaults to OFF when stream
        is first initialised.
    .closeVideoWriter
        Close video writer object. Should get called automatically when stream
        is closed.
    .switchRecording
        Switch the recording status ON or OFF. Defaults to OFF when class is
        first initialised.
    .switchApplyPostproc
        Switches whether the postproc function (if provided) is applied or not.
        Defaults to ON when class is first initialised.
    .setPostproc
        Supply a new post-processing function.

    Example usage
    -------------
    Examples show usage for OpenCV backend; setup for other backends will
    be similar.

    Create an instance of the video stream.

    >>> from utils.videostreaming import OpenCV_VideoStream
    >>> videostream = OpenCV_VideoStream()

    Calls to the .get_frame method return the frames from the camera. Stick
    these in a loop to continuously acquire. Here we display them in an
    OpenCV window.

    >>> import cv2
    >>> cv2.namedWindow('display')
    >>> keepgoing = True
    >>> while keepgoing:
    ...     frame = videostream.get_frame()
    ...     if frame is not None:
    ...         cv2.imshow('display', frame)
    ...     key = cv2.waitKey(1)
    ...     if key == 27:  # code for escape key
    ...         videostream.close()
    ...         cv2.destroyAllWindows()
    ...         keepgoing = False

    To apply some custom post-processing to the frames, supply a function to
    the class postproc parameter. Here we create a short function for photo
    negating the frames. Subsequent calls to the .get_frame method should now
    return the images photo negated.

    >>> def negate(frame):
    ...     return 255 - frame
    >>> videostream = OpenCV_VideoStream(postproc=negate)

    A video writer object can be opened to record the stream to a file. The
    recording status defaults to OFF when the class is first initialised;
    call .switchRecording to begin recording.

    >>> videostream.openVideoWriter('./test.mp4')
    >>> # When ready, start recording
    >>> videostream.switchRecording()

    Subsequent calls to the .get_frame method will now also write that
    frame out to the video file. When finished with the recording, the video
    writer must be closed, otherwise the file may not be written out properly.
    If continuing with the stream after recording, the .closeVideoWriter
    function can be used to close the writer specifically.

    >>> videostream.closeVideoWriter()

    Once finished with the stream entirely, the .close method must be called.
    If a video writer is currently open, it will automatically be closed
    (negating the need to call .closeVideoWriter here).

    >>> videostream.close()

    See Also
    --------
    * FrameStim - Thin wrapper around PsychoPy's ImageStim class. Can be
      used to display frames in a PsychoPy window, with options for cropping
      and / or rescaling the image.
    """
    """
    2nd docstring kept separate from 1st one so that it is not inherited by
    child classes.

    Child classes must provide the following methods:
    * self._acquire_image_data() - Must return a frame as numpy array, or
      return None if image acquisition fails.
    * self.close() - Some method for closing the video stream, which will
      also call this base class's .closeVideoWriter method when it does.

    Child classes must also provide the following attributes
    * self.fps - float giving frames per second
    * self.cam_res - (width, height) tuple of ints of camera resolution
    * self.colour_mode - str giving colour mode, selected from 'bgr',
      'rgb', or 'mono'
    """
    def __init__(self, vertical_reverse=False, horizontal_reverse=False,
                 postproc=None, postproc_kwargs=None, warnMissedFrames=True):
        # Assign local vars into class
        self.vertical_reverse = vertical_reverse
        self.horizontal_reverse = horizontal_reverse
        self.warnMissedFrames = warnMissedFrames
        # Assign postproc func. Default for postproc_kwargs is None rather
        # than {} to avoid the shared mutable-default-argument pitfall;
        # setPostproc substitutes an empty dict on first call.
        self.setPostproc(postproc, postproc_kwargs)
        # Assorted internal flags
        self.RECORDING = False      # sets whether to record
        self.APPLYPOSTPROC = True   # sets whether to apply postproc func
        # Default video writer to None - will be overwritten with real
        # writer object if one is opened via .openVideoWriter
        self.video_writer = None

    def _acquire_image_data(self):
        """Placeholder function - should be overwritten by child class"""
        raise NotImplementedError

    def close(self):
        """Placeholder function - should be overwritten by child class"""
        raise NotImplementedError

    def get_frame(self):
        """
        Acquire a single frame from the camera and return it to the user after
        applying some minimal post-processing plus any additional custom
        post-processing if such a function was specified.

        Returns
        -------
        frame : numpy array with uint8 datatype or None
            If image acquisition is successful, the resulting frame is
            returned as a numpy array. If it fails, a warning will be printed
            (if warnMissedFrames is True) and None will be returned instead.
        """
        # Get frame
        frame = self._acquire_image_data()
        if frame is None:
            if self.warnMissedFrames:
                with warnings.catch_warnings():
                    warnings.simplefilter('always')
                    warnings.warn('Missed camera frame')
            return None
        # Flip image upside-down and / or left-right if requested
        if self.vertical_reverse:
            frame = np.flipud(frame)
        if self.horizontal_reverse:
            frame = np.fliplr(frame)
        # If any post-processing requested, apply it now
        if self.postproc and self.APPLYPOSTPROC:
            frame = self.postproc(frame, **self.postproc_kwargs)
        # Write video to output if requested. OpenCV's writer expects BGR,
        # so convert RGB -> BGR for the written copy only.
        if self.video_writer and self.RECORDING:
            if self.colour_mode == 'rgb':
                _frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            else:
                _frame = frame
            self.video_writer.write(_frame)
        # Return
        return frame

    def openVideoWriter(self, output_file, codec='mp4v', overwrite=False):
        """
        Opens video writer object for specified output file. Whilst recording
        status is ON, further calls to .get_frame will also write that frame
        out to the file. Note that the recording status defaults to OFF when
        the class is first initialised, so you will need to switch it ON
        with the .switchRecording function to start writing to the file.

        Arguments
        ---------
        output_file : str, required
            Filepath to desired output video. Recommend using '.mp4' file
            extension (default if no extension provided).
        codec : str or -1, optional
            Fourcc (https://www.fourcc.org/codecs.php) code indicating codec
            to use for encoding video output. Codec must be appropriate for
            file type; recommend using 'mp4v' for MP4 files (default).
        overwrite : bool, optional
            If True, will overwrite output file if it already exists. If
            False (default), will error if output file already exists.
        """
        # Default to .mp4 format
        if not os.path.splitext(output_file)[1]:
            output_file += '.mp4'
        # HACK - opencv seems to mess up attempting to write to an existing
        # file (frames get appended rather than overwritten), so delete
        # file if it already exists
        if os.path.isfile(output_file):
            if overwrite:
                os.remove(output_file)
            else:
                raise IOError('Output video file already exists')
        # Obtain fourcc
        fourcc = cv2.VideoWriter_fourcc(*codec)
        # Create writer object. Frame size must be ints - cam_res may hold
        # floats depending on how the child class acquired it.
        self.video_writer = cv2.VideoWriter(
            output_file, fourcc, self.fps, tuple(map(int, self.cam_res)),
            isColor=self.colour_mode != 'mono'
        )
        # Error check
        if not self.video_writer.isOpened():
            raise IOError('Failed to open video writer for {}, check video '
                          'file and codec settings'.format(output_file))
        else:
            print('Opened video writer for ' + output_file)

    def closeVideoWriter(self):
        """
        Closes video writer object and hence current output file. Gets called
        automatically when .close method is called, so should only be
        necessary to use directly if you want to close one video writer and
        open another without stopping the video stream.
        """
        if self.video_writer:
            if self.RECORDING:
                self.switchRecording()
            self.video_writer.release()
            # Clear the reference so a released writer can never be written
            # to again, and repeated close calls are harmless no-ops
            self.video_writer = None
            print('Closed video writer')

    def switchRecording(self, value=None):
        """
        Switches recording status ON or OFF. If provided value is a boolean,
        then will set status to this value. If provided value is None
        (default), then will switch to opposite of current status.

        Does nothing if no video writer is currently open.
        """
        if self.video_writer:
            if value is None:
                self.RECORDING = not self.RECORDING
            else:
                self.RECORDING = value
            if self.RECORDING:
                print('Video stream recording')
            else:
                print('Video stream not recording')
        else:
            # No writer open - silently ignore (deliberate best-effort)
            pass

    def switchApplyPostproc(self, value=None):
        """
        Switches whether to apply post-processing function ON or OFF.
        If provided value is a boolean, then will set status to this value.
        If provided value is None (default), then will switch to opposite of
        current status.
        """
        if value is None:
            self.APPLYPOSTPROC = not self.APPLYPOSTPROC
        else:
            self.APPLYPOSTPROC = value
        if self.APPLYPOSTPROC:
            print('Applying post-processing')
        else:
            print('Not applying post-processing')

    def setPostproc(self, postproc, postproc_kwargs=None):
        """
        Set post-processing function after class has been initialised, e.g. to
        overwrite old function with a new one mid-stream. If postproc_kwargs
        is None, existing entry in class will not be changed.
        """
        self.postproc = postproc
        if postproc_kwargs is not None:
            self.postproc_kwargs = postproc_kwargs
        elif not hasattr(self, 'postproc_kwargs'):
            # First call (from __init__) with no kwargs given - use a fresh
            # empty dict per instance
            self.postproc_kwargs = {}
class OpenCV_VideoStream(BaseVideoStream):
    """
    Video streaming class based on generic methods implemented in OpenCV. This
    is easy to use, but for professional cameras may not be able to access
    some of the camera's more specific functions. For example, it will not be
    able to access free-run modes, which may limit the maximum frame rate
    achievable.

    Arguments
    ---------
    cam_num - int, optional
        Index of camera to use. 0 (default) will use first available camera
        (probably laptop camera), 1 will use first available externally
        connected camera.
    cam_res - (width, height) tuple of ints, str, or None, optional
        Resolution to acquire images at. If tuple of ints, will attempt to
        use that resolution exactly (if the camera supports it). Can be a
        string giving a named camera listed in the cam_res_lookup table
        included in this module. If None (default) will attempt to use the
        default resolution retrieved from the camera settings, but note that
        it might not get this right!
    fps - float, optional
        Frames per second to acquire at. Default of 30 is limit of most
        laptop cameras.
    colour_mode - str {'bgr' | 'rgb' | 'mono'}, optional
        OpenCV acquires images into BGR colour space. Specify colour mode as
        'bgr' to leave the images in this space, as 'rgb' to convert them to
        RGB colour space, or 'mono' to convert them to grayscale. Note that
        images are always acquired in BGR and must be converted to other
        spaces, which may incur a small processing cost.
    **kwargs
        Further keyword arguments are passed to the videostreaming base class
        (details of which are included further below).

    Methods
    -------
    .close
        Close video writer (if applicable) and release camera. Must be called
        when you are done.
    """
    __doc__ += BaseVideoStream.__doc__

    def __init__(self, cam_num=0, cam_res=None, fps=30.0, colour_mode='bgr',
                 **kwargs):
        # Assign local vars into class
        self.cam_num = cam_num
        self.cam_res = cam_res
        self.fps = fps
        self.colour_mode = colour_mode
        # Error check
        if self.colour_mode not in ['bgr', 'rgb', 'mono']:
            raise ValueError("colour_mode must be one of 'bgr', 'rgb', "
                             "or 'mono'")
        # If cam_res is named camera, assign known resolution
        if isinstance(self.cam_res, (str, unicode)):
            try:
                self.cam_res = cam_res_lookup[self.cam_res]
            except KeyError:
                raise ValueError('Unrecognised camera: ' + self.cam_res)
        # Acquire video device
        self.cap = cv2.VideoCapture(self.cam_num)
        if not self.cap.isOpened():
            raise IOError('Could not open camera, is it in use by another '
                          'process?')
        # Set / get camera resolution
        if self.cam_res:
            # If cam_res provided, request it from the camera...
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.cam_res[0])
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.cam_res[1])
            # ...then read back the values actually in effect. Cameras may
            # silently ignore unsupported resolutions, and a mismatch would
            # break the frame size of any opened video writer. Some backends
            # return 0 for these properties - keep the requested values then.
            actual_res = (self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            if all(actual_res):
                self.cam_res = actual_res
        else:
            # Otherwise, acquire defaults (tend to be a bit rubbish though!)
            self.cam_res = (self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                            self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Store resolution as ints - OpenCV property getters return floats
        self.cam_res = tuple(int(v) for v in self.cam_res)
        # Set / get camera fps. If fps not given, query the camera, falling
        # back on 30 if the camera cannot report it.
        if not self.fps:
            self.fps = self.cap.get(cv2.CAP_PROP_FPS)
            if self.fps == 0:
                self.fps = 30.0
                print('Could not determine camera fps, defaulting to 30')
        self.cap.set(cv2.CAP_PROP_FPS, self.fps)
        # Instantiate parent class
        super(OpenCV_VideoStream, self).__init__(**kwargs)

    def _acquire_image_data(self):
        """
        Grab one frame from the capture device, applying any requested colour
        conversion. Returns the frame as a numpy array, or None on failure.
        """
        # Acquire frame. ret will be True if capture was successful
        ret, frame = self.cap.read()
        if ret:
            # Apply colour conversion if necessary (OpenCV acquires in BGR)
            if self.colour_mode == 'rgb':
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            elif self.colour_mode == 'mono':
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Return
            return frame

    def close(self):
        """
        Stop video stream permanently. You must call this when you are done.
        Function releases the camera capture device, and releases the video
        writer (if applicable).
        """
        self.cap.release()
        self.closeVideoWriter()
        print('Video stream closed')
class uEyeVideoStream(BaseVideoStream):
"""
Video streaming class based on IDS uEye's SDK. Requires that pyueye be
installed on your system (it is available in pip). Also requires the
uEye SDK and drivers to be installed. Camera will be run in freerun mode,
allowing access to the higher frame rates, but also making operation a bit
more complicated. Only compatible with IDS uEye cameras (obviously).
Arguments
---------
cam_num - int, optional
Index of uEye camera to use. Default of 0 selects first available.
pixel_clock - int, str {'min' | 'max'} or None, optional
Value to set for the uEye pixel clock, in MHz. This determines the
bandwidth of the camera data feed. Larger values allow faster frame
rates, but excessive values may cause transmission errors. Ideally
this should be set just high enough to achieve the desired frame rate.
The easiest way to determine this is to run the camera via the uEye
Cockpit application, open the Camera tab under the Camera Properties
menu, and adjust the pixel clock slider till the maximum achievable
frame rate is just above the desired value. If 'min' or 'max', will use
the minimum or maximum values the camera can provide. If None
(default), will use the default pixel clock value of the camera; note
that this will limit the achievable frame rates. Further details can
be found within the .pixel_clock_info attribute after initialisation.
aoi - (left, bottom, width, height) tuple or None, optional
Area of interest, i.e. region of camera sensor to acquire images
from. Could be used to manipulate resolution / aspect ratio. If None
(default), will use the maximum allowable AOI.
fps - float, optional
Frames per second to acquire (default = 30). Note that maximum
achievable frame rate depends on pixel_clock and exposure settings
(see below).
colour_mode - str {'bgr' | 'rgb' | 'mono'}, optional
Colour space to acquire images into. Will use colour BGR if set as
'bgr', colour RGB if set as 'rgb', or grayscale if set as 'mono'.
Note that this determines the space the images are actually acquired
into - no colour conversion of the images is necessary.
buffer_size - int or None, optional
Number of frames to buffer images within. Larger buffers will incur
a delay in the stream of images, but buffers which are too small are
liable to incur memory errors. If None (default) will use 10% of
the frame rate (corresponding to 100ms).
exposure - float, optional
Camera shutter exposure time in ms. Default of 0 is a special value
that sets the exposure to the maximum allowable (1000/fps). Exposure
time should not exceed the duration of the frame acquisition at the
desired fps. Note that if using auto-exposure then this setting
will only affect the first few frames acquired. Further details on
available options can be found within the .exposure_info attribute
after initialisation.
block - bool, optional
If True, each call to acquire a new frame from the buffer will block
till the camera has finished acquiring the next one. If False
(default), will grab image from buffer without delay - note that if a
new frame has not yet been acquired it will instead repeatedly return
the same frame until one has. Non-blocking operation is recommended
if running a live video feed on screen through PsychoPy at higher
camera frame rates (e.g. camera FPS matches the monitor rate) because
PsychoPy's win.flip() command is itself usually a blocking operation.
If not streaming the video via PsychoPy (e.g. video is simply being
recorded to file, or just a simple stream is being displayed with
OpenCV), or if the camera frame rate is less than the monitor rate,
then blocking operation is likely to be preferable.
auto_exposure - str {'camera' | 'software'} or False, optional
If not False, will automatically adjust shutter exposure to try and
maintain a middling luminance level for the frames. Default is False.
See notes below for meaning of string options.
auto_gain_control - str {'camera' | 'software'} or False, optional
If not False, will automatically adjust the camera's gain control to
try and maintain a middling luminance level for the frames. Default is
False. See notes below for meaning of string options.
auto_white_balance - str {'camera' | 'software'} or False, optional
If not False, will compare luminance levels across colour channels to
try and maintain a middling luminance level for the frames. Only
available for colour cameras, and only if images are being acquired in
a colour space. Default is False. See notes below for meaning of
string options.
colour_correction - bool, optional
If True, will apply a correction that enhances colours to produce
a more vibrant looking colour display. Would recommend using in
conjunction with auto white balance. Only available on certain cameras,
and only if images are being acquired in a colour space. Default is
False. See also colour_correction_factor.
colour_correction_factor - float, optional
Value between 0 (min) and 1 (max) indicating strength of colour
correction to apply (default = 0.5). Ignored if colour_correction
is False.
**kwargs
Further keyword arguments are passed to the videostreaming base class
(details of which are included further below).
Auto-control options
--------------------
Auto-control options accept the special strings 'software' or 'camera'.
These determine the point at which the auto-control is performed. If
'software', the control is performed by the computer. If 'camera', the
control is performed by the camera itself. Camera control is generally
preferred as it reduces the processing cost to the computer, but note that
some camera models may not support this type of control for some options.
Modifying further options
-------------------------
The uEye SDK allows for modifying many more options than are listed in
this class. It is possible to further modify other options after the
class has been initialised using the pyueye module to access the relevant
SDK functions. Most functions require the camera handle to be passed as a
parameter - this can be obtained from the .cam attribute of this class
after initialisation. Details of the SDK commands available can be found
in the uEye manual: https://en.ids-imaging.com/manuals-ueye-software.html
Methods
-------
.update_pixel_clock_info
Update internal store of pixel clock info.
.update_exposure_info
Update internal store of exposure info.
.start_freerun
Begin freerun mode; will get called automatically whenever the first
frame is requested.
.stop_freerun
Stop freerun mode; will get called automatically when the stream is
closed, but will also need to be called manually if the stream needs
to be paused at any point.
.close
Close video writer (if applicable) and release camera. Must be called
when you are done.
"""
__doc__ += BaseVideoStream.__doc__
    def __init__(self, cam_num=0, pixel_clock=None, fps=30.0, aoi=None,
                 colour_mode='bgr', buffer_size=None, exposure=0.0,
                 block=False, auto_exposure=False, auto_gain_control=False,
                 auto_white_balance=False, colour_correction=False,
                 colour_correction_factor=0.5, **kwargs):
        """Open a handle to the camera and apply all acquisition settings.

        See the class docstring for a description of each parameter.
        Raises RuntimeError if the pyueye module is unavailable, and
        ValueError for invalid option strings.
        """
        # Make sure we have pyueye module
        if not have_pyueye:
            raise RuntimeError('Missing pyueye module')
        # Assign args into class. Some of these need to be cast to ctypes
        # objects but we will do this later so as to allow allocating other
        # default values first.
        self.cam_num = cam_num
        self.pixel_clock = pixel_clock
        self.fps = fps
        self.aoi = aoi
        self.colour_mode = colour_mode.lower()  # accept any letter case
        self.buffer_size = buffer_size
        self.exposure = exposure
        self.block = block
        self.auto_exposure = auto_exposure
        self.auto_gain_control = auto_gain_control
        self.auto_white_balance = auto_white_balance
        self.colour_correction = colour_correction
        self.colour_correction_factor = colour_correction_factor
        # Other internal variables
        self.RUNNING = False  # set True once freerun acquisition has started
        # Error check option strings before opening the camera, so bad
        # arguments fail fast without touching the hardware
        if self.colour_mode not in ['bgr','rgb','mono']:
            raise ValueError("colour_mode must be one of 'bgr', 'rgb', "
                             "or 'mono'")
        if self.auto_exposure not in ['camera', 'software', False]:
            raise ValueError("auto_exposure must be one of 'camera', "
                             "'software', or False")
        if self.auto_gain_control not in ['camera', 'software', False]:
            raise ValueError("auto_gain_control must be one of 'camera', "
                             "'software', or False")
        if self.auto_white_balance not in ['camera', 'software', False]:
            raise ValueError("auto_white_balance must be one of 'camera', "
                             "'software', or False")
        # Set buffer_size as 10% of fps if not specified (minimum of 1)
        if self.buffer_size is None:
            self.buffer_size = max(1, int(round(0.1 * self.fps)))
        # Open handle to specified camera and initialise
        self.cam = ueye.HIDS(self.cam_num)
        uEyeCheck(ueye.is_InitCamera(self.cam, None), msg='InitCamera')
        # Get sensor info - provides max width / height used for the
        # default AOI below
        self.sensorInfo = ueye.SENSORINFO()
        uEyeCheck(ueye.is_GetSensorInfo(self.cam, self.sensorInfo),
                  msg='GetSensorInfo')
        # Ensure set to freerun mode (i.e. disable external triggering)
        uEyeCheck(ueye.is_SetExternalTrigger(self.cam, ueye.IS_SET_TRIGGER_OFF),
                  msg='SetExternalTrigger')
        # Set area of interest. If not provided, use max allowable values
        # recovered from sensor info
        self.aoi_rect = ueye.IS_RECT()
        if self.aoi is not None:
            # aoi is expected as a (x, y, width, height) sequence
            self.aoi_rect.s32X = ueye.int(self.aoi[0])
            self.aoi_rect.s32Y = ueye.int(self.aoi[1])
            self.aoi_rect.s32Width = ueye.int(self.aoi[2])
            self.aoi_rect.s32Height = ueye.int(self.aoi[3])
        else:
            self.aoi_rect.s32X = ueye.int(0)
            self.aoi_rect.s32Y = ueye.int(0)
            self.aoi_rect.s32Width = ueye.int(self.sensorInfo.nMaxWidth)
            self.aoi_rect.s32Height = ueye.int(self.sensorInfo.nMaxHeight)
        uEyeCheck(ueye.is_AOI(self.cam, ueye.IS_AOI_IMAGE_SET_AOI,
                              self.aoi_rect, sizeof(self.aoi_rect)),
                  msg='Set AOI')
        # Work out camera resolution from AOI (as a (width, height) tuple)
        self.cam_res = (self.aoi_rect.s32Width.value,
                        self.aoi_rect.s32Height.value)
        # Set colour mode and corresponding bits per pixel (3 bytes/pixel
        # for the packed colour modes, 1 byte/pixel for 8-bit mono)
        if self.colour_mode == 'bgr':
            uEyeCheck(ueye.is_SetColorMode(self.cam, ueye.IS_CM_BGR8_PACKED),
                      msg='SetColorMode')
            self.bits_per_pixel = 24
        elif self.colour_mode == 'rgb':
            uEyeCheck(ueye.is_SetColorMode(self.cam, ueye.IS_CM_RGB8_PACKED),
                      msg='SetColorMode')
            self.bits_per_pixel = 24
        elif self.colour_mode == 'mono':
            uEyeCheck(ueye.is_SetColorMode(self.cam, ueye.IS_CM_MONO8),
                      msg='SetColorMode')
            self.bits_per_pixel = 8
        # Cast pixel clock to ctypes, set in ueye
        self.update_pixel_clock_info()  # to get min / max vals
        if self.pixel_clock is not None:
            # Special strings 'min' / 'max' map to the limits the device
            # reported in the pixel clock info query above
            if self.pixel_clock == 'min':
                self.c_pixel_clock = self.pixel_clock_info.clockMin
            elif self.pixel_clock == 'max':
                self.c_pixel_clock = self.pixel_clock_info.clockMax
            else:
                self.c_pixel_clock = ueye.uint(self.pixel_clock)
            uEyeCheck(ueye.is_PixelClock(
                self.cam, ueye.IS_PIXELCLOCK_CMD_SET,
                self.c_pixel_clock, sizeof(self.c_pixel_clock)
            ), msg='Set PixelClock')
        # Cast fps to ctypes, set in ueye. actual_fps receives the frame
        # rate the camera actually achieved.
        self.c_fps = ueye.double(self.fps)
        actual_fps = ueye.double()
        uEyeCheck(ueye.is_SetFrameRate(self.cam, self.c_fps, actual_fps),
                  msg='Set FrameRate')
        # Warn (rather than error) if achieved fps is more than 1 fps off
        # the request. NOTE(review): comparison relies on pyueye's ctypes
        # wrappers supporting arithmetic between double objects - confirm.
        if not abs(self.c_fps - actual_fps) < 1.0:
            with warnings.catch_warnings():
                # Force the warning to show even if warnings are filtered
                warnings.simplefilter('always')
                warnings.warn('Could not achieve desired fps - largest '
                              'available was {}. Try adjusting pixel clock.'
                              .format(actual_fps))
        # Cast exposure to ctypes, set in ueye
        self.c_exposure = ueye.double(self.exposure)
        uEyeCheck(ueye.is_Exposure(
            self.cam, ueye.IS_EXPOSURE_CMD_SET_EXPOSURE,
            self.c_exposure, sizeof(self.c_exposure)
        ), msg='Set Exposure')
        # Set auto control settings. For each option, 'camera' uses the
        # SENSOR variant of the SDK constant (control performed on-camera)
        # and 'software' uses the plain variant (control by the computer).
        # ueye.double(True) passes 1.0 as the enable value; the trailing
        # ueye.double() is an unused second parameter the SDK requires.
        if self.auto_exposure == 'camera':
            uEyeCheck(ueye.is_SetAutoParameter(
                self.cam, ueye.IS_SET_ENABLE_AUTO_SENSOR_SHUTTER,
                ueye.double(True), ueye.double()
            ), msg='Set auto exposure')
        elif self.auto_exposure == 'software':
            uEyeCheck(ueye.is_SetAutoParameter(
                self.cam, ueye.IS_SET_ENABLE_AUTO_SHUTTER,
                ueye.double(True), ueye.double()
            ), msg='Set auto exposure')
        if self.auto_gain_control == 'camera':
            uEyeCheck(ueye.is_SetAutoParameter(
                self.cam, ueye.IS_SET_ENABLE_AUTO_SENSOR_GAIN,
                ueye.double(True), ueye.double()
            ), msg='Set auto gain control')
        elif self.auto_gain_control == 'software':
            uEyeCheck(ueye.is_SetAutoParameter(
                self.cam, ueye.IS_SET_ENABLE_AUTO_GAIN,
                ueye.double(True), ueye.double()
            ), msg='Set auto gain control')
        # White-balance / colour correction only apply to colour modes
        if self.colour_mode != 'mono':
            if self.auto_white_balance == 'camera':
                uEyeCheck(ueye.is_SetAutoParameter(
                    self.cam, ueye.IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE,
                    ueye.double(ueye.WB_MODE_AUTO), ueye.double()
                ), msg='Set auto white-balance')
            elif self.auto_white_balance == 'software':
                uEyeCheck(ueye.is_SetAutoParameter(
                    self.cam, ueye.IS_SET_ENABLE_AUTO_WHITEBALANCE,
                    ueye.double(True), ueye.double()
                ), msg='Set auto white-balance')
            if self.colour_correction:
                uEyeCheck(ueye.is_SetColorCorrection(
                    self.cam, ueye.IS_CCOR_ENABLE_NORMAL,
                    ueye.double(self.colour_correction_factor)
                ), msg='Set colour correction')
        # Allocate image buffers. Note that the list of buffers is only used
        # at end when we clear them - ueye allocates data to the buffers
        # internally, so we don't interact with them directly otherwise
        self.image_buffers = []
        for i in range(self.buffer_size):
            buff = uEyeImageBuffer()
            uEyeCheck(ueye.is_AllocImageMem(
                self.cam, self.aoi_rect.s32Width, self.aoi_rect.s32Height,
                self.bits_per_pixel, buff.mem_ptr, buff.mem_id
            ), msg='AllocImageMem')
            uEyeCheck(ueye.is_AddToSequence(self.cam, buff.mem_ptr, buff.mem_id),
                      msg='AddToSequence')
            self.image_buffers.append(buff)
        uEyeCheck(ueye.is_InitImageQueue(self.cam, 0), msg='InitImageQueue')
        # Create one more image buffer which will be used to receive the
        # incoming images during video acquisition
        self.receptor_buffer = uEyeImageBuffer()
        # Query line pitch - the length of each image row in memory,
        # i.e. image width * number of colour channels (in bytes)
        self.c_pitch = ueye.int()
        uEyeCheck(ueye.is_GetImageMemPitch(self.cam, self.c_pitch),
                  msg='GetImageMemPitch')
        # Update info now all settings have been set
        self.update_pixel_clock_info()
        self.update_exposure_info()
        # Instantiate base class (handles video writing, buffering, etc.)
        super(uEyeVideoStream, self).__init__(**kwargs)
def _acquire_image_data(self):
# Begin image capture if necessary
if not self.RUNNING:
self.start_freerun()
# Get pointer to currently active image buffer, dependent on blocking
if self.block:
ueye.is_WaitForNextImage(
self.cam, 1000, self.receptor_buffer.mem_ptr,
self.receptor_buffer.mem_id
)
else:
ueye.is_GetImageMem(self.cam, self.receptor_buffer.mem_ptr)
# Collect image data into numpy array (might fail, e.g. if memory
# resource not yet free)
try:
frame = ueye.get_data(
self.receptor_buffer.mem_ptr, self.aoi_rect.s32Width,
self.aoi_rect.s32Height, self.bits_per_pixel,
self.c_pitch, copy=True
)
if self.colour_mode in ['bgr','rgb']:
frame = frame.reshape(self.aoi_rect.s32Height.value,
self.aoi_rect.s32Width.value,
3)
elif self.colour_mode == 'mono':
frame = frame.reshape(self.aoi_rect.s32Height.value,
self.aoi_rect.s32Width.value)
except Exception as e:
print(e)
frame = None
# Need to unlock buffer resource so it can be used again
ueye.is_UnlockSeqBuf(self.cam, self.receptor_buffer.mem_id,
self.receptor_buffer.mem_ptr)
# Return
return frame
def update_pixel_clock_info(self):
self.pixel_clock_info = uEyePixelClockInfo(self.cam)