00001 #if defined(__APPLE__)
00002
00003 #include <AvailabilityMacros.h>
00004 #ifdef MAC_OS_X_VERSION_10_6
00005
00006 #include "CameraSourceQTKit.h"
00007 #include "Shared/debuget.h"
00008 #include <cmath>
00009
00010 using namespace std;
00011
00012 INSTANTIATE_NAMEDENUMERATION_STATICS(CameraSourceQTKit::PixelFormat_t);
00013
//! One-time setup: registers pixel format names, exposes configuration entries, and creates the capture delegate.
void CameraSourceQTKit::init() {
	// Map FourCC strings to the PixelFormat_t values so the "Format" entry can be set by name;
	// the empty string maps to TYPE_UNKNOWN, meaning "let the camera use its native format".
	format.setPreferredNameForVal("",TYPE_UNKNOWN);
	format.setPreferredNameForVal("yuvs",TYPE_YUVS);
	format.setPreferredNameForVal("2vuy",TYPE_2VUY);
	format.setPreferredNameForVal("gray",TYPE_GRAY);
	format.addNameForVal("grayscale",TYPE_GRAY); // alias for "gray"
	// Expose user-tunable settings (descriptions are shown in the configuration UI / files).
	addEntry("Parallel",parallel,"If true, will attempt to use Apple's Grand Central Dispatch to do block processing.\n"
		"This parallelizes image processing, may slightly increase total CPU usage but should reduce per-frame wall time.");
	addEntry("HighRes",highRes,"If true, upsamples color channels horizontally to match Y channel, otherwise downsamples\n"
		"everything to common resolution (y/4, u,v/2). Set to true to use full resolution of\n"
		"camera (either as the “full” layer or if you are accessing the “double” layer),\n"
		"set to false if you are using half-resolution as the resolution of the “full” layer");
	addEntry("Layer",layer,"Controls the resolution layer at which the image should be processed.\n"
		"0 indicates “automatic” mode (picks layer closest to image's resolution), positive numbers indicate the resolution layer directly.\n"
		"Negative values are relative to the number of layers marked available by the vision setup, so that typically -1 would correspond to the “double” layer, and -2 would correspond to the “full” layer.");
	addEntry("Format",format,"If non-empty string, requests the camera hardware provide images in the specified format (a four character code aka FourCC)\n"+format.getDescription());
	setLoadSavePolicy(FIXED,SYNC);
	// The delegate forwards QTKit frame callbacks back into this C++ instance (see processImage).
	delegate = [[CameraSourceQTKitDelegate alloc] initWithTarget:this];
	// Balanced by [device release] in the destructor.
	[device retain];
}
00034
//! Destructor — releases the Objective-C objects retained/created in init().
//! The capture session must already have been torn down via deregisterSource().
CameraSourceQTKit::~CameraSourceQTKit() {
	ASSERT(session==NULL,"Still have session instance in CameraSourceQTKit destructor? Missed deregisterSource call?");
	// An autorelease pool is set up in case this runs on a thread without one.
	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
	[device release];   // balances the retain in init()
	[delegate release]; // balances the alloc in init()
	[pool release];
}
00042
//! Opens the camera device and builds the QTKit capture pipeline (device input -> preview output -> delegate).
//! On any failure, logs the error, tears down the partially-built session, and closes the device.
void CameraSourceQTKit::registerSource() {
	ASSERTRET(session==NULL,"re-registration?");
	// Pool for the Objective-C temporaries created below (dictionaries, errors, ...).
	NSAutoreleasePool *autoreleasepool = [[NSAutoreleasePool alloc] init];
	
	NSError* error=NULL;
	QTCaptureDeviceInput* input=NULL;
	QTCaptureVideoPreviewOutput* output=NULL;
	try {
		if(![device open:&error])
			throw std::runtime_error("Could not open camera device " + name);
		
		// 'session' is a member and is retained until deregisterSource().
		session = [[QTCaptureSession alloc] init];
		input = [[QTCaptureDeviceInput alloc] initWithDevice:device];
		if(![session addInput:input error:&error])
			throw std::runtime_error("Could not add camera device " + name + " to capture session");
		
		// Preview output delivers frames without the overhead of a recording pipeline.
		output = [[QTCaptureVideoPreviewOutput alloc] init];
		if(![session addOutput:output error:&error])
			throw std::runtime_error("Could not add preview output to capture session for "+name);
		
		// If the user requested a specific FourCC, ask the hardware/driver for that pixel format.
		if(format!=TYPE_UNKNOWN)
			[output setPixelBufferAttributes:[NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:format] forKey:(NSString*)kCVPixelBufferPixelFormatTypeKey]];
		
		// Frames will be delivered via the delegate's captureOutput:didOutputVideoFrame:... callback.
		[output setDelegate:delegate];
		
	} catch(const std::runtime_error& ex) {
		std::cerr << ex.what() << std::endl;
		if(error!=NULL) {
			std::cerr << "Description: " << [[error localizedDescription] UTF8String] << std::endl;
			std::cerr << "Reason: " << [[error localizedFailureReason] UTF8String] << std::endl;
		}
		// Undo partial setup so a later registerSource() can retry cleanly.
		if(session!=NULL) {
			[session release];
			session=NULL;
		}
		if([device isOpen])
			[device close];
	}
	// The session retains its inputs/outputs once added; on the error path these may be
	// nil, in which case the release messages are harmless no-ops.
	[input release];
	[output release];
	[autoreleasepool release];
}
00105
//! Tears down the capture pipeline built by registerSource(): stops and releases
//! the session, then closes the camera device.  Safe to call when nothing is registered.
void CameraSourceQTKit::deregisterSource() {
	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
	if(session) {
		if([session isRunning])
			[session stopRunning];
		[session release];
		session=NULL;
	}
	if([device isOpen])
		[device close];
	[pool release];
}
00118
//! Captures exactly one frame: starts the session in single-shot mode and blocks
//! until processImage() signals that a frame has been delivered.
//! @return true if a frame was captured, false if the session was already running
//!         or could not be started.
bool CameraSourceQTKit::advance() {
	NSAutoreleasePool *autoreleasepool = [[NSAutoreleasePool alloc] init];
	if([session isRunning]) {
		std::cerr << "Advancing " << name << ", but capture session already running" << std::endl;
		[autoreleasepool release];
		return false;
	}
	// Take the lock before starting so the callback can't signal before we wait.
	MarkScope lock(frameLock);
	oneFrame=true; // tells processImage() to stop the session after one frame
	[session startRunning];
	if(![session isRunning]) {
		// BUG FIX: this message previously said "capture session already running",
		// a copy-paste of the check above; this branch means the start failed.
		std::cerr << "Advancing " << name << ", but capture session failed to start" << std::endl;
		// Clear the single-shot flag so a later streaming session isn't stopped
		// after its first frame by this stale request.
		oneFrame=false;
		[autoreleasepool release];
		return false;
	}
	[autoreleasepool release];
	// Released by frameCond.broadcast() in processImage() once the frame arrives.
	frameCond.wait(frameLock);
	return true;
}
00138
//! Pauses image delivery by stopping the capture session (no-op if not running).
void CameraSourceQTKit::doFreeze() {
	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
	if([session isRunning])
		[session stopRunning];
	[pool release];
}
00145
//! Resumes image delivery by starting the capture session (no-op if already running).
void CameraSourceQTKit::doUnfreeze() {
	NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
	if(![session isRunning])
		[session startRunning];
	[pool release];
}
00152
//! Frame callback (invoked via CameraSourceQTKitDelegate): updates frame timing,
//! dispatches to the pixel-format-specific converter, and handles single-shot mode.
void CameraSourceQTKit::processImage(CVImageBufferRef videoFrame, QTSampleBuffer* sampleBuffer) {
	// Drop frames that arrive after the session was stopped (delivery is asynchronous).
	if(![session isRunning])
		return;
	
	// Smooth the frame timestamp: 'duration' still holds the PREVIOUS frame's period
	// here.  If the wall-clock arrival time is within one period of the predicted
	// time, step by the nominal period; otherwise resynchronize to the clock.
	float curTime = get_time()/1000.f;
	if(std::abs(curTime - (lastTime+duration)) >= duration)
		lastTime = curTime;
	else
		lastTime += duration;
	
	// Now record this frame's duration for the next prediction.
	NSTimeInterval dur;
	QTGetTimeInterval([sampleBuffer duration],&dur);
	duration = static_cast<float>(dur);
	
	CGSize dim = CVImageBufferGetEncodedSize(videoFrame);
	unsigned int width = static_cast<unsigned int>(dim.width);
	unsigned int height = static_cast<unsigned int>(dim.height);
	
	// Dispatch on the buffer's FourCC; each converter pair has a high-res (_U,
	// chroma upsampled) and downsampled (_D) variant selected by 'highRes'.
	unsigned int cc = [[sampleBuffer formatDescription] formatType];
	switch(cc) {
		case kComponentVideoUnsigned: // 'yuvs' — y0 u y1 v byte order
			if(highRes) {
				process_yuvs_U(reinterpret_cast<const unsigned char*>([sampleBuffer bytesForAllSamples]),width,height);
			} else {
				process_yuvs_D(reinterpret_cast<const unsigned char*>([sampleBuffer bytesForAllSamples]),width,height);
			}
			break;
		case kCVPixelFormatType_422YpCbCr8: // '2vuy' — u y0 v y1 byte order
			if(highRes) {
				process_2vuy_U(reinterpret_cast<const unsigned char*>([sampleBuffer bytesForAllSamples]),width,height);
			} else {
				process_2vuy_D(reinterpret_cast<const unsigned char*>([sampleBuffer bytesForAllSamples]),width,height);
			}
			break;
		case kCVPixelFormatType_8IndexedGray_WhiteIsZero:
			process_grayscale_zerowhite(reinterpret_cast<const unsigned char*>([sampleBuffer bytesForAllSamples]),width,height);
			break;
		default: {
			// Warn once per unknown format code (not once per frame).
			static unsigned int gaveWarning=0;
			if(gaveWarning!=cc) {
				gaveWarning=cc;
				std::cerr << "ERROR: CameraSourceQTKit doesn't know how to convert " << CC2Str(cc) << " pixel format" << std::endl;
			}
		} break;
	}
	
	// Single-shot mode (see advance()): stop after this frame and wake the waiter.
	if(oneFrame) {
		MarkScope lock(frameLock);
		oneFrame=false;
		[session stopRunning];
		frameCond.broadcast();
	}
}
00210
//! Renders a FourCC code as a printable 4-character string (e.g. 0x79757673 -> "yuvs").
//! NOTE(review): the '1234' multi-character-literal probe evaluates to '4' (0x34)
//! with gcc/clang regardless of target endianness, so the first branch appears to
//! be dead and the bytes are always emitted most-significant first — confirm intent.
std::string CameraSourceQTKit::CC2Str(unsigned int x) {
	std::string ans;
	if(('1234' & 0xFF) == 1) {
		// least-significant byte first
		for(int shift=0; shift<=24; shift+=8)
			ans.append(1, char((x>>shift) & 0xFF));
	} else {
		// most-significant byte first
		for(int shift=24; shift>=0; shift-=8)
			ans.append(1, char((x>>shift) & 0xFF));
	}
	return ans;
}
00226
//! Packs a 4-character string into a FourCC code (inverse of CC2Str).
//! @param x the format name; characters beyond the fourth are ignored,
//!          missing characters are treated as 0.
//! @return the packed 32-bit code.
unsigned int CameraSourceQTKit::Str2CC(const std::string& x) {
	// BUG FIX: the previous version shifted raw 'char' values; with a signed
	// char type, any byte >= 0x80 sign-extends on promotion to int and corrupts
	// the upper bytes of the result.  It also indexed x[0..3] without checking
	// the length (undefined behavior for short strings).  Stage the bytes
	// through unsigned char, zero-padding short input; results are unchanged
	// for all 4-character ASCII codes.
	unsigned int b[4] = {0,0,0,0};
	for(size_t i=0; i<4 && i<x.size(); ++i)
		b[i] = static_cast<unsigned char>(x[i]);
	if(('1234' & 0xFF) == 1) {
		return (b[0]<<0) | (b[1]<<8) | (b[2]<<16) | (b[3]<<24);
	} else {
		return (b[0]<<24) | (b[1]<<16) | (b[2]<<8) | (b[3]<<0);
	}
}
00234
00235
00236
00237
//! Number of source image rows processed by each dispatch_apply_f work unit.
static const unsigned int ROWS_PER_BLOCK=4;

//! Shared, read-only arguments for the per-block conversion workers below,
//! passed through dispatch_apply_f's void* context parameter.
struct BlockContext {
	const unsigned char* src; //!< start of the source frame data
	const unsigned int srcStride; //!< bytes per source row (width*2 for yuvs/2vuy, width for grayscale)
	unsigned char* dst; //!< start of the destination interleaved y/u/v buffer
};
00246
00247
00248
00249
00250
//! Converts one band of ROWS_PER_BLOCK rows of 'yuvs' data (y0 u y1 v byte order)
//! to interleaved y/u/v, duplicating each chroma sample across both pixels of
//! the pair ("upsampled" horizontally).  @a i selects which band to process.
static void process_yuvs_UBlock(void* context, size_t i) {
	const BlockContext& ctxt = *reinterpret_cast<BlockContext*>(context);
	const unsigned int blockBytes = ctxt.srcStride * ROWS_PER_BLOCK;
	const unsigned char* in = ctxt.src + blockBytes*i;
	const unsigned char* const inEnd = in + blockBytes;
	// every 4 source bytes (2 pixels) expand to 6 destination bytes
	unsigned char* out = ctxt.dst + blockBytes/2*3*i;
	for(; in!=inEnd; in+=4, out+=6) {
		const unsigned char y0=in[0], u=in[1], y1=in[2], v=in[3];
		out[0]=y0; out[1]=u; out[2]=v;
		out[3]=y1; out[4]=u; out[5]=v;
	}
}
00266
//! Converts a full 'yuvs' frame to an interleaved y/u/v image region at full
//! resolution (chroma duplicated horizontally) and submits it via setImage().
void CameraSourceQTKit::process_yuvs_U(const unsigned char * s, unsigned int width, unsigned int height) {
	const unsigned int components=3;
	ssize_t reqSize = sizeof(ImageHeader) + width * height * components;
	RCRegion * region = getUnusedRegion(reqSize, 0);
	unsigned char * buf = reinterpret_cast<unsigned char*>(region->Base());
	// placement-new the header at the front of the region
	new (region->Base()) ImageHeader(0, layer, width, height, components, ++frame, get_time(), nextName());
	
	unsigned char * dst = buf + sizeof(ImageHeader);
	if(parallel) {
		// fan bands of ROWS_PER_BLOCK rows out across Grand Central Dispatch
		BlockContext ctxt = { s, width*2, dst };
		dispatch_apply_f(height/ROWS_PER_BLOCK, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), &ctxt, process_yuvs_UBlock);
	} else {
		// serial conversion: each y0 u y1 v quad becomes two y/u/v pixels
		const unsigned char * const dstEnd=dst+width*height*components;
		while(dst!=dstEnd) {
			const unsigned char y0=s[0], u=s[1], y1=s[2], v=s[3];
			dst[0]=y0; dst[1]=u; dst[2]=v;
			dst[3]=y1; dst[4]=u; dst[5]=v;
			s+=4;
			dst+=6;
		}
		ASSERTRET(dst-buf==reqSize,"CameraSource bad imgFromyuvs " << reqSize << " vs " << (dst-buf));
	}
	
	setImage(region);
}
00295
//! Converts one band of 2*ROWS_PER_BLOCK source rows of 'yuvs' data (y0 u y1 v)
//! to interleaved y/u/v at half resolution in both dimensions: each output pixel
//! averages a 2x2 source block (y over 4 samples, u and v over the 2 rows).
static void process_yuvs_DBlock(void* context, size_t i) {
	const BlockContext* const ctxt = reinterpret_cast<BlockContext*>(context);
	const unsigned int srcStride = ctxt->srcStride;
	// each work unit consumes 2*ROWS_PER_BLOCK source rows...
	const unsigned char* src = ctxt->src+srcStride*2*i*ROWS_PER_BLOCK;
	// ...and produces ROWS_PER_BLOCK output rows of 3-byte pixels (srcStride/4 pixels per row)
	unsigned char* dst = ctxt->dst+srcStride/4*3*i*ROWS_PER_BLOCK;
	unsigned int y,u,v;
	for(unsigned int r=0; r<ROWS_PER_BLOCK; ++r) {
		const unsigned char* const srcRowEnd = src+srcStride;
		while(src!=srcRowEnd) {
			// y0 from this row and the row below
			y=*src;
			y+=*(src+srcStride);
			// u from both rows
			u=*++src;
			u+=*(src+srcStride);
			// y1 from both rows
			y+=*++src;
			y+=*(src+srcStride);
			// v from both rows
			v=*++src;
			v+=*(src+srcStride);
			// write the averaged pixel
			*dst++ = y/4;
			*dst++ = u/2;
			*dst++ = v/2;
			++src;
		}
		// skip the second source row of the pair, already consumed via the +srcStride reads
		src+=srcStride;
	}
}
00326
//! Converts a full 'yuvs' frame to an interleaved y/u/v image at half the source
//! resolution (2x2 averaging, see process_yuvs_DBlock) and submits it via setImage().
void CameraSourceQTKit::process_yuvs_D(const unsigned char * s, unsigned int srcWidth, unsigned int srcHeight) {
	const unsigned int components=3;
	const unsigned int width=srcWidth/2;
	const unsigned int height=srcHeight/2;
	ssize_t reqSize = sizeof(ImageHeader) + width * height * components;
	RCRegion * region = getUnusedRegion(reqSize, 0);
	unsigned char * buf = reinterpret_cast<unsigned char*>(region->Base());
	// placement-new the header at the front of the region
	new (region->Base()) ImageHeader(0, layer, width, height, components, ++frame, get_time(), nextName());
	
	// the same worker handles both the parallel and serial paths
	BlockContext ctxt = { s, srcWidth*2, buf + sizeof(ImageHeader) };
	const size_t nBlocks = height/ROWS_PER_BLOCK;
	if(parallel) {
		dispatch_apply_f(nBlocks, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), &ctxt, process_yuvs_DBlock);
	} else {
		for(size_t i=0; i<nBlocks; ++i)
			process_yuvs_DBlock(&ctxt,i);
	}
	
	setImage(region);
}
00348
00349
00350
00351
00352
//! Converts one band of ROWS_PER_BLOCK rows of '2vuy' data (u y0 v y1 byte order)
//! to interleaved y/u/v, duplicating each chroma sample across both pixels of
//! the pair ("upsampled" horizontally).  @a i selects which band to process.
static void process_2vuy_UBlock(void* context, size_t i) {
	const BlockContext& ctxt = *reinterpret_cast<BlockContext*>(context);
	const unsigned int blockBytes = ctxt.srcStride * ROWS_PER_BLOCK;
	const unsigned char* in = ctxt.src + blockBytes*i;
	const unsigned char* const inEnd = in + blockBytes;
	// every 4 source bytes (2 pixels) expand to 6 destination bytes
	unsigned char* out = ctxt.dst + blockBytes/2*3*i;
	for(; in!=inEnd; in+=4, out+=6) {
		const unsigned char u=in[0], y0=in[1], v=in[2], y1=in[3];
		out[0]=y0; out[1]=u; out[2]=v;
		out[3]=y1; out[4]=u; out[5]=v;
	}
}
00368
//! Converts a full '2vuy' frame to an interleaved y/u/v image region at full
//! resolution (chroma duplicated horizontally) and submits it via setImage().
void CameraSourceQTKit::process_2vuy_U(const unsigned char * s, unsigned int width, unsigned int height) {
	const unsigned int components=3;
	ssize_t reqSize = sizeof(ImageHeader) + width * height * components;
	RCRegion * region = getUnusedRegion(reqSize, 0);
	unsigned char * buf = reinterpret_cast<unsigned char*>(region->Base());
	// placement-new the header at the front of the region
	new (region->Base()) ImageHeader(0, layer, width, height, components, ++frame, get_time(), nextName());
	
	unsigned char * dst = buf + sizeof(ImageHeader);
	if(parallel) {
		// fan bands of ROWS_PER_BLOCK rows out across Grand Central Dispatch
		BlockContext ctxt = { s, width*2, dst };
		dispatch_queue_t q_default = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
		dispatch_apply_f(height/ROWS_PER_BLOCK, q_default, &ctxt, process_2vuy_UBlock);
	} else {
		// serial conversion: each u y0 v y1 quad becomes two y/u/v pixels
		const unsigned char * const dstEnd=dst+width*height*components;
		while(dst!=dstEnd) {
			dst[1] = dst[4] = *s++;
			dst[0] = *s++;
			dst[2] = dst[5] = *s++;
			dst[3] = *s++;
			dst+=6;
		}
		// BUG FIX: diagnostic previously said "imgFromyuvs" (copy-paste from the
		// yuvs converter); this is the 2vuy path.
		ASSERTRET(dst-buf==reqSize,"CameraSource bad imgFrom2vuy " << reqSize << " vs " << (dst-buf));
	}
	
	setImage(region);
}
00397
//! Converts one band of 2*ROWS_PER_BLOCK source rows of '2vuy' data (u y0 v y1)
//! to interleaved y/u/v at half resolution in both dimensions: each output pixel
//! averages a 2x2 source block (y over 4 samples, u and v over the 2 rows).
static void process_2vuy_DBlock(void* context, size_t i) {
	const BlockContext* const ctxt = reinterpret_cast<BlockContext*>(context);
	const unsigned int srcStride = ctxt->srcStride;
	// each work unit consumes 2*ROWS_PER_BLOCK source rows...
	const unsigned char* src = ctxt->src+srcStride*2*i*ROWS_PER_BLOCK;
	// ...and produces ROWS_PER_BLOCK output rows of 3-byte pixels (srcStride/4 pixels per row)
	unsigned char* dst = ctxt->dst+srcStride/4*3*i*ROWS_PER_BLOCK;
	unsigned int y,u,v;
	for(unsigned int r=0; r<ROWS_PER_BLOCK; ++r) {
		const unsigned char* const srcRowEnd = src+srcStride;
		while(src!=srcRowEnd) {
			// u from this row and the row below
			u=*src;
			u+=*(src+srcStride);
			// y0 from both rows
			y=*++src;
			y+=*(src+srcStride);
			// v from both rows
			v=*++src;
			v+=*(src+srcStride);
			// y1 from both rows
			y+=*++src;
			y+=*(src+srcStride);
			// write the averaged pixel
			*dst++ = y/4;
			*dst++ = u/2;
			*dst++ = v/2;
			++src;
		}
		// skip the second source row of the pair, already consumed via the +srcStride reads
		src+=srcStride;
	}
}
00428
//! Converts a full '2vuy' frame to an interleaved y/u/v image at half the source
//! resolution (2x2 averaging, see process_2vuy_DBlock) and submits it via setImage().
void CameraSourceQTKit::process_2vuy_D(const unsigned char * s, unsigned int srcWidth, unsigned int srcHeight) {
	const unsigned int components=3;
	const unsigned int width=srcWidth/2;
	const unsigned int height=srcHeight/2;
	ssize_t reqSize = sizeof(ImageHeader) + width * height * components;
	RCRegion * region = getUnusedRegion(reqSize, 0);
	unsigned char * buf = reinterpret_cast<unsigned char*>(region->Base());
	// placement-new the header at the front of the region
	new (region->Base()) ImageHeader(0, layer, width, height, components, ++frame, get_time(), nextName());
	
	// the same worker handles both the parallel and serial paths
	BlockContext ctxt = { s, srcWidth*2, buf + sizeof(ImageHeader) };
	const size_t nBlocks = height/ROWS_PER_BLOCK;
	if(parallel) {
		dispatch_apply_f(nBlocks, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), &ctxt, process_2vuy_DBlock);
	} else {
		for(size_t i=0; i<nBlocks; ++i)
			process_2vuy_DBlock(&ctxt,i);
	}
	
	setImage(region);
}
00450
00451
00452
00453
//! Converts one band of ROWS_PER_BLOCK rows of white-is-zero grayscale data to
//! the y channel of an interleaved y/u/v image: inverts the polarity and rescales
//! 0..255 into the nominal 16..235 luma range.  Only the y byte of each output
//! pixel is written; the caller pre-fills the buffer with neutral chroma.
static void process_grayscale_zerowhite_Block(void* context, size_t i) {
	const BlockContext& ctxt = *reinterpret_cast<BlockContext*>(context);
	const unsigned int count = ctxt.srcStride * ROWS_PER_BLOCK; // one byte per source pixel
	const unsigned char* in = ctxt.src + count*i;
	const unsigned char* const inEnd = in + count;
	unsigned char* out = ctxt.dst + count*3*i;
	for(; in!=inEnd; ++in, out+=3) {
		// 0 (white) -> 235, 255 (black) -> 16
		*out = 235 - (*in * 220)/256;
	}
}
00467
//! Converts a full white-is-zero grayscale frame to an interleaved y/u/v image
//! (neutral chroma) and submits it via setImage().
void CameraSourceQTKit::process_grayscale_zerowhite(const unsigned char * s, unsigned int width, unsigned int height) {
	const unsigned int components=3;
	ssize_t reqSize = sizeof(ImageHeader) + width * height * components;
	RCRegion * region = getUnusedRegion(reqSize, 0);
	unsigned char * buf = reinterpret_cast<unsigned char*>(region->Base());
	// placement-new the header at the front of the region
	new (region->Base()) ImageHeader(0, layer, width, height, components, ++frame, get_time(), nextName());
	
	unsigned char* dst = buf + sizeof(ImageHeader);
	// pre-fill with neutral chroma (128); the workers only write the y bytes
	memset(dst, 128, width*height*components);
	BlockContext ctxt = { s, width, dst };
	const size_t nBlocks = height/ROWS_PER_BLOCK;
	if(parallel) {
		dispatch_apply_f(nBlocks, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), &ctxt, process_grayscale_zerowhite_Block);
	} else {
		for(size_t i=0; i<nBlocks; ++i)
			process_grayscale_zerowhite_Block(&ctxt,i);
	}
	
	setImage(region);
}
00489
00490
00491
00492 #pragma mark-
00493
@implementation CameraSourceQTKitDelegate

//! Designated initializer — stores the C++ camera source that frame callbacks
//! should be forwarded to.  @a tgt must outlive this delegate.
-(CameraSourceQTKitDelegate*)initWithTarget:(class CameraSourceQTKit*)tgt {
	// BUG FIX: previously 'self' was returned without chaining to the
	// superclass initializer, violating the Cocoa init contract.
	if((self = [super init]) != nil) {
		target=tgt;
	}
	return self;
}

//! QTCaptureVideoPreviewOutput delegate callback — forwards each captured
//! frame into the C++ side for conversion and delivery.
-(void)captureOutput:(QTCaptureOutput *)captureOutput didOutputVideoFrame:(CVImageBufferRef)videoFrame withSampleBuffer:(QTSampleBuffer *)sampleBuffer fromConnection:(QTCaptureConnection *)connection
{
	target->processImage(videoFrame,sampleBuffer);
}

@end
00516
00517 #endif // 10.6 or later
00518 #endif // Apple platform
00519
00520
00521
00522
00523
00524
00525
00526
00527
00528
00529