CameraSourceOSX.cc

00001 #if defined(__APPLE__) && !defined(__x86_64__)
00002 
00003 #include "CameraSourceOSX.h"
00004 #include "Shared/LoadSave.h"
00005 #include "Shared/get_time.h"
00006 #include "Shared/RobotInfo.h"
00007 #include <pthread.h>
00008 #include <set>
00009 #include <sstream>
00010 #include <iostream>
00011 
00012 using namespace std;
00013 
00014 static pthread_key_t qtInit;
00015 struct QTThreadInfo {};
00016 
00017 static bool doGlobalQTInit();
00018 static bool autoRegisterQTInit = doGlobalQTInit();
00019 
00020 bool checkQTThreadInit() {
00021   if(!autoRegisterQTInit)
00022     return false;
00023   if(pthread_getspecific(qtInit)!=NULL)
00024     return true;
00025   OSErr err = EnterMoviesOnThread(kQTEnterMoviesFlagDontSetComponentsThreadMode);
00026   if(err!=noErr) {
00027     cerr << "CameraSource: EnterMoviesOnThread returned error " << err << endl;
00028     return false;
00029   }
00030   pthread_setspecific(qtInit,new QTThreadInfo);
00031   return true;
00032 }
00033 static void qtThreadDestructor(void* threadInfo) {
00034   ExitMoviesOnThread();
00035   delete static_cast<QTThreadInfo*>(threadInfo);
00036 }
00037 static bool doGlobalQTInit() {
00038   int err = pthread_key_create(&qtInit,qtThreadDestructor);
00039   if(err!=0)
00040     cerr << "CameraSource: error during doGlobalQTInit, pthread_key_create: " << strerror(err) << endl;
00041   return (err==0);
00042 }
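/* A minimal sketch of how a worker thread uses the machinery above (illustrative only,
 * not part of the build): any thread that is about to touch the sequence grabber calls
 * checkQTThreadInit() first, which runs EnterMoviesOnThread() once per thread; the
 * pthread key's destructor (qtThreadDestructor) then calls ExitMoviesOnThread()
 * automatically when that thread exits.  The worker function name is hypothetical.
 *
 *   static void* captureWorker(void*) {
 *     if(!checkQTThreadInit())   // first call in this thread performs EnterMoviesOnThread()
 *       return NULL;             // QuickTime unavailable on this thread; bail out
 *     // ... safe to make SG* / QuickTime calls from this thread ...
 *     return NULL;               // at thread exit, qtThreadDestructor() runs ExitMoviesOnThread()
 *   }
 */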
00043 
00044 CameraSource::~CameraSource() {
00045   if(thread!=NULL)
00046     setDataSourceThread(NULL);
00047   if(sgChan!=NULL)
00048     SGDisposeChannel(sg,sgChan);
00049   sgChan=NULL;
00050   if(gworld!=NULL)
00051     DisposeGWorld(gworld);
00052   gworld=NULL;
00053   // docs say that QTNewGWorldFromPtr is supposed to mark the underlying buffer
00054   // so that DisposeGWorld doesn't release it, but that doesn't seem to be the case:
00055   // freeing it here as well triggers double-free warnings, so let DisposeGWorld release it.
00056   /*if(gworldBuf!=NULL)
00057     free(gworldBuf);*/
00058   gworldBuf=NULL;
00059   if(sg!=NULL)
00060     CloseComponent(sg);
00061   sg=NULL;
00062   
00063   delete [] imgbuf;
00064   imgbuf=NULL;
00065 }
00066 
00067 void CameraSource::initCamera() {
00068   OSErr err;
00069   
00070   // open the sequence grabber, assuming there's only ever one component of this type listed
00071   sg = OpenDefaultComponent(SeqGrabComponentType, 0);
00072   if(sg==NULL) throw std::make_pair((OSErr)0,"OpenDefaultComponent(SeqGrabComponentType,0)"); // no OSErr available here; use the same pair type as the other throws
00073   
00074   // initialize the default sequence grabber component
00075   err = SGInitialize(sg);
00076   if(err!=noErr) throw std::make_pair(err,"SGInitialize");
00077   
00078   err = SGSetDataRef(sg, 0, 0, seqGrabToMemory | seqGrabDontMakeMovie | seqGrabDataProcIsInterruptSafe);
00079   if(err!=noErr) throw std::make_pair(err,"SGSetDataRef");
00080     
00081   // this section would get the "default" capture device
00082   /*
00083   ComponentDescription searchCompDesc;
00084   memset(&searchCompDesc,0,sizeof(searchCompDesc));
00085   searchCompDesc.componentType=SeqGrabChannelType;
00086   searchCompDesc.componentSubType=VideoMediaType;
00087   Component chanComponent = FindNextComponent(NULL,&searchCompDesc);
00088   if(chanComponent==0) throw std::make_pair(err,"FindNextComponent");
00089     
00090   err = SGNewChannelFromComponent(sg,&sgChan,chanComponent);
00091   if(err!=noErr) throw std::make_pair(err,"SGNewChannelFromComponent");
00092    */
00093 
00094   // instead, open the *specified* capture device
00095   // thanks Harald ( hxr AT users sourceforge net ) for 'wacaw' source to demonstrate this
00096   err = SGNewChannel(sg, VideoMediaType, &sgChan);
00097   if(err!=noErr) throw std::make_pair(err,"SGNewChannel");
00098   
00099   unsigned char pstr[256]; // sigh, convert deviceName to a pascal-style string
00100   pstr[0]=deviceName.size();
00101   memcpy(pstr+1,deviceName.c_str(),pstr[0]);
00102   err = SGSetChannelDevice(sgChan, pstr);
00103   if(err!=noErr) throw std::make_pair(err,"SGSetChannelDevice");
00104   err = SGSetChannelDeviceInput(sgChan,devInputIdx);
00105   if(err!=noErr) throw std::make_pair(err,"SGSetChannelDeviceInput");
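/* The Pascal-string conversion above could also be factored into a small helper that
 * clamps to the 255-byte limit of a Str255 (hypothetical sketch, not used by this file;
 * the helper name c2pstr255 is made up for illustration):
 *
 *   static void c2pstr255(const std::string& src, unsigned char pstr[256]) {
 *     size_t n = src.size()<255 ? src.size() : 255; // a Str255 length byte can hold at most 255
 *     pstr[0] = (unsigned char)n;                   // length prefix instead of a NUL terminator
 *     memcpy(pstr+1, src.data(), n);
 *   }
 */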
00106   
00107   // does this need to be done? not found in any sample code...
00108   // doesn't seem to work here in any case... 
00109   // (error -9400: noDeviceForChannel; tried before or after the SGSetChannelDevice()...)
00110   /*err = SGInitChannel(sgChan,sg);
00111   if(err!=noErr) throw std::make_pair(err,"SGInitChannel");*/
00112       
00113   // get the active rectangle 
00114   Rect  srcBounds;
00115   err = SGGetSrcVideoBounds(sgChan, &srcBounds);
00116   if(err!=noErr)
00117     std::cerr << "Warning: CameraSource SGGetSrcVideoBounds returned error " << err << std::endl;
00118   else {
00119     srcBounds.right -= srcBounds.left;
00120     srcBounds.bottom -= srcBounds.top;
00121     srcBounds.left = srcBounds.top = 0;
00122     //cout << "original bounds " << srcBounds.right << "x" << srcBounds.bottom << endl;
00123     if((unsigned int)srcBounds.right>CameraResolutionX*2 || (unsigned int)srcBounds.bottom>CameraResolutionY*2) {
00124       srcBounds.right=CameraResolutionX*2;
00125       srcBounds.bottom=CameraResolutionY*2;
00126       //cout << "restricting to " << srcBounds.right << "x" << srcBounds.bottom << endl;
00127     }
00128     err = SGSetChannelBounds(sgChan, &srcBounds);
00129     if(err!=noErr) std::cerr << "Warning: SGSetChannelBounds returned error " << err << std::endl;
00130   }
00131   
00132   // set options for our expected usage
00133   err = SGSetChannelUsage(sgChan, seqGrabRecord | seqGrabLowLatencyCapture /* | seqGrabPreview | seqGrabAlwaysUseTimeBase */);
00134   if(err!=noErr) throw std::make_pair(err,"SGSetChannelUsage");
00135   
00136   // don't need a gworld (yet) -- we want to grab the raw YUV bytes from the camera
00137   /*unsigned int width = srcBounds.right-srcBounds.left;
00138   unsigned int height = srcBounds.bottom-srcBounds.top;
00139   gworldBuf = new char[width*height*4];
00140   err = QTNewGWorldFromPtr(&gworld, k32ARGBPixelFormat, &srcBounds, NULL, NULL, 0, gworldBuf, width*4);
00141   if(err!=noErr) throw std::make_pair(err,"QTNewGWorldFromPtr"); */
00142   
00143   // still have to call SGSetGWorld() though or else SGPrepare() will complain later
00144   err = SGSetGWorld(sg, NULL, NULL);
00145   if(err!=noErr) throw std::make_pair(err,"SGSetGWorld");
00146   
00147   //SGSettingsDialog(sg, sgChan, 0, nil, 0, nil, 0);
00148   
00149   // set up the video bottlenecks so we can get our queued frame count
00150   VideoBottles vb;
00151   memset(&vb,0,sizeof(VideoBottles));
00152   err = SGGetVideoBottlenecks(sgChan, &vb);
00153   if(err!=noErr) throw std::make_pair(err,"SGGetVideoBottlenecks");
00154   
00155   vb.procCount = 9; // there are 9 bottleneck procs; this must be filled in
00156   vb.grabCompressCompleteProc = NewSGGrabCompressCompleteBottleUPP(compressCompleteBottleProc);
00157   
00158   err = SGSetVideoBottlenecks(sgChan, &vb);
00159   if(err!=noErr) throw std::make_pair(err,"SGSetVideoBottlenecks");
00160   
00161   // specify a sequence grabber data function
00162   err = SGSetDataProc(sg, NewSGDataUPP(grabDataProc), (long)this);
00163   if(err!=noErr) throw std::make_pair(err,"SGSetDataProc");
00164   err = SGSetChannelRefCon(sgChan, (long)this); // callback reference context
00165   if(err!=noErr) throw std::make_pair(err,"SGSetChannelRefCon");
00166 
00167   // try to switch to YUV mode
00168   // kComponentVideoCodecType doesn't seem to do what I expect, and k422YpCbCr8CodecType doesn't seem supported...
00169   err = SGSetVideoCompressorType(sgChan,k422YpCbCr8CodecType); // produces 2vuy images
00170   if(err!=noErr) {
00171     if(err!=noCodecErr) std::cerr << "    Could not switch to yuv codec (k422YpCbCr8CodecType), err " << err << std::endl;
00172     
00173     // try component video...
00174     // Actually, this is a little slower than converting each frame individually, so we'll do that instead.
00175     // See color space specification in NewGWorld call below...
00176     /*err = SGSetVideoCompressorType(sgChan,kComponentVideoCodecType); // produces yuv2/yuvu images
00177     if(err==noCodecErr) std::cerr << "    Could not switch to yuv codec (k422YpCbCr8CodecType or kComponentVideoCodecType), not supported by camera (?)" << std::endl;
00178     else if(err!=noErr) std::cerr << "    Could not switch to yuv codec (kComponentVideoCodecType), err " << err << std::endl;*/
00179   }
00180   
00181   // just for debugging...
00182   /*OSType ct;
00183   err = SGGetVideoCompressorType(sgChan,&ct);
00184   if(err!=noErr) std::cerr << "    Could not get current codec" << std::endl;
00185   else { std::cout << "    Current codec type is "; dumpLiteral(ct); std::cout << std::endl; }*/
00186 }
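/* initCamera() reports failures by throwing a std::pair<OSErr,const char*> holding the
 * error code and the name of the QuickTime call that failed.  A caller might wrap it
 * like this (hypothetical sketch; the real handler lives elsewhere in the Tekkotsu HAL
 * and is not shown in this file):
 *
 *   try {
 *     initCamera();
 *   } catch(const std::pair<OSErr,const char*>& ex) {
 *     std::cerr << "CameraSource: " << ex.second << " returned error " << ex.first << std::endl;
 *   }
 */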
00187 
00188 void CameraSource::setDataSourceThread(LoadDataThread* ldt) {
00189   DataSource::setDataSourceThread(ldt);
00190   if(!checkQTThreadInit())
00191     return;
00192   if(thread!=NULL && !grabbing) { // we're about to be used!
00193     // lights...camera...
00194     OSErr err = SGPrepare(sg, false, true);
00195     if(err!=noErr) {
00196       cerr << "CameraSource: SGPrepare returned error " << err << endl;
00197       return;
00198     }
00199     
00200     /*
00201     // make sure the timebase used by the video channel is being driven by
00202     // the sound clock if there is a sound channel, this has to be done
00203     // after calling SGPrepare - see Q&A 1314
00204     if (NULL != sgchanSound) {
00205       TimeBase soundTimeBase = NULL, sgTimeBase = NULL;
00206       err = SGGetTimeBase(this->seqGrab, &sgTimeBase);
00207       if(noErr == err)
00208         err = SGGetChannelTimeBase(sgchanSound, &soundTimeBase);
00209       if (noErr == err && NULL != soundTimeBase)
00210         SetTimeBaseMasterClock(sgTimeBase, (Component)GetTimeBaseMasterClock(soundTimeBase), NULL);
00211     }
00212      */
00213     
00214     // ...action
00215     err = SGStartRecord(sg);
00216     if(err!=noErr) {
00217       cerr << "CameraSource: SGStartRecord returned error " << err << endl;
00218       err = SGRelease(sg); // undo SGPrepare()
00219       if(err!=noErr)
00220         cerr << "CameraSource: SGRelease returned error during recovery " << err << endl;
00221       return;
00222     }
00223     
00224     grabbing=true;
00225     
00226   } else if(thread==NULL || grabbing) {
00227     OSErr err = SGStop(sg); // undo SGStartPreview or SGStartRecord
00228     if(err!=noErr)
00229       cerr << "CameraSource: SGStop returned error " << err << endl;
00230     err = SGRelease(sg); // undo SGPrepare()
00231     if(err!=noErr)
00232       cerr << "CameraSource: SGRelease returned error " << err << endl;
00233     grabbing=false;
00234   }
00235 }
00236 
00237 void CameraSource::setDataSourceFramerate(float fps) {
00238   DataSource::setDataSourceFramerate(fps);
00239   float s = std::max(getTimeScale(),.1f);
00240   ComponentResult err = SGSetFrameRate(sgChan, FloatToFixed(framerate*s));
00241   if(err!=noErr)
00242     std::cerr << "CameraSource::setDataSourceFramerate("<<fps<<") had error calling SGSetFrameRate " << err << endl;
00243 }
00244 
00245 
00246 unsigned int CameraSource::getData(const char *& payload, unsigned int& payloadSize, unsigned int& timestamp, std::string& dataname) {
00247   payload=NULL;
00248   payloadSize=0;
00249   if(!checkQTThreadInit() || !grabbing)
00250     return frame;
00251   
00252   //cout << "getData at " << get_time() << " request " << timestamp << endl;
00253   setDataSourceFramerate(framerate);
00254   
00255   unsigned int t=get_time();
00256   if(timestamp>t)
00257     usleep(static_cast<unsigned int>((timestamp-t)*1000/(getTimeScale()>0?getTimeScale():1.f)));
00258   if(!grabbing) // in case we shutdown while asleep!
00259     return frame;
00260   
00261   unsigned int prev=frame;
00262   
00263   OSErr err = SGIdle(sg);
00264   if (err!=noErr && err!=callbackerr) {
00265     // some error specific to SGIdle occurred - any errors returned from the
00266     // data proc will also show up here and we don't want to write over them
00267     
00268     // in QT 4 you would always encounter a cDepthErr error after a user drags
00269     // the window, this failure condition has been greatly relaxed in QT 5
00270     // it may still occur but should only apply to vDigs that really control
00271     // the screen
00272     
00273     // you don't always know where these errors originate from, some may come
00274     // from the VDig...
00275     
00276     //DisplayError(pMungData->pWindow, "SGIdle", err);
00277     cerr << "CameraSource: SGIdle error " << err << " occurred, resetting camera!" << endl;
00278     
00279     // ...to fix this we simply call SGStop and SGStartRecord again
00280     // calling stop allows the SG to release and re-prepare for grabbing
00281     // hopefully fixing any problems, this is obviously a very relaxed
00282     // approach
00283     err = SGStop(sg); // undo SGStartPreview or SGStartRecord
00284     if(err!=noErr)
00285       cerr << "CameraSource: SGStop returned error during recovery " << err << endl;
00286     err = SGStartRecord(sg);
00287     if(err!=noErr) {
00288       cerr << "CameraSource: SGStartRecord returned error during recovery " << err << endl;
00289       grabbing=false;
00290       err = SGRelease(sg); // undo SGPrepare()
00291       if(err!=noErr)
00292         cerr << "CameraSource: SGRelease returned error during recovery " << err << endl;
00293     }
00294     return frame;
00295   }
00296   
00297   if(prev!=frame && imgbufUsed!=0) {
00298     //cout << "new frame! ";
00299     payload=imgbuf; // has been filled in during callback of SGIdle
00300     payloadSize=imgbufUsed;
00301     timestamp=get_time();
00302     dataname=nextName();
00303   }
00304   
00305   return frame;
00306 }
00307   
00308 void CameraSource::dumpLiteral(OSType t) {
00309   union {
00310     OSType v;
00311     char s[4];
00312   } x;
00313   x.v=t;
00314   cout << x.s[3] << x.s[2] << x.s[1] << x.s[0];
00315 }
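/* dumpLiteral() above prints the four characters in reverse byte order through a union,
 * which gives the expected result on little-endian (Intel) builds but would print the
 * code reversed on big-endian PowerPC.  A byte-order independent sketch (illustrative
 * only; dumpLiteralPortable is a made-up name) shifts the 32-bit code directly:
 *
 *   static void dumpLiteralPortable(OSType t) {
 *     for(int shift=24; shift>=0; shift-=8)
 *       std::cout << (char)((t >> shift) & 0xFF); // OSType packs four chars, highest byte first
 *   }
 */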
00316 
00317 
00318 /* 
00319 * Purpose:   used to allow us to figure out how many frames are queued by the vDig
00320 *
00321 * Notes:     the UInt8 *queuedFrameCount replaces Boolean *done.  0 (==false) still means no frames, and 1 (==true) one,
00322 *            but if more than one are available, the number should be returned here - The value 2 previously meant more than
00323 *            one frame, so some VDIGs may return 2 even if more than 2 are available, and some will still return 1 as they are
00324 *            using the original definition.
00325 */
00326 pascal ComponentResult CameraSource::compressCompleteBottleProc(SGChannel c, UInt8 *queuedFrameCount, SGCompressInfo *ci, TimeRecord *t, long refCon)
00327 {
00328   OSErr err;
00329   CameraSource* cam = (CameraSource*)refCon;
00330   if (NULL == cam) return -1;
00331   
00332   // call the original proc; you must do this
00333   err = SGGrabCompressComplete(c, queuedFrameCount, ci, t);
00334   
00335   // save the queued frame count so we have it
00336   cam->queuedFrames = *queuedFrameCount;
00337   /*if(cam->queuedFrames>0)
00338     cout << "compressCompleteBottleProc " << cam->queuedFrames << endl;*/
00339   
00340   return err;
00341 }
00342 
00343 
00344 /*****************************************************
00345 * Purpose:   sequence grabber data procedure - this is where the work is done
00346 *
00347 * Notes:
00348 
00349 the sequence grabber calls the data function whenever
00350 any of the grabber's channels has digitized data available.
00351 
00352 NOTE: We really mean any: if you have both an audio and a video channel, the DataProc will
00353 be called for either channel whenever data has been captured, so be sure to check which
00354 channel is being passed in.  In this file we never create an audio channel, so we know
00355 we're always dealing with video.
00356 
00357 In this adaptation, the data function either converts the captured YUV data (2vuy or yuv2)
00358 directly into the CameraSource image buffer, or, for any other codec, decompresses the
00359 frame into an offscreen GWorld and converts the result from there.
00360 
00361 For more information refer to Inside Macintosh: QuickTime Components, page 5-120
00362 c - the channel component that is writing the digitized data.
00363 p - a pointer to the digitized data.
00364 len - the number of bytes of digitized data.
00365 offset - a pointer to a field that may specify where you are to write the digitized data,
00366 and that is to receive a value indicating where you wrote the data.
00367 chRefCon - per channel reference constant specified using SGSetChannelRefCon.
00368 time  - the starting time of the data, in the channel's time scale.
00369 writeType - the type of write operation being performed.
00370 seqGrabWriteAppend - Append new data.
00371 seqGrabWriteReserve - Do not write data. Instead, reserve space for the amount of data
00372 specified in the len parameter.
00373 seqGrabWriteFill - Write data into the location specified by offset. Used to fill the space
00374 previously reserved with seqGrabWriteReserve. The Sequence Grabber may
00375 call the DataProc several times to fill a single reserved location.
00376 refCon - the reference constant you specified when you assigned your data function to the sequence grabber.
00377 */
00378 pascal OSErr CameraSource::grabDataProc(SGChannel c, Ptr p, long len, long *offset, long chRefCon, TimeValue time, short writeType, long refCon)
00379 {
00380 #pragma unused(offset,chRefCon,writeType)
00381   //cout << "MungGrabDataProc" << endl;
00382   
00383   CameraSource* cam = (CameraSource*)refCon; // might want to use chRefCon instead?
00384   if (NULL == cam) {
00385     cerr << "CameraSource::grabDataProc called without a context" << endl;
00386     return -1;
00387   }
00388   
00389   // we only care about the video 
00390   if (c != cam->sgChan) {
00391     cerr << "CameraSource::grabDataProc called for something other than our channel" << endl;
00392     return noErr; //not an error as far as OS is concerned...
00393   }
00394     
00395   ++cam->skipped; // unless we make it through all the way, in which case we'll subtract this off and add to frame instead
00396   
00397   ComponentResult err=noErr;
00398   TimeValue frameTimeDelta=0;
00399   ImageDescriptionHandle imageDesc = NULL;
00400   try {
00401     // apparently can't do this before we get a frame (I tried; SGGetChannelTimeScale returns seqGrabInfoNotAvailable (-9407))
00402     if(cam->chanTimeScale==0) {
00403       Fixed framesPerSecond;
00404       long  milliSecPerFrameIgnore, bytesPerSecondIgnore;
00405       
00406       // first time here so get the time scale & timebase
00407       err = SGGetChannelTimeScale(cam->sgChan, &cam->chanTimeScale);
00408       if(err!=noErr) throw "SGGetChannelTimeScale"; // throw const char* so the catch(const char*) below handles it; err is reported there
00409       
00410       err = SGGetTimeBase(cam->sg, &cam->chanTimeBase);
00411       if(err!=noErr) throw "SGGetTimeBase";
00412       
00413       err = VDGetDataRate(SGGetVideoDigitizerComponent(cam->sgChan), &milliSecPerFrameIgnore, &framesPerSecond, &bytesPerSecondIgnore);
00414       if(err!=noErr) throw "VDGetDataRate";
00415       
00416       cam->duration = cam->chanTimeScale / (framesPerSecond >> 16);
00417     }
00418     
00419     // retrieve a channel's current sample description, the channel returns a
00420     // sample description that is appropriate to the type of data being captured
00421     imageDesc = (ImageDescriptionHandle)NewHandle(0);
00422     err = SGGetChannelSampleDescription(c, (Handle)imageDesc);
00423     if(err!=noErr) throw "SGGetChannelSampleDescription";
00424     string formatName = p2c((**imageDesc).name);
00425     //cout << formatName << " TYPE IS "; dumpLiteral((**imageDesc).cType); cout << " " << (**imageDesc).width << "x" << (**imageDesc).height << " => " << (**imageDesc).dataSize << " @ " << (**imageDesc).depth << endl;
00426     
00427     if((**imageDesc).cType==k422YpCbCr8CodecType) { // aka 2vuy
00428     
00429       cam->imgFrom2vuy((unsigned char*)p, (**imageDesc).width, (**imageDesc).height, (**imageDesc).depth, (**imageDesc).dataSize);
00430       
00431     } else if((**imageDesc).cType==kComponentVideoCodecType) { // aka yuv2 aka yuvu
00432       
00433       cam->imgFromyuv2((unsigned char*)p, (**imageDesc).width, (**imageDesc).height, (**imageDesc).depth, (**imageDesc).dataSize);
00434       
00435     } else {
00436       // use a OS provided decompression sequence to put it in the gworld
00437       if (cam->drawSeq == 0) {
00438         
00439         // set up decompression sequence  
00440         Rect           sourceRect = { 0, 0, 0, 0 };
00441         MatrixRecord       scaleMatrix; 
00442         CodecFlags         cFlags = codecNormalQuality;
00443         
00444         if(cam->gworld==NULL) {
00445           Rect  srcBounds;
00446           err = SGGetChannelBounds(cam->sgChan, &srcBounds);
00447           if(err!=noErr) throw "SGGetChannelBounds";
00448           unsigned int width = srcBounds.right-srcBounds.left;
00449           unsigned int height = srcBounds.bottom-srcBounds.top;
00450           cam->gworldBuf = (char*)malloc(width*height*2); // NewPtr(width*height*4);
00451           // once upon a time I could've sworn I had to use k32ARGBPixelFormat and do
00452           // the color space conversion manually, but apparently this is working... (yay! very fast...)
00453           err = QTNewGWorldFromPtr(&cam->gworld, k2vuyPixelFormat, &srcBounds, NULL, NULL, 0, cam->gworldBuf, width*2);
00454           if(err!=noErr) throw "QTNewGWorldFromPtr";
00455         }
00456           
00457         // make a scaling matrix for the sequence
00458         sourceRect.right = (**imageDesc).width;
00459         sourceRect.bottom = (**imageDesc).height;
00460         RectMatrix(&scaleMatrix, &sourceRect, &(*GetPortPixMap(cam->gworld))->bounds);
00461         
00462         // begin the process of decompressing a sequence of frames
00463         // this is a set-up call and is only called once for the sequence - the ICM will interrogate different codecs
00464         // and construct a suitable decompression chain, as this is a time consuming process we don't want to do this
00465         // once per frame (eg. by using DecompressImage)
00466         // for more information see Ice Floe #8 http://developer.apple.com/quicktime/icefloe/dispatch008.html
00467         // the destination is specified as the GWorld
00468         err = DecompressSequenceBeginS(&cam->drawSeq,         // pointer to field to receive unique ID for sequence
00469           imageDesc,              // handle to image description structure
00470           p,                  // points to the compressed image data
00471           len,                                // size of the data buffer
00472           cam->gworld,  // port for the DESTINATION image
00473           NULL,                 // graphics device handle, if port is set, set to NULL
00474           NULL,                 // decompress the entire source image - no source extraction
00475           &scaleMatrix,             // transformation matrix
00476           srcCopy,                // transfer mode specifier
00477           (RgnHandle)NULL,            // clipping region in dest. coordinate system to use as a mask
00478           0,                  // flags
00479           cFlags,                 // accuracy in decompression
00480           bestSpeedCodec);            // compressor identifier or special identifiers ie. bestSpeedCodec
00481         
00482         if(err!=noErr) throw "DecompressSequenceBeginS";
00483       }
00484       
00485       // get the TimeBase time and figure out the delta between that time and this frame time
00486       TimeValue timeBaseTime, timeBaseDelta;
00487       timeBaseTime = GetTimeBaseTime(cam->chanTimeBase, cam->chanTimeScale, NULL);
00488       timeBaseDelta = timeBaseTime - time;
00489       frameTimeDelta = time - cam->lastTime;
00490       
00491       if (timeBaseDelta < 0) { DisposeHandle((Handle)imageDesc); return err; } // probably don't need this; release the sample description if we bail out here
00492       
00493       // if we have more than one queued frame and our capture rate drops below 10 frames, skip the frame to try and catch up
00494       if ((cam->queuedFrames > 1) &&  ((cam->chanTimeScale / frameTimeDelta) < 10) && (cam->skipped < 15)) {
00495         // do nothing, skipping frame
00496       } else {
00497         CodecFlags ignore;
00498         
00499         // decompress a frame into the window - can queue a frame for async decompression when passed in a completion proc
00500         err = DecompressSequenceFrameS(cam->drawSeq,  // sequence ID returned by DecompressSequenceBegin
00501            p,         // pointer to compressed image data
00502            len,         // size of the buffer
00503            0,         // in flags
00504            &ignore,       // out flags
00505            NULL);       // async completion proc
00506         
00507         if(err!=noErr) throw "DecompressSequenceFrameS";
00508         
00509         cam->skipped = 0;
00510         cam->lastTime = time;
00511         
00512         {
00513           PixMapHandle pm=GetPortPixMap(cam->gworld); // use the real PixMapHandle rather than a pointer to a stack copy
00514           unsigned int width = (**pm).bounds.right - (**pm).bounds.left;
00515           unsigned int height = (**pm).bounds.bottom - (**pm).bounds.top;
00516           unsigned char * s = (unsigned char *)GetPixBaseAddr(pm);
00517           cam->imgFrom2vuy(s,width,height,16,width*height*2);
00518         }
00519       } 
00520     }
00521   } catch(const char* call) {
00522     cerr << "CameraSource: " << call << " returned error " << err << endl;
00523     if(imageDesc!=NULL)
00524       DisposeHandle((Handle)imageDesc);
00525     return cam->callbackerr=err;
00526   }
00527   if(imageDesc!=NULL)
00528     DisposeHandle((Handle)imageDesc);
00529     
00530   /*
00531   // status information
00532   float fps, averagefps;
00533   UInt8   minutes, seconds, frames;
00534   
00535   fps = (float)cam->chanTimeScale / (float)frameTimeDelta;
00536   averagefps = ((float)cam->frame * (float)cam->chanTimeScale) / (float)time;
00537   minutes = (time / cam->chanTimeScale) / 60;
00538   seconds = (time / cam->chanTimeScale) % 60;
00539   frames = (time % cam->chanTimeScale) / frameTimeDelta; //cam->duration;
00540   printf("t: %ld, %02d:%02d.%02d, fps:%5.1f av:%5.1f\n", time, minutes, seconds, frames, fps, averagefps);
00541   */
00542   
00543   // made it!  increment frame and decrement our pessimistic skip
00544   ++cam->frame;
00545   --cam->skipped;
00546   
00547   return err;
00548 }
00549 
00550 
00551 /*! 2vuy is an interleaved 4:2:2 format, where the u and v channels are horizontally downsampled by half.
00552 *  Even-numbered pixel columns carry the u values and odd-numbered columns carry the v values.
00553 *  For example, the first 6 pixels of a row are stored as the byte pairs: uy, vy, uy, vy, uy, vy, ...  */
00554 void CameraSource::imgFrom2vuy(const unsigned char * s, short srcWidth, short srcHeight, short depth, long dataSize) {
00555 #pragma unused(depth,dataSize)
00556   unsigned int width=srcWidth/2;
00557   unsigned int height=srcHeight/2;
00558   size_t reqSize = width * height * 3 + sizeof(unsigned int)*4;
00559   if(imgbuf==NULL || imgbufSize<reqSize) {
00560     delete [] imgbuf;
00561     imgbuf=new char[imgbufSize=reqSize];
00562   }
00563   
00564   unsigned int components=3;
00565   char * buf=imgbuf;
00566   unsigned int remain=imgbufSize;
00567   if(!LoadSave::encodeInc(*layer,buf,remain)) return;
00568   if(!LoadSave::encodeInc(width,buf,remain)) return;
00569   if(!LoadSave::encodeInc(height,buf,remain)) return;
00570   if(!LoadSave::encodeInc(components,buf,remain)) return;
00571   
00572   const unsigned int srcStride=srcWidth*2;
00573   unsigned char * dst=(unsigned char*)buf;
00574   unsigned char * const dstEnd=dst+width*height*components;
00575   while(dst!=dstEnd) {
00576     unsigned char * const rowEnd=dst+width*components;
00577     while(dst!=rowEnd) {
00578       unsigned int y,u,v;
00579       u=*s;
00580       u+=*(s+srcStride);
00581       ++s;
00582       
00583       y=*s;
00584       y+=*(s+srcStride);
00585       ++s;
00586       
00587       v=*s;
00588       v+=*(s+srcStride);
00589       ++s;
00590       
00591       y+=*s;
00592       y+=*(s+srcStride);
00593       ++s;
00594       
00595       *dst++ = y/4;
00596       *dst++ = u/2;
00597       *dst++ = v/2;
00598     }
00599     s+=srcStride;
00600   }
00601   imgbufUsed=dst-(unsigned char*)imgbuf;
00602   if(imgbufUsed!=reqSize) {
00603     cerr << "bad payload! " << reqSize << " vs " << imgbufUsed << endl;
00604     imgbufUsed=0;
00605   }
00606 }
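/* Worked example of the downsampling above (illustrative only): each output pixel is
 * produced from a 2x2 block of source pixels, which is why the output is
 * srcWidth/2 x srcHeight/2.  Given the first four bytes of two adjacent source rows,
 *
 *   row 0:  u0 y0 v0 y1 ...
 *   row 1:  u2 y2 v2 y3 ...
 *
 * the first output pixel (written after the four encodeInc() header fields) is
 *
 *   Y = (y0+y1+y2+y3)/4,   U = (u0+u2)/2,   V = (v0+v2)/2
 *
 * and the trailing s+=srcStride skips row 1, which has already been consumed.
 */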
00607 
00608 /*! Similar to 2vuy, except:
00609  *  - byte swapped (yu, yv instead of uy, vy)
00610  *  - u and v channels are signed instead of unsigned
00611  *  - channels use the full byte range, whereas apparently 2vuy only uses 16-235 for the y and 16-240 for the u and v
00612  *    (aka 601/YCbCr standard)
00613  */
00614 void CameraSource::imgFromyuv2(const unsigned char * s, short srcWidth, short srcHeight, short depth, long dataSize) {
00615 #pragma unused(depth,dataSize)
00616   unsigned int width=srcWidth/2;
00617   unsigned int height=srcHeight/2;
00618   size_t reqSize = width * height * 3 + sizeof(unsigned int)*4;
00619   if(imgbuf==NULL || imgbufSize<reqSize) {
00620     delete [] imgbuf;
00621     imgbuf=new char[imgbufSize=reqSize];
00622   }
00623   
00624   unsigned int components=3;
00625   char * buf=imgbuf;
00626   unsigned int remain=imgbufSize;
00627   if(!LoadSave::encodeInc(*layer,buf,remain)) return;
00628   if(!LoadSave::encodeInc(width,buf,remain)) return;
00629   if(!LoadSave::encodeInc(height,buf,remain)) return;
00630   if(!LoadSave::encodeInc(components,buf,remain)) return;
00631   
00632   const unsigned int srcStride=srcWidth*2;
00633   unsigned char * dst=(unsigned char*)buf;
00634   unsigned char * const dstEnd=dst+width*height*components;
00635   while(dst!=dstEnd) {
00636     unsigned char * const rowEnd=dst+width*components;
00637     while(dst!=rowEnd) {
00638       unsigned int y;
00639       int u,v;
00640       y=*s;
00641       y+=*(s+srcStride);
00642       ++s;
00643       
00644       u=(char)*s;
00645       u+=(char)*(s+srcStride);
00646       ++s;
00647       
00648       y+=*s;
00649       y+=*(s+srcStride);
00650       ++s;
00651       
00652       v=(char)*s;
00653       v+=(char)*(s+srcStride);
00654       ++s;
00655             
00656       *dst++ = (y*219/255)/4 + 16;
00657       *dst++ = (u*224/255)/2 + 128;
00658       *dst++ = (v*224/255)/2 + 128;
00659     }
00660     s+=srcStride;
00661   }
00662   imgbufUsed=dst-(unsigned char*)imgbuf;
00663   if(imgbufUsed!=reqSize) {
00664     cerr << "bad payload! " << reqSize << " vs " << imgbufUsed << endl;
00665     imgbufUsed=0;
00666   }
00667 }
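/* Worked example of the range conversion above (illustrative only): yuv2 samples use the
 * full byte range, so the averaged values are rescaled into the 601/YCbCr video range
 * described in the doc comment on imgFromyuv2().  For four white luma samples (255 each),
 * y sums to 1020:
 *
 *   1020*219/255 = 876,  876/4 = 219,  219+16 = 235     (full-range 255 -> video-range 235)
 *
 * and four black samples map to 16.  For two maximum chroma samples (+127 each), u sums
 * to 254:
 *
 *   254*224/255 = 223,   223/2 = 111,  111+128 = 239    (full-range +127 -> roughly 240)
 *
 * while two minimum samples (-128 each) map to 16, matching the 16-235 / 16-240 ranges.
 */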
00668 
00669 #endif
