OpenCV multithreading giving errors

I am running OpenCV code on multiple threads in the following way:

std::thread t1(runOnSingleCamera, alphaFile, featureToUse, classifier, 0); 
std::thread t2(runOnSingleCamera, betaFile, featureToUse, classifier, 1); 
std::thread t3(runOnSingleCamera, gammaFile, featureToUse, classifier, 2); 
std::thread t4(runOnSingleCamera, deltaFile, featureToUse, classifier, 3); 
t1.join(); 
t2.join(); 
t3.join(); 
t4.join(); 

This compiles fine, but when I run it I get various errors (and sometimes it even works). Here is an example of some of the errors I get:

[email protected]:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1 
Segmentation fault (core dumped) 
[email protected]:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1 

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget' 

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget' 

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget' 

(betaInput.webm:8571): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget' 

(betaInput.webm:8571): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed 

(betaInput.webm:8571): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed 
Segmentation fault (core dumped) 
[email protected]:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1 

(alphaInput.webm:8593): GLib-GObject-WARNING **: invalid cast from 'CvImageWidget' to 'CvImageWidget' 

** (alphaInput.webm:8593): CRITICAL **: void cvImageWidget_size_allocate(GtkWidget*, GtkAllocation*): assertion 'CV_IS_IMAGE_WIDGET (widget)' failed 

** (alphaInput.webm:8593): CRITICAL **: void cvImageWidget_realize(GtkWidget*): assertion 'CV_IS_IMAGE_WIDGET (widget)' failed 
** 
Gtk:ERROR:/build/gtk+2.0-KsZKkB/gtk+2.0-2.24.30/gtk/gtkwidget.c:8861:gtk_widget_real_map: assertion failed: (gtk_widget_get_realized (widget)) 
Aborted (core dumped) 
[email protected]:~/Documents/Project/reidThermal/src$ ./main -d=1 -c=0 -f=1 
/usr/share/themes/Ambiance/gtk-2.0/gtkrc:720: Unable to find include file: "apps/ff.rc" 

(betaInput.webm:8615): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget' 

(betaInput.webm:8615): GLib-GObject-WARNING **: cannot register existing type 'CvImageWidget' 

(betaInput.webm:8615): Gtk-CRITICAL **: IA__gtk_widget_new: assertion 'g_type_is_a (type, GTK_TYPE_WIDGET)' failed 
Segmentation fault (core dumped) 

Has anyone seen this before, or does anyone know what is going wrong and how to fix it?

Running under gdb gives the following:

Thread 4 "main" received signal SIGSEGV, Segmentation fault. 
[Switching to Thread 0x7fffdb7fe700 (LWP 29317)] 
0x0000000000000000 in ??() 

I am on the most recent version of Ubuntu, with the most recent version of OpenCV.

As requested, below is the full code; it is quite long and consists of several elements:

int runOnSingleCamera(String file, int featureToUse, int classifier, int cameraID) 
{ 
    //enable velocity 
    int timeSteps = 0; 

    string windowName = file; // window name 

    Mat img, outputImage, foreground; // image objects 
    VideoCapture cap; 

    bool keepProcessing = true; // loop control flag 
    unsigned char key;   // user input 
    int EVENT_LOOP_DELAY = 40; // delay for GUI window, 40 ms equates to 1000ms/25fps = 40ms per frame 

    vector<vector<Point> > contours; 
    vector<Vec4i> hierarchy; 
    int width = 40; 
    int height = 100; 
    int learning = 1000; 
    int padding = 40; 

    // if command line arguments are provided try to read image/video_name 
    // otherwise default to capture from attached H/W camera 
    if (cap.open(file)) 
    { 
     // create window object (use flag=0 to allow resize, 1 to auto fix size) 
     namedWindow(windowName, 1); 
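      // NOTE: this runs in a worker thread; creating HighGUI windows off the
      // main thread is what triggers the GTK/CvImageWidget errors shown above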

     // create background/foreground Mixture of Gaussian (MoG) model 
     Ptr<BackgroundSubtractorMOG2> MoG = createBackgroundSubtractorMOG2(500,25,false); 

     HOGDescriptor hog; 
     hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector()); 

     CascadeClassifier cascade = CascadeClassifier(CASCADE_TO_USE); 

    Ptr<SuperpixelSEEDS> seeds; 

     // start main loop 
     while(keepProcessing) 
     { 
      int64 timeStart = getTickCount(); 

      if (cap.isOpened()) 
      { 
       cap >> img; 

       if(img.empty()) 
       { 
        std::cerr << "End of video file reached" << std::endl; 
        exit(0); 
       } 
       outputImage = img.clone(); 

       cvtColor(img, img, CV_BGR2GRAY); 
      } 
      else 
      { 
       // if not a capture object set event delay to zero so it waits 
       // indefinitely (as single image file, no need to loop) 
       EVENT_LOOP_DELAY = 0; 
      } 

      // update background model and get background/foreground 
      MoG->apply(img, foreground, (double)(1.0/learning)); 

      //imshow("old foreground", foreground); 

/////////////////////////////////////////////////////////////////////////////////SUPERPIXELS 
      int useSuperpixels = 0; 

      if(useSuperpixels == 1) 
      { 
       Mat seedMask, labels, result; 

       result = img.clone(); 

       int width = img.size().width; 
      int height = img.size().height; 

      seeds = createSuperpixelSEEDS(width, height, 1, 2000, 10, 2, 5, true); 

      seeds->iterate(img, 10); 

      seeds->getLabels(labels); 

      vector<int> counter(seeds->getNumberOfSuperpixels(),0); 
      vector<int> numberOfPixelsPerSuperpixel(seeds->getNumberOfSuperpixels(),0); 

      vector<bool> useSuperpixel(seeds->getNumberOfSuperpixels(),false); 

      for(int i = 0; i<foreground.rows; i++) 
      { 
       for(int j = 0; j<foreground.cols; j++) 
       { 
        numberOfPixelsPerSuperpixel[labels.at<int>(i,j)] += 1; 
        if(foreground.at<unsigned char>(i,j)==255) 
        { 
         counter[labels.at<int>(i,j)] += 1; 
        } 
       } 
      } 

      for(int i = 0; i<counter.size(); i++) 
      { 
       if((double)counter[i]/numberOfPixelsPerSuperpixel[i] > 0.0001) // cast: integer division would truncate the ratio before the comparison 
       { 
        useSuperpixel[i] = true; 
       } 
      } 

      for(int i = 0; i<foreground.rows; i++) 
      { 
       for(int j = 0; j<foreground.cols; j++) 
       { 
        if(useSuperpixel[labels.at<int>(i,j)] == true) 
        { 
         foreground.at<unsigned char>(i,j) = 255; 
        } 
        else 
        { 
         foreground.at<unsigned char>(i,j) = 0; 
        } 
       } 
      } 
      } 
///////////////////////////////////////////////////////////////////////////////// 
      else 
      { 
       // perform erosion - removes boundaries of foreground object 
       erode(foreground, foreground, Mat(),Point(),1); 

       // perform morphological closing 
       dilate(foreground, foreground, Mat(),Point(),5); 
       erode(foreground, foreground, Mat(),Point(),1); 
      } 
      //imshow("foreground", foreground); 

      // get connected components from the foreground 
      findContours(foreground, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); 

      // iterate through all the top-level contours, 
      // and get bounding rectangles for them (if larger than given value) 

      // guard the index: with no contours, hierarchy is empty and hierarchy[0] is undefined behaviour 
      for(int idx = 0; idx >= 0 && idx < (int)hierarchy.size(); idx = hierarchy[idx][0]) 
      { 
       Rect r = boundingRect(contours[idx]); 

       // adjust bounding rectangle to be padding% larger 
       // around the object 
       r.x = max(0, r.x - (int) (padding/100.0 * (double) r.width)); 
       r.y = max(0, r.y - (int) (padding/100.0 * (double) r.height)); 

       r.width = min(img.cols - 1, (r.width + 2 * (int) (padding/100.0 * (double) r.width))); 
       r.height = min(img.rows - 1, (r.height + 2 * (int) (padding/100.0 * (double) r.height))); 

       // draw rectangle if greater than width/height constraints and if 
       // also still inside image 
       if ((r.width >= width) && (r.height >= height) && (r.x + r.width < img.cols) && (r.y + r.height < img.rows)) 
       { 
        vector<Rect> found, found_filtered; 

        Mat roi = outputImage(r); 

        if (classifier == 1) 
        { 
         //changing last parameter helps deal with multiple rectangles per person 
         if (cameraID == 3) 
         { 
          hog.detectMultiScale(roi, found, 0, Size(8,8), Size(32,32), 1.05, 5); 
         } 
         else 
         { 
          hog.detectMultiScale(roi, found, 0, Size(8,8), Size(64,64), 1.05, 5); 
         } 
        } 
        else 
        { 
        if (cameraID == 3) 
        { 
         cascade.detectMultiScale(roi, found, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(32,32)); 
        } 
        else 
        { 
         cascade.detectMultiScale(roi, found, 1.1, 4, CV_HAAR_DO_CANNY_PRUNING, cvSize(64,64)); 
        } 
        } 

        for(size_t i = 0; i < found.size(); i++) 
        { 
         Rect rec = found[i]; 

         rec.x += r.x; 
         rec.y += r.y; 

         size_t j; 
         // Do not add small detections inside a bigger detection. 
         for (j = 0; j < found.size(); j++) 
         { 
          // shift the other detection into the same full-image coordinates as rec 
          Rect other = found[j]; 
          other.x += r.x; 
          other.y += r.y; 
          if (j != i && (rec & other) == rec) 
          { 
           break; 
          } 
         } 

         if (j == found.size()) 
         { 
          found_filtered.push_back(rec); 
         } 
        } 
        for (size_t i = 0; i < found_filtered.size(); i++) 
        { 
         Rect rec = found_filtered[i]; 

         // The HOG/Cascade detector returns slightly larger rectangles than the real objects, 
         // so we slightly shrink the rectangles to get a nicer output. 
         rec.x += rec.width*0.1; 
         rec.width = rec.width*0.8; 
         rec.y += rec.height*0.1; 
         rec.height = rec.height*0.8; 
         // rectangle(img, rec.tl(), rec.br(), cv::Scalar(0,255,0), 3); 

         Point2f center = Point2f(float(rec.x + rec.width/2.0), float(rec.y + rec.height/2.0)); 

         Mat regionOfInterest; 

         Mat regionOfInterestOriginal = img(rec); 
         //Mat regionOfInterestOriginal = img(r); 

         Mat regionOfInterestForeground = foreground(rec); 
         //Mat regionOfInterestForeground = foreground(r); 

         bitwise_and(regionOfInterestOriginal, regionOfInterestForeground, regionOfInterest); 

         Mat clone = regionOfInterest.clone(); 

         resize(clone, regionOfInterest, Size(64,128), 0, 0, CV_INTER_CUBIC); // interpolation is the sixth argument, not the fourth 

         imshow("roi", regionOfInterest); 

          double huMoments[7] = {0}; // zero so the feature stays defined even if no contour is found 
         vector<double> hu(7); 
         Mat hist; 
         vector<float> descriptorsValues; 

         Mat feature; 

         if(featureToUse == 1) //HuMoments 
         { 
          vector<vector<Point> > contoursHu; 
          vector<Vec4i> hierarchyHu; 

          findContours(regionOfInterest, contoursHu, hierarchyHu, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); 

          double largestSize = 0, size; // initialise before the comparison below 
          int largestContour = -1; 

          for(int i = 0; i < (int)contoursHu.size(); i++) 
          { 
           size = contoursHu[i].size(); 

           if(size > largestSize) 
           { 
            largestSize = size; 
            largestContour = i; 
           } 
          } 

          if(largestContour >= 0) // only take moments when a contour was actually found 
          { 
           Moments contourMoments = moments(contoursHu[largestContour]); 

           HuMoments(contourMoments, huMoments); 
          } 

          hu.assign(huMoments,huMoments+7); 

       feature = Mat(hu); 
       feature = feature.t(); 
         } 
         else if(featureToUse == 2) //HistogramOfIntensities 
         { 
          int histSize = 16; // bin size - need to determine which pixel threshold to use 
          float range[] = {0,255}; 
          const float *ranges[] = {range}; 
          int channels[] = {0}; // the histogram below is 1-D over the single greyscale channel 

          calcHist(&regionOfInterest, 1, channels, Mat(), hist, 1, &histSize, ranges, true, false); 

          feature = hist.clone(); 
          feature = feature.t(); 
         } 

         else if(featureToUse == 3) //HOG 
         { 
          //play with these parameters to change HOG size 
          cv::HOGDescriptor descriptor(Size(64, 128), Size(16, 16), Size(16, 16), Size(16, 16), 4, -1, 0.2, true, 64); 

          descriptor.compute(regionOfInterest, descriptorsValues); 

          feature = Mat(descriptorsValues); 
          feature = feature.t(); 
         } 

         else if(featureToUse == 4) //Correlogram 
         {     
          Mat correlogram = Mat::zeros(8,8,CV_64F); // zero-initialise before accumulating with += 
          Mat occurances = Mat::zeros(8,8,CV_8U); 

          int xIntensity, yIntensity; 

          for(int i = 0; i<regionOfInterest.rows; i++) 
          { 
           for(int j = 0; j<regionOfInterest.cols; j++) 
           { 
            xIntensity = floor(regionOfInterest.at<unsigned char>(i,j)/32); 

            for(int k = i; k<regionOfInterest.rows; k++) 
            { 
             for(int l = 0; l<regionOfInterest.cols; l++) 
             { 
              if((k == i && l > j) || k > i) 
              { 
               yIntensity = floor(regionOfInterest.at<unsigned char>(k,l)/32); 

               correlogram.at<double>(xIntensity,yIntensity) += (norm(Point(i,j)-Point(k,l))); 
               correlogram.at<double>(yIntensity,xIntensity) += (norm(Point(i,j)-Point(k,l))); 

               occurances.at<unsigned char>(xIntensity,yIntensity) += 1; 
               occurances.at<unsigned char>(yIntensity,xIntensity) += 1; 
              } 
             } 
            } 
           } 
          } 
          // average it out: divide each accumulated distance by its occurrence count 
          for(int i = 0; i<correlogram.rows; i++) 
          { 
           for(int j = 0; j<correlogram.cols; j++) 
           { 
            if(occurances.at<unsigned char>(i,j) > 0) 
            { 
             correlogram.at<double>(i,j) /= occurances.at<unsigned char>(i,j); 
            } 
           } 
          } 

          feature = correlogram.reshape(1,1); 
         } 
         else if(featureToUse == 5) //Flow 
         { 

         } 

         feature.convertTo(feature, CV_64F); 

         normalize(feature, feature, 1, 0, NORM_L1, -1, Mat()); 
         cout << "New Feature" << endl << feature << endl; 

         //classify first target 
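         // NOTE: 'targets' is a global std::vector shared by all four threads;
         // every read/write of it below is unsynchronized (a data race)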
         if(targets.size() == 0) //if first target found 
         { 
          Person person(0, center.x, center.y, timeSteps, rec.width, rec.height); 

          person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height); 

          Rect p = person.kalmanPredict(); 

          person.updateFeatures(feature); 

          person.setCurrentCamera(cameraID); 

          rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3); 

          char str[200]; 
          sprintf(str,"Person %d",person.getIdentifier()); 

          putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0)); // (0,0,0) without Scalar collapses to 0 via the comma operator 

          targets.push_back(person); 
         } 
         else 
         { 
          vector<double> mDistances; 
          bool singleEntry = false; 

          for(int i = 0; i<targets.size(); i++) 
          { 
           if(targets[i].getFeatures().rows == 1) 
           { 
            singleEntry = true; 
           } 
          } 

          for(int i = 0; i<targets.size(); i++) 
          { 
           Mat covar, mean; 
           Mat data = targets[i].getFeatures(); 

           calcCovarMatrix(data,covar,mean,CV_COVAR_NORMAL|CV_COVAR_ROWS); 

           // cout << i << " data" << endl << data << endl; 

           // cout << i << " Covar" << endl << covar << endl; 

           // cout << i << " mean" << endl << mean << endl; 

           double mDistance; 

           if(singleEntry == false) 
           { 
            Mat invCovar; 

            invert(covar,invCovar,DECOMP_SVD); 

            mDistance = Mahalanobis(feature,mean,invCovar); 

            cout << i << " Mahalanobis Distance" << endl << mDistance << endl; 
           } 
           else 
           { 
            mDistance = norm(feature,mean,NORM_L1); 

            cout << i << " Norm Distance" << endl << mDistance << endl; 
           } 
           mDistances.push_back(mDistance); 
          } 

          Mat test = Mat(mDistances); 
          cout << "Distances" << endl << test << endl; 

          double sum = 0.0; 
          for(int i = 0; i<mDistances.size(); i++) 
          { 
           sum += mDistances[i]; 
          } 
          for(int i = 0; i<mDistances.size(); i++) 
          { 
           mDistances[i] = sum/mDistances[i]; 
          } 

          normalize(mDistances,mDistances,1,0,NORM_L1,-1,Mat()); 

          Mat probabilities = Mat(mDistances); 

          cout << "Probabilities" << endl << probabilities << endl; 

          //special case to classify second target 
         if(targets.size() == 1) 
         { 
          if(fabs(center.x-targets[0].getLastPosition().x) < 100 && fabs(center.y-targets[0].getLastPosition().y) < 100) 
          { 
           targets[0].kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height); 

            Rect p = targets[0].kalmanPredict(); 

           targets[0].updateFeatures(feature); 

           targets[0].setCurrentCamera(cameraID); 

            rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3); 

            char str[200]; 
            sprintf(str,"Person %d",targets[0].getIdentifier()); 

            putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0)); 
          } 
          else 
          { 
           Person person(1, center.x, center.y, timeSteps, rec.width, rec.height); 

            person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height); 

            Rect p = person.kalmanPredict(); 

           person.updateFeatures(feature); 

           person.setCurrentCamera(cameraID); 

            rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3); 

            char str[200]; 
            sprintf(str,"Person %d",person.getIdentifier()); 

            putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0)); 

            targets.push_back(person); 
          } 
         } 

         else 
         { 
          double greatestProbability = 0.0; 
          int identifier = 0; 

          double min, max; 
           Point min_loc, max_loc; 
           minMaxLoc(probabilities, &min, &max, &min_loc, &max_loc); 

           greatestProbability = max; 
           identifier = max_loc.y; 

           cout << greatestProbability << " at " << identifier << endl; 

          if(greatestProbability >= 0.5) 
          { 
           targets[identifier].kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height); 

            Rect p = targets[identifier].kalmanPredict(); 

           targets[identifier].updateFeatures(feature); 

           targets[identifier].setCurrentCamera(cameraID); 

            rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3); 

            char str[200]; 
            sprintf(str,"Person %d",targets[identifier].getIdentifier()); 

            putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0)); 
          } 
          else 
          { 
           int identifier = targets.size(); 
            Person person(identifier, center.x, center.y, timeSteps, rec.width, rec.height); 

            person.kalmanCorrect(center.x, center.y, timeSteps, rec.width, rec.height); 

            Rect p = person.kalmanPredict(); 

           person.updateFeatures(feature); 

           person.setCurrentCamera(cameraID); 

            rectangle(outputImage, p.tl(), p.br(), cv::Scalar(255,0,0), 3); 

            char str[200]; 
            sprintf(str,"Person %d",person.getIdentifier()); 

            putText(outputImage, str, center, FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,0)); 

            targets.push_back(person); 
          } 
         } 
        } 
        } 
        rectangle(outputImage, r, Scalar(0,0,255), 2, 8, 0); 
       } 
      } 
      // display image in window 
      imshow(windowName, outputImage); 

      key = waitKey((int) std::max(2.0, EVENT_LOOP_DELAY - (((getTickCount() - timeStart)/getTickFrequency())*1000))); 

      if (key == 'x') 
      { 
       // if user presses "x" then exit 
       std::cout << "Keyboard exit requested : exiting now - bye!" << std::endl; 
       keepProcessing = false; 
      } 

      timeSteps += 1; 
     } 
     // the camera will be deinitialized automatically in VideoCapture destructor 
     // all OK : main returns 0 
     return 0; 
    } 
    // not OK : main returns -1 
    return -1; 
} 

Answer


I imagine the problem is an incompatibility with the imshow function, or with the declarations earlier on: you may be exercising some OpenCV functionality that does not support being run from multiple threads. Otherwise, you are probably not using control mechanisms such as mutexes and monitors to restrict access to the critical sections of your code to one thread at a time. We won't be able to tell what you are doing wrong unless you share more of your code. From your logs, it looks like some kind of initialization is being run more than once.
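For reference, serializing access to shared state would look roughly like this. A minimal sketch: 'targets' is the global vector from the question, but the Person stub, 'targetsMutex' and 'addTarget' are invented names for illustration, not your actual code:

#include <mutex>
#include <vector>

struct Person { int id; };      // stand-in for the question's Person class
std::vector<Person> targets;    // the global shared by all four camera threads
std::mutex targetsMutex;        // hypothetical: one mutex guarding every access to 'targets'

// Every read or write of 'targets', in every thread, must hold the lock:
void addTarget(const Person& person)
{
    std::lock_guard<std::mutex> lock(targetsMutex);  // released automatically on scope exit
    targets.push_back(person);
}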


Looking at the code and the logs, two things come to mind:

  1. Are you accidentally trying to access the video capture hardware from multiple threads?
  2. Creating a new window may be initializing something inside GTK. Try creating the windows in the main thread and see if that helps. This is not specific to OpenCV: having more than one thread drive the UI is generally a bad idea.

If none of that helps, try adding some log output to your code so you can pin down the line that causes the error.
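For example (a sketch; the helper name is made up), tag each message with the thread id and write it with a single stream call so lines from different threads do not interleave mid-line:

#include <iostream>
#include <sstream>
#include <string>
#include <thread>

// Hypothetical helper: build the whole line first, then emit it in one write.
static void threadLog(const std::string& msg)
{
    std::ostringstream line;
    line << "[thread " << std::this_thread::get_id() << "] " << msg << "\n";
    std::cerr << line.str();
}

// usage inside runOnSingleCamera: threadLog("before namedWindow");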


In the end it was #2. To fix this, you have to move all the namedWindow calls to the main thread. If it still fails on the imshow calls after that, those have to be moved to the main thread as well: you need a condition variable for each thread, plus a shared variable that the worker threads write and the main thread reads to update the windows. I'd provide code, but I don't know much about C++ concurrency. For more on doing this, see: waiting thread until a condition has been occurred
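A minimal sketch of that scheme (the FrameSlot structure, window names and the reduced worker signature are all assumptions, not the actual project code; it also polls via waitKey on the main thread instead of blocking on condition variables, since HighGUI needs waitKey there to pump events anyway):

#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include <opencv2/opencv.hpp>

using namespace cv;

// One shared slot per camera: the worker writes frames, the main thread displays them.
struct FrameSlot {
    std::mutex mtx;
    Mat frame;
    bool updated = false;
    bool finished = false;
};

static FrameSlot slots[4];

// Worker thread: all capture and processing stays here, but no namedWindow/imshow.
void runOnSingleCamera(std::string file, int cameraID)
{
    VideoCapture cap(file);
    Mat img;
    while (cap.read(img))
    {
        // ... detection/tracking work on img goes here ...
        std::lock_guard<std::mutex> lock(slots[cameraID].mtx);
        img.copyTo(slots[cameraID].frame);
        slots[cameraID].updated = true;
    }
    std::lock_guard<std::mutex> lock(slots[cameraID].mtx);
    slots[cameraID].finished = true;
}

int main()
{
    // GUI objects are created and updated on the main thread only.
    for (int i = 0; i < 4; i++)
        namedWindow("camera " + std::to_string(i), 1);

    std::vector<std::thread> workers;
    workers.emplace_back(runOnSingleCamera, "alphaInput.webm", 0);
    // ... start the other three workers the same way ...

    bool anyRunning = true;
    while (anyRunning)
    {
        anyRunning = false;
        for (int i = 0; i < (int)workers.size(); i++)
        {
            std::lock_guard<std::mutex> lock(slots[i].mtx);
            if (slots[i].updated)
            {
                imshow("camera " + std::to_string(i), slots[i].frame);
                slots[i].updated = false;
            }
            if (!slots[i].finished)
                anyRunning = true;
        }
        if (waitKey(40) == 'x')  // the main thread owns the HighGUI event loop
            break;
    }
    for (std::thread& t : workers)
        t.join();
    return 0;
}

Polling at roughly 25 fps via waitKey(40) matches the EVENT_LOOP_DELAY already used in the question's code.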


I currently have no mutexes or monitors; the only thing shared between the threads is a global vector, and I was under the impression that this would cause incorrect results rather than runtime errors? I can post the full code if you like, but it is long. – TomRobson


Neither I nor anyone else can tell much more from the code provided; please post more of it. A global vector being written concurrently would, at worst, cause logic errors, not a segfault. – Arshia001


Apart from your own variables, the libraries you use may themselves rely on global state, and that can cause concurrency errors. – Arshia001