
Running out of real memory in Java

I am trying to do image processing with multiple (4) cameras. My code works, but after it runs for a few more minutes I get an out-of-memory error. I have been monitoring the RAM and can see the program using more and more of it as it runs. I have read a lot about this and tried changing the heap and JVM memory sizes. That helped a little, but it still crashes. Can someone help me? Here is my code:
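(For reference, the heap changes mentioned above would be the standard JVM start-up flags, roughly like the line below; the sizes and the library path are only placeholder values, not the actual settings from the question.)

    java -Xms256m -Xmx1024m -Djava.library.path=/path/to/opencv/native cameraCaller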

cameraCaller

//This class opens and displays cameras 
import java.io.IOException; 

import javax.swing.JFrame; 
import org.opencv.core.Core; 
import org.opencv.features2d.DescriptorExtractor; 
import org.opencv.highgui.VideoCapture; 
import org.opencv.video.BackgroundSubtractor; 

public class cameraCaller 
{ 

    /** 
    * 
    * @param args 
    * @throws IOException 
    */ 
    public static void main(String[] args) throws IOException 
    { 

     System.loadLibrary(Core.NATIVE_LIBRARY_NAME); 


     //open up the cameras 
     VideoCapture camera = new VideoCapture(0); 
     VideoCapture camera2 = new VideoCapture(2); 
     //VideoCapture camera3 = new VideoCapture(3); 
     //VideoCapture camera2 = new VideoCapture("http://192.168.0.7/VIDEO.CGI?.mjpg"); 

     //Make all the camera windows and cams 


     //Camera 1 
     Cam cam = new Cam(camera); 
     JFrame frame = new JFrame(); 
     frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); 
     frame.add(cam); 
     frame.setSize(800,800); 
     frame.setVisible(true); 

     //Camera 2 
     Cam cam2 = new Cam(camera2); 
     JFrame frame2 = new JFrame(); 
     frame2.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); 
     frame2.add(cam2); 
     frame2.setSize(800,800);  
     frame2.setVisible(true); 

     //Camera 3 
     /* 
     Cam cam3 = new Cam(camera3); 
     JFrame frame3 = new JFrame(); 
     frame3.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); 
     frame3.add(cam3); 
     frame3.setSize(800,800);  
     frame3.setVisible(true); 
     */ 


     //Update the windows while the cameras are open 

     while(camera2.isOpened() && camera.isOpened())// && camera3.isOpened()) 
     { 
      cam.repaint(); 
      cam2.repaint(); 
      //cam3.repaint(); 


     } 


    } 

} 

Cam
//This class processes the video feed from a camera. 
//It takes in an OpenCV 'VideoCapture' object and processes the frames. 
import java.awt.Graphics; 
import java.awt.image.BufferedImage; 
import java.awt.image.DataBufferByte; 
import java.io.File; 
import java.io.IOException; 
import java.util.ArrayList; 
import java.util.Iterator; 
import java.util.List; 

import javax.imageio.ImageIO; 
import javax.swing.JPanel; 

import org.opencv.core.Core; 
import org.opencv.core.CvType; 
import org.opencv.core.Mat; 
import org.opencv.core.MatOfDMatch; 
import org.opencv.core.MatOfKeyPoint; 
import org.opencv.core.MatOfPoint; 
import org.opencv.core.Rect; 
import org.opencv.core.Scalar; 
import org.opencv.core.Size; 
import org.opencv.features2d.DescriptorExtractor; 
import org.opencv.features2d.DescriptorMatcher; 
import org.opencv.features2d.FeatureDetector; 
import org.opencv.highgui.VideoCapture; 
import org.opencv.imgproc.Imgproc; 


@SuppressWarnings("serial") 
public class Cam extends JPanel 
{ 
    VideoCapture cam; 
    boolean start; 
    Mat bg; 
    Mat last; 
    Mat bw; 
    FeatureDetector detector; 
    DescriptorExtractor extractor; 
    MatOfKeyPoint ball01Points; 
    DescriptorMatcher match; 
    Mat ball01Descriptor; 
    Mat orginal; 
    public Cam(VideoCapture camera) 
    { 

     cam = camera; 
     start = true; 
     bg = new Mat(); 
     last = new Mat(); 
     bw = new Mat(); 
     BufferedImage ball01 = null; 
     detector = FeatureDetector.create(FeatureDetector.SURF); 
     extractor = DescriptorExtractor.create(DescriptorExtractor.SURF); 
     match = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED); 
     ball01Points = new MatOfKeyPoint(); 
     ball01Descriptor = new Mat(); 


     /* 
      * Testing object recognition 
     File ball01File = new File("ball01.jpg"); 
     try 
     { 
      ball01 = ImageIO.read(ball01File); 

     } catch (IOException e) 
     { 
      // TODO Auto-generated catch block 
      e.printStackTrace(); 

     } 
     //detector.detect(Buff2Mat(ball01), ball01Points); 
     //extractor.compute(Buff2Mat(ball01), ball01Points, ball01Descriptor); 

     // System.out.println(ball01Descriptor.size()); 
     */ 

    } 
    public void paintComponent(Graphics g) 
    { 
     System.loadLibrary(Core.NATIVE_LIBRARY_NAME); 
     super.paintComponent(g); 
      //Mats needed for image processing 
     Mat orginal = new Mat(); 
     Mat current = new Mat(); 
     Mat grey = new Mat(); 
     Mat blur = new Mat(); 
     Mat temp = new Mat(); 
     Mat sub = new Mat(); 
     Mat sub2 = new Mat(); 
     Mat thresh = new Mat(); 
     Mat thresh2 = new Mat(); 
     Mat canny = new Mat(); 
     Mat erode = new Mat(); 
     Mat dilate = new Mat(); 
     Mat blur2 = new Mat(); 
     Mat blur3 = new Mat(); 

     //Array list to hold location rectangle 
     ArrayList<Rect> array = new ArrayList<Rect>(); 
     //System.out.println(start); 


     //Get the background to subtract out. 
     if(start) 
     { 
      start = false; 
      cam.read(bg); 
      Imgproc.cvtColor(bg,bg, Imgproc.COLOR_RGB2GRAY); 
      Imgproc.equalizeHist(bg,bg); 
      Imgproc.GaussianBlur(bg, bg,new Size(5,5), 3); 
      Imgproc.adaptiveThreshold(bg,bg,255,Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV,5,2); 
      last = bg.clone(); 


     } 
      //set the start flag to false 
     start = false; 

     //get current image 
     cam.read(current); 
     orginal = current.clone(); 

     //turn the image grey, then blur 
     Imgproc.cvtColor(current,grey, Imgproc.COLOR_RGB2GRAY); 
     Imgproc.GaussianBlur(grey, blur,new Size(5,5), 50); 

     /* 
     * Image recognition testing 

     Find the Key points 
     MatOfKeyPoint points = new MatOfKeyPoint(); 
     Mat descriptor = new Mat(); 
     detector.detect(grey, points); 
     temp = grey.clone(); 
     extractor.compute(temp, points, descriptor); 

     Match the key points 
     List<MatOfDMatch> matchedPoints = new ArrayList<MatOfDMatch>() ; 
     match.knnMatch(descriptor, ball01Descriptor ,matchedPoints,3); 

     System.out.println(matchedPoints.size()); 
     */ 

     //Thresh the image 
     Imgproc.adaptiveThreshold(blur,thresh,255,Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C, Imgproc.THRESH_BINARY_INV,5,2); 

     //Subtract the base frame and the last frame 
     Core.subtract(thresh, bg, sub); 
     Core.subtract(sub, last, sub2); 
     last = blur.clone(); 

     //reblur the image 
     Imgproc.GaussianBlur(sub2, blur2,new Size(5,5), 50); 

     //dilate then erode 
     Imgproc.dilate(blur2, dilate,Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2,2))); 
     Imgproc.erode(dilate, erode,Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2,2))); 

     //Thresh the image again. 
     Imgproc.threshold(erode, thresh2 , 100,255,Imgproc.THRESH_BINARY_INV); 

     //find the edges 
     Imgproc.Canny(thresh2, canny, 200, 300,5, start); 

     //Blur the image again 
     Imgproc.GaussianBlur(canny, blur3,new Size(5,5), 250); 


     //Get the rectangle to put around the objects 
     array = detection_contours(blur3); 

     //Draw the rectangle on the image 
     if (array.size() > 0) 
     { 

      Iterator<Rect> it2 = array.iterator(); 

      while (it2.hasNext()) 
      { 
       Rect obj = it2.next(); 
       Core.rectangle(orginal, obj.br(), obj.tl(), 
         new Scalar(255, 255, 255), 5); 
      } 

     } 


      //convert the image 
     BufferedImage image = Mat2Buff(orginal); 
     //BufferedImage image = Mat2Buff(blur3); 
     //update the image 
     g.drawImage(image,10,10,image.getWidth(),image.getHeight(), null); 

    } 

    //convert a mat to a buff 
    public static BufferedImage Mat2Buff(Mat m) 
    { 

     int type = BufferedImage.TYPE_BYTE_GRAY; 
     if (m.channels() > 1) 
     { 
      type = BufferedImage.TYPE_3BYTE_BGR; 
     } 
     int bufferSize = m.channels() * m.cols() * m.rows(); 
     byte[] b = new byte[bufferSize]; 
     m.get(0, 0, b); // get all the pixels 
     BufferedImage img = new BufferedImage(m.cols(), m.rows(), type); 
     final byte[] targetPixels = ((DataBufferByte) img.getRaster().getDataBuffer()).getData(); 
     System.arraycopy(b, 0, targetPixels, 0, b.length); 
     return img; 


    } 

    //convert a buff 2 a mat 
    public static Mat Buff2Mat(BufferedImage image) 
    { 
     byte[] data = ((DataBufferByte) image.getRaster().getDataBuffer()).getData(); 
     Mat mat = new Mat(image.getHeight(), image.getWidth(), CvType.CV_8UC3); 
     mat.put(0, 0, data); 
     return mat; 

    } 

    //get the rectangles to draw in image 
    //use this to calibrate the size change 
    public static ArrayList<Rect> detection_contours(Mat outmat) 
    { 
     Mat v = new Mat(); 
     Mat vv = outmat.clone(); 
     List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); 
     Imgproc.findContours(vv, contours, v, Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE); 

      //Change these numbers 
     double maxArea = 50000; 
     double minArea = 1000; 
     int maxAreaIdx = -1; 
     //Rect r = null; 
     ArrayList<Rect> rect_array = new ArrayList<Rect>(); 

     for (int i = 0; i < contours.size(); i++) 
     { 
      Mat contour = contours.get(i); 
      double contourarea = Imgproc.contourArea(contour); 
      maxAreaIdx = i; 
      if(Imgproc.contourArea(contours.get(i)) < maxArea && Imgproc.contourArea(contours.get(i)) > minArea) 
      { 
       rect_array.add(Imgproc.boundingRect(contours.get(maxAreaIdx))); 
      } 

     } 

     return rect_array; 

    } 

    //image recognition, find similar areas 
    private ArrayList<Rect> getRects(Mat img) 
    { 

     List<MatOfPoint> contours = new ArrayList<MatOfPoint>(); 
     List<MatOfPoint> edges = new ArrayList<MatOfPoint>(); 
     Imgproc.findContours(img, contours, new Mat(), Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE); 

     //Imgproc.approxPolyDP(contours, edges, TOP_ALIGNMENT, isEnabled())); 



     return null; 
    } 



    /** 
    * @param args 
    */ 
    public static void main(String[] args) 
    { 


    } 

} 

Why are you tight-looping in the 'main' method and calling 'repaint()' over and over? – Thor84no
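As a rough illustration of what this comment suggests (a sketch, not code from the question): a javax.swing.Timer could drive the repaints at a fixed rate instead of the busy while-loop in main; the 33 ms delay is an assumed value.

    //Sketch: repaint roughly 30 times per second instead of spinning in a tight loop. 
    //Assumes cam and cam2 are declared final so the anonymous listener can capture them. 
    final javax.swing.Timer timer = new javax.swing.Timer(33, new java.awt.event.ActionListener() 
    { 
        public void actionPerformed(java.awt.event.ActionEvent e) 
        { 
            cam.repaint(); 
            cam2.repaint(); 
        } 
    }); 
    timer.start(); 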


I tried to clean up the code you dumped, but it is such a huge mass that nobody wants to read that I gave up halfway through. You have made no effort to make it readable. I suggest you improve it a little (and split the different classes into separate code blocks) if you want people to take the time to look through it for you. – Thor84no


I did clean up the code. They are separate classes; I just didn't post them in separate blocks. – user1376339

Answers


There is a profiling tool called jvisualvm, included in the JDK's \bin directory, which you can use to monitor your memory and CPU usage. You can find more information on it here if you find it useful.
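If a quick console readout is enough alongside jvisualvm, a minimal sketch using the standard Runtime API (not part of the original code) can log heap usage; note that this only shows the Java heap, not the native memory behind OpenCV Mat objects.

    //Sketch: print the current Java heap usage in MB. 
    Runtime rt = Runtime.getRuntime(); 
    long usedMb = (rt.totalMemory() - rt.freeMemory()) / (1024 * 1024); 
    long maxMb = rt.maxMemory() / (1024 * 1024); 
    System.out.println("Heap used: " + usedMb + " MB of max " + maxMb + " MB"); 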
