I am building a simple texture display that renders BGRA-format video frames through a Metal layer on macOS. I followed the same steps described in the Metal WWDC sessions, but I run into a problem when creating the render command encoder: my code asserts with "no render targets found".

id <MTLDevice> device = MTLCreateSystemDefaultDevice(); 
id<MTLCommandQueue> commandQueue = [device newCommandQueue]; 

id<MTLLibrary> library = [device newDefaultLibrary]; 

// Create Render Command Descriptor. 
MTLRenderPipelineDescriptor* renderPipelineDesc = [MTLRenderPipelineDescriptor new]; 
renderPipelineDesc.colorAttachments[0].pixelFormat = MTLPixelFormatBGRA8Unorm; 
renderPipelineDesc.vertexFunction = [library newFunctionWithName:@"basic_vertex"]; 
renderPipelineDesc.fragmentFunction = [library newFunctionWithName:@"basic_fragment"]; 

NSError* error = nil; 
id<MTLRenderPipelineState> renderPipelineState = [device newRenderPipelineStateWithDescriptor:renderPipelineDesc 
                   error:&error]; 

id<MTLCommandBuffer> commandBuffer = [commandQueue commandBuffer]; 

MTLRenderPassDescriptor* renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor]; 

id<CAMetalDrawable> drawable = [_metalLayer nextDrawable]; 

MTLRenderPassColorAttachmentDescriptor* colorAttachmentDesc = [MTLRenderPassColorAttachmentDescriptor new]; 
colorAttachmentDesc.texture = drawable.texture; 
colorAttachmentDesc.loadAction = MTLLoadActionLoad; 
colorAttachmentDesc.storeAction = MTLStoreActionStore; 
colorAttachmentDesc.clearColor = MTLClearColorMake(0, 0, 0, 1); 

[renderPassDesc.colorAttachments setObject:colorAttachmentDesc atIndexedSubscript:0]; 

[inTexture replaceRegion:region 
     mipmapLevel:0 
      withBytes:imageBytes 
     bytesPerRow:CVPixelBufferGetBytesPerRow(_image)]; 

id<MTLRenderCommandEncoder> renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc]; 

[renderCmdEncoder setRenderPipelineState:_renderPipelineState]; 
[renderCmdEncoder endEncoding]; 

The code crashes on the line `id<MTLRenderCommandEncoder> renderCmdEncoder = [commandBuffer renderCommandEncoderWithDescriptor:renderPassDesc]`. I cannot figure out where and how the render target is supposed to be set.

Answers


This works perfectly. Let me know if you need any help implementing it:

@import UIKit; 
@import AVFoundation; 
@import CoreMedia; 
#import <MetalKit/MetalKit.h> 
#import <Metal/Metal.h> 
#import <MetalPerformanceShaders/MetalPerformanceShaders.h> 

@interface ViewController : UIViewController <MTKViewDelegate, AVCaptureVideoDataOutputSampleBufferDelegate> { 
    NSString *_displayName; 
    NSString *serviceType; 
} 

@property (retain, nonatomic) AVCaptureSession *avSession; 

@end 

#import "ViewController.h" 

@interface ViewController() { 
    MTKView *_metalView; 

    id<MTLDevice> _device; 
    id<MTLCommandQueue> _commandQueue; 
    id<MTLTexture> _texture; 

    CVMetalTextureCacheRef _textureCache; 
} 

@property (strong, nonatomic) AVCaptureDevice *videoDevice; 
@property (nonatomic) dispatch_queue_t sessionQueue; 

@end 

@implementation ViewController 

- (void)viewDidLoad { 
    NSLog(@"%s", __PRETTY_FUNCTION__); 
    [super viewDidLoad]; 

    _device = MTLCreateSystemDefaultDevice(); 
    _metalView = [[MTKView alloc] initWithFrame:self.view.bounds]; 
    [_metalView setContentMode:UIViewContentModeScaleAspectFit]; 
    _metalView.device = _device; 
    _metalView.delegate = self; 
    _metalView.clearColor = MTLClearColorMake(1, 1, 1, 1); 
    _metalView.colorPixelFormat = MTLPixelFormatBGRA8Unorm; 
    _metalView.framebufferOnly = NO; 
    _metalView.autoResizeDrawable = NO; 

    CVMetalTextureCacheCreate(NULL, NULL, _device, NULL, &_textureCache); 
    // Create the command queue once here rather than on every captured frame. 
    _commandQueue = [_device newCommandQueue]; 

    [self.view addSubview:_metalView]; 

    self.sessionQueue = dispatch_queue_create("session queue", DISPATCH_QUEUE_SERIAL); 

    if ([self setupCamera]) { 
     [_avSession startRunning]; 
    } 
} 

- (BOOL)setupCamera { 
    NSLog(@"%s", __PRETTY_FUNCTION__); 
    @try { 
     NSError * error; 

      _avSession = [[AVCaptureSession alloc] init]; 
      [_avSession beginConfiguration]; 
      [_avSession setSessionPreset:AVCaptureSessionPreset640x480]; 

      // get list of devices; connect to front-facing camera 
      self.videoDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]; 
      if (self.videoDevice == nil) return FALSE; 

      AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:self.videoDevice error:&error]; 
      [_avSession addInput:input]; 

      dispatch_queue_t sampleBufferQueue = dispatch_queue_create("CameraMulticaster", DISPATCH_QUEUE_SERIAL); 

      AVCaptureVideoDataOutput * dataOutput = [[AVCaptureVideoDataOutput alloc] init]; 
      [dataOutput setAlwaysDiscardsLateVideoFrames:YES]; 
      [dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)}]; 
      [dataOutput setSampleBufferDelegate:self queue:sampleBufferQueue]; 

      [_avSession addOutput:dataOutput]; 
      [_avSession commitConfiguration]; 
      // Return from the end of the @try block; a return inside @finally would 
      // override the FALSE paths above and make setupCamera always report success. 
      return TRUE; 
    } @catch (NSException *exception) { 
     NSLog(@"%s - %@", __PRETTY_FUNCTION__, exception.description); 
     return FALSE; 
    } 
} 

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection 
{ 
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
    { 
     size_t width = CVPixelBufferGetWidth(pixelBuffer); 
     size_t height = CVPixelBufferGetHeight(pixelBuffer); 

     CVMetalTextureRef texture = NULL; 
     CVReturn status = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, _textureCache, pixelBuffer, NULL, MTLPixelFormatBGRA8Unorm, width, height, 0, &texture); 
     if(status == kCVReturnSuccess) 
     { 
      _metalView.drawableSize = CGSizeMake(width, height); 
      _texture = CVMetalTextureGetTexture(texture); 
      CFRelease(texture); 
     } 
    } 
} 

- (void)drawInMTKView:(MTKView *)view { 
    // creating command encoder 
    if (_texture) { 
     id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer]; 
     id<MTLTexture> drawingTexture = view.currentDrawable.texture; 

     // set up and encode the filter 
     MPSImageGaussianBlur *filter = [[MPSImageGaussianBlur alloc] initWithDevice:_device sigma:5]; 

     [filter encodeToCommandBuffer:commandBuffer sourceTexture:_texture destinationTexture:drawingTexture]; 

     // committing the drawing 
     [commandBuffer presentDrawable:view.currentDrawable]; 
     [commandBuffer commit]; 
     _texture = nil; 
    } 
} 

- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size { 

} 

@end 
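
If you would rather keep the render-encoder approach from the original question instead of the MPSImageGaussianBlur transfer above, a minimal, untested sketch of drawInMTKView: can rely on the view's already-configured currentRenderPassDescriptor, which has the drawable's texture bound as color attachment 0. The `_renderPipelineState` below stands in for the pipeline state built in the question and is an assumption:

- (void)drawInMTKView:(MTKView *)view { 
    // currentRenderPassDescriptor already has the drawable's texture attached 
    // as color attachment 0, so there is no render target left to configure. 
    MTLRenderPassDescriptor *passDesc = view.currentRenderPassDescriptor; 
    id<CAMetalDrawable> drawable = view.currentDrawable; 
    if (!passDesc || !drawable || !_texture) { return; } 

    id<MTLCommandBuffer> commandBuffer = [_commandQueue commandBuffer]; 
    id<MTLRenderCommandEncoder> encoder = 
        [commandBuffer renderCommandEncoderWithDescriptor:passDesc]; 

    [encoder setRenderPipelineState:_renderPipelineState]; // assumed: pipeline from the question 
    [encoder setFragmentTexture:_texture atIndex:0];       // camera frame sampled by the fragment shader 
    // ... set vertex buffers and issue the draw call for a full-screen quad ... 
    [encoder endEncoding]; 

    [commandBuffer presentDrawable:drawable]; 
    [commandBuffer commit]; 
    _texture = nil; 
} 

The "no render targets found" assertion typically means the descriptor passed to renderCommandEncoderWithDescriptor: has no attachment with a valid texture, for example when nextDrawable returned nil, so using the view's own descriptor (or otherwise making sure a drawable texture is attached) avoids it.
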

You are currently creating a new render pass descriptor. Instead, try one of the following:

1. Use the render pass descriptor from the MTKView object. That render pass descriptor is already configured, so you don't need to set anything on it:

if let currentPassDesc = view.currentRenderPassDescriptor, 
   let currentDrawable = view.currentDrawable, 
   let renderCommandEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: currentPassDesc) 
{ 
    renderCommandEncoder.setRenderPipelineState(renderPipeline) 

    // set vertex buffers and call draw APIs 
    ....... 
    ....... 

    renderCommandEncoder.endEncoding() 
    commandBuffer.present(currentDrawable) 
    commandBuffer.commit() 
} 

2. Create a new render pass descriptor yourself and set its color attachment to the drawable's texture; then, as in the sample code below, you don't need to set anything else. If you instead create a new texture object and use it as the render target, the rendered content ends up in that texture but is never shown on screen; to display it, copy the contents of that texture into the drawable's texture and then present the drawable (the copy step is sketched after the code below).

renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(red: 0.0, green: 0.0, blue: 0.0, alpha: 1.0) 
renderPassDescriptor.colorAttachments[0].loadAction = .clear 
renderPassDescriptor.colorAttachments[0].storeAction = .store 

renderPassDescriptor.depthAttachment.clearDepth = 1.0 
renderPassDescriptor.depthAttachment.loadAction = .clear 
renderPassDescriptor.depthAttachment.storeAction = .dontCare 

let view = self.view as! MTKView 
let textDesc = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, 
                                                        width: Int(view.frame.width), 
                                                        height: Int(view.frame.height), 
                                                        mipmapped: false) 
textDesc.depth = 1 
// The texture must be usable both as a render target and for reading/copying later. 
textDesc.usage = [MTLTextureUsage.renderTarget, MTLTextureUsage.shaderRead] 
textDesc.storageMode = .private 
mainPassFrameBuffer = device.makeTexture(descriptor: textDesc) 
renderPassDescriptor.colorAttachments[0].texture = mainPassFrameBuffer 
The code above creates the render target texture and attaches it to the render pass descriptor.
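
The copy from the offscreen mainPassFrameBuffer to the screen is not shown in the snippet. A minimal sketch of that step with a blit encoder, written here in Objective-C to match the question's code, might look as follows; `commandBuffer`, `drawable`, and `mainPassFrameBuffer` are assumed to come from the surrounding code, and the drawable must have the same size and pixel format as the offscreen texture:

// Encode this after the render pass into mainPassFrameBuffer has ended. 
id<MTLTexture> source = mainPassFrameBuffer;   // offscreen render target from the snippet above 
id<MTLTexture> destination = drawable.texture; // the CAMetalDrawable's texture 

id<MTLBlitCommandEncoder> blit = [commandBuffer blitCommandEncoder]; 
[blit copyFromTexture:source 
          sourceSlice:0 
          sourceLevel:0 
         sourceOrigin:MTLOriginMake(0, 0, 0) 
           sourceSize:MTLSizeMake(source.width, source.height, 1) 
            toTexture:destination 
     destinationSlice:0 
     destinationLevel:0 
    destinationOrigin:MTLOriginMake(0, 0, 0)]; 
[blit endEncoding]; 

[commandBuffer presentDrawable:drawable]; 
[commandBuffer commit]; 

Note that blitting into the drawable's texture requires framebufferOnly = NO on the MTKView or CAMetalLayer (as in the first answer), otherwise the drawable texture is not a valid blit destination.
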