Hardware Video Encoding and Decoding: A Primer


Part One: Hardware Encoding
1. Initialize the capture device
a. Create a session (AVCaptureSession)
b. Add an input (AVCaptureDeviceInput)
c. Add a data output (AVCaptureVideoDataOutput)
d. Add a preview layer (AVCaptureVideoPreviewLayer)

- (void)initVideoCapture
{
    _captureSession = [[AVCaptureSession alloc] init];
    [_captureSession setSessionPreset:AVCaptureSessionPreset640x480];

    AVCaptureDevice *captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if (captureDevice == nil) { NSLog(@"captureDevice is nil!"); return; }

    NSError *error;
    // Input device for the session
    AVCaptureDeviceInput *inputDevice = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&error];
    if (error) { NSLog(@"AVCaptureDeviceInput error %@", error); return; }

    if ([_captureSession canAddInput:inputDevice]) {
        [_captureSession addInput:inputDevice];
    }

    // Output for the session, specialized to raw video data
    AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
    /* Only the pixel formats 420v, 420f and BGRA are supported */
    videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:
                                     [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange] forKey:(NSString *)kCVPixelBufferPixelFormatTypeKey];
    // Drop frames that arrive while the delegate is still busy with the previous frame
    [videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
    // Deliver sample buffers to this class on a global queue
    [videoDataOutput setSampleBufferDelegate:self queue:dispatch_get_global_queue(0, 0)];

    if ([_captureSession canAddOutput:videoDataOutput]) {
        [_captureSession addOutput:videoDataOutput];
    } else { NSLog(@"cannot add output!"); }

    // Add the preview layer
    AVCaptureVideoPreviewLayer *videoLayer = [AVCaptureVideoPreviewLayer layerWithSession:_captureSession];
    videoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
    videoLayer.frame = CGRectMake(0, 0, self.view.bounds.size.width, self.view.bounds.size.height - 100);
    [self.view.layer addSublayer:videoLayer];

}
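The snippets in this post refer to a handful of ivars that the original never declares. A minimal sketch of the class interface they assume (the names match the usage below; the exact declaration is my reconstruction):

#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>
#import <VideoToolbox/VideoToolbox.h>

@interface ViewController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate>
{
    AVCaptureSession *_captureSession;            // capture pipeline
    VTCompressionSessionRef _compressionSession;  // H.264 encoder session
    int64_t _frameCount;                          // running pts counter
    FILE *_h264File;                              // output file for the raw H.264 stream
}
@end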

2. Create a compression session

#pragma mark - Set up the VTCompressionSessionRef before feeding frames to the encoder
- (int)startEncodeSessionWith:(int)width height:(int)height frameRate:(int)fps bitRate:(int)bt
{
    OSStatus osstatus;
    _frameCount = 0;
    // compressionframe is the callback function whose address is handed to the session
    VTCompressionOutputCallback outputCallBack = compressionframe;
    // Create a session for compressing video frames
    osstatus = VTCompressionSessionCreate(kCFAllocatorDefault, width, height, kCMVideoCodecType_H264, NULL, NULL, NULL, outputCallBack, (__bridge void * _Nullable)(self), &_compressionSession);
    if (osstatus != noErr) { NSLog(@"VTCompressionSessionCreate failed. ret=%d", (int)osstatus); return -1; }

    // Enable real-time encoding to reduce latency
    osstatus = VTSessionSetProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
    NSLog(@"kVTCompressionPropertyKey_RealTime - %d", (int)osstatus);

    // Allocate encoder resources now, before the first frame arrives
    osstatus = VTCompressionSessionPrepareToEncodeFrames(_compressionSession);

    return 0;
}
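Note that as written the fps and bt parameters are never applied. A minimal sketch of the additional properties one would typically set before VTCompressionSessionPrepareToEncodeFrames (the property keys are real VideoToolbox constants; the chosen values are only examples):

    // Target average bit rate (bits per second) and expected frame rate;
    // these use the method's fps/bt parameters, which the code above ignores.
    VTSessionSetProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate,
                         (__bridge CFTypeRef)@(bt));
    VTSessionSetProperty(_compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate,
                         (__bridge CFTypeRef)@(fps));
    // Force a keyframe at least every fps frames (roughly once per second)
    VTSessionSetProperty(_compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval,
                         (__bridge CFTypeRef)@(fps));
    // Baseline profile keeps the stream simple to decode
    VTSessionSetProperty(_compressionSession, kVTCompressionPropertyKey_ProfileLevel,
                         kVTProfileLevel_H264_Baseline_AutoLevel);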

static int i = 0;
// The address of this function is passed into VTCompressionSessionCreate as the output callback.
// This is where the H.264 stream can be produced for transmission.
void compressionframe(void *useData, void *sourceFrameRefCon, OSStatus status, VTEncodeInfoFlags infoFlags, CMSampleBufferRef sampleBuffer)
{
//    NSLog(@"sampleBuffer = %@", sampleBuffer);
    if (status != noErr || sampleBuffer == NULL) { return; }

    // Recover the view controller from the refCon handed to VTCompressionSessionCreate;
    // the original used a static variable that was never assigned.
    ViewController *selfClass = (__bridge ViewController *)useData;

    OSStatus err;
    CMFormatDescriptionRef formatDes;
    const uint8_t *sps;
    size_t spsSize;
    size_t spsCount;
    formatDes = CMSampleBufferGetFormatDescription(sampleBuffer);
    NSLog(@"formatDes = [%@]", formatDes);
    // Parameter set 0 is the SPS
    err = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(formatDes, 0, &sps, &spsSize, &spsCount, NULL);
    NSLog(@"spsSize = %zu spsCount = %zu", spsSize, spsCount);

    // Parameter set 1 is the PPS
    const uint8_t *pps;
    size_t ppsSize;
    size_t ppsCount;
    err = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(formatDes, 1, &pps, &ppsSize, &ppsCount, NULL);
    NSLog(@"ppsSize = %zu ppsCount = %zu", ppsSize, ppsCount);
    NSLog(@"code = %d", (int)err);

    // Write the SPS/PPS once, at the start of the file
    if (i == 0) {
        i = 1;
        [selfClass writeH264Data:(void *)sps length:spsSize addStartCode:YES];
        [selfClass writeH264Data:(void *)pps length:ppsSize addStartCode:YES];
    }

    size_t lengthAtOffset, totalLength;
    char *dataPointer;
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    err = CMBlockBufferGetDataPointer(blockBuffer, 0, &lengthAtOffset, &totalLength, &dataPointer);
    NSLog(@"code = %d", (int)err);

    if (err == noErr) {
        size_t offset = 0;
        const size_t lengthInfoSize = 4; // each NALU starts not with a 0x00000001 start code but with a big-endian length prefix

        // Walk the buffer NALU by NALU; one callback may contain several NALUs
        while (offset + lengthInfoSize < totalLength) {
            uint32_t naluLength = 0;
            memcpy(&naluLength, dataPointer + offset, lengthInfoSize); // read the NALU length prefix

            // Convert from big-endian to host byte order
            naluLength = CFSwapInt32BigToHost(naluLength);
            NSLog(@"got nalu data, length=%u, totalLength=%zu", naluLength, totalLength);

            // Write the NALU to file, substituting a start code for the length prefix
            [selfClass writeH264Data:dataPointer + offset + lengthInfoSize length:naluLength addStartCode:YES];

            // Move on to the next NALU
            offset += lengthInfoSize + naluLength;
        }
    }

//    [selfClass showH264DataWith:sampleBuffer];
//    [selfClass decompressionSession];
}
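Writing the SPS/PPS once is enough for a single file, but for live streaming they are normally re-emitted before every keyframe. A minimal sketch of keyframe detection via the sample attachments (the attachment key is a real Core Media constant; where you call this is up to you):

// Returns YES if the encoded sample is a sync frame (IDR/keyframe).
static BOOL isKeyFrame(CMSampleBufferRef sampleBuffer)
{
    CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, false);
    if (attachments == NULL || CFArrayGetCount(attachments) == 0) {
        return NO;
    }
    CFDictionaryRef attachment = (CFDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
    // Absence of the NotSync key means the sample IS a sync (key) frame
    return !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
}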

3. Receive the capture input
The raw CMSampleBufferRef data arrives in the output's delegate method. Before it can be encoded, the compression session (VTCompressionSessionRef) from step 2 must already exist.

#pragma mark - Video data output delegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // The raw CMSampleBufferRef data arrives here
    [self encodeFrame:sampleBuffer];
}
// Encode each frame; mind the threading
- (void)encodeFrame:(CMSampleBufferRef)sampleBuf
{
    // CMSampleBuffer = CMTime + FormatDesc + CMBlockBuffer
    // Encode synchronously so frames reach the encoder in order (see the async sketch below)
    dispatch_sync(dispatch_get_global_queue(0, 0), ^{
        CVImageBufferRef imgBuf = CMSampleBufferGetImageBuffer(sampleBuf);
        // The pts is the presentation time; it must be set and must advance,
        // otherwise the encoded output becomes very large
        CMTime pts = CMTimeMake(_frameCount++, 1000);
        // kCMTimeInvalid initializes an invalid CMTime
        CMTime duration = kCMTimeInvalid;

        VTEncodeInfoFlags flags;
        // Hand the frame to the encoder
        OSStatus status = VTCompressionSessionEncodeFrame(_compressionSession, imgBuf, pts, duration, NULL, NULL, &flags);
        if (status != noErr) {
            NSLog(@"H264: VTCompressionSessionEncodeFrame failed with %d", (int)status);
            return;
        }
    });
}
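The original comment wonders what happens with an async dispatch. One caveat if you try it: the sample buffer is only guaranteed valid while the delegate callback runs, so it must be retained across the async hop. A minimal sketch, assuming the same ivars (`encodeFrameAsync:` is a hypothetical name, and frames may now reach the encoder out of order):

- (void)encodeFrameAsync:(CMSampleBufferRef)sampleBuf
{
    // Keep the buffer alive beyond the delegate callback
    CFRetain(sampleBuf);
    dispatch_async(dispatch_get_global_queue(0, 0), ^{
        CVImageBufferRef imgBuf = CMSampleBufferGetImageBuffer(sampleBuf);
        VTEncodeInfoFlags flags;
        VTCompressionSessionEncodeFrame(_compressionSession, imgBuf,
                                        CMTimeMake(_frameCount++, 1000),
                                        kCMTimeInvalid, NULL, NULL, &flags);
        CFRelease(sampleBuf);
    });
}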

4. Finally, write to a file or send it over the network

- (void)writeH264Data:(void *)data length:(size_t)length addStartCode:(BOOL)b
{
    // The 4-byte Annex-B start code that delimits NALUs in the .h264 file
    const Byte bytes[] = "\x00\x00\x00\x01";

    if (_h264File) {
        if (b)
            fwrite(bytes, 1, 4, _h264File);

        fwrite(data, 1, length, _h264File);
    } else {
        NSLog(@"_h264File is NULL; check whether it was opened successfully");
    }
}

// Don't forget to start everything
- (IBAction)startClick:(UIButton *)sender {
    _h264File = fopen([[NSString stringWithFormat:@"%@/Documents/vt_encode.h264", NSHomeDirectory()] UTF8String], "wb");
    // 480x640 assumes portrait-oriented buffers; match this to the actual capture dimensions
    [self startEncodeSessionWith:480 height:640 frameRate:25 bitRate:640*1000];
    [_captureSession startRunning];
}
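There is no matching stop action in the original. A minimal sketch of the teardown, assuming the same ivars (`stopClick:` is a hypothetical name):

- (IBAction)stopClick:(UIButton *)sender {
    [_captureSession stopRunning];
    if (_compressionSession) {
        // Flush frames still queued inside the encoder, then tear the session down
        VTCompressionSessionCompleteFrames(_compressionSession, kCMTimeInvalid);
        VTCompressionSessionInvalidate(_compressionSession);
        CFRelease(_compressionSession);
        _compressionSession = NULL;
    }
    if (_h264File) {
        fclose(_h264File);
        _h264File = NULL;
    }
}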






Part Two: Hardware Decoding

// If the encoding works, this method can be called directly from compressionframe to play back the raw video.
// No OpenGL rendering is used here; instead it relies on the system AVSampleBufferDisplayLayer API.
- (void)showH264DataWith:(CMSampleBufferRef)sampleBuffer
{
    if (_sampleBufLayer == nil) {
        _sampleBufLayer = [[AVSampleBufferDisplayLayer alloc] init];
        _sampleBufLayer.videoGravity = AVLayerVideoGravityResizeAspect;
        _sampleBufLayer.opaque = YES;
        _sampleBufLayer.frame = CGRectMake(100, 200, 240, 320);
        dispatch_async(dispatch_get_main_queue(), ^{
            [self.view.layer addSublayer:_sampleBufLayer];
        });
    }
    if ([_sampleBufLayer isReadyForMoreMediaData]) {
        dispatch_sync(dispatch_get_main_queue(), ^{
            [_sampleBufLayer enqueueSampleBuffer:sampleBuffer];
            [_sampleBufLayer setNeedsDisplay];
            [_sampleBufLayer flush];
        });

        // In theory the buffer should be released, but it must not be released here yet
//        CFRelease(sampleBuffer);
    }
}
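When the layer has no timebase attached, enqueued buffers may never be drawn unless they are flagged for immediate display. A minimal sketch of setting that flag before enqueueSampleBuffer (the attachment key is a real Core Media constant; the helper name is mine):

// Mark a sample buffer so AVSampleBufferDisplayLayer renders it as soon
// as it is enqueued, instead of waiting on a presentation timestamp.
static void markForImmediateDisplay(CMSampleBufferRef sampleBuffer)
{
    CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true);
    if (attachments != NULL && CFArrayGetCount(attachments) > 0) {
        CFMutableDictionaryRef dict = (CFMutableDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
        CFDictionarySetValue(dict, kCMSampleAttachmentKey_DisplayImmediately, kCFBooleanTrue);
    }
}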

Decoding means taking the SPS, the PPS, and the blockBuffer from the stream, combining them into a decodable CMSampleBuffer, and playing it back with AVSampleBufferDisplayLayer. No extra rendering (such as beautification filters) is applied.
1. Create a format description (CMVideoFormatDescriptionRef), and from it create the decompression session (VTDecompressionSessionRef)

/// Decoding state
    AVSampleBufferDisplayLayer *_sampleBufLayer;
    VTDecompressionSessionRef _decompressionSession;
    CMVideoFormatDescriptionRef _decodeVideoFormatDes;
    // These are assigned after parsing the stream, so they cannot be declared const as in the original
    const uint8_t *_sps;
    const uint8_t *_pps;
    size_t _spsSize, _ppsSize;

// Create the format description _decodeVideoFormatDes, then the decode session
- (void)decompressionSession
{
    const uint8_t *const parameterSetPointers[2] = { _sps, _pps };
    const size_t parameterSetSizes[2] = { _spsSize, _ppsSize };
    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault,
                                                    2, // parameter set count
                                                    parameterSetPointers,
                                                    parameterSetSizes,
                                                    4, // NAL length-prefix size
                                                    &_decodeVideoFormatDes);

    NSLog(@"_decodeVideoFormatDes = %@", _decodeVideoFormatDes);

    // The callback must be set, otherwise decoded frames have nowhere to go
    // (see decompressionOutputCallback below)
    VTDecompressionOutputCallbackRecord outputCBRecord;
    outputCBRecord.decompressionOutputCallback = decompressionOutputCallback;
    outputCBRecord.decompressionOutputRefCon = NULL;
    status = VTDecompressionSessionCreate(kCFAllocatorDefault, _decodeVideoFormatDes, NULL,
                                          NULL,
                                          &outputCBRecord,
                                          &_decompressionSession);
}
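The original left the callback record empty, which leaves the decoded frames with no way back to the caller. A minimal sketch of an output callback that returns each decoded CVPixelBufferRef through the per-frame sourceFrameRefCon (the function name is mine; the signature is the standard VTDecompressionOutputCallback):

// Receives every decoded frame. The sourceFrameRefCon passed per frame to
// VTDecompressionSessionDecodeFrame is the address of a CVPixelBufferRef
// that the caller wants filled in.
static void decompressionOutputCallback(void *decompressionOutputRefCon,
                                        void *sourceFrameRefCon,
                                        OSStatus status,
                                        VTDecodeInfoFlags infoFlags,
                                        CVImageBufferRef imageBuffer,
                                        CMTime presentationTimeStamp,
                                        CMTime presentationDuration)
{
    CVPixelBufferRef *outputPixelBuffer = (CVPixelBufferRef *)sourceFrameRefCon;
    if (status == noErr && imageBuffer != NULL && outputPixelBuffer != NULL) {
        *outputPixelBuffer = CVPixelBufferRetain(imageBuffer);
    }
}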

2. Decode with the VTDecompressionSessionRef session

- (CVPixelBufferRef)decode:(uint8_t *)frame size:(size_t)frameSize
{
    /* VTDecompressionSessionCreate      creates the decode session
       VTDecompressionSessionDecodeFrame decodes one frame
       VTDecompressionSessionInvalidate  destroys the decode session */
    OSStatus status;

    // frame is one frame of input H.264 video data in AVCC form:
    // a 4-byte big-endian length prefix followed by the NALU payload.
    // First wrap the H.264 data in a CMBlockBufferRef with CMBlockBufferCreateWithMemoryBlock,
    // then build a CMSampleBufferRef with CMSampleBufferCreateReady.
    CMBlockBufferRef blockBuffer;
    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, frame,
                                       frameSize, kCFAllocatorNull, NULL, 0,
                                       frameSize, 0,
                                       &blockBuffer);
    if (status != noErr) { NSLog(@"CMBlockBufferCreateWithMemoryBlock error! %d", (int)status); return NULL; }

    // size_t is unsigned and platform independent
    const size_t sampleSizeArray[] = { frameSize };
    CMSampleBufferRef decodeSampleBuffer = NULL;
    // Combine the blockBuffer with the format description into decodeSampleBuffer;
    // at this point decodeSampleBuffer could already be played with AVSampleBufferDisplayLayer
    status = CMSampleBufferCreateReady(kCFAllocatorDefault, blockBuffer,
                                       _decodeVideoFormatDes, 1, 0, NULL, 1,
                                       sampleSizeArray,
                                       &decodeSampleBuffer);

    if (status != noErr) { NSLog(@"CMSampleBufferCreateReady error! %d", (int)status); CFRelease(blockBuffer); return NULL; }

    // 3. Produce the CVPixelBufferRef used for rendering; the output callback
    // fills it in through the sourceFrameRefCon (&pixelBuffer)
    VTDecodeInfoFlags outputFlag = 0;
    VTDecodeFrameFlags flags = 0;
    CVPixelBufferRef pixelBuffer = NULL;
    status = VTDecompressionSessionDecodeFrame(_decompressionSession, decodeSampleBuffer,
                                               flags,
                                               &pixelBuffer,
                                               &outputFlag);

    CFRelease(decodeSampleBuffer);
    CFRelease(blockBuffer);

    if (status != noErr) { NSLog(@"VTDecompressionSessionDecodeFrame error! %d", (int)status); return NULL; }
    return pixelBuffer;
}
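Putting the pieces together, a minimal sketch, assuming the frame has already been parsed out of the stream or a network packet in AVCC form (`playFrame:size:` is a hypothetical helper):

// Hypothetical driver: decode one AVCC frame (4-byte big-endian length + NALU)
// and hand the result to whatever renders it.
- (void)playFrame:(uint8_t *)avccFrame size:(size_t)frameSize
{
    if (_decompressionSession == NULL) {
        [self decompressionSession]; // requires _sps/_pps to have been captured first
    }
    CVPixelBufferRef pixelBuffer = [self decode:avccFrame size:frameSize];
    if (pixelBuffer) {
        // Render pixelBuffer (e.g. wrap it in a CIImage), then release it
        CVPixelBufferRelease(pixelBuffer);
    }
}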

Note: this version has not been optimized in any way.
