在开始输入视频帧之前，先为音频的 AVAssetWriterInput 追加一个空的 CMSampleBuffer，以确保捕获的音频数据和视频帧都能与完整的持续时间对应。
示例代码:
// --- Example usage ---

// Declare the writer, its inputs, and the timing variables.
AVAssetWriterInput *videoInput;
AVAssetWriterInput *audioInput;
AVAssetWriter *writer;
CMSampleBufferRef emptySampleBuffer;
CMTime audioStartTime;
CMTime videoStartTime;

// Initialize the AVAssetWriter and attach its inputs.
// ...

// Give the video input an initial timestamp.
// NOTE(review): AVAssetWriterInput exposes a `mediaTimeScale` property but has
// no `mediaTimeRange` — the original `-setMediaTimeRange:` call does not exist
// in the API and would not compile, so it was removed.
videoStartTime = CMTimeMake(0, videoTrack.naturalTimeScale);
videoInput.mediaTimeScale = videoTrack.naturalTimeScale;
[writer startWriting];
[writer startSessionAtSourceTime:videoStartTime];

// Append an empty sample buffer so that both the audio data and the video
// frames line up with the full capture duration. Guard against a NULL buffer:
// the helper returns NULL on failure, and appending NULL is invalid.
emptySampleBuffer = [self createEmptySampleBufferForAudioTrack:audioTrack atTime:audioStartTime];
if (emptySampleBuffer != NULL) {
    if ([audioInput isReadyForMoreMediaData]) {
        [audioInput appendSampleBuffer:emptySampleBuffer];
    }
    // The buffer came from a Create-rule function: the caller owns it and must
    // release it (appendSampleBuffer: retains what it needs) to avoid a leak.
    CFRelease(emptySampleBuffer);
    emptySampleBuffer = NULL;
}

// Append the actual video frames and audio data.
// ...
//创建一个空的CMSampleBuffer
/// Creates an empty CMSampleBuffer for the given audio track, timestamped at
/// `time`, suitable for priming an AVAssetWriterInput before real data arrives.
///
/// @param audioTrack The source audio track; its first format description is
///                   used for the sample buffer. Must have at least one.
/// @param time       The presentation timestamp for the empty sample.
/// @return A sample buffer the CALLER OWNS (Create rule — CFRelease it after
///         appending), or NULL on failure.
- (CMSampleBufferRef)createEmptySampleBufferForAudioTrack:(AVAssetTrack *)audioTrack
                                                   atTime:(CMTime)time {
    CMSampleBufferRef sampleBuffer = NULL;
    CMBlockBufferRef blockBuffer = NULL;

    // Back the sample with a zero-length block buffer.
    OSStatus status = CMBlockBufferCreateEmpty(kCFAllocatorDefault,
                                               0,
                                               kCMBlockBufferAssureMemoryNowFlag,
                                               &blockBuffer);
    if (status != noErr) {
        // OSStatus is a 32-bit signed integer; cast so %d is always correct.
        NSLog(@"Error creating empty CMBlockBuffer for audio: %d", (int)status);
        return NULL;
    }

    // `formatDescriptions` contains CMFormatDescriptionRef values bridged as
    // `id` — NOT AudioStreamBasicDescription structs. The original code
    // dereferenced element 0 as an ASBD, which reads garbage; the sample
    // buffer instead needs the format description itself.
    CMAudioFormatDescriptionRef formatDescription =
        (__bridge CMAudioFormatDescriptionRef)audioTrack.formatDescriptions.firstObject;
    if (formatDescription == NULL) {
        NSLog(@"Audio track has no format description");
        CFRelease(blockBuffer);
        return NULL;
    }

    // Invalid duration/decode time: only the presentation timestamp matters.
    CMSampleTimingInfo timingInfo = {
        .duration = kCMTimeInvalid,
        .presentationTimeStamp = time,
        .decodeTimeStamp = kCMTimeInvalid,
    };

    // CMSampleBufferCreate takes 12 arguments; the original omitted the
    // format description (passing a nonexistent `formatDescriptionsCount`
    // in its slot) and so would not compile.
    status = CMSampleBufferCreate(kCFAllocatorDefault,
                                  blockBuffer,        // dataBuffer
                                  true,               // dataReady
                                  NULL,               // makeDataReadyCallback
                                  NULL,               // makeDataReadyRefcon
                                  formatDescription,  // formatDescription
                                  1,                  // numSamples
                                  1,                  // numSampleTimingEntries
                                  &timingInfo,        // sampleTimingArray
                                  0,                  // numSampleSizeEntries
                                  NULL,               // sampleSizeArray
                                  &sampleBuffer);
    CFRelease(blockBuffer);

    if (status != noErr) {
        NSLog(@"Error creating empty CMSampleBuffer for audio: %d", (int)status);
        return NULL;
    }
    return sampleBuffer;
}