iOS - Unable to get audio file header with AV Foundation
For my application, on one side, I need to read an audio file from the iTunes library stored on disk. For that, I have a callback that streams each audio buffer to a client.
On the other side, I have a callback that receives each audio buffer and rebuilds a playable audio file, in the original format, stored on the iOS device.
I've been trying for days to get this working, without success. Indeed, on the client side I end up with an audio file of the original size but without the audio header, which results in an unplayable audio file.
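To make the flow concrete, the client side essentially just appends every buffer it receives to a file on disk. A minimal sketch of that idea (the class and method names below are placeholders for illustration only, not my actual code) would be:

#import <Foundation/Foundation.h>

// Hypothetical receiver-side helper: appends each incoming audio buffer to a
// file on disk. Names are placeholders used only to illustrate the flow.
@interface AudioBufferFileWriter : NSObject
- (instancetype)initWithPath:(NSString*)path;
- (void)appendBuffer:(const void*)bytes length:(size_t)length;
@end

@implementation AudioBufferFileWriter {
    NSFileHandle* _handle;
}

- (instancetype)initWithPath:(NSString*)path {
    if ((self = [super init])) {
        [[NSFileManager defaultManager] createFileAtPath:path contents:nil attributes:nil];
        _handle = [NSFileHandle fileHandleForWritingAtPath:path];
    }
    return self;
}

- (void)appendBuffer:(const void*)bytes length:(size_t)length {
    [_handle writeData:[NSData dataWithBytes:bytes length:length]];
}

@end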
For this I use the following pieces of code:
1) Open the audio file
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

NSURL* url = [NSURL fileURLWithPath:[[NSBundle mainBundle] pathForResource:@"test" ofType:@"m4a"]];

AudioFilePermissions permission = kAudioFileReadPermission; // read-only access to the source file
AudioFileOpenURL((__bridge CFURLRef)url, permission, 0, &_audioFileID);

NSDictionary* options = @{ AVURLAssetPreferPreciseDurationAndTimingKey : @YES };
_asset = [AVURLAsset URLAssetWithURL:url options:options];
2) Create the AVAssetReader
NSError* assetError = nil;
_assetReader = [[AVAssetReader alloc] initWithAsset:_asset error:&assetError];
if (nil == _assetReader) {
    return false;
}
_assetTrack = [_asset.tracks objectAtIndex:0];
3) Create the AVAssetReaderTrackOutput configured to read the file on disk
_assetOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:_assetTrack outputSettings:nil];
4) Create a dump file (for testing) and get the audio file description (AudioStreamBasicDescription)
#include <fstream>
#include <cstring>

std::ofstream saveFile("/tmp/dump.m4a", std::ios::binary);

AudioStreamBasicDescription _audioFileDescription;
UInt32 size = sizeof(_audioFileDescription);
const auto ret = AudioFileGetProperty(_audioFileID, kAudioFilePropertyDataFormat, &size, &_audioFileDescription);
if (noErr != ret) {
    std::memset(&_audioFileDescription, 0, sizeof(_audioFileDescription));
}
5) Loop to dump the audio file (the same way I stream the audio file over the network)
// Read and dump the audio file
const CMTime startTime = CMTimeMake(0, _audioFileDescription.mSampleRate * (_audioFileDescription.mBitsPerChannel / 8) * _audioFileDescription.mChannelsPerFrame);
const CMTimeRange range = CMTimeRangeMake(startTime, kCMTimePositiveInfinity);
_assetReader.timeRange = range;

[_assetReader addOutput:_assetOutput];
[_assetReader startReading];

while ([_assetReader status] == AVAssetReaderStatusReading)
{
    if (CMSampleBufferRef sampleBuffer = [_assetOutput copyNextSampleBuffer])
    {
        // Get the samples.
        CMBlockBufferRef audioBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
        size_t lengthAtOffset;
        size_t totalLength;
        char* samples;
        CMBlockBufferGetDataPointer(audioBuffer, 0, &lengthAtOffset, &totalLength, &samples);
        saveFile.write(samples, totalLength);

        if (::CMSampleBufferGetDataBuffer(sampleBuffer))
        {
            // Time of the buffer received; should precede the time requested
            CMTime presentationTime = ::CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
            // Time requested, in presentation units
            CMTime requestedTime = [_assetTrack samplePresentationTimeForTrackTime:startTime];
            // Trim = priming frames + offset of the buffer that satisfies our requested time
            CFDictionaryRef trimAtStartDictionary = (CFDictionaryRef)::CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_TrimDurationAtStart, NULL);
            CMTime trimAtStart = trimAtStartDictionary ? ::CMTimeMakeFromDictionary(trimAtStartDictionary) : kCMTimeZero;
            // Therefore, priming frames = trim - offset
            CMTime offset = ::CMTimeSubtract(requestedTime, presentationTime);
            CMTime primingFrames = CMTimeSubtract(trimAtStart, offset);
            printf("Priming frames: ");
            CMTimeShow(primingFrames);
        }

        CFRelease(sampleBuffer);
    }
}

saveFile.close();
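For completeness, once the loop exits the reader's final status can be checked to distinguish a normal completion from a failure; a minimal check, assuming the same _assetReader instance as above:

if (AVAssetReaderStatusFailed == _assetReader.status) {
    // The reader stopped because of an error, not because the track ended.
    NSLog(@"Asset reading failed: %@", _assetReader.error);
} else if (AVAssetReaderStatusCompleted == _assetReader.status) {
    // All sample buffers were read; the dump file still contains only raw sample data.
    NSLog(@"Asset reading completed");
}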
My conclusion is that the AV Foundation framework does not seem to provide a way to get the audio file header: the sample buffers only carry the raw audio data, not the container.
If that is correct, is there a way to get the header differently?
And if I am wrong, please point me to the right way of doing this.
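Is something along these lines closer to the intended approach? For example (only a sketch, assuming a re-encode with the AVAssetExportPresetAppleM4A preset is acceptable, so not strictly the original data):

// Sketch only: let AV Foundation write a complete, playable .m4a file
// (container header included), then stream that file's bytes to the client
// instead of the raw sample buffers.
AVAssetExportSession* exportSession =
    [[AVAssetExportSession alloc] initWithAsset:_asset
                                     presetName:AVAssetExportPresetAppleM4A];
exportSession.outputURL = [NSURL fileURLWithPath:@"/tmp/export.m4a"];
exportSession.outputFileType = AVFileTypeAppleM4A;

[exportSession exportAsynchronouslyWithCompletionHandler:^{
    if (AVAssetExportSessionStatusCompleted == exportSession.status) {
        NSData* fileData = [NSData dataWithContentsOfURL:exportSession.outputURL];
        NSLog(@"Exported %lu bytes", (unsigned long)fileData.length);
    } else {
        NSLog(@"Export failed: %@", exportSession.error);
    }
}];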
Thanks in advance,
Regards,
Jeremy S