Now properly supports sample format changes

The sample format can now change dynamically at playback time, and the
player will resample the stream as necessary, extrapolating the edges
between format changes to reduce the potential for gaps (a consumer-side
sketch of the new format-peeking pattern follows the change summary
below).

Formats that currently support this:

- FLAC
- Ogg Vorbis
- Any format supported by FFmpeg, such as MP3 or AAC

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
CQTexperiment
Christopher Snowhill 2022-02-07 19:18:45 -08:00
parent 0b8a659bc2
commit 477feaab1d
19 changed files with 231 additions and 75 deletions
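
The diffs below thread a new peekFormat:channelConfig: call through ChunkList, Node, and ConverterNode so that a downstream node can notice an upcoming sample-format change before it consumes the first mismatched chunk. The following is a minimal consumer-side sketch of that pattern, assuming only the peekFormat:channelConfig: and readChunk: methods declared in the diffs; the SketchChunkSource protocol, formatsDiffer, drainUntilFormatChange, and the 512-frame read size are hypothetical names invented for illustration and are not part of this commit.

```objc
// Hedged sketch only: not code from this commit. It assumes peekFormat:channelConfig:
// and readChunk: behave as declared in the Node/ChunkList headers below; everything
// else here is an invented name for illustration.
#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudioTypes.h>
#include <string.h>

@class AudioChunk; // provided by the player; only forward-declared here

@protocol SketchChunkSource <NSObject>
- (BOOL)peekFormat:(AudioStreamBasicDescription *)format channelConfig:(uint32_t *)config;
- (AudioChunk *)readChunk:(size_t)maxFrames;
@end

// Treat any ASBD or channel-layout mismatch as a stream format change.
static BOOL formatsDiffer(const AudioStreamBasicDescription *a, uint32_t configA,
                          const AudioStreamBasicDescription *b, uint32_t configB) {
	return configA != configB || memcmp(a, b, sizeof(*a)) != 0;
}

// Pull chunks while the format matches; stop as soon as the next chunk would differ,
// so the caller can tear down and rebuild its resampler before touching it.
static void drainUntilFormatChange(id<SketchChunkSource> source,
                                   AudioStreamBasicDescription currentFormat,
                                   uint32_t currentConfig) {
	for(;;) {
		AudioStreamBasicDescription next;
		uint32_t nextConfig;
		if([source peekFormat:&next channelConfig:&nextConfig] &&
		   formatsDiffer(&next, nextConfig, &currentFormat, currentConfig)) {
			break; // reconfigure with the peeked format, then resume reading
		}
		AudioChunk *chunk = [source readChunk:512];
		if(!chunk) break; // assume nil means the upstream buffer is empty
		// ... feed the chunk to the existing converter / resampler state ...
	}
}
```

In the ConverterNode hunks further down, the commit applies the same mismatch check inside its read loop: it records the peeked format in newInputFormat/newInputChannelConfig, sets streamFormatChanged, keeps the previous chunk around as lastChunkIn for edge extrapolation, and rebuilds via cleanUp/setupWithInputFormat: at the top of the processing loop.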

View File

@@ -66,8 +66,6 @@
- (BOOL)endOfInputReached;
- (BOOL)setTrack:(NSURL *)track;
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig;
- (BOOL)isRunning;
- (id)controller;

View File

@@ -62,7 +62,7 @@
inputFormat = [inputNode nodeFormat];
if([properties valueForKey:@"channelConfig"])
inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
inputChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
@@ -91,7 +91,7 @@
inputFormat = [inputNode nodeFormat];
if([properties valueForKey:@"channelConfig"])
inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
inputChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
@@ -125,7 +125,7 @@
inputFormat = [inputNode nodeFormat];
if([properties valueForKey:@"channelConfig"])
inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
inputChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
@@ -193,10 +193,6 @@
[controller launchOutputThread];
}
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig {
DLog(@"FORMAT DID CHANGE!");
}
- (InputNode *)inputNode {
return inputNode;
}

View File

@@ -21,6 +21,7 @@ NS_ASSUME_NONNULL_BEGIN
BOOL inAdder;
BOOL inRemover;
BOOL inPeeker;
BOOL stopping;
}
@@ -37,6 +38,8 @@ NS_ASSUME_NONNULL_BEGIN
- (void)addChunk:(AudioChunk *)chunk;
- (AudioChunk *)removeSamples:(size_t)maxFrameCount;
- (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config;
@end
NS_ASSUME_NONNULL_END

View File

@@ -22,6 +22,7 @@
inAdder = NO;
inRemover = NO;
inPeeker = NO;
stopping = NO;
}
@@ -30,7 +31,7 @@
- (void)dealloc {
stopping = YES;
while(inAdder || inRemover) {
while(inAdder || inRemover || inPeeker) {
usleep(500);
}
}
@@ -96,4 +97,17 @@
}
}
- (BOOL)peekFormat:(AudioStreamBasicDescription *)format channelConfig:(uint32_t *)config {
if(stopping) return NO;
@synchronized(chunkList) {
if([chunkList count]) {
AudioChunk *chunk = [chunkList objectAtIndex:0];
*format = [chunk format];
*config = [chunk channelConfig];
return YES;
}
}
return NO;
}
@end

View File

@@ -71,6 +71,12 @@
uint32_t inputChannelConfig;
uint32_t outputChannelConfig;
BOOL streamFormatChanged;
AudioStreamBasicDescription newInputFormat;
uint32_t newInputChannelConfig;
AudioChunk *lastChunkIn;
AudioStreamBasicDescription previousOutputFormat;
uint32_t previousOutputConfig;
AudioStreamBasicDescription rememberedInputFormat;

View File

@@ -440,6 +440,10 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
[self cleanUp];
[self setupWithInputFormat:rememberedInputFormat withInputConfig:rememberedInputConfig outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless];
continue;
} else if(streamFormatChanged) {
[self cleanUp];
[self setupWithInputFormat:newInputFormat withInputConfig:newInputChannelConfig outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless];
continue;
} else
break;
}
@@ -452,6 +456,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
int amountReadFromFC;
int amountRead = 0;
int amountToSkip;
int amountToIgnorePostExtrapolated = 0;
if(stopping)
return 0;
@@ -498,14 +503,36 @@ tryagain:
ssize_t bytesReadFromInput = 0;
while(bytesReadFromInput < amountToWrite && !stopping && [self shouldContinue] == YES && [self endOfStream] == NO) {
while(bytesReadFromInput < amountToWrite && !stopping && !streamFormatChanged && [self shouldContinue] == YES && [self endOfStream] == NO) {
AudioStreamBasicDescription inf;
uint32_t config;
if([self peekFormat:&inf channelConfig:&config]) {
if(config != inputChannelConfig || memcmp(&inf, &inputFormat, sizeof(inf)) != 0) {
if(inputChannelConfig == 0 && memcmp(&inf, &inputFormat, sizeof(inf)) == 0) {
inputChannelConfig = config;
continue;
} else {
newInputFormat = inf;
newInputChannelConfig = config;
streamFormatChanged = YES;
break;
}
}
}
AudioChunk *chunk = [self readChunk:((amountToWrite - bytesReadFromInput) / inputFormat.mBytesPerPacket)];
AudioStreamBasicDescription inf = [chunk format];
inf = [chunk format];
size_t frameCount = [chunk frameCount];
config = [chunk channelConfig];
size_t bytesRead = frameCount * inf.mBytesPerPacket;
if(frameCount) {
NSData *samples = [chunk removeSamples:frameCount];
memcpy(inputBuffer + bytesReadFromInput + amountToSkip, [samples bytes], bytesRead);
lastChunkIn = [[AudioChunk alloc] init];
[lastChunkIn setFormat:inf];
[lastChunkIn setChannelConfig:config];
[lastChunkIn setLossless:[chunk lossless]];
[lastChunkIn assignSamples:[samples bytes] frameCount:frameCount];
}
bytesReadFromInput += bytesRead;
if(!frameCount) {
@@ -518,7 +545,7 @@ tryagain:
// Pad end of track with input format silence
if(stopping || [self shouldContinue] == NO || [self endOfStream] == YES) {
if(stopping || streamFormatChanged || [self shouldContinue] == NO || [self endOfStream] == YES) {
if(!skipResampler && !is_postextrapolated_) {
if(dsd2pcm) {
amountToSkip = dsd2pcmLatency * inputFormat.mBytesPerPacket;
@@ -532,6 +559,24 @@ tryagain:
}
}
size_t bitsPerSample = inputFormat.mBitsPerChannel;
BOOL isBigEndian = !!(inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian);
if(!bytesReadFromInput && streamFormatChanged && !skipResampler && is_postextrapolated_ < 2) {
AudioChunk *chunk = lastChunkIn;
lastChunkIn = nil;
AudioStreamBasicDescription inf = [chunk format];
size_t frameCount = [chunk frameCount];
size_t bytesRead = frameCount * inf.mBytesPerPacket;
if(frameCount) {
amountToIgnorePostExtrapolated = (int)frameCount;
NSData *samples = [chunk removeSamples:frameCount];
memcpy(inputBuffer, [samples bytes], bytesRead);
}
bytesReadFromInput += bytesRead;
amountToSkip = 0;
}
if(!bytesReadFromInput) {
convertEntered = NO;
return amountRead;
@@ -544,8 +589,7 @@ tryagain:
dsdLatencyEaten = (int)ceil(dsd2pcmLatency * sampleRatio);
}
if(bytesReadFromInput &&
(inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian)) {
if(bytesReadFromInput && isBigEndian) {
// Time for endian swap!
convert_be_to_le(inputBuffer, inputFormat.mBitsPerChannel, bytesReadFromInput);
}
@@ -560,7 +604,6 @@ tryagain:
if(bytesReadFromInput && !isFloat) {
float gain = 1.0;
size_t bitsPerSample = inputFormat.mBitsPerChannel;
if(bitsPerSample == 1) {
samplesRead = bytesReadFromInput / inputFormat.mBytesPerPacket;
convert_dsd_to_f32(inputBuffer + bytesReadFromInput, inputBuffer, samplesRead, inputFormat.mChannelsPerFrame, dsd2pcm);
@@ -699,7 +742,7 @@ tryagain:
// Input now contains bytesReadFromInput worth of floats, in the input sample rate
inpSize = bytesReadFromInput;
inpOffset = 0;
inpOffset = amountToIgnorePostExtrapolated * floatFormat.mBytesPerPacket;
}
if(inpOffset != inpSize && floatOffset == floatSize) {
@@ -948,6 +991,7 @@ static float db_to_scale(float db) {
convertEntered = NO;
paused = NO;
outputFormatChanged = NO;
streamFormatChanged = NO;
return YES;
}

View File

@@ -48,7 +48,7 @@
nodeFormat = propertiesToASBD(properties);
if([properties valueForKey:@"channelConfig"])
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue];
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
shouldContinue = YES;
@@ -69,7 +69,7 @@
nodeFormat = propertiesToASBD(properties);
if([properties valueForKey:@"channelConfig"])
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue];
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
[self registerObservers];
@@ -105,7 +105,14 @@
DLog(@"Input format changed");
// Converter may need resetting, it'll do that when it reaches the new chunks
NSDictionary *properties = [decoder properties];
int bitsPerSample = [[properties objectForKey:@"bitsPerSample"] intValue];
int channels = [[properties objectForKey:@"channels"] intValue];
bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
nodeFormat = propertiesToASBD(properties);
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue];
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
} else if([keyPath isEqual:@"metadata"]) {
// Inform something of metadata change
@@ -114,7 +121,8 @@
- (void)process {
int amountInBuffer = 0;
void *inputBuffer = malloc(CHUNK_SIZE);
int bytesInBuffer = 0;
void *inputBuffer = malloc(CHUNK_SIZE * 18); // Maximum 18 channels, dunno what we'll receive
BOOL shouldClose = YES;
BOOL seekError = NO;
@@ -142,13 +150,15 @@
}
if(amountInBuffer < CHUNK_SIZE) {
int framesToRead = (CHUNK_SIZE - amountInBuffer) / bytesPerFrame;
int framesRead = [decoder readAudio:((char *)inputBuffer) + amountInBuffer frames:framesToRead];
int framesToRead = CHUNK_SIZE - amountInBuffer;
int framesRead = [decoder readAudio:((char *)inputBuffer) + bytesInBuffer frames:framesToRead];
if(framesRead > 0 && !seekError) {
amountInBuffer += (framesRead * bytesPerFrame);
[self writeData:inputBuffer amount:amountInBuffer];
amountInBuffer += framesRead;
bytesInBuffer += framesRead * bytesPerFrame;
[self writeData:inputBuffer amount:bytesInBuffer];
amountInBuffer = 0;
bytesInBuffer = 0;
} else {
if(initialBufferFilled == NO) {
[controller initialBufferFilled:self];

View File

@@ -32,33 +32,35 @@
uint32_t nodeChannelConfig;
BOOL nodeLossless;
}
- (id)initWithController:(id)c previous:(id)p;
- (id _Nullable)initWithController:(id _Nonnull)c previous:(id _Nullable)p;
- (void)writeData:(const void *)ptr amount:(size_t)a;
- (AudioChunk *)readChunk:(size_t)maxFrames;
- (void)writeData:(const void *_Nonnull)ptr amount:(size_t)a;
- (AudioChunk *_Nonnull)readChunk:(size_t)maxFrames;
- (BOOL)peekFormat:(AudioStreamBasicDescription *_Nonnull)format channelConfig:(uint32_t *_Nonnull)config;
- (void)process; // Should be overwriten by subclass
- (void)threadEntry:(id)arg;
- (void)threadEntry:(id _Nullable)arg;
- (void)launchThread;
- (void)setShouldReset:(BOOL)s;
- (BOOL)shouldReset;
- (void)setPreviousNode:(id)p;
- (id)previousNode;
- (void)setPreviousNode:(id _Nullable)p;
- (id _Nullable)previousNode;
- (BOOL)shouldContinue;
- (void)setShouldContinue:(BOOL)s;
- (ChunkList *)buffer;
- (ChunkList *_Nonnull)buffer;
- (void)resetBuffer; // WARNING! DANGER WILL ROBINSON!
- (AudioStreamBasicDescription)nodeFormat;
- (uint32_t)nodeChannelConfig;
- (BOOL)nodeLossless;
- (Semaphore *)semaphore;
- (Semaphore *_Nonnull)semaphore;
//-(void)resetBuffer;

View File

@@ -95,6 +95,16 @@
}
}
- (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config {
[accessLock lock];
BOOL ret = [[previousNode buffer] peekFormat:format channelConfig:config];
[accessLock unlock];
return ret;
}
- (AudioChunk *)readChunk:(size_t)maxFrames {
[accessLock lock];

View File

@@ -12,7 +12,6 @@
@interface CogDecoderMulti : NSObject <CogDecoder> {
NSArray *theDecoders;
id<CogDecoder> theDecoder;
NSMutableArray *cachedObservers;
}
- (id)initWithDecoders:(NSArray *)decoders;

View File

@@ -31,6 +31,15 @@ NSArray *sortClassesByPriority(NSArray *theClasses) {
return sortedClasses;
}
@interface CogDecoderMulti (Private)
- (void)registerObservers;
- (void)removeObservers;
- (void)observeValueForKeyPath:(NSString *)keyPath
ofObject:(id)object
change:(NSDictionary *)change
context:(void *)context;
@end
@implementation CogDecoderMulti
+ (NSArray *)mimeTypes {
@@ -54,7 +63,6 @@ NSArray *sortClassesByPriority(NSArray *theClasses) {
if(self) {
theDecoders = sortClassesByPriority(decoders);
theDecoder = nil;
cachedObservers = [[NSMutableArray alloc] init];
}
return self;
}
@@ -73,17 +81,10 @@ NSArray *sortClassesByPriority(NSArray *theClasses) {
for(NSString *classString in theDecoders) {
Class decoder = NSClassFromString(classString);
theDecoder = [[decoder alloc] init];
for(NSDictionary *obsItem in cachedObservers) {
[theDecoder addObserver:[obsItem objectForKey:@"observer"]
forKeyPath:[obsItem objectForKey:@"keyPath"]
options:[[obsItem objectForKey:@"options"] unsignedIntegerValue]
context:(__bridge void *)([obsItem objectForKey:@"context"])];
}
[self registerObservers];
if([theDecoder open:source])
return YES;
for(NSDictionary *obsItem in cachedObservers) {
[theDecoder removeObserver:[obsItem objectForKey:@"observer"] forKeyPath:[obsItem objectForKey:@"keyPath"]];
}
[self removeObservers];
if([source seekable])
[source seek:0 whence:SEEK_SET];
}
@@ -98,43 +99,40 @@ NSArray *sortClassesByPriority(NSArray *theClasses) {
- (void)close {
if(theDecoder != nil) {
for(NSDictionary *obsItem in cachedObservers) {
[theDecoder removeObserver:[obsItem objectForKey:@"observer"] forKeyPath:[obsItem objectForKey:@"keyPath"]];
}
[cachedObservers removeAllObjects];
[self removeObservers];
[theDecoder close];
theDecoder = nil;
}
}
- (void)registerObservers {
[theDecoder addObserver:self
forKeyPath:@"properties"
options:(NSKeyValueObservingOptionNew)
context:NULL];
[theDecoder addObserver:self
forKeyPath:@"metadata"
options:(NSKeyValueObservingOptionNew)
context:NULL];
}
- (void)removeObservers {
[theDecoder removeObserver:self forKeyPath:@"properties"];
[theDecoder removeObserver:self forKeyPath:@"metadata"];
}
- (BOOL)setTrack:(NSURL *)track {
if(theDecoder != nil && [theDecoder respondsToSelector:@selector(setTrack:)]) return [theDecoder setTrack:track];
return NO;
}
/* By the current design, the core adds its observers to decoders before they are opened */
- (void)addObserver:(NSObject *)observer forKeyPath:(NSString *)keyPath options:(NSKeyValueObservingOptions)options context:(void *)context {
if(context != nil) {
[cachedObservers addObject:[NSDictionary dictionaryWithObjectsAndKeys:observer, @"observer", keyPath, @"keyPath", @(options), @"options", context, @"context", nil]];
} else {
[cachedObservers addObject:[NSDictionary dictionaryWithObjectsAndKeys:observer, @"observer", keyPath, @"keyPath", @(options), @"options", nil]];
}
if(theDecoder) {
[theDecoder addObserver:observer forKeyPath:keyPath options:options context:context];
}
}
/* And this is currently called after the decoder is closed */
- (void)removeObserver:(NSObject *)observer forKeyPath:(NSString *)keyPath {
for(NSDictionary *obsItem in cachedObservers) {
if([obsItem objectForKey:@"observer"] == observer && [keyPath isEqualToString:[obsItem objectForKey:@"keyPath"]]) {
[cachedObservers removeObject:obsItem];
break;
}
}
if(theDecoder) {
[theDecoder removeObserver:observer forKeyPath:keyPath];
}
- (void)observeValueForKeyPath:(NSString *)keyPath
ofObject:(id)object
change:(NSDictionary *)change
context:(void *)context {
[self willChangeValueForKey:keyPath];
[self didChangeValueForKey:keyPath];
}
@end

View File

@@ -666,6 +666,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
if(logFile) {
fwrite(inputData->mBuffers[0].mData, 1, inputData->mBuffers[0].mDataByteSize, logFile);
}
// memset(inputData->mBuffers[0].mData, 0, inputData->mBuffers[0].mDataByteSize);
#endif
inputData->mBuffers[0].mNumberChannels = channels;

View File

@@ -391,7 +391,7 @@ static SInt64 getSizeProc(void *clientData) {
- (NSDictionary *)properties {
return [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:channels], @"channels",
[NSNumber numberWithInt:channelConfig], @"channelConfig",
[NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig",
[NSNumber numberWithInt:bitsPerSample], @"bitsPerSample",
[NSNumber numberWithBool:floatingPoint], @"floatingPoint",
[NSNumber numberWithInt:bitrate], @"bitrate",

View File

@@ -26,6 +26,7 @@
BOOL embedded;
BOOL noFragment;
BOOL observersAdded;
NSURL *baseURL;
CueSheet *cuesheet;

View File

@@ -55,6 +55,7 @@
NSDictionary *fileMetadata;
noFragment = NO;
observersAdded = NO;
NSString *ext = [url pathExtension];
if([ext caseInsensitiveCompare:@"cue"] != NSOrderedSame) {
@@ -143,6 +144,8 @@
decoder = [NSClassFromString(@"AudioDecoder") audioDecoderForSource:source skipCue:YES];
[self registerObservers];
if(![decoder open:source]) {
ALog(@"Could not open cuesheet decoder");
return NO;
@@ -166,8 +169,40 @@
return NO;
}
- (void)registerObservers {
DLog(@"REGISTERING OBSERVERS");
[decoder addObserver:self
forKeyPath:@"properties"
options:(NSKeyValueObservingOptionNew)
context:NULL];
[decoder addObserver:self
forKeyPath:@"metadata"
options:(NSKeyValueObservingOptionNew)
context:NULL];
observersAdded = YES;
}
- (void)removeObservers {
if(observersAdded) {
[decoder removeObserver:self forKeyPath:@"properties"];
[decoder removeObserver:self forKeyPath:@"metadata"];
observersAdded = NO;
}
}
- (void)observeValueForKeyPath:(NSString *)keyPath
ofObject:(id)object
change:(NSDictionary *)change
context:(void *)context {
[self willChangeValueForKey:keyPath];
[self didChangeValueForKey:keyPath];
}
- (void)close {
if(decoder) {
[self removeObservers];
[decoder close];
decoder = nil;
}

View File

@@ -575,6 +575,20 @@ int64_t ffmpeg_seek(void *opaque, int64_t offset, int whence) {
bytesRead += toConsume;
}
int _channels = codecCtx->channels;
uint32_t _channelConfig = (uint32_t)codecCtx->channel_layout;
float _frequency = codecCtx->sample_rate;
if(_channels != channels ||
_channelConfig != channelConfig ||
_frequency != frequency) {
channels = _channels;
channelConfig = _channelConfig;
frequency = _frequency;
[self willChangeValueForKey:@"properties"];
[self didChangeValueForKey:@"properties"];
}
int framesReadNow = bytesRead / frameSize;
if(totalFrames && (framesRead + framesReadNow > totalFrames))
framesReadNow = (int)(totalFrames - framesRead);
@@ -617,7 +631,7 @@ int64_t ffmpeg_seek(void *opaque, int64_t offset, int whence) {
- (NSDictionary *)properties {
return [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:channels], @"channels",
[NSNumber numberWithInt:channelConfig], @"channelConfig",
[NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig",
[NSNumber numberWithInt:bitsPerSample], @"bitsPerSample",
[NSNumber numberWithBool:(bitsPerSample == 8)], @"Unsigned",
[NSNumber numberWithFloat:frequency], @"sampleRate",

View File

@@ -33,6 +33,7 @@
long fileSize;
BOOL hasStreamInfo;
BOOL streamOpened;
}
- (void)setSource:(id<CogSource>)s;

View File

@@ -99,6 +99,23 @@ FLAC__StreamDecoderLengthStatus LengthCallback(const FLAC__StreamDecoder *decode
FLAC__StreamDecoderWriteStatus WriteCallback(const FLAC__StreamDecoder *decoder, const FLAC__Frame *frame, const FLAC__int32 *const sampleblockBuffer[], void *client_data) {
FlacDecoder *flacDecoder = (__bridge FlacDecoder *)client_data;
uint32_t channels = frame->header.channels;
uint32_t bitsPerSample = frame->header.bits_per_sample;
uint32_t frequency = frame->header.sample_rate;
if(channels != flacDecoder->channels ||
bitsPerSample != flacDecoder->bitsPerSample ||
frequency != flacDecoder->frequency) {
if(channels != flacDecoder->channels) {
flacDecoder->channelConfig = 0;
}
flacDecoder->channels = channels;
flacDecoder->bitsPerSample = bitsPerSample;
flacDecoder->frequency = frequency;
[flacDecoder willChangeValueForKey:@"properties"];
[flacDecoder didChangeValueForKey:@"properties"];
}
void *blockBuffer = [flacDecoder blockBuffer];
int8_t *alias8;
@@ -185,6 +202,7 @@ void MetadataCallback(const FLAC__StreamDecoder *decoder, const FLAC__StreamMeta
if(!flacDecoder->hasStreamInfo) {
flacDecoder->channels = metadata->data.stream_info.channels;
flacDecoder->channelConfig = 0;
flacDecoder->frequency = metadata->data.stream_info.sample_rate;
flacDecoder->bitsPerSample = metadata->data.stream_info.bits_per_sample;
@@ -243,9 +261,12 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
- (int)readAudio:(void *)buffer frames:(UInt32)frames {
int framesRead = 0;
int bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
while(framesRead < frames) {
if(blockBufferFrames == 0) {
if(framesRead) {
break;
}
if(FLAC__stream_decoder_get_state(decoder) == FLAC__STREAM_DECODER_END_OF_STREAM) {
break;
}
@@ -253,6 +274,8 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
FLAC__stream_decoder_process_single(decoder);
}
int bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
int framesToRead = blockBufferFrames;
if(blockBufferFrames > frames) {
framesToRead = frames;
@@ -333,7 +356,7 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
- (NSDictionary *)properties {
return [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:channels], @"channels",
[NSNumber numberWithInt:channelConfig], @"channelConfig",
[NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig",
[NSNumber numberWithInt:bitsPerSample], @"bitsPerSample",
[NSNumber numberWithFloat:frequency], @"sampleRate",
[NSNumber numberWithDouble:totalFrames], @"totalFrames",

View File

@@ -283,7 +283,7 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
- (NSDictionary *)properties {
return [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:channels], @"channels",
[NSNumber numberWithInt:channelConfig], @"channelConfig",
[NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig",
[NSNumber numberWithInt:bitsPerSample], @"bitsPerSample",
[NSNumber numberWithInt:bitrate], @"bitrate",
[NSNumber numberWithFloat:frequency], @"sampleRate",