Cog Audio: Major rewrite of audio buffering

Rewrite attempt number two. Now using array lists of audio chunks, with
each chunk having its format and optionally losslessness stashed along
with it. This replaces the old virtual ring buffer method. As a result
of this, the HRIR toggle now works instantaneously.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
CQTexperiment
Christopher Snowhill 2022-02-06 03:08:34 -08:00
parent 11ecb1cd8b
commit 62edb39761
18 changed files with 846 additions and 411 deletions

42
Audio/Chain/AudioChunk.h Normal file
View File

@ -0,0 +1,42 @@
//
// AudioChunk.h
// CogAudio Framework
//
// Created by Christopher Snowhill on 2/5/22.
//
#ifndef AudioChunk_h
#define AudioChunk_h
#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudio.h>
NS_ASSUME_NONNULL_BEGIN
/// A single buffer of interleaved audio frames, tagged with the stream
/// format that produced it and an optional losslessness flag.  Chunks are
/// the unit of transfer between nodes in the audio chain.
@interface AudioChunk : NSObject {
AudioStreamBasicDescription format; // stream format; byte math uses mBytesPerPacket
NSMutableData * chunkData;          // raw interleaved sample bytes
BOOL formatAssigned;                // YES once setFormat: has been called
BOOL lossless;                      // backing store for the lossless property
}
/// Stream format of the stored samples.  Must be assigned before samples
/// can be added or removed; all frame/byte conversions use mBytesPerPacket.
@property AudioStreamBasicDescription format;
/// YES when the samples originate from a lossless source encoding.
@property BOOL lossless;
- (id) init;
/// Appends `count` frames from `data`; a no-op until a format is assigned.
- (void) assignSamples:(const void *)data frameCount:(size_t)count;
/// Removes the first `frameCount` frames and returns their bytes.
/// NOTE(review): callers appear to pass at most -frameCount frames; an
/// oversized request would raise from -subdataWithRange: — confirm.
- (NSData *) removeSamples:(size_t)frameCount;
/// YES when the chunk currently holds no sample data.
- (BOOL) isEmpty;
/// Number of whole frames currently stored (0 when no format is assigned).
- (size_t) frameCount;
/// Duration in seconds of the stored frames (0 when no format is assigned).
- (double) duration;
@end
NS_ASSUME_NONNULL_END
#endif /* AudioChunk_h */

74
Audio/Chain/AudioChunk.m Normal file
View File

@ -0,0 +1,74 @@
//
// AudioChunk.m
// CogAudio Framework
//
// Created by Christopher Snowhill on 2/5/22.
//
#import "AudioChunk.h"
@implementation AudioChunk

@synthesize lossless;

/// Creates an empty chunk; a format must be assigned before samples can be
/// added or removed.
- (id) init {
    self = [super init];
    if (self) {
        chunkData = [[NSMutableData alloc] init];
        formatAssigned = NO;
        lossless = NO;
    }
    return self;
}

- (AudioStreamBasicDescription) format {
    return format;
}

- (void) setFormat:(AudioStreamBasicDescription)informat {
    formatAssigned = YES;
    format = informat;
}

/// Appends `count` frames from `data`.  No-op until a format has been
/// assigned, since the frame size is unknown without one.
- (void) assignSamples:(const void *)data frameCount:(size_t)count {
    if (formatAssigned) {
        const size_t bytesPerPacket = format.mBytesPerPacket;
        [chunkData appendBytes:data length:bytesPerPacket * count];
    }
}

/// Removes up to `frameCount` frames from the front of the chunk and
/// returns their bytes.  BUG FIX: the request is now clamped to the frames
/// actually stored; previously an oversized request made -subdataWithRange:
/// throw NSRangeException.
- (NSData *) removeSamples:(size_t)frameCount {
    if (formatAssigned) {
        const size_t bytesPerPacket = format.mBytesPerPacket;
        const size_t framesPresent = [chunkData length] / bytesPerPacket;
        if (frameCount > framesPresent)
            frameCount = framesPresent;
        const size_t byteCount = bytesPerPacket * frameCount;
        NSData * ret = [chunkData subdataWithRange:NSMakeRange(0, byteCount)];
        // Shift the remaining bytes down; replacing with zero length deletes.
        [chunkData replaceBytesInRange:NSMakeRange(0, byteCount) withBytes:NULL length:0];
        return ret;
    }
    return [NSData data];
}

/// YES when no sample bytes are stored.
- (BOOL) isEmpty {
    return [chunkData length] == 0;
}

/// Whole frames currently stored; 0 when no format has been assigned.
- (size_t) frameCount {
    if (formatAssigned) {
        const size_t bytesPerPacket = format.mBytesPerPacket;
        return [chunkData length] / bytesPerPacket;
    }
    return 0;
}

/// Buffered duration in seconds; 0 when no format has been assigned.
- (double) duration {
    if (formatAssigned) {
        const size_t bytesPerPacket = format.mBytesPerPacket;
        const double sampleRate = format.mSampleRate;
        return (double)([chunkData length] / bytesPerPacket) / sampleRate;
    }
    return 0.0;
}

@end

View File

@ -64,8 +64,14 @@
return NO;
NSDictionary * properties = [inputNode properties];
inputFormat = [inputNode nodeFormat];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
return NO;
[self setRGInfo:rgi];
@ -85,8 +91,14 @@
NSDictionary * properties = [inputNode properties];
DLog(@"Input Properties: %@", properties);
if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
inputFormat = [inputNode nodeFormat];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
DLog(@"Input Properties: %@", properties);
if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
return NO;
[self setRGInfo:rgi];
@ -107,7 +119,14 @@
NSDictionary * properties = [inputNode properties];
DLog(@"Input Properties: %@", properties);
if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
inputFormat = [inputNode nodeFormat];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
return NO;
[self setRGInfo:rgi];

42
Audio/Chain/ChunkList.h Normal file
View File

@ -0,0 +1,42 @@
//
// ChunkList.h
// CogAudio Framework
//
// Created by Christopher Snowhill on 2/5/22.
//
#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudio.h>
#import "AudioChunk.h"
#import "Semaphore.h"
NS_ASSUME_NONNULL_BEGIN
/// A bounded FIFO of AudioChunk objects, accounted by total duration in
/// seconds rather than byte count.  The owning node blocks producers when
/// the list reaches maxDuration; consumers pull frames from the head.
@interface ChunkList : NSObject {
NSMutableArray<AudioChunk *> * chunkList; // chunks in arrival order
double listDuration; // total seconds buffered across all chunks
double maxDuration;  // capacity in seconds
BOOL inAdder;   // an addChunk: call is in flight (drained by dealloc)
BOOL inRemover; // a removeSamples: call is in flight (drained by dealloc)
BOOL stopping;  // set during teardown to refuse further add/remove
}
/// Total seconds currently buffered.
@property (readonly) double listDuration;
/// Maximum seconds the list is allowed to hold.
@property (readonly) double maxDuration;
/// Designated initializer; `duration` is the capacity in seconds.
- (id) initWithMaximumDuration:(double)duration;
/// Discards all chunks and resets the duration accounting.
- (void) reset;
- (BOOL) isEmpty;
/// YES once the buffered duration reaches the configured maximum.
- (BOOL) isFull;
/// Appends a chunk; its duration is added to listDuration.
- (void) addChunk:(AudioChunk *)chunk;
/// Removes up to maxFrameCount frames from the head chunk, splitting it if
/// necessary.  Returns an empty chunk when the list is empty or stopping.
- (AudioChunk *) removeSamples:(size_t)maxFrameCount;
@end
NS_ASSUME_NONNULL_END

96
Audio/Chain/ChunkList.m Normal file
View File

@ -0,0 +1,96 @@
//
// ChunkList.m
// CogAudio Framework
//
// Created by Christopher Snowhill on 2/5/22.
//
#import "ChunkList.h"
@implementation ChunkList

@synthesize listDuration;
@synthesize maxDuration;

/// Designated initializer; `duration` is the capacity in seconds.
- (id) initWithMaximumDuration:(double)duration {
    self = [super init];
    if (self) {
        chunkList = [[NSMutableArray alloc] init];
        listDuration = 0.0;
        maxDuration = duration;
        inAdder = NO;
        inRemover = NO;
        stopping = NO;
    }
    return self;
}

/// Flags teardown, then waits for any in-flight add/remove to drain so the
/// list is not mutated while being destroyed.
- (void) dealloc {
    stopping = YES;
    while (inAdder || inRemover) {
        usleep(500);
    }
}

- (void) reset {
    @synchronized (chunkList) {
        [chunkList removeAllObjects];
        listDuration = 0.0;
    }
}

- (BOOL) isEmpty {
    @synchronized (chunkList) {
        return [chunkList count] == 0;
    }
}

- (BOOL) isFull {
    // Read under the same lock writers hold so listDuration is consistent.
    @synchronized (chunkList) {
        return listDuration >= maxDuration;
    }
}

- (void) addChunk:(AudioChunk *)chunk {
    if (stopping) return;
    inAdder = YES;
    const double chunkDuration = [chunk duration];
    @synchronized (chunkList) {
        [chunkList addObject:chunk];
        listDuration += chunkDuration;
    }
    inAdder = NO;
}

/// Removes up to maxFrameCount frames.  Returns the whole head chunk when
/// it fits, otherwise splits off the requested frames into a new chunk.
- (AudioChunk *) removeSamples:(size_t)maxFrameCount {
    if (stopping) {
        return [[AudioChunk alloc] init];
    }
    @synchronized (chunkList) {
        inRemover = YES;
        if (![chunkList count]) {
            // BUG FIX: this early return previously left inRemover == YES,
            // which would deadlock the drain loop in -dealloc forever.
            inRemover = NO;
            return [[AudioChunk alloc] init];
        }
        AudioChunk * chunk = [chunkList objectAtIndex:0];
        if ([chunk frameCount] <= maxFrameCount) {
            [chunkList removeObjectAtIndex:0];
            listDuration -= [chunk duration];
            inRemover = NO;
            return chunk;
        }
        NSData * removedData = [chunk removeSamples:maxFrameCount];
        AudioChunk * ret = [[AudioChunk alloc] init];
        [ret setFormat:[chunk format]];
        // BUG FIX: preserve the lossless flag when splitting a chunk; it was
        // silently dropped on partial removals before.
        [ret setLossless:[chunk lossless]];
        [ret assignSamples:[removedData bytes] frameCount:maxFrameCount];
        listDuration -= [ret duration];
        inRemover = NO;
        return ret;
    }
}

@end

View File

@ -72,189 +72,11 @@ void PrintStreamDesc (AudioStreamBasicDescription *inDesc)
hdcd_decoder = NULL;
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.volumeScaling" options:0 context:nil];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.hrirPath" options:0 context:nil];
}
return self;
}
/* Stereo downmix coefficient matrix, indexed as
   [source channels - 3][source channel][left/right output].
   Rows cover 3.0 through 7.1 layouts; every speaker pair is meant to be
   left/right symmetric. */
static const float STEREO_DOWNMIX[8-2][8][2]={
/*3.0*/
{
{0.5858F,0.0F},{0.0F,0.5858F},{0.4142F,0.4142F}
},
/*quadrophonic*/
{
/* BUG FIX: the rear-right entry read {0.2114F,0.336F}; the mirror of the
   rear-left entry {0.366F,0.2114F} is {0.2114F,0.366F}. */
{0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.366F}
},
/*5.0*/
{
{0.651F,0.0F},{0.0F,0.651F},{0.46F,0.46F},{0.5636F,0.3254F},
{0.3254F,0.5636F}
},
/*5.1*/
{
{0.529F,0.0F},{0.0F,0.529F},{0.3741F,0.3741F},{0.3741F,0.3741F},{0.4582F,0.2645F},
{0.2645F,0.4582F}
},
/*6.1*/
{
{0.4553F,0.0F},{0.0F,0.4553F},{0.322F,0.322F},{0.322F,0.322F},{0.2788F,0.2788F},
{0.3943F,0.2277F},{0.2277F,0.3943F}
},
/*7.1*/
{
{0.3886F,0.0F},{0.0F,0.3886F},{0.2748F,0.2748F},{0.2748F,0.2748F},{0.3366F,0.1943F},
{0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F}
}
};
/* In-place fold of 3..8 interleaved channels down to interleaved stereo.
   Frame i reads indices [i*channels, i*channels+channels) and writes
   [i*2, i*2+2); since channels > 2, the write cursor never overtakes
   unread input.  Channel counts outside 3..8 leave the buffer untouched. */
static void downmix_to_stereo(float * buffer, int channels, size_t count)
{
    if (channels < 3 || channels > 8)
        return;
    const float (*matrix)[2] = STEREO_DOWNMIX[channels - 3];
    for (size_t frame = 0; frame < count; ++frame)
    {
        const float * src = buffer + frame * channels;
        float accL = 0, accR = 0;
        for (int ch = 0; ch < channels; ++ch)
        {
            accL += src[ch] * matrix[ch][0];
            accR += src[ch] * matrix[ch][1];
        }
        buffer[frame * 2 + 0] = accL;
        buffer[frame * 2 + 1] = accR;
    }
}
/* In-place fold of an interleaved buffer down to mono by averaging the
   channels of each frame.  Sources wider than stereo are first folded to
   stereo with the coefficient matrix, then averaged. */
static void downmix_to_mono(float * buffer, int channels, size_t count)
{
    if (channels >= 3 && channels <= 8)
    {
        downmix_to_stereo(buffer, channels, count);
        channels = 2;
    }
    const float gain = 1.0 / (float)channels;
    for (size_t frame = 0; frame < count; ++frame)
    {
        const float * src = buffer + frame * channels;
        float sum = 0;
        for (int ch = 0; ch < channels; ++ch)
            sum += src[ch];
        buffer[frame] = sum * gain;
    }
}
/* Expands interleaved float samples in place from `inchannels` to
   `outchannels` per frame.  Input and output share `buffer`, and each
   output frame is wider than its input frame, so frames are processed
   from the last to the first: a frame's output region never overwrites
   input frames that have not yet been read. */
static void upmix(float * buffer, int inchannels, int outchannels, size_t count)
{
for (ssize_t i = count - 1; i >= 0; --i)
{
if (inchannels == 1 && outchannels == 2)
{
// upmix mono to stereo
float sample = buffer[i];
buffer[i * 2 + 0] = sample;
buffer[i * 2 + 1] = sample;
}
else if (inchannels == 1 && outchannels == 4)
{
// upmix mono to quad
// fronts carry the signal; the rear pair is silenced
float sample = buffer[i];
buffer[i * 4 + 0] = sample;
buffer[i * 4 + 1] = sample;
buffer[i * 4 + 2] = 0;
buffer[i * 4 + 3] = 0;
}
else if (inchannels == 1 && (outchannels == 3 || outchannels >= 5))
{
// upmix mono to center channel
// slot 2 is treated as the center speaker; every other slot silenced
float sample = buffer[i];
buffer[i * outchannels + 2] = sample;
for (int j = 0; j < 2; ++j)
{
buffer[i * outchannels + j] = 0;
}
for (int j = 3; j < outchannels; ++j)
{
buffer[i * outchannels + j] = 0;
}
}
else if (inchannels == 4 && outchannels >= 5)
{
// quad to 5.0+: fronts stay in place, the rear pair is shifted past
// the center slot (and the LFE slot when outchannels > 5); the
// skipped slots and any extras are silenced
float fl = buffer[i * 4 + 0];
float fr = buffer[i * 4 + 1];
float bl = buffer[i * 4 + 2];
float br = buffer[i * 4 + 3];
const int skipclfe = (outchannels == 5) ? 1 : 2;
buffer[i * outchannels + 0] = fl;
buffer[i * outchannels + 1] = fr;
buffer[i * outchannels + skipclfe + 2] = bl;
buffer[i * outchannels + skipclfe + 3] = br;
for (int j = 0; j < skipclfe; ++j)
{
buffer[i * outchannels + 2 + j] = 0;
}
for (int j = 4 + skipclfe; j < outchannels; ++j)
{
buffer[i * outchannels + j] = 0;
}
}
else if (inchannels == 5 && outchannels >= 6)
{
// 5.0 to 5.1+: insert a silent LFE at slot 3 and shift the rears up
float fl = buffer[i * 5 + 0];
float fr = buffer[i * 5 + 1];
float c = buffer[i * 5 + 2];
float bl = buffer[i * 5 + 3];
float br = buffer[i * 5 + 4];
buffer[i * outchannels + 0] = fl;
buffer[i * outchannels + 1] = fr;
buffer[i * outchannels + 2] = c;
buffer[i * outchannels + 3] = 0;
buffer[i * outchannels + 4] = bl;
buffer[i * outchannels + 5] = br;
for (int j = 6; j < outchannels; ++j)
{
buffer[i * outchannels + j] = 0;
}
}
else if (inchannels == 7 && outchannels == 8)
{
// 6.1 to 7.1: the single back-center feeds both back slots; the side
// pair moves after them
float fl = buffer[i * 7 + 0];
float fr = buffer[i * 7 + 1];
float c = buffer[i * 7 + 2];
float lfe = buffer[i * 7 + 3];
float sl = buffer[i * 7 + 4];
float sr = buffer[i * 7 + 5];
float bc = buffer[i * 7 + 6];
buffer[i * 8 + 0] = fl;
buffer[i * 8 + 1] = fr;
buffer[i * 8 + 2] = c;
buffer[i * 8 + 3] = lfe;
buffer[i * 8 + 4] = bc;
buffer[i * 8 + 5] = bc;
buffer[i * 8 + 6] = sl;
buffer[i * 8 + 7] = sr;
}
else
{
// upmix N channels to N channels plus silence the empty channels
// the input frame is copied aside first because the widened output
// frame overlaps it in place
float samples[inchannels];
for (int j = 0; j < inchannels; ++j)
{
samples[j] = buffer[i * inchannels + j];
}
for (int j = 0; j < inchannels; ++j)
{
buffer[i * outchannels + j] = samples[j];
}
for (int j = inchannels; j < outchannels; ++j)
{
buffer[i * outchannels + j] = 0;
}
}
}
}
void scale_by_volume(float * buffer, size_t count, float volume)
{
if ( volume != 1.0 )
@ -636,7 +458,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
-(void)process
{
char writeBuf[CHUNK_SIZE];
char writeBuf[CHUNK_SIZE];
// Removed endOfStream check from here, since we want to be able to flush the converter
// when the end of stream is reached. Convert function instead processes what it can,
@ -665,7 +487,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
}
else break;
}
[self writeData:writeBuf amount:amountConverted];
[self writeData:writeBuf amount:amountConverted];
}
}
@ -698,7 +520,7 @@ tryagain:
BOOL isUnsigned = !isFloat && !(inputFormat.mFormatFlags & kAudioFormatFlagIsSignedInteger);
// Approximately the most we want on input
ioNumberPackets = (amount - amountRead) / outputFormat.mBytesPerPacket;
ioNumberPackets = CHUNK_SIZE;
if (!skipResampler && ioNumberPackets < PRIME_LEN_)
ioNumberPackets = PRIME_LEN_;
@ -725,9 +547,16 @@ tryagain:
while (bytesReadFromInput < amountToWrite && !stopping && [self shouldContinue] == YES && [self endOfStream] == NO)
{
size_t bytesRead = [self readData:inputBuffer + amountToSkip + bytesReadFromInput amount:(int)(amountToWrite - bytesReadFromInput)];
AudioChunk * chunk = [self readChunk:((amountToWrite - bytesReadFromInput) / inputFormat.mBytesPerPacket)];
AudioStreamBasicDescription inf = [chunk format];
size_t frameCount = [chunk frameCount];
size_t bytesRead = frameCount * inf.mBytesPerPacket;
if (frameCount) {
NSData * samples = [chunk removeSamples:frameCount];
memcpy(inputBuffer + bytesReadFromInput + amountToSkip, [samples bytes], bytesRead);
}
bytesReadFromInput += bytesRead;
if (!bytesRead)
if (!frameCount)
{
if (refillNode)
[self setEndOfStream:YES];
@ -1019,32 +848,7 @@ tryagain:
amountReadFromFC = (int)(outputDone * floatFormat.mBytesPerPacket);
scale_by_volume( (float*) floatBuffer, amountReadFromFC / sizeof(float), volumeScale);
if ( hFilter ) {
int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
[hFilter process:floatBuffer sampleCount:samples toBuffer:floatBuffer + amountReadFromFC];
memmove(floatBuffer, floatBuffer + amountReadFromFC, samples * sizeof(float) * 2);
amountReadFromFC = samples * sizeof(float) * 2;
}
else if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
{
int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
downmix_to_stereo( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );
amountReadFromFC = samples * sizeof(float) * 2;
}
else if ( inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1 )
{
int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
downmix_to_mono( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );
amountReadFromFC = samples * sizeof(float);
}
else if ( inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame )
{
int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
upmix( (float*) floatBuffer, inputFormat.mChannelsPerFrame, outputFormat.mChannelsPerFrame, samples );
amountReadFromFC = samples * sizeof(float) * outputFormat.mChannelsPerFrame;
}
floatSize = amountReadFromFC;
floatOffset = 0;
}
@ -1056,6 +860,8 @@ tryagain:
if (ioNumberPackets > (floatSize - floatOffset))
ioNumberPackets = (UInt32)(floatSize - floatOffset);
ioNumberPackets -= ioNumberPackets % outputFormat.mBytesPerPacket;
memcpy(dest + amountRead, floatBuffer + floatOffset, ioNumberPackets);
floatOffset += ioNumberPackets;
@ -1075,15 +881,6 @@ tryagain:
//User reset the volume scaling option
[self refreshVolumeScaling];
}
else if ([keyPath isEqualToString:@"values.headphoneVirtualization"] ||
[keyPath isEqualToString:@"values.hrirPath"]) {
// Reset the converter, without rebuffering
if (outputFormat.mChannelsPerFrame == 2 &&
inputFormat.mChannelsPerFrame >= 1 &&
inputFormat.mChannelsPerFrame <= 8) {
[self inputFormatDidChange:inputFormat];
}
}
}
static float db_to_scale(float db)
@ -1141,6 +938,8 @@ static float db_to_scale(float db)
inputFormat = inf;
outputFormat = outf;
nodeFormat = outputFormat;
rememberedLossless = lossless;
// These are the only sample formats we support translating
@ -1192,33 +991,6 @@ static float db_to_scale(float db)
dmFloatFormat.mBytesPerFrame = (32/8)*dmFloatFormat.mChannelsPerFrame;
dmFloatFormat.mBytesPerPacket = dmFloatFormat.mBytesPerFrame * floatFormat.mFramesPerPacket;
BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];
if (hVirt &&
outputFormat.mChannelsPerFrame == 2 &&
inputFormat.mChannelsPerFrame >= 1 &&
inputFormat.mChannelsPerFrame <= 8) {
NSString * userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];
NSURL * presetUrl = nil;
if (userPreset && ![userPreset isEqualToString:@""]) {
presetUrl = [NSURL fileURLWithPath:userPreset];
if (![HeadphoneFilter validateImpulseFile:presetUrl])
presetUrl = nil;
}
if (!presetUrl) {
presetUrl = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
if (![HeadphoneFilter validateImpulseFile:presetUrl])
presetUrl = nil;
}
if (presetUrl) {
hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
}
}
skipResampler = outputFormat.mSampleRate == floatFormat.mSampleRate;
sampleRatio = (double)outputFormat.mSampleRate / (double)floatFormat.mSampleRate;
@ -1271,8 +1043,6 @@ static float db_to_scale(float db)
DLog(@"Decoder dealloc");
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.volumeScaling"];
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.hrirPath"];
paused = NO;
[self cleanUp];
@ -1300,16 +1070,16 @@ static float db_to_scale(float db)
originalPreviousNode = previousNode;
refillNode = [[RefillNode alloc] initWithController:controller previous:nil];
[self setPreviousNode:refillNode];
int dataRead = 0;
[refillNode setFormat:previousOutputFormat];
for (;;)
{
void * ptr;
dataRead = [buffer lengthAvailableToReadReturningPointer:&ptr];
if (dataRead) {
[refillNode writeData:(float*)ptr floatCount:dataRead / sizeof(float)];
[buffer didReadLength:dataRead];
AudioChunk * chunk = [buffer removeSamples:16384];
size_t frameCount = [chunk frameCount];
if (frameCount) {
NSData * samples = [chunk removeSamples:frameCount];
[refillNode writeData:[samples bytes] amount:frameCount];
}
else
break;
@ -1383,7 +1153,7 @@ static float db_to_scale(float db)
- (double) secondsBuffered
{
return ((double)[buffer bufferedLength] / (outputFormat.mSampleRate * outputFormat.mBytesPerPacket));
return [buffer listDuration];
}
@end

26
Audio/Chain/Downmix.h Normal file
View File

@ -0,0 +1,26 @@
//
// Downmix.h
// Cog
//
// Created by Christopher Snowhill on 2/05/22.
// Copyright 2022 __LoSnoCo__. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudio.h>
#import "HeadphoneFilter.h"
/// Converts interleaved 32-bit float PCM between channel layouts:
/// downmix to stereo/mono, upmix to wider layouts, or pass-through when
/// the channel counts match.  When user defaults enable it, stereo output
/// is instead routed through an HRTF headphone virtualization filter.
@interface DownmixProcessor : NSObject {
HeadphoneFilter *hFilter; // active virtualization filter, or nil
AudioStreamBasicDescription inputFormat;
AudioStreamBasicDescription outputFormat;
}
/// Returns nil unless both formats are packed native 32-bit float PCM.
- (id) initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf;
/// Converts `frames` frames from inBuffer's layout into outBuffer.
- (void) process:(const void*)inBuffer frameCount:(size_t)frames output:(void *)outBuffer;
@end

303
Audio/Chain/Downmix.m Normal file
View File

@ -0,0 +1,303 @@
//
// Downmix.m
// Cog
//
// Created by Christopher Snowhill on 2/05/22.
// Copyright 2022 __LoSnoCo__. All rights reserved.
//
#import "Downmix.h"
#import "Logging.h"

#include <stdlib.h>
/* Stereo downmix coefficient matrix, indexed as
   [source channels - 3][source channel][left/right output].
   Rows cover 3.0 through 7.1 layouts; every speaker pair is meant to be
   left/right symmetric. */
static const float STEREO_DOWNMIX[8-2][8][2]={
/*3.0*/
{
{0.5858F,0.0F},{0.0F,0.5858F},{0.4142F,0.4142F}
},
/*quadrophonic*/
{
/* BUG FIX: the rear-right entry read {0.2114F,0.336F}; the mirror of the
   rear-left entry {0.366F,0.2114F} is {0.2114F,0.366F}. */
{0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.366F}
},
/*5.0*/
{
{0.651F,0.0F},{0.0F,0.651F},{0.46F,0.46F},{0.5636F,0.3254F},
{0.3254F,0.5636F}
},
/*5.1*/
{
{0.529F,0.0F},{0.0F,0.529F},{0.3741F,0.3741F},{0.3741F,0.3741F},{0.4582F,0.2645F},
{0.2645F,0.4582F}
},
/*6.1*/
{
{0.4553F,0.0F},{0.0F,0.4553F},{0.322F,0.322F},{0.322F,0.322F},{0.2788F,0.2788F},
{0.3943F,0.2277F},{0.2277F,0.3943F}
},
/*7.1*/
{
{0.3886F,0.0F},{0.0F,0.3886F},{0.2748F,0.2748F},{0.2748F,0.2748F},{0.3366F,0.1943F},
{0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F}
}
};
/* Folds `count` frames of 3..8 interleaved channels from inBuffer into
   interleaved stereo in outBuffer.  Channel counts outside 3..8 leave the
   output untouched. */
static void downmix_to_stereo(const float * inBuffer, int channels, float * outBuffer, size_t count)
{
    if (channels < 3 || channels > 8)
        return;
    const float (*matrix)[2] = STEREO_DOWNMIX[channels - 3];
    for (size_t frame = 0; frame < count; ++frame)
    {
        const float * src = inBuffer + frame * channels;
        float accL = 0, accR = 0;
        for (int ch = 0; ch < channels; ++ch)
        {
            accL += src[ch] * matrix[ch][0];
            accR += src[ch] * matrix[ch][1];
        }
        outBuffer[frame * 2 + 0] = accL;
        outBuffer[frame * 2 + 1] = accR;
    }
}
/* Folds `count` interleaved frames of `channels` channels from inBuffer
   into mono in outBuffer by averaging.  Sources wider than stereo are
   first folded to stereo with the coefficient matrix.
   FIX: the stereo intermediate was an unconditional VLA of count*2 floats,
   which was allocated even when unused and could overflow the stack for
   large counts; it now lives on the heap and only when actually needed. */
static void downmix_to_mono(const float * inBuffer, int channels, float * outBuffer, size_t count)
{
    float * tempBuffer = NULL;
    if (channels >= 3 && channels <= 8)
    {
        tempBuffer = (float *) malloc(sizeof(float) * count * 2);
        if (!tempBuffer)
            return; /* allocation failure: leave the output untouched */
        downmix_to_stereo(inBuffer, channels, tempBuffer, count);
        inBuffer = tempBuffer;
        channels = 2;
    }
    float invchannels = 1.0 / (float)channels;
    for (size_t i = 0; i < count; ++i)
    {
        float sample = 0;
        for (int j = 0; j < channels; ++j)
        {
            sample += inBuffer[i * channels + j];
        }
        outBuffer[i] = sample * invchannels;
    }
    free(tempBuffer);
}
/* Expands `count` interleaved frames from `inchannels` to `outchannels`,
   reading inBuffer and writing outBuffer.  Known speaker remappings are
   handled explicitly; any other widening copies the input channels in
   order and silences the remainder.
   FIX: the loop index is now size_t instead of ssize_t, removing the
   signed/unsigned comparison against `count` (backward iteration is not
   needed here since input and output are separate buffers). */
static void upmix(const float * inBuffer, int inchannels, float * outBuffer, int outchannels, size_t count)
{
    for (size_t i = 0; i < count; ++i)
    {
        if (inchannels == 1 && outchannels == 2)
        {
            // mono to stereo: duplicate into both channels
            float sample = inBuffer[i];
            outBuffer[i * 2 + 0] = sample;
            outBuffer[i * 2 + 1] = sample;
        }
        else if (inchannels == 1 && outchannels == 4)
        {
            // mono to quad: fronts carry the signal, rears silenced
            float sample = inBuffer[i];
            outBuffer[i * 4 + 0] = sample;
            outBuffer[i * 4 + 1] = sample;
            outBuffer[i * 4 + 2] = 0;
            outBuffer[i * 4 + 3] = 0;
        }
        else if (inchannels == 1 && (outchannels == 3 || outchannels >= 5))
        {
            // mono into the center slot (index 2); all others silenced
            float sample = inBuffer[i];
            outBuffer[i * outchannels + 2] = sample;
            for (int j = 0; j < 2; ++j)
            {
                outBuffer[i * outchannels + j] = 0;
            }
            for (int j = 3; j < outchannels; ++j)
            {
                outBuffer[i * outchannels + j] = 0;
            }
        }
        else if (inchannels == 4 && outchannels >= 5)
        {
            // quad to 5.0+: rears skip past center (and LFE when present)
            float fl = inBuffer[i * 4 + 0];
            float fr = inBuffer[i * 4 + 1];
            float bl = inBuffer[i * 4 + 2];
            float br = inBuffer[i * 4 + 3];
            const int skipclfe = (outchannels == 5) ? 1 : 2;
            outBuffer[i * outchannels + 0] = fl;
            outBuffer[i * outchannels + 1] = fr;
            outBuffer[i * outchannels + skipclfe + 2] = bl;
            outBuffer[i * outchannels + skipclfe + 3] = br;
            for (int j = 0; j < skipclfe; ++j)
            {
                outBuffer[i * outchannels + 2 + j] = 0;
            }
            for (int j = 4 + skipclfe; j < outchannels; ++j)
            {
                outBuffer[i * outchannels + j] = 0;
            }
        }
        else if (inchannels == 5 && outchannels >= 6)
        {
            // 5.0 to 5.1+: insert a silent LFE at slot 3, shift rears up
            float fl = inBuffer[i * 5 + 0];
            float fr = inBuffer[i * 5 + 1];
            float c = inBuffer[i * 5 + 2];
            float bl = inBuffer[i * 5 + 3];
            float br = inBuffer[i * 5 + 4];
            outBuffer[i * outchannels + 0] = fl;
            outBuffer[i * outchannels + 1] = fr;
            outBuffer[i * outchannels + 2] = c;
            outBuffer[i * outchannels + 3] = 0;
            outBuffer[i * outchannels + 4] = bl;
            outBuffer[i * outchannels + 5] = br;
            for (int j = 6; j < outchannels; ++j)
            {
                outBuffer[i * outchannels + j] = 0;
            }
        }
        else if (inchannels == 7 && outchannels == 8)
        {
            // 6.1 to 7.1: back-center feeds both back slots, sides follow
            float fl = inBuffer[i * 7 + 0];
            float fr = inBuffer[i * 7 + 1];
            float c = inBuffer[i * 7 + 2];
            float lfe = inBuffer[i * 7 + 3];
            float sl = inBuffer[i * 7 + 4];
            float sr = inBuffer[i * 7 + 5];
            float bc = inBuffer[i * 7 + 6];
            outBuffer[i * 8 + 0] = fl;
            outBuffer[i * 8 + 1] = fr;
            outBuffer[i * 8 + 2] = c;
            outBuffer[i * 8 + 3] = lfe;
            outBuffer[i * 8 + 4] = bc;
            outBuffer[i * 8 + 5] = bc;
            outBuffer[i * 8 + 6] = sl;
            outBuffer[i * 8 + 7] = sr;
        }
        else
        {
            // generic widening: copy input channels, silence the rest
            for (int j = 0; j < inchannels; ++j)
            {
                outBuffer[i * outchannels + j] = inBuffer[i * inchannels + j];
            }
            for (int j = inchannels; j < outchannels; ++j)
            {
                outBuffer[i * outchannels + j] = 0;
            }
        }
    }
}
@implementation DownmixProcessor

/// Validates that both formats are packed native 32-bit float PCM (the
/// only layout the downmix/upmix kernels understand), builds the optional
/// headphone filter, and observes the user defaults that control it.
/// Returns nil when either format is unsupported.
- (id) initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf {
    self = [super init];
    if (self) {
        if (inf.mFormatID != kAudioFormatLinearPCM ||
            (inf.mFormatFlags & kAudioFormatFlagsNativeFloatPacked) != kAudioFormatFlagsNativeFloatPacked ||
            inf.mBitsPerChannel != 32 ||
            inf.mBytesPerFrame != (4 * inf.mChannelsPerFrame) ||
            inf.mBytesPerPacket != inf.mFramesPerPacket * inf.mBytesPerFrame)
            return nil;

        if (outf.mFormatID != kAudioFormatLinearPCM ||
            (outf.mFormatFlags & kAudioFormatFlagsNativeFloatPacked) != kAudioFormatFlagsNativeFloatPacked ||
            outf.mBitsPerChannel != 32 ||
            outf.mBytesPerFrame != (4 * outf.mChannelsPerFrame) ||
            outf.mBytesPerPacket != outf.mFramesPerPacket * outf.mBytesPerFrame)
            return nil;

        inputFormat = inf;
        outputFormat = outf;

        [self setupVirt];

        [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
        [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.hrirPath" options:0 context:nil];
    }
    return self;
}

- (void) dealloc {
    [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];
    [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.hrirPath"];
}

/// (Re)creates the HRTF headphone filter from user defaults.
/// BUG FIX: this used @synchronized(hFilter), which locks on whatever
/// object the ivar currently references — a no-op while hFilter is nil and
/// useless once the ivar is reassigned.  Locking on self gives -process:
/// and -setupVirt real mutual exclusion over the ivar.
- (void) setupVirt {
    @synchronized (self) {
        hFilter = nil;
    }

    BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];

    if (hVirt &&
        outputFormat.mChannelsPerFrame == 2 &&
        inputFormat.mChannelsPerFrame >= 1 &&
        inputFormat.mChannelsPerFrame <= 8) {
        NSString * userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];

        NSURL * presetUrl = nil;

        if (userPreset && ![userPreset isEqualToString:@""]) {
            presetUrl = [NSURL fileURLWithPath:userPreset];
            if (![HeadphoneFilter validateImpulseFile:presetUrl])
                presetUrl = nil;
        }

        if (!presetUrl) {
            // Fall back to the impulse bundled with the application.
            presetUrl = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
            if (![HeadphoneFilter validateImpulseFile:presetUrl])
                presetUrl = nil;
        }

        if (presetUrl) {
            // Construct outside the lock; only the ivar swap is guarded.
            HeadphoneFilter * filter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
            @synchronized (self) {
                hFilter = filter;
            }
        }
    }
}

- (void)observeValueForKeyPath:(NSString *)keyPath
                      ofObject:(id)object
                        change:(NSDictionary *)change
                       context:(void *)context
{
    DLog(@"SOMETHING CHANGED!");
    if ([keyPath isEqualToString:@"values.headphoneVirtualization"] ||
        [keyPath isEqualToString:@"values.hrirPath"]) {
        // Rebuild the virtualization filter without rebuffering.
        [self setupVirt];
    }
}

/// Converts `frames` frames from the input layout into the output layout.
/// Routes through the headphone filter when one is active; otherwise
/// downmixes, upmixes, or copies as the channel counts dictate.
/// NOTE(review): when the input has more channels than a non-mono/stereo
/// output, no branch matches and outBuffer is left untouched — presumably
/// callers never produce that combination; confirm.
- (void) process:(const void *)inBuffer frameCount:(size_t)frames output:(void *)outBuffer {
    @synchronized (self) {
        if ( hFilter ) {
            [hFilter process:(const float *) inBuffer sampleCount:frames toBuffer:(float *) outBuffer];
            return;
        }
    }

    if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
    {
        downmix_to_stereo( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, frames );
    }
    else if ( inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1 )
    {
        downmix_to_mono( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, frames );
    }
    else if ( inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame )
    {
        upmix( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, outputFormat.mChannelsPerFrame, frames );
    }
    else if ( inputFormat.mChannelsPerFrame == outputFormat.mChannelsPerFrame )
    {
        memcpy(outBuffer, inBuffer, frames * outputFormat.mBytesPerPacket);
    }
}

@end

View File

@ -51,6 +51,9 @@
bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
nodeFormat = propertiesToASBD(properties);
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
shouldContinue = YES;
shouldSeek = NO;
@ -68,6 +71,9 @@
bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
nodeFormat = propertiesToASBD(properties);
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
[self registerObservers];
shouldContinue = YES;
@ -102,12 +108,10 @@
DLog(@"SOMETHING CHANGED!");
if ([keyPath isEqual:@"properties"]) {
DLog(@"Input format changed");
// Converter doesn't need resetting for this, as output format hasn't changed
ConverterNode *converter = [[[controller controller] bufferChain] converter];
AudioStreamBasicDescription newInputFormat = [[[controller controller] bufferChain] inputFormat];
AudioStreamBasicDescription oldInputFormat = [converter inputFormat];
if (memcmp(&oldInputFormat, &newInputFormat, sizeof(oldInputFormat)) != 0)
[converter inputFormatDidChange:newInputFormat];
// Converter may need resetting, it'll do that when it reaches the new chunks
NSDictionary * properties = [decoder properties];
nodeFormat = propertiesToASBD(properties);
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
}
else if ([keyPath isEqual:@"metadata"]) {
//Inform something of metadata change
@ -254,8 +258,7 @@
- (double) secondsBuffered
{
AudioStreamBasicDescription inputFormat = [[[controller controller] bufferChain] inputFormat];
return ((double)[buffer bufferedLength] / (inputFormat.mSampleRate * inputFormat.mBytesPerPacket));
return [buffer listDuration];
}
@end

View File

@ -7,15 +7,17 @@
//
#import <Cocoa/Cocoa.h>
#import "VirtualRingBuffer.h"
#import "ChunkList.h"
#import "Semaphore.h"
#define BUFFER_SIZE 1024 * 1024
#define CHUNK_SIZE 16 * 1024
@interface Node : NSObject {
VirtualRingBuffer *buffer;
Semaphore *semaphore;
ChunkList *buffer;
Semaphore *semaphore;
NSRecursiveLock *accessLock;
id __weak previousNode;
id __weak controller;
@ -25,11 +27,14 @@
BOOL shouldContinue;
BOOL endOfStream; //All data is now in buffer
BOOL initialBufferFilled;
AudioStreamBasicDescription nodeFormat;
BOOL nodeLossless;
}
- (id)initWithController:(id)c previous:(id)p;
- (int)writeData:(void *)ptr amount:(int)a;
- (int)readData:(void *)ptr amount:(int)a;
- (void)writeData:(const void *)ptr amount:(size_t)a;
- (AudioChunk *)readChunk:(size_t)maxFrames;
- (void)process; //Should be overwriten by subclass
- (void)threadEntry:(id)arg;
@ -45,9 +50,12 @@
- (BOOL)shouldContinue;
- (void)setShouldContinue:(BOOL)s;
- (VirtualRingBuffer *)buffer;
- (ChunkList *)buffer;
- (void)resetBuffer; //WARNING! DANGER WILL ROBINSON!
- (AudioStreamBasicDescription)nodeFormat;
- (BOOL)nodeLossless;
- (Semaphore *)semaphore;
//-(void)resetBuffer;

View File

@ -18,14 +18,18 @@
self = [super init];
if (self)
{
buffer = [[VirtualRingBuffer alloc] initWithLength:BUFFER_SIZE];
semaphore = [[Semaphore alloc] init];
buffer = [[ChunkList alloc] initWithMaximumDuration:3.0];
semaphore = [[Semaphore alloc] init];
accessLock = [[NSRecursiveLock alloc] init];
initialBufferFilled = NO;
controller = c;
endOfStream = NO;
shouldContinue = YES;
nodeLossless = NO;
[self setPreviousNode:p];
}
@ -33,46 +37,50 @@
return self;
}
- (int)writeData:(void *)ptr amount:(int)amount
- (AudioStreamBasicDescription)nodeFormat
{
void *writePtr;
int amountToCopy, availOutput;
int amountLeft = amount;
while (shouldContinue == YES && amountLeft > 0)
{
availOutput = [buffer lengthAvailableToWriteReturningPointer:&writePtr];
if (availOutput == 0) {
if (initialBufferFilled == NO) {
initialBufferFilled = YES;
if ([controller respondsToSelector:@selector(initialBufferFilled:)])
[controller performSelector:@selector(initialBufferFilled:) withObject:self];
}
}
if (availOutput == 0 || shouldReset)
{
if (availOutput)
{
// Unlock the buffer
[buffer didWriteLength:0];
return nodeFormat;
}
- (BOOL)nodeLossless
{
return nodeLossless;
}
- (void)writeData:(const void *)ptr amount:(size_t)amount
{
[accessLock lock];
AudioChunk * chunk = [[AudioChunk alloc] init];
[chunk setFormat:nodeFormat];
[chunk setLossless:nodeLossless];
[chunk assignSamples:ptr frameCount:amount / nodeFormat.mBytesPerPacket];
const double chunkDuration = [chunk duration];
double durationLeft = [buffer maxDuration] - [buffer listDuration];
while (shouldContinue == YES && chunkDuration > durationLeft)
{
if (durationLeft < chunkDuration) {
if (initialBufferFilled == NO) {
initialBufferFilled = YES;
if ([controller respondsToSelector:@selector(initialBufferFilled:)])
[controller performSelector:@selector(initialBufferFilled:) withObject:self];
}
[semaphore wait];
}
else
{
amountToCopy = availOutput;
if (amountToCopy > amountLeft)
amountToCopy = amountLeft;
memcpy(writePtr, &((char *)ptr)[amount - amountLeft], amountToCopy);
[buffer didWriteLength:amountToCopy];
amountLeft -= amountToCopy;
}
}
return (amount - amountLeft);
}
if (durationLeft < chunkDuration || shouldReset) {
[accessLock unlock];
[semaphore wait];
[accessLock lock];
}
durationLeft = [buffer maxDuration] - [buffer listDuration];
}
[buffer addChunk:chunk];
[accessLock unlock];
}
//Should be overwriten by subclass.
@ -87,53 +95,36 @@
}
}
- (int)readData:(void *)ptr amount:(int)amount
- (AudioChunk *)readChunk:(size_t)maxFrames
{
void *readPtr;
int amountToCopy;
int availInput;
[accessLock lock];
if ([[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES)
{
endOfStream = YES;
return 0;
[accessLock unlock];
return [[AudioChunk alloc] init];
}
availInput = [[previousNode buffer] lengthAvailableToReadReturningPointer:&readPtr];
/* if (availInput <= 0) {
DLog(@"BUFFER RAN DRY!");
}
else if (availInput < amount) {
DLog(@"BUFFER IN DANGER");
}
*/
if ([previousNode shouldReset] == YES) {
[buffer empty];
[buffer reset];
shouldReset = YES;
[previousNode setShouldReset: NO];
[[previousNode semaphore] signal];
[[previousNode semaphore] signal];
}
amountToCopy = availInput;
if (amountToCopy > amount)
{
amountToCopy = amount;
}
memcpy(ptr, readPtr, amountToCopy);
[[previousNode buffer] didReadLength:amountToCopy];
AudioChunk * ret = [[previousNode buffer] removeSamples:maxFrames];
[accessLock unlock];
if (amountToCopy > 0)
{
[[previousNode semaphore] signal];
}
return amountToCopy;
if ([ret frameCount])
{
[[previousNode semaphore] signal];
}
return ret;
}
- (void)launchThread
@ -161,7 +152,7 @@
shouldContinue = s;
}
- (VirtualRingBuffer *)buffer
- (ChunkList *)buffer
{
return buffer;
}
@ -170,7 +161,9 @@
{
shouldReset = YES; //Will reset on next write.
if (previousNode == nil) {
[buffer empty];
[accessLock lock];
[buffer reset];
[accessLock unlock];
}
}

View File

@ -32,7 +32,7 @@
- (double)amountPlayed;
- (void)incrementAmountPlayed:(long)count;
- (void)incrementAmountPlayed:(double)seconds;
- (void)resetAmountPlayed;
- (void)endOfInputPlayed;
@ -46,7 +46,7 @@
- (void)close;
- (void)seek:(double)time;
- (int)readData:(void *)ptr amount:(int)amount;
- (AudioChunk *)readChunk:(size_t)amount;
- (void)setFormat:(AudioStreamBasicDescription *)f;
- (AudioStreamBasicDescription) format;

View File

@ -54,9 +54,9 @@
[output resume];
}
- (void)incrementAmountPlayed:(long)count
- (void)incrementAmountPlayed:(double)seconds
{
amountPlayed += (double)count * sampleRatio;
amountPlayed += seconds;
}
- (void)resetAmountPlayed
@ -76,23 +76,21 @@
- (double)secondsBuffered
{
return (double)([buffer bufferedLength]) / (format.mSampleRate * format.mBytesPerPacket);
return [buffer listDuration];
}
- (int)readData:(void *)ptr amount:(int)amount
- (AudioChunk *)readChunk:(size_t)amount
{
@autoreleasepool {
int n;
[self setPreviousNode:[[controller bufferChain] finalNode]];
n = [super readData:ptr amount:amount];
AudioChunk * ret = [super readChunk:amount];
/* if (n == 0) {
DLog(@"Output Buffer dry!");
}
*/
return n;
return ret;
}
}

View File

@ -21,6 +21,6 @@
// This node just slaps pre-converted data into its buffer for re-buffering
}
- (void)writeData:(float *)data floatCount:(size_t)count;
- (void) setFormat:(AudioStreamBasicDescription)format;
@end

View File

@ -19,7 +19,7 @@
if (self)
{
// This special node should be able to handle up to four buffers
buffer = [[VirtualRingBuffer alloc] initWithLength:BUFFER_SIZE * 4];
buffer = [[ChunkList alloc] initWithMaximumDuration:12.0];
semaphore = [[Semaphore alloc] init];
initialBufferFilled = NO;
@ -27,6 +27,8 @@
controller = c;
endOfStream = NO;
shouldContinue = YES;
nodeLossless = NO;
[self setPreviousNode:p];
}
@ -34,15 +36,14 @@
return self;
}
- (void)writeData:(float *)data floatCount:(size_t)count
{
[self writeData:data amount:(int)(count * sizeof(float))];
}
- (void)dealloc
{
DLog(@"Refill Node dealloc");
}
- (void)setFormat:(AudioStreamBasicDescription)format
{
nodeFormat = format;
}
@end

View File

@ -45,6 +45,12 @@
17F94DDD0B8D101100A34E87 /* Plugin.h in Headers */ = {isa = PBXBuildFile; fileRef = 17F94DDC0B8D101100A34E87 /* Plugin.h */; settings = {ATTRIBUTES = (Public, ); }; };
8347C7412796C58800FA8A7D /* NSFileHandle+CreateFile.h in Headers */ = {isa = PBXBuildFile; fileRef = 8347C73F2796C58800FA8A7D /* NSFileHandle+CreateFile.h */; };
8347C7422796C58800FA8A7D /* NSFileHandle+CreateFile.m in Sources */ = {isa = PBXBuildFile; fileRef = 8347C7402796C58800FA8A7D /* NSFileHandle+CreateFile.m */; };
834FD4EB27AF8F380063BC83 /* AudioChunk.h in Headers */ = {isa = PBXBuildFile; fileRef = 834FD4EA27AF8F380063BC83 /* AudioChunk.h */; };
834FD4ED27AF91220063BC83 /* AudioChunk.m in Sources */ = {isa = PBXBuildFile; fileRef = 834FD4EC27AF91220063BC83 /* AudioChunk.m */; };
834FD4F027AF93680063BC83 /* ChunkList.h in Headers */ = {isa = PBXBuildFile; fileRef = 834FD4EE27AF93680063BC83 /* ChunkList.h */; };
834FD4F127AF93680063BC83 /* ChunkList.m in Sources */ = {isa = PBXBuildFile; fileRef = 834FD4EF27AF93680063BC83 /* ChunkList.m */; };
834FD4F427AFA2150063BC83 /* Downmix.h in Headers */ = {isa = PBXBuildFile; fileRef = 834FD4F227AFA2150063BC83 /* Downmix.h */; };
834FD4F527AFA2150063BC83 /* Downmix.m in Sources */ = {isa = PBXBuildFile; fileRef = 834FD4F327AFA2150063BC83 /* Downmix.m */; };
835C88A82797D4D400E28EAE /* LICENSE.LGPL in Resources */ = {isa = PBXBuildFile; fileRef = 835C88A42797D4D400E28EAE /* LICENSE.LGPL */; };
835C88A92797D4D400E28EAE /* License.txt in Resources */ = {isa = PBXBuildFile; fileRef = 835C88A52797D4D400E28EAE /* License.txt */; };
835C88AA2797D4D400E28EAE /* lpc.c in Sources */ = {isa = PBXBuildFile; fileRef = 835C88A62797D4D400E28EAE /* lpc.c */; };
@ -138,6 +144,12 @@
32DBCF5E0370ADEE00C91783 /* CogAudio_Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CogAudio_Prefix.pch; sourceTree = "<group>"; };
8347C73F2796C58800FA8A7D /* NSFileHandle+CreateFile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "NSFileHandle+CreateFile.h"; path = "../../Utils/NSFileHandle+CreateFile.h"; sourceTree = "<group>"; };
8347C7402796C58800FA8A7D /* NSFileHandle+CreateFile.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "NSFileHandle+CreateFile.m"; path = "../../Utils/NSFileHandle+CreateFile.m"; sourceTree = "<group>"; };
834FD4EA27AF8F380063BC83 /* AudioChunk.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AudioChunk.h; sourceTree = "<group>"; };
834FD4EC27AF91220063BC83 /* AudioChunk.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AudioChunk.m; sourceTree = "<group>"; };
834FD4EE27AF93680063BC83 /* ChunkList.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ChunkList.h; sourceTree = "<group>"; };
834FD4EF27AF93680063BC83 /* ChunkList.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ChunkList.m; sourceTree = "<group>"; };
834FD4F227AFA2150063BC83 /* Downmix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Downmix.h; sourceTree = "<group>"; };
834FD4F327AFA2150063BC83 /* Downmix.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Downmix.m; sourceTree = "<group>"; };
835C88A42797D4D400E28EAE /* LICENSE.LGPL */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = LICENSE.LGPL; sourceTree = "<group>"; };
835C88A52797D4D400E28EAE /* License.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = License.txt; sourceTree = "<group>"; };
835C88A62797D4D400E28EAE /* lpc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lpc.c; sourceTree = "<group>"; };
@ -284,6 +296,12 @@
17D21C750B8BE4BA00D1EBDE /* Chain */ = {
isa = PBXGroup;
children = (
834FD4EA27AF8F380063BC83 /* AudioChunk.h */,
834FD4EC27AF91220063BC83 /* AudioChunk.m */,
834FD4EE27AF93680063BC83 /* ChunkList.h */,
834FD4EF27AF93680063BC83 /* ChunkList.m */,
834FD4F227AFA2150063BC83 /* Downmix.h */,
834FD4F327AFA2150063BC83 /* Downmix.m */,
83A44A00279119B50049B6E2 /* RefillNode.h */,
83A449FF279119B50049B6E2 /* RefillNode.m */,
17D21C760B8BE4BA00D1EBDE /* BufferChain.h */,
@ -434,14 +452,17 @@
17D21CA70B8BE4BA00D1EBDE /* Node.h in Headers */,
17D21CA90B8BE4BA00D1EBDE /* OutputNode.h in Headers */,
17D21CC50B8BE4BA00D1EBDE /* OutputCoreAudio.h in Headers */,
834FD4F427AFA2150063BC83 /* Downmix.h in Headers */,
17D21CC70B8BE4BA00D1EBDE /* Status.h in Headers */,
17D21CDF0B8BE5B400D1EBDE /* VirtualRingBuffer.h in Headers */,
835C88AB2797D4D400E28EAE /* lpc.h in Headers */,
17D21CF30B8BE5EF00D1EBDE /* Semaphore.h in Headers */,
17D21DC70B8BE79700D1EBDE /* CoreAudioUtils.h in Headers */,
17D21EBD0B8BF44000D1EBDE /* AudioPlayer.h in Headers */,
834FD4F027AF93680063BC83 /* ChunkList.h in Headers */,
17F94DD50B8D0F7000A34E87 /* PluginController.h in Headers */,
17F94DDD0B8D101100A34E87 /* Plugin.h in Headers */,
834FD4EB27AF8F380063BC83 /* AudioChunk.h in Headers */,
17A2D3C50B8D1D37000778C4 /* AudioDecoder.h in Headers */,
8347C7412796C58800FA8A7D /* NSFileHandle+CreateFile.h in Headers */,
17C940230B900909008627D6 /* AudioMetadataReader.h in Headers */,
@ -539,9 +560,11 @@
17D21CA60B8BE4BA00D1EBDE /* InputNode.m in Sources */,
17D21CA80B8BE4BA00D1EBDE /* Node.m in Sources */,
17D21CAA0B8BE4BA00D1EBDE /* OutputNode.m in Sources */,
834FD4F527AFA2150063BC83 /* Downmix.m in Sources */,
17D21CC60B8BE4BA00D1EBDE /* OutputCoreAudio.m in Sources */,
17D21CE00B8BE5B400D1EBDE /* VirtualRingBuffer.m in Sources */,
835C88B2279811A500E28EAE /* hdcd_decode2.c in Sources */,
834FD4ED27AF91220063BC83 /* AudioChunk.m in Sources */,
17D21CF40B8BE5EF00D1EBDE /* Semaphore.m in Sources */,
8347C7422796C58800FA8A7D /* NSFileHandle+CreateFile.m in Sources */,
17D21DC80B8BE79700D1EBDE /* CoreAudioUtils.m in Sources */,
@ -554,6 +577,7 @@
17B619310B909BC300BC003F /* AudioPropertiesReader.m in Sources */,
17ADB13D0B97926D00257CA2 /* AudioSource.m in Sources */,
83A44A01279119B50049B6E2 /* RefillNode.m in Sources */,
834FD4F127AF93680063BC83 /* ChunkList.m in Sources */,
8EC122600B993BD500C5B3AD /* ConverterNode.m in Sources */,
8E8D3D300CBAEE6E00135C1B /* AudioContainer.m in Sources */,
B0575F300D687A4000411D77 /* Helper.m in Sources */,

View File

@ -17,6 +17,8 @@
#import <stdatomic.h>
#import "Downmix.h"
#import "Semaphore.h"
//#define OUTPUT_LOG
@ -42,6 +44,8 @@
BOOL eqEnabled;
BOOL streamFormatStarted;
atomic_long bytesRendered;
atomic_long bytesHdcdSustained;
@ -54,12 +58,15 @@
AudioDeviceID outputDeviceID;
AudioStreamBasicDescription deviceFormat; // info about the default device
AudioStreamBasicDescription streamFormat; // stream format last seen in render callback
AUAudioUnit *_au;
size_t _bufferSize;
AudioUnit _eq;
DownmixProcessor * downmixer;
#ifdef OUTPUT_LOG
FILE *_logFile;
#endif

View File

@ -62,7 +62,7 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
const int channels = _self->deviceFormat.mChannelsPerFrame;
const int bytesPerPacket = channels * sizeof(float);
int amountToRead, amountRead = 0;
size_t amountToRead, amountRead = 0;
amountToRead = inNumberFrames * bytesPerPacket;
@ -82,44 +82,59 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
atomic_fetch_add(&_self->bytesRendered, amountToRead);
return 0;
}
AudioChunk * chunk = [[_self->outputController buffer] removeSamples:(amountToRead / bytesPerPacket)];
void * readPtr;
int toRead = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
size_t frameCount = [chunk frameCount];
AudioStreamBasicDescription format = [chunk format];
if (toRead > amountToRead)
toRead = amountToRead;
if (toRead) {
fillBuffers(ioData, (float*)readPtr, toRead / bytesPerPacket, 0);
amountRead = toRead;
[[_self->outputController buffer] didReadLength:toRead];
[_self->outputController incrementAmountPlayed:amountRead];
if (frameCount) {
if (!_self->streamFormatStarted || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
_self->streamFormat = format;
_self->streamFormatStarted = YES;
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format andOutputFormat:_self->deviceFormat];
}
double chunkDuration = [chunk duration];
NSData * samples = [chunk removeSamples:frameCount];
float downmixedData[frameCount * channels];
[_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
fillBuffers(ioData, downmixedData, frameCount, 0);
amountRead = frameCount * bytesPerPacket;
[_self->outputController incrementAmountPlayed:chunkDuration];
atomic_fetch_add(&_self->bytesRendered, amountRead);
[_self->writeSemaphore signal];
}
else
[[_self->outputController buffer] didReadLength:0];
// Try repeatedly! Buffer wraps can cause a slight data shortage, as can
// unexpected track changes.
while ((amountRead < amountToRead) && [_self->outputController shouldContinue] == YES)
{
int amountRead2; //Use this since return type of readdata isnt known...may want to fix then can do a simple += to readdata
amountRead2 = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
if (amountRead2 > (amountToRead - amountRead))
amountRead2 = amountToRead - amountRead;
if (amountRead2) {
atomic_fetch_add(&_self->bytesRendered, amountRead2);
fillBuffers(ioData, (float*)readPtr, amountRead2 / bytesPerPacket, amountRead / bytesPerPacket);
[[_self->outputController buffer] didReadLength:amountRead2];
chunk = [[_self->outputController buffer] removeSamples:((amountToRead - amountRead) / bytesPerPacket)];
frameCount = [chunk frameCount];
format = [chunk format];
if (frameCount) {
if (!_self->streamFormatStarted || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
_self->streamFormat = format;
_self->streamFormatStarted = YES;
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format andOutputFormat:_self->deviceFormat];
}
atomic_fetch_add(&_self->bytesRendered, frameCount * bytesPerPacket);
double chunkDuration = [chunk duration];
NSData * samples = [chunk removeSamples:frameCount];
float downmixedData[frameCount * channels];
[_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
fillBuffers(ioData, downmixedData, frameCount, amountRead / bytesPerPacket);
[_self->outputController incrementAmountPlayed:amountRead2];
[_self->outputController incrementAmountPlayed:chunkDuration];
amountRead += amountRead2;
amountRead += frameCount * bytesPerPacket;
[_self->writeSemaphore signal];
}
else {
[[_self->outputController buffer] didReadLength:0];
[_self->readSemaphore timedWait:500];
}
}
@ -165,6 +180,8 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
started = NO;
stopNext = NO;
streamFormatStarted = NO;
atomic_init(&bytesRendered, 0);
atomic_init(&bytesHdcdSustained, 0);
@ -223,7 +240,7 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
}
if ([outputController shouldReset]) {
[[outputController buffer] empty];
[[outputController buffer] reset];
[outputController setShouldReset:NO];
[delayedEvents removeAllObjects];
delayedEventsPopped = YES;
@ -231,7 +248,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
while ([delayedEvents count]) {
size_t localBytesRendered = atomic_load_explicit(&bytesRendered, memory_order_relaxed);
if (localBytesRendered >= [[delayedEvents objectAtIndex:0] longValue]) {
double secondsRendered = (double)localBytesRendered / (double)(deviceFormat.mBytesPerPacket * deviceFormat.mSampleRate);
if (secondsRendered >= [[delayedEvents objectAtIndex:0] doubleValue]) {
if ([outputController chainQueueHasTracks])
delayedEventsPopped = YES;
[self signalEndOfStream];
@ -242,22 +260,24 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
if (stopping)
break;
size_t frameCount = 0;
void *writePtr;
int toWrite = [[outputController buffer] lengthAvailableToWriteReturningPointer:&writePtr];
int bytesRead = 0;
if (toWrite > CHUNK_SIZE)
toWrite = CHUNK_SIZE;
if (toWrite)
bytesRead = [outputController readData:writePtr amount:toWrite];
[[outputController buffer] didWriteLength:bytesRead];
if (bytesRead) {
if (![[outputController buffer] isFull]) {
AudioChunk * chunk = [outputController readChunk:512];
frameCount = [chunk frameCount];
if (frameCount) {
[[outputController buffer] addChunk:chunk];
}
}
if (frameCount) {
[readSemaphore signal];
continue;
}
else if ([outputController shouldContinue] == NO)
break;
else if (!toWrite) {
else if ([[outputController buffer] isFull]) {
if (!started) {
started = YES;
if (!paused) {
@ -270,20 +290,21 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
// End of input possibly reached
if (delayedEventsPopped && [outputController endOfStream] == YES)
{
long bytesBuffered = [[outputController buffer] bufferedLength];
bytesBuffered += atomic_load_explicit(&bytesRendered, memory_order_relaxed);
double secondsBuffered = [[outputController buffer] listDuration];
size_t _bytesRendered = atomic_load_explicit(&bytesRendered, memory_order_relaxed);
secondsBuffered += (double)_bytesRendered / (double)(deviceFormat.mBytesPerPacket * deviceFormat.mSampleRate);
if ([outputController chainQueueHasTracks])
{
if (bytesBuffered < CHUNK_SIZE / 2)
bytesBuffered = 0;
if (secondsBuffered <= 0.005)
secondsBuffered = 0.0;
else
bytesBuffered -= CHUNK_SIZE / 2;
secondsBuffered -= 0.005;
}
else {
stopNext = YES;
break;
}
[delayedEvents addObject:[NSNumber numberWithLong:bytesBuffered]];
[delayedEvents addObject:[NSNumber numberWithDouble:secondsBuffered]];
delayedEventsPopped = NO;
if (!started) {
started = YES;
@ -477,8 +498,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
NSError *err;
AVAudioFormat *renderFormat;
[outputController incrementAmountPlayed:[[outputController buffer] bufferedLength]];
[[outputController buffer] empty];
[outputController incrementAmountPlayed:[[outputController buffer] listDuration]];
[[outputController buffer] reset];
_deviceFormat = format;
deviceFormat = *(format.streamDescription);
@ -562,6 +583,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
paused = NO;
stopNext = NO;
outputDeviceID = -1;
downmixer = nil;
AudioComponentDescription desc;
NSError *err;
@ -668,6 +691,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
return 0;
};
[_au setMaximumFramesToRender:512];
UInt32 value;
UInt32 size = sizeof(value);
@ -781,6 +806,10 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
[outputController endEqualizer:_eq];
_eq = NULL;
}
if (downmixer)
{
downmixer = nil;
}
#ifdef OUTPUT_LOG
if (_logFile)
{