Cog Audio: Major rewrite of audio buffering
Rewrite attempt number two. Buffering now uses array lists of audio chunks, with each chunk carrying its sample format and, optionally, a lossless flag. This replaces the old virtual ring buffer method. As a result, the HRIR toggle now takes effect instantly.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
branch CQTexperiment
parent 11ecb1cd8b
commit 62edb39761
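For orientation, a minimal usage sketch of the new chunk-based buffering follows (not part of the commit itself). It uses only the AudioChunk and ChunkList interfaces added below; the exampleProducerConsumer function and its arguments are hypothetical.

// Hypothetical sketch, not from the commit: a producer wraps decoded samples
// in an AudioChunk tagged with its format and lossless flag, appends it to a
// duration-bounded ChunkList, and a consumer pulls frames back out.
#import "AudioChunk.h"
#import "ChunkList.h"

static void exampleProducerConsumer(AudioStreamBasicDescription fmt,
                                    const float *samples, size_t frames) {
    AudioChunk *chunk = [[AudioChunk alloc] init];
    [chunk setFormat:fmt];                    // format must be set before samples
    [chunk setLossless:NO];
    [chunk assignSamples:samples frameCount:frames];

    // The list is bounded by duration in seconds, not by byte count.
    ChunkList *list = [[ChunkList alloc] initWithMaximumDuration:3.0];
    [list addChunk:chunk];

    // Consumer side: ask for at most 1024 frames. The returned chunk carries
    // the format it was stored with, so downstream stages (HRIR filter,
    // downmix) can react per chunk instead of waiting for a ring buffer to drain.
    AudioChunk *out = [list removeSamples:1024];
    NSData *data = [out removeSamples:[out frameCount]];
    (void)data; // hand off to the converter/output stage
}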
@@ -0,0 +1,42 @@
//
//  AudioChunk.h
//  CogAudio Framework
//
//  Created by Christopher Snowhill on 2/5/22.
//

#ifndef AudioChunk_h
#define AudioChunk_h

#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudio.h>

NS_ASSUME_NONNULL_BEGIN

@interface AudioChunk : NSObject {
AudioStreamBasicDescription format;
NSMutableData * chunkData;
BOOL formatAssigned;
BOOL lossless;
}

@property AudioStreamBasicDescription format;
@property BOOL lossless;

- (id) init;

- (void) assignSamples:(const void *)data frameCount:(size_t)count;

- (NSData *) removeSamples:(size_t)frameCount;

- (BOOL) isEmpty;

- (size_t) frameCount;

- (double) duration;

@end

NS_ASSUME_NONNULL_END

#endif /* AudioChunk_h */
@@ -0,0 +1,74 @@
//
//  AudioChunk.m
//  CogAudio Framework
//
//  Created by Christopher Snowhill on 2/5/22.
//

#import "AudioChunk.h"

@implementation AudioChunk

- (id) init {
self = [super init];

if (self) {
chunkData = [[NSMutableData alloc] init];
formatAssigned = NO;
lossless = NO;
}

return self;
}

@synthesize lossless;

- (AudioStreamBasicDescription) format {
return format;
}

- (void) setFormat:(AudioStreamBasicDescription)informat {
formatAssigned = YES;
format = informat;
}

- (void) assignSamples:(const void *)data frameCount:(size_t)count {
if (formatAssigned) {
const size_t bytesPerPacket = format.mBytesPerPacket;
[chunkData appendBytes:data length:bytesPerPacket * count];
}
}

- (NSData *) removeSamples:(size_t)frameCount {
if (formatAssigned) {
const size_t bytesPerPacket = format.mBytesPerPacket;
const size_t byteCount = bytesPerPacket * frameCount;
NSData * ret = [chunkData subdataWithRange:NSMakeRange(0, byteCount)];
[chunkData replaceBytesInRange:NSMakeRange(0, byteCount) withBytes:NULL length:0];
return ret;
}
return [NSData data];
}

- (BOOL) isEmpty {
return [chunkData length] == 0;
}

- (size_t) frameCount {
if (formatAssigned) {
const size_t bytesPerPacket = format.mBytesPerPacket;
return [chunkData length] / bytesPerPacket;
}
return 0;
}

- (double) duration {
if (formatAssigned) {
const size_t bytesPerPacket = format.mBytesPerPacket;
const double sampleRate = format.mSampleRate;
return (double)([chunkData length] / bytesPerPacket) / sampleRate;
}
return 0.0;
}

@end
@@ -64,8 +64,14 @@
 return NO;
 
 NSDictionary * properties = [inputNode properties];
 
-if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
+inputFormat = [inputNode nodeFormat];
+
+outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
+outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
+outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
+
+if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
 return NO;
 
 [self setRGInfo:rgi];
@@ -85,8 +91,14 @@
 
 NSDictionary * properties = [inputNode properties];
 
-DLog(@"Input Properties: %@", properties);
-if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
+inputFormat = [inputNode nodeFormat];
+
+outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
+outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
+outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
+
+DLog(@"Input Properties: %@", properties);
+if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
 return NO;
 
 [self setRGInfo:rgi];
@@ -107,7 +119,14 @@
 NSDictionary * properties = [inputNode properties];
 
 DLog(@"Input Properties: %@", properties);
-if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
+
+inputFormat = [inputNode nodeFormat];
+
+outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
+outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
+outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
+
+if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
 return NO;
 
 [self setRGInfo:rgi];
@@ -0,0 +1,42 @@
//
//  ChunkList.h
//  CogAudio Framework
//
//  Created by Christopher Snowhill on 2/5/22.
//

#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudio.h>

#import "AudioChunk.h"

#import "Semaphore.h"

NS_ASSUME_NONNULL_BEGIN

@interface ChunkList : NSObject {
NSMutableArray<AudioChunk *> * chunkList;
double listDuration;
double maxDuration;

BOOL inAdder;
BOOL inRemover;
BOOL stopping;
}

@property (readonly) double listDuration;
@property (readonly) double maxDuration;

- (id) initWithMaximumDuration:(double)duration;

- (void) reset;

- (BOOL) isEmpty;
- (BOOL) isFull;

- (void) addChunk:(AudioChunk *)chunk;
- (AudioChunk *) removeSamples:(size_t)maxFrameCount;

@end

NS_ASSUME_NONNULL_END
@@ -0,0 +1,96 @@
//
//  ChunkList.m
//  CogAudio Framework
//
//  Created by Christopher Snowhill on 2/5/22.
//

#import "ChunkList.h"

@implementation ChunkList

@synthesize listDuration;
@synthesize maxDuration;

- (id) initWithMaximumDuration:(double)duration {
self = [super init];

if (self) {
chunkList = [[NSMutableArray alloc] init];
listDuration = 0.0;
maxDuration = duration;

inAdder = NO;
inRemover = NO;
stopping = NO;
}

return self;
}

- (void) dealloc {
stopping = YES;
while (inAdder || inRemover) {
usleep(500);
}
}

- (void) reset {
@synchronized (chunkList) {
[chunkList removeAllObjects];
listDuration = 0.0;
}
}

- (BOOL) isEmpty {
@synchronized (chunkList) {
return [chunkList count] == 0;
}
}

- (BOOL) isFull {
return listDuration >= maxDuration;
}

- (void) addChunk:(AudioChunk *)chunk {
if (stopping) return;

inAdder = YES;

const double chunkDuration = [chunk duration];

@synchronized(chunkList) {
[chunkList addObject:chunk];
listDuration += chunkDuration;
}

inAdder = NO;
}

- (AudioChunk *) removeSamples:(size_t)maxFrameCount {
if (stopping) {
return [[AudioChunk alloc] init];
}

@synchronized (chunkList) {
inRemover = YES;
if (![chunkList count])
return [[AudioChunk alloc] init];
AudioChunk * chunk = [chunkList objectAtIndex:0];
if ([chunk frameCount] <= maxFrameCount) {
[chunkList removeObjectAtIndex:0];
listDuration -= [chunk duration];
inRemover = NO;
return chunk;
}
NSData * removedData = [chunk removeSamples:maxFrameCount];
AudioChunk * ret = [[AudioChunk alloc] init];
[ret setFormat:[chunk format]];
[ret assignSamples:[removedData bytes] frameCount:maxFrameCount];
listDuration -= [ret duration];
inRemover = NO;
return ret;
}
}

@end
@@ -72,189 +72,11 @@ void PrintStreamDesc (AudioStreamBasicDescription *inDesc)
 hdcd_decoder = NULL;
 
 [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.volumeScaling" options:0 context:nil];
-[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
-[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.hrirPath" options:0 context:nil];
 }
 
 return self;
 }
 
-static const float STEREO_DOWNMIX[8-2][8][2]={
-/*3.0*/
-{
-{0.5858F,0.0F},{0.0F,0.5858F},{0.4142F,0.4142F}
-},
-/*quadrophonic*/
-{
-{0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.336F}
-},
-/*5.0*/
-{
-{0.651F,0.0F},{0.0F,0.651F},{0.46F,0.46F},{0.5636F,0.3254F},
-{0.3254F,0.5636F}
-},
-/*5.1*/
-{
-{0.529F,0.0F},{0.0F,0.529F},{0.3741F,0.3741F},{0.3741F,0.3741F},{0.4582F,0.2645F},
-{0.2645F,0.4582F}
-},
-/*6.1*/
-{
-{0.4553F,0.0F},{0.0F,0.4553F},{0.322F,0.322F},{0.322F,0.322F},{0.2788F,0.2788F},
-{0.3943F,0.2277F},{0.2277F,0.3943F}
-},
-/*7.1*/
-{
-{0.3886F,0.0F},{0.0F,0.3886F},{0.2748F,0.2748F},{0.2748F,0.2748F},{0.3366F,0.1943F},
-{0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F}
-}
-};
-
-static void downmix_to_stereo(float * buffer, int channels, size_t count)
-{
-if (channels >= 3 && channels <= 8)
-for (size_t i = 0; i < count; ++i)
-{
-float left = 0, right = 0;
-for (int j = 0; j < channels; ++j)
-{
-left += buffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][0];
-right += buffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][1];
-}
-buffer[i * 2 + 0] = left;
-buffer[i * 2 + 1] = right;
-}
-}
-
-static void downmix_to_mono(float * buffer, int channels, size_t count)
-{
-if (channels >= 3 && channels <= 8)
-{
-downmix_to_stereo(buffer, channels, count);
-channels = 2;
-}
-float invchannels = 1.0 / (float)channels;
-for (size_t i = 0; i < count; ++i)
-{
-float sample = 0;
-for (int j = 0; j < channels; ++j)
-{
-sample += buffer[i * channels + j];
-}
-buffer[i] = sample * invchannels;
-}
-}
-
-static void upmix(float * buffer, int inchannels, int outchannels, size_t count)
-{
-for (ssize_t i = count - 1; i >= 0; --i)
-{
-if (inchannels == 1 && outchannels == 2)
-{
-// upmix mono to stereo
-float sample = buffer[i];
-buffer[i * 2 + 0] = sample;
-buffer[i * 2 + 1] = sample;
-}
-else if (inchannels == 1 && outchannels == 4)
-{
-// upmix mono to quad
-float sample = buffer[i];
-buffer[i * 4 + 0] = sample;
-buffer[i * 4 + 1] = sample;
-buffer[i * 4 + 2] = 0;
-buffer[i * 4 + 3] = 0;
-}
-else if (inchannels == 1 && (outchannels == 3 || outchannels >= 5))
-{
-// upmix mono to center channel
-float sample = buffer[i];
-buffer[i * outchannels + 2] = sample;
-for (int j = 0; j < 2; ++j)
-{
-buffer[i * outchannels + j] = 0;
-}
-for (int j = 3; j < outchannels; ++j)
-{
-buffer[i * outchannels + j] = 0;
-}
-}
-else if (inchannels == 4 && outchannels >= 5)
-{
-float fl = buffer[i * 4 + 0];
-float fr = buffer[i * 4 + 1];
-float bl = buffer[i * 4 + 2];
-float br = buffer[i * 4 + 3];
-const int skipclfe = (outchannels == 5) ? 1 : 2;
-buffer[i * outchannels + 0] = fl;
-buffer[i * outchannels + 1] = fr;
-buffer[i * outchannels + skipclfe + 2] = bl;
-buffer[i * outchannels + skipclfe + 3] = br;
-for (int j = 0; j < skipclfe; ++j)
-{
-buffer[i * outchannels + 2 + j] = 0;
-}
-for (int j = 4 + skipclfe; j < outchannels; ++j)
-{
-buffer[i * outchannels + j] = 0;
-}
-}
-else if (inchannels == 5 && outchannels >= 6)
-{
-float fl = buffer[i * 5 + 0];
-float fr = buffer[i * 5 + 1];
-float c = buffer[i * 5 + 2];
-float bl = buffer[i * 5 + 3];
-float br = buffer[i * 5 + 4];
-buffer[i * outchannels + 0] = fl;
-buffer[i * outchannels + 1] = fr;
-buffer[i * outchannels + 2] = c;
-buffer[i * outchannels + 3] = 0;
-buffer[i * outchannels + 4] = bl;
-buffer[i * outchannels + 5] = br;
-for (int j = 6; j < outchannels; ++j)
-{
-buffer[i * outchannels + j] = 0;
-}
-}
-else if (inchannels == 7 && outchannels == 8)
-{
-float fl = buffer[i * 7 + 0];
-float fr = buffer[i * 7 + 1];
-float c = buffer[i * 7 + 2];
-float lfe = buffer[i * 7 + 3];
-float sl = buffer[i * 7 + 4];
-float sr = buffer[i * 7 + 5];
-float bc = buffer[i * 7 + 6];
-buffer[i * 8 + 0] = fl;
-buffer[i * 8 + 1] = fr;
-buffer[i * 8 + 2] = c;
-buffer[i * 8 + 3] = lfe;
-buffer[i * 8 + 4] = bc;
-buffer[i * 8 + 5] = bc;
-buffer[i * 8 + 6] = sl;
-buffer[i * 8 + 7] = sr;
-}
-else
-{
-// upmix N channels to N channels plus silence the empty channels
-float samples[inchannels];
-for (int j = 0; j < inchannels; ++j)
-{
-samples[j] = buffer[i * inchannels + j];
-}
-for (int j = 0; j < inchannels; ++j)
-{
-buffer[i * outchannels + j] = samples[j];
-}
-for (int j = inchannels; j < outchannels; ++j)
-{
-buffer[i * outchannels + j] = 0;
-}
-}
-}
-}
-
 void scale_by_volume(float * buffer, size_t count, float volume)
 {
 if ( volume != 1.0 )
@@ -636,7 +458,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 
 -(void)process
 {
 char writeBuf[CHUNK_SIZE];
 
 // Removed endOfStream check from here, since we want to be able to flush the converter
 // when the end of stream is reached. Convert function instead processes what it can,
@@ -665,7 +487,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 }
 else break;
 }
 [self writeData:writeBuf amount:amountConverted];
 }
 }
 
@@ -698,7 +520,7 @@ tryagain:
 BOOL isUnsigned = !isFloat && !(inputFormat.mFormatFlags & kAudioFormatFlagIsSignedInteger);
 
 // Approximately the most we want on input
-ioNumberPackets = (amount - amountRead) / outputFormat.mBytesPerPacket;
+ioNumberPackets = CHUNK_SIZE;
 if (!skipResampler && ioNumberPackets < PRIME_LEN_)
 ioNumberPackets = PRIME_LEN_;
 
@@ -725,9 +547,16 @@ tryagain:
 
 while (bytesReadFromInput < amountToWrite && !stopping && [self shouldContinue] == YES && [self endOfStream] == NO)
 {
-size_t bytesRead = [self readData:inputBuffer + amountToSkip + bytesReadFromInput amount:(int)(amountToWrite - bytesReadFromInput)];
+AudioChunk * chunk = [self readChunk:((amountToWrite - bytesReadFromInput) / inputFormat.mBytesPerPacket)];
+AudioStreamBasicDescription inf = [chunk format];
+size_t frameCount = [chunk frameCount];
+size_t bytesRead = frameCount * inf.mBytesPerPacket;
+if (frameCount) {
+NSData * samples = [chunk removeSamples:frameCount];
+memcpy(inputBuffer + bytesReadFromInput + amountToSkip, [samples bytes], bytesRead);
+}
 bytesReadFromInput += bytesRead;
-if (!bytesRead)
+if (!frameCount)
 {
 if (refillNode)
 [self setEndOfStream:YES];
@@ -1019,32 +848,7 @@ tryagain:
 amountReadFromFC = (int)(outputDone * floatFormat.mBytesPerPacket);
 
 scale_by_volume( (float*) floatBuffer, amountReadFromFC / sizeof(float), volumeScale);
 
-if ( hFilter ) {
-int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
-[hFilter process:floatBuffer sampleCount:samples toBuffer:floatBuffer + amountReadFromFC];
-memmove(floatBuffer, floatBuffer + amountReadFromFC, samples * sizeof(float) * 2);
-amountReadFromFC = samples * sizeof(float) * 2;
-}
-else if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
-{
-int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
-downmix_to_stereo( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );
-amountReadFromFC = samples * sizeof(float) * 2;
-}
-else if ( inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1 )
-{
-int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
-downmix_to_mono( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );
-amountReadFromFC = samples * sizeof(float);
-}
-else if ( inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame )
-{
-int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
-upmix( (float*) floatBuffer, inputFormat.mChannelsPerFrame, outputFormat.mChannelsPerFrame, samples );
-amountReadFromFC = samples * sizeof(float) * outputFormat.mChannelsPerFrame;
-}
-
 floatSize = amountReadFromFC;
 floatOffset = 0;
 }
@@ -1056,6 +860,8 @@ tryagain:
 if (ioNumberPackets > (floatSize - floatOffset))
 ioNumberPackets = (UInt32)(floatSize - floatOffset);
 
+ioNumberPackets -= ioNumberPackets % outputFormat.mBytesPerPacket;
+
 memcpy(dest + amountRead, floatBuffer + floatOffset, ioNumberPackets);
 
 floatOffset += ioNumberPackets;
@@ -1075,15 +881,6 @@ tryagain:
 //User reset the volume scaling option
 [self refreshVolumeScaling];
 }
-else if ([keyPath isEqualToString:@"values.headphoneVirtualization"] ||
-[keyPath isEqualToString:@"values.hrirPath"]) {
-// Reset the converter, without rebuffering
-if (outputFormat.mChannelsPerFrame == 2 &&
-inputFormat.mChannelsPerFrame >= 1 &&
-inputFormat.mChannelsPerFrame <= 8) {
-[self inputFormatDidChange:inputFormat];
-}
-}
 }
 
 static float db_to_scale(float db)
@@ -1141,6 +938,8 @@ static float db_to_scale(float db)
 inputFormat = inf;
 outputFormat = outf;
 
+nodeFormat = outputFormat;
+
 rememberedLossless = lossless;
 
 // These are the only sample formats we support translating
@@ -1192,33 +991,6 @@ static float db_to_scale(float db)
 dmFloatFormat.mBytesPerFrame = (32/8)*dmFloatFormat.mChannelsPerFrame;
 dmFloatFormat.mBytesPerPacket = dmFloatFormat.mBytesPerFrame * floatFormat.mFramesPerPacket;
 
-BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];
-
-if (hVirt &&
-outputFormat.mChannelsPerFrame == 2 &&
-inputFormat.mChannelsPerFrame >= 1 &&
-inputFormat.mChannelsPerFrame <= 8) {
-NSString * userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];
-
-NSURL * presetUrl = nil;
-
-if (userPreset && ![userPreset isEqualToString:@""]) {
-presetUrl = [NSURL fileURLWithPath:userPreset];
-if (![HeadphoneFilter validateImpulseFile:presetUrl])
-presetUrl = nil;
-}
-
-if (!presetUrl) {
-presetUrl = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
-if (![HeadphoneFilter validateImpulseFile:presetUrl])
-presetUrl = nil;
-}
-
-if (presetUrl) {
-hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
-}
-}
-
 skipResampler = outputFormat.mSampleRate == floatFormat.mSampleRate;
 
 sampleRatio = (double)outputFormat.mSampleRate / (double)floatFormat.mSampleRate;
@@ -1271,8 +1043,6 @@ static float db_to_scale(float db)
 DLog(@"Decoder dealloc");
 
 [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.volumeScaling"];
-[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];
-[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.hrirPath"];
 
 paused = NO;
 [self cleanUp];
@@ -1300,16 +1070,16 @@ static float db_to_scale(float db)
 originalPreviousNode = previousNode;
 refillNode = [[RefillNode alloc] initWithController:controller previous:nil];
 [self setPreviousNode:refillNode];
 
-int dataRead = 0;
+[refillNode setFormat:previousOutputFormat];
 
 for (;;)
 {
-void * ptr;
-dataRead = [buffer lengthAvailableToReadReturningPointer:&ptr];
-if (dataRead) {
-[refillNode writeData:(float*)ptr floatCount:dataRead / sizeof(float)];
-[buffer didReadLength:dataRead];
+AudioChunk * chunk = [buffer removeSamples:16384];
+size_t frameCount = [chunk frameCount];
+if (frameCount) {
+NSData * samples = [chunk removeSamples:frameCount];
+[refillNode writeData:[samples bytes] amount:frameCount];
 }
 else
 break;
@@ -1383,7 +1153,7 @@ static float db_to_scale(float db)
 
 - (double) secondsBuffered
 {
-return ((double)[buffer bufferedLength] / (outputFormat.mSampleRate * outputFormat.mBytesPerPacket));
+return [buffer listDuration];
 }
 
 @end
@@ -0,0 +1,26 @@
//
//  Downmix.h
//  Cog
//
//  Created by Christopher Snowhill on 2/05/22.
//  Copyright 2022 __LoSnoCo__. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <CoreAudio/CoreAudio.h>

#import "HeadphoneFilter.h"

@interface DownmixProcessor : NSObject {
HeadphoneFilter *hFilter;

AudioStreamBasicDescription inputFormat;
AudioStreamBasicDescription outputFormat;
}

- (id) initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf;

- (void) process:(const void*)inBuffer frameCount:(size_t)frames output:(void *)outBuffer;

@end
@@ -0,0 +1,303 @@
//
//  Downmix.m
//  Cog
//
//  Created by Christopher Snowhill on 2/05/22.
//  Copyright 2022 __LoSnoCo__. All rights reserved.
//

#import "Downmix.h"

#import "Logging.h"

static const float STEREO_DOWNMIX[8-2][8][2]={
/*3.0*/
{
{0.5858F,0.0F},{0.0F,0.5858F},{0.4142F,0.4142F}
},
/*quadrophonic*/
{
{0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.336F}
},
/*5.0*/
{
{0.651F,0.0F},{0.0F,0.651F},{0.46F,0.46F},{0.5636F,0.3254F},
{0.3254F,0.5636F}
},
/*5.1*/
{
{0.529F,0.0F},{0.0F,0.529F},{0.3741F,0.3741F},{0.3741F,0.3741F},{0.4582F,0.2645F},
{0.2645F,0.4582F}
},
/*6.1*/
{
{0.4553F,0.0F},{0.0F,0.4553F},{0.322F,0.322F},{0.322F,0.322F},{0.2788F,0.2788F},
{0.3943F,0.2277F},{0.2277F,0.3943F}
},
/*7.1*/
{
{0.3886F,0.0F},{0.0F,0.3886F},{0.2748F,0.2748F},{0.2748F,0.2748F},{0.3366F,0.1943F},
{0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F}
}
};

static void downmix_to_stereo(const float * inBuffer, int channels, float * outBuffer, size_t count)
{
if (channels >= 3 && channels <= 8)
for (size_t i = 0; i < count; ++i)
{
float left = 0, right = 0;
for (int j = 0; j < channels; ++j)
{
left += inBuffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][0];
right += inBuffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][1];
}
outBuffer[i * 2 + 0] = left;
outBuffer[i * 2 + 1] = right;
}
}

static void downmix_to_mono(const float * inBuffer, int channels, float * outBuffer, size_t count)
{
float tempBuffer[count * 2];
if (channels >= 3 && channels <= 8)
{
downmix_to_stereo(inBuffer, channels, tempBuffer, count);
inBuffer = tempBuffer;
channels = 2;
}
float invchannels = 1.0 / (float)channels;
for (size_t i = 0; i < count; ++i)
{
float sample = 0;
for (int j = 0; j < channels; ++j)
{
sample += inBuffer[i * channels + j];
}
outBuffer[i] = sample * invchannels;
}
}

static void upmix(const float * inBuffer, int inchannels, float * outBuffer, int outchannels, size_t count)
{
for (ssize_t i = 0; i < count; ++i)
{
if (inchannels == 1 && outchannels == 2)
{
// upmix mono to stereo
float sample = inBuffer[i];
outBuffer[i * 2 + 0] = sample;
outBuffer[i * 2 + 1] = sample;
}
else if (inchannels == 1 && outchannels == 4)
{
// upmix mono to quad
float sample = inBuffer[i];
outBuffer[i * 4 + 0] = sample;
outBuffer[i * 4 + 1] = sample;
outBuffer[i * 4 + 2] = 0;
outBuffer[i * 4 + 3] = 0;
}
else if (inchannels == 1 && (outchannels == 3 || outchannels >= 5))
{
// upmix mono to center channel
float sample = inBuffer[i];
outBuffer[i * outchannels + 2] = sample;
for (int j = 0; j < 2; ++j)
{
outBuffer[i * outchannels + j] = 0;
}
for (int j = 3; j < outchannels; ++j)
{
outBuffer[i * outchannels + j] = 0;
}
}
else if (inchannels == 4 && outchannels >= 5)
{
float fl = inBuffer[i * 4 + 0];
float fr = inBuffer[i * 4 + 1];
float bl = inBuffer[i * 4 + 2];
float br = inBuffer[i * 4 + 3];
const int skipclfe = (outchannels == 5) ? 1 : 2;
outBuffer[i * outchannels + 0] = fl;
outBuffer[i * outchannels + 1] = fr;
outBuffer[i * outchannels + skipclfe + 2] = bl;
outBuffer[i * outchannels + skipclfe + 3] = br;
for (int j = 0; j < skipclfe; ++j)
{
outBuffer[i * outchannels + 2 + j] = 0;
}
for (int j = 4 + skipclfe; j < outchannels; ++j)
{
outBuffer[i * outchannels + j] = 0;
}
}
else if (inchannels == 5 && outchannels >= 6)
{
float fl = inBuffer[i * 5 + 0];
float fr = inBuffer[i * 5 + 1];
float c = inBuffer[i * 5 + 2];
float bl = inBuffer[i * 5 + 3];
float br = inBuffer[i * 5 + 4];
outBuffer[i * outchannels + 0] = fl;
outBuffer[i * outchannels + 1] = fr;
outBuffer[i * outchannels + 2] = c;
outBuffer[i * outchannels + 3] = 0;
outBuffer[i * outchannels + 4] = bl;
outBuffer[i * outchannels + 5] = br;
for (int j = 6; j < outchannels; ++j)
{
outBuffer[i * outchannels + j] = 0;
}
}
else if (inchannels == 7 && outchannels == 8)
{
float fl = inBuffer[i * 7 + 0];
float fr = inBuffer[i * 7 + 1];
float c = inBuffer[i * 7 + 2];
float lfe = inBuffer[i * 7 + 3];
float sl = inBuffer[i * 7 + 4];
float sr = inBuffer[i * 7 + 5];
float bc = inBuffer[i * 7 + 6];
outBuffer[i * 8 + 0] = fl;
outBuffer[i * 8 + 1] = fr;
outBuffer[i * 8 + 2] = c;
outBuffer[i * 8 + 3] = lfe;
outBuffer[i * 8 + 4] = bc;
outBuffer[i * 8 + 5] = bc;
outBuffer[i * 8 + 6] = sl;
outBuffer[i * 8 + 7] = sr;
}
else
{
// upmix N channels to N channels plus silence the empty channels
float samples[inchannels];
for (int j = 0; j < inchannels; ++j)
{
samples[j] = inBuffer[i * inchannels + j];
}
for (int j = 0; j < inchannels; ++j)
{
outBuffer[i * outchannels + j] = samples[j];
}
for (int j = inchannels; j < outchannels; ++j)
{
outBuffer[i * outchannels + j] = 0;
}
}
}
}

@implementation DownmixProcessor

- (id) initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf {
self = [super init];

if (self) {
if (inf.mFormatID != kAudioFormatLinearPCM ||
(inf.mFormatFlags & kAudioFormatFlagsNativeFloatPacked) != kAudioFormatFlagsNativeFloatPacked ||
inf.mBitsPerChannel != 32 ||
inf.mBytesPerFrame != (4 * inf.mChannelsPerFrame) ||
inf.mBytesPerPacket != inf.mFramesPerPacket * inf.mBytesPerFrame)
return nil;

if (outf.mFormatID != kAudioFormatLinearPCM ||
(outf.mFormatFlags & kAudioFormatFlagsNativeFloatPacked) != kAudioFormatFlagsNativeFloatPacked ||
outf.mBitsPerChannel != 32 ||
outf.mBytesPerFrame != (4 * outf.mChannelsPerFrame) ||
outf.mBytesPerPacket != outf.mFramesPerPacket * outf.mBytesPerFrame)
return nil;

inputFormat = inf;
outputFormat = outf;

[self setupVirt];

[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.hrirPath" options:0 context:nil];
}

return self;
}

- (void) dealloc {
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.hrirPath"];
}

- (void) setupVirt {
@synchronized(hFilter) {
hFilter = nil;
}

BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];

if (hVirt &&
outputFormat.mChannelsPerFrame == 2 &&
inputFormat.mChannelsPerFrame >= 1 &&
inputFormat.mChannelsPerFrame <= 8) {
NSString * userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];

NSURL * presetUrl = nil;

if (userPreset && ![userPreset isEqualToString:@""]) {
presetUrl = [NSURL fileURLWithPath:userPreset];
if (![HeadphoneFilter validateImpulseFile:presetUrl])
presetUrl = nil;
}

if (!presetUrl) {
presetUrl = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
if (![HeadphoneFilter validateImpulseFile:presetUrl])
presetUrl = nil;
}

if (presetUrl) {
@synchronized(hFilter) {
hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
}
}
}
}

- (void)observeValueForKeyPath:(NSString *)keyPath
ofObject:(id)object
change:(NSDictionary *)change
context:(void *)context
{
DLog(@"SOMETHING CHANGED!");
if ([keyPath isEqualToString:@"values.headphoneVirtualization"] ||
[keyPath isEqualToString:@"values.hrirPath"]) {
// Reset the converter, without rebuffering
[self setupVirt];
}
}

- (void) process:(const void *)inBuffer frameCount:(size_t)frames output:(void *)outBuffer {
@synchronized (hFilter) {
if ( hFilter ) {
[hFilter process:(const float *) inBuffer sampleCount:frames toBuffer:(float *) outBuffer];
return;
}
}

if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
{
downmix_to_stereo( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, frames );
}
else if ( inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1 )
{
downmix_to_mono( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, frames );
}
else if ( inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame )
{
upmix( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, outputFormat.mChannelsPerFrame, frames );
}
else if ( inputFormat.mChannelsPerFrame == outputFormat.mChannelsPerFrame )
{
memcpy(outBuffer, inBuffer, frames * outputFormat.mBytesPerPacket);
}
}

@end
@@ -51,6 +51,9 @@
 
 bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
 
+nodeFormat = propertiesToASBD(properties);
+nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
+
 shouldContinue = YES;
 shouldSeek = NO;
 
@@ -68,6 +71,9 @@
 
 bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
 
+nodeFormat = propertiesToASBD(properties);
+nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
+
 [self registerObservers];
 
 shouldContinue = YES;
@@ -102,12 +108,10 @@
 DLog(@"SOMETHING CHANGED!");
 if ([keyPath isEqual:@"properties"]) {
 DLog(@"Input format changed");
-// Converter doesn't need resetting for this, as output format hasn't changed
-ConverterNode *converter = [[[controller controller] bufferChain] converter];
-AudioStreamBasicDescription newInputFormat = [[[controller controller] bufferChain] inputFormat];
-AudioStreamBasicDescription oldInputFormat = [converter inputFormat];
-if (memcmp(&oldInputFormat, &newInputFormat, sizeof(oldInputFormat)) != 0)
-[converter inputFormatDidChange:newInputFormat];
+// Converter may need resetting, it'll do that when it reaches the new chunks
+NSDictionary * properties = [decoder properties];
+nodeFormat = propertiesToASBD(properties);
+nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
 }
 else if ([keyPath isEqual:@"metadata"]) {
 //Inform something of metadata change
@@ -254,8 +258,7 @@
 
 - (double) secondsBuffered
 {
-AudioStreamBasicDescription inputFormat = [[[controller controller] bufferChain] inputFormat];
-return ((double)[buffer bufferedLength] / (inputFormat.mSampleRate * inputFormat.mBytesPerPacket));
+return [buffer listDuration];
 }
 
 @end
@@ -7,15 +7,17 @@
 //
 
 #import <Cocoa/Cocoa.h>
-#import "VirtualRingBuffer.h"
+#import "ChunkList.h"
 #import "Semaphore.h"
 
 #define BUFFER_SIZE 1024 * 1024
 #define CHUNK_SIZE 16 * 1024
 
 @interface Node : NSObject {
-VirtualRingBuffer *buffer;
+ChunkList *buffer;
 Semaphore *semaphore;
 
+NSRecursiveLock *accessLock;
+
 id __weak previousNode;
 id __weak controller;
@@ -25,11 +27,14 @@
 BOOL shouldContinue;
 BOOL endOfStream; //All data is now in buffer
 BOOL initialBufferFilled;
 
+AudioStreamBasicDescription nodeFormat;
+BOOL nodeLossless;
 }
 - (id)initWithController:(id)c previous:(id)p;
 
-- (int)writeData:(void *)ptr amount:(int)a;
-- (int)readData:(void *)ptr amount:(int)a;
+- (void)writeData:(const void *)ptr amount:(size_t)a;
+- (AudioChunk *)readChunk:(size_t)maxFrames;
 
 - (void)process; //Should be overwriten by subclass
 - (void)threadEntry:(id)arg;
@@ -45,9 +50,12 @@
 - (BOOL)shouldContinue;
 - (void)setShouldContinue:(BOOL)s;
 
-- (VirtualRingBuffer *)buffer;
+- (ChunkList *)buffer;
 - (void)resetBuffer; //WARNING! DANGER WILL ROBINSON!
 
+- (AudioStreamBasicDescription)nodeFormat;
+- (BOOL)nodeLossless;
+
 - (Semaphore *)semaphore;
 
 //-(void)resetBuffer;
@@ -18,14 +18,18 @@
 self = [super init];
 if (self)
 {
-buffer = [[VirtualRingBuffer alloc] initWithLength:BUFFER_SIZE];
+buffer = [[ChunkList alloc] initWithMaximumDuration:3.0];
 semaphore = [[Semaphore alloc] init];
 
+accessLock = [[NSRecursiveLock alloc] init];
+
 initialBufferFilled = NO;
 
 controller = c;
 endOfStream = NO;
 shouldContinue = YES;
 
+nodeLossless = NO;
+
 [self setPreviousNode:p];
 }
@@ -33,46 +37,50 @@
 return self;
 }
 
-- (int)writeData:(void *)ptr amount:(int)amount
-{
-void *writePtr;
-int amountToCopy, availOutput;
-int amountLeft = amount;
-
-while (shouldContinue == YES && amountLeft > 0)
-{
-availOutput = [buffer lengthAvailableToWriteReturningPointer:&writePtr];
-if (availOutput == 0) {
-if (initialBufferFilled == NO) {
-initialBufferFilled = YES;
-if ([controller respondsToSelector:@selector(initialBufferFilled:)])
-[controller performSelector:@selector(initialBufferFilled:) withObject:self];
-}
-}
-
-if (availOutput == 0 || shouldReset)
-{
-if (availOutput)
-{
-// Unlock the buffer
-[buffer didWriteLength:0];
-}
-[semaphore wait];
-}
-else
-{
-amountToCopy = availOutput;
-if (amountToCopy > amountLeft)
-amountToCopy = amountLeft;
-
-memcpy(writePtr, &((char *)ptr)[amount - amountLeft], amountToCopy);
-[buffer didWriteLength:amountToCopy];
-
-amountLeft -= amountToCopy;
-}
-}
-
-return (amount - amountLeft);
-}
+- (AudioStreamBasicDescription)nodeFormat
+{
+return nodeFormat;
+}
+
+- (BOOL)nodeLossless
+{
+return nodeLossless;
+}
+
+- (void)writeData:(const void *)ptr amount:(size_t)amount
+{
+[accessLock lock];
+
+AudioChunk * chunk = [[AudioChunk alloc] init];
+[chunk setFormat:nodeFormat];
+[chunk setLossless:nodeLossless];
+[chunk assignSamples:ptr frameCount:amount / nodeFormat.mBytesPerPacket];
+
+const double chunkDuration = [chunk duration];
+double durationLeft = [buffer maxDuration] - [buffer listDuration];
+
+while (shouldContinue == YES && chunkDuration > durationLeft)
+{
+if (durationLeft < chunkDuration) {
+if (initialBufferFilled == NO) {
+initialBufferFilled = YES;
+if ([controller respondsToSelector:@selector(initialBufferFilled:)])
+[controller performSelector:@selector(initialBufferFilled:) withObject:self];
+}
+}
+
+if (durationLeft < chunkDuration || shouldReset) {
+[accessLock unlock];
+[semaphore wait];
+[accessLock lock];
+}
+
+durationLeft = [buffer maxDuration] - [buffer listDuration];
+}
+
+[buffer addChunk:chunk];
+
+[accessLock unlock];
+}
 
 //Should be overwriten by subclass.
@@ -87,53 +95,36 @@
 }
 }
 
-- (int)readData:(void *)ptr amount:(int)amount
+- (AudioChunk *)readChunk:(size_t)maxFrames
 {
-void *readPtr;
-int amountToCopy;
-int availInput;
+[accessLock lock];
 
 if ([[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES)
 {
 endOfStream = YES;
-return 0;
+[accessLock unlock];
+return [[AudioChunk alloc] init];
 }
 
-availInput = [[previousNode buffer] lengthAvailableToReadReturningPointer:&readPtr];
-
-/* if (availInput <= 0) {
-DLog(@"BUFFER RAN DRY!");
-}
-else if (availInput < amount) {
-DLog(@"BUFFER IN DANGER");
-}
-*/
-
 if ([previousNode shouldReset] == YES) {
-[buffer empty];
+[buffer reset];
 
 shouldReset = YES;
 [previousNode setShouldReset: NO];
 
 [[previousNode semaphore] signal];
 }
 
-amountToCopy = availInput;
-if (amountToCopy > amount)
-{
-amountToCopy = amount;
-}
-
-memcpy(ptr, readPtr, amountToCopy);
-
-[[previousNode buffer] didReadLength:amountToCopy];
-
-if (amountToCopy > 0)
+AudioChunk * ret = [[previousNode buffer] removeSamples:maxFrames];
+
+[accessLock unlock];
+
+if ([ret frameCount])
 {
 [[previousNode semaphore] signal];
 }
 
-return amountToCopy;
+return ret;
 }
 
 - (void)launchThread
@@ -161,7 +152,7 @@
 shouldContinue = s;
 }
 
-- (VirtualRingBuffer *)buffer
+- (ChunkList *)buffer
 {
 return buffer;
 }
@@ -170,7 +161,9 @@
 {
 shouldReset = YES; //Will reset on next write.
 if (previousNode == nil) {
-[buffer empty];
+[accessLock lock];
+[buffer reset];
+[accessLock unlock];
 }
 }
@@ -32,7 +32,7 @@
 
 - (double)amountPlayed;
 
-- (void)incrementAmountPlayed:(long)count;
+- (void)incrementAmountPlayed:(double)seconds;
 - (void)resetAmountPlayed;
 
 - (void)endOfInputPlayed;
@@ -46,7 +46,7 @@
 - (void)close;
 - (void)seek:(double)time;
 
-- (int)readData:(void *)ptr amount:(int)amount;
+- (AudioChunk *)readChunk:(size_t)amount;
 
 - (void)setFormat:(AudioStreamBasicDescription *)f;
 - (AudioStreamBasicDescription) format;
@@ -54,9 +54,9 @@
 [output resume];
 }
 
-- (void)incrementAmountPlayed:(long)count
+- (void)incrementAmountPlayed:(double)seconds
 {
-amountPlayed += (double)count * sampleRatio;
+amountPlayed += seconds;
 }
 
 - (void)resetAmountPlayed
@@ -76,23 +76,21 @@
 
 - (double)secondsBuffered
 {
-return (double)([buffer bufferedLength]) / (format.mSampleRate * format.mBytesPerPacket);
+return [buffer listDuration];
 }
 
-- (int)readData:(void *)ptr amount:(int)amount
+- (AudioChunk *)readChunk:(size_t)amount
 {
 @autoreleasepool {
-int n;
 [self setPreviousNode:[[controller bufferChain] finalNode]];
 
-n = [super readData:ptr amount:amount];
+AudioChunk * ret = [super readChunk:amount];
 
 /* if (n == 0) {
 DLog(@"Output Buffer dry!");
 }
 */
-
-return n;
+return ret;
 }
 }
@@ -21,6 +21,6 @@
 // This node just slaps pre-converted data into its buffer for re-buffering
 }
 
-- (void)writeData:(float *)data floatCount:(size_t)count;
+- (void) setFormat:(AudioStreamBasicDescription)format;
 
 @end
@@ -19,7 +19,7 @@
 if (self)
 {
 // This special node should be able to handle up to four buffers
-buffer = [[VirtualRingBuffer alloc] initWithLength:BUFFER_SIZE * 4];
+buffer = [[ChunkList alloc] initWithMaximumDuration:12.0];
 semaphore = [[Semaphore alloc] init];
 
 initialBufferFilled = NO;
@@ -27,6 +27,8 @@
 controller = c;
 endOfStream = NO;
 shouldContinue = YES;
 
+nodeLossless = NO;
+
 [self setPreviousNode:p];
 }
@@ -34,15 +36,14 @@
 return self;
 }
 
-- (void)writeData:(float *)data floatCount:(size_t)count
-{
-[self writeData:data amount:(int)(count * sizeof(float))];
-}
-
 - (void)dealloc
 {
 DLog(@"Refill Node dealloc");
 }
 
+- (void)setFormat:(AudioStreamBasicDescription)format
+{
+nodeFormat = format;
+}
+
 @end
@@ -45,6 +45,12 @@
 17F94DDD0B8D101100A34E87 /* Plugin.h in Headers */ = {isa = PBXBuildFile; fileRef = 17F94DDC0B8D101100A34E87 /* Plugin.h */; settings = {ATTRIBUTES = (Public, ); }; };
 8347C7412796C58800FA8A7D /* NSFileHandle+CreateFile.h in Headers */ = {isa = PBXBuildFile; fileRef = 8347C73F2796C58800FA8A7D /* NSFileHandle+CreateFile.h */; };
 8347C7422796C58800FA8A7D /* NSFileHandle+CreateFile.m in Sources */ = {isa = PBXBuildFile; fileRef = 8347C7402796C58800FA8A7D /* NSFileHandle+CreateFile.m */; };
+834FD4EB27AF8F380063BC83 /* AudioChunk.h in Headers */ = {isa = PBXBuildFile; fileRef = 834FD4EA27AF8F380063BC83 /* AudioChunk.h */; };
+834FD4ED27AF91220063BC83 /* AudioChunk.m in Sources */ = {isa = PBXBuildFile; fileRef = 834FD4EC27AF91220063BC83 /* AudioChunk.m */; };
+834FD4F027AF93680063BC83 /* ChunkList.h in Headers */ = {isa = PBXBuildFile; fileRef = 834FD4EE27AF93680063BC83 /* ChunkList.h */; };
+834FD4F127AF93680063BC83 /* ChunkList.m in Sources */ = {isa = PBXBuildFile; fileRef = 834FD4EF27AF93680063BC83 /* ChunkList.m */; };
+834FD4F427AFA2150063BC83 /* Downmix.h in Headers */ = {isa = PBXBuildFile; fileRef = 834FD4F227AFA2150063BC83 /* Downmix.h */; };
+834FD4F527AFA2150063BC83 /* Downmix.m in Sources */ = {isa = PBXBuildFile; fileRef = 834FD4F327AFA2150063BC83 /* Downmix.m */; };
 835C88A82797D4D400E28EAE /* LICENSE.LGPL in Resources */ = {isa = PBXBuildFile; fileRef = 835C88A42797D4D400E28EAE /* LICENSE.LGPL */; };
 835C88A92797D4D400E28EAE /* License.txt in Resources */ = {isa = PBXBuildFile; fileRef = 835C88A52797D4D400E28EAE /* License.txt */; };
 835C88AA2797D4D400E28EAE /* lpc.c in Sources */ = {isa = PBXBuildFile; fileRef = 835C88A62797D4D400E28EAE /* lpc.c */; };
@@ -138,6 +144,12 @@
 		32DBCF5E0370ADEE00C91783 /* CogAudio_Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CogAudio_Prefix.pch; sourceTree = "<group>"; };
 		8347C73F2796C58800FA8A7D /* NSFileHandle+CreateFile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "NSFileHandle+CreateFile.h"; path = "../../Utils/NSFileHandle+CreateFile.h"; sourceTree = "<group>"; };
 		8347C7402796C58800FA8A7D /* NSFileHandle+CreateFile.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = "NSFileHandle+CreateFile.m"; path = "../../Utils/NSFileHandle+CreateFile.m"; sourceTree = "<group>"; };
+		834FD4EA27AF8F380063BC83 /* AudioChunk.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AudioChunk.h; sourceTree = "<group>"; };
+		834FD4EC27AF91220063BC83 /* AudioChunk.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AudioChunk.m; sourceTree = "<group>"; };
+		834FD4EE27AF93680063BC83 /* ChunkList.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ChunkList.h; sourceTree = "<group>"; };
+		834FD4EF27AF93680063BC83 /* ChunkList.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = ChunkList.m; sourceTree = "<group>"; };
+		834FD4F227AFA2150063BC83 /* Downmix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Downmix.h; sourceTree = "<group>"; };
+		834FD4F327AFA2150063BC83 /* Downmix.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Downmix.m; sourceTree = "<group>"; };
 		835C88A42797D4D400E28EAE /* LICENSE.LGPL */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = LICENSE.LGPL; sourceTree = "<group>"; };
 		835C88A52797D4D400E28EAE /* License.txt */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = License.txt; sourceTree = "<group>"; };
 		835C88A62797D4D400E28EAE /* lpc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = lpc.c; sourceTree = "<group>"; };
@@ -284,6 +296,12 @@
 		17D21C750B8BE4BA00D1EBDE /* Chain */ = {
 			isa = PBXGroup;
 			children = (
+				834FD4EA27AF8F380063BC83 /* AudioChunk.h */,
+				834FD4EC27AF91220063BC83 /* AudioChunk.m */,
+				834FD4EE27AF93680063BC83 /* ChunkList.h */,
+				834FD4EF27AF93680063BC83 /* ChunkList.m */,
+				834FD4F227AFA2150063BC83 /* Downmix.h */,
+				834FD4F327AFA2150063BC83 /* Downmix.m */,
 				83A44A00279119B50049B6E2 /* RefillNode.h */,
 				83A449FF279119B50049B6E2 /* RefillNode.m */,
 				17D21C760B8BE4BA00D1EBDE /* BufferChain.h */,
@@ -434,14 +452,17 @@
 				17D21CA70B8BE4BA00D1EBDE /* Node.h in Headers */,
 				17D21CA90B8BE4BA00D1EBDE /* OutputNode.h in Headers */,
 				17D21CC50B8BE4BA00D1EBDE /* OutputCoreAudio.h in Headers */,
+				834FD4F427AFA2150063BC83 /* Downmix.h in Headers */,
 				17D21CC70B8BE4BA00D1EBDE /* Status.h in Headers */,
 				17D21CDF0B8BE5B400D1EBDE /* VirtualRingBuffer.h in Headers */,
 				835C88AB2797D4D400E28EAE /* lpc.h in Headers */,
 				17D21CF30B8BE5EF00D1EBDE /* Semaphore.h in Headers */,
 				17D21DC70B8BE79700D1EBDE /* CoreAudioUtils.h in Headers */,
 				17D21EBD0B8BF44000D1EBDE /* AudioPlayer.h in Headers */,
+				834FD4F027AF93680063BC83 /* ChunkList.h in Headers */,
 				17F94DD50B8D0F7000A34E87 /* PluginController.h in Headers */,
 				17F94DDD0B8D101100A34E87 /* Plugin.h in Headers */,
+				834FD4EB27AF8F380063BC83 /* AudioChunk.h in Headers */,
 				17A2D3C50B8D1D37000778C4 /* AudioDecoder.h in Headers */,
 				8347C7412796C58800FA8A7D /* NSFileHandle+CreateFile.h in Headers */,
 				17C940230B900909008627D6 /* AudioMetadataReader.h in Headers */,
@@ -539,9 +560,11 @@
 				17D21CA60B8BE4BA00D1EBDE /* InputNode.m in Sources */,
 				17D21CA80B8BE4BA00D1EBDE /* Node.m in Sources */,
 				17D21CAA0B8BE4BA00D1EBDE /* OutputNode.m in Sources */,
+				834FD4F527AFA2150063BC83 /* Downmix.m in Sources */,
 				17D21CC60B8BE4BA00D1EBDE /* OutputCoreAudio.m in Sources */,
 				17D21CE00B8BE5B400D1EBDE /* VirtualRingBuffer.m in Sources */,
 				835C88B2279811A500E28EAE /* hdcd_decode2.c in Sources */,
+				834FD4ED27AF91220063BC83 /* AudioChunk.m in Sources */,
 				17D21CF40B8BE5EF00D1EBDE /* Semaphore.m in Sources */,
 				8347C7422796C58800FA8A7D /* NSFileHandle+CreateFile.m in Sources */,
 				17D21DC80B8BE79700D1EBDE /* CoreAudioUtils.m in Sources */,
@@ -554,6 +577,7 @@
 				17B619310B909BC300BC003F /* AudioPropertiesReader.m in Sources */,
 				17ADB13D0B97926D00257CA2 /* AudioSource.m in Sources */,
 				83A44A01279119B50049B6E2 /* RefillNode.m in Sources */,
+				834FD4F127AF93680063BC83 /* ChunkList.m in Sources */,
 				8EC122600B993BD500C5B3AD /* ConverterNode.m in Sources */,
 				8E8D3D300CBAEE6E00135C1B /* AudioContainer.m in Sources */,
 				B0575F300D687A4000411D77 /* Helper.m in Sources */,
@@ -17,6 +17,8 @@
 
 #import <stdatomic.h>
 
+#import "Downmix.h"
+
 #import "Semaphore.h"
 
 //#define OUTPUT_LOG
@@ -42,6 +44,8 @@
 
 	BOOL eqEnabled;
 
+	BOOL streamFormatStarted;
+
 	atomic_long bytesRendered;
 	atomic_long bytesHdcdSustained;
 
@@ -54,12 +58,15 @@
 
 	AudioDeviceID outputDeviceID;
 	AudioStreamBasicDescription deviceFormat; // info about the default device
+	AudioStreamBasicDescription streamFormat; // stream format last seen in render callback
 
 	AUAudioUnit *_au;
 	size_t _bufferSize;
 
 	AudioUnit _eq;
 
+	DownmixProcessor * downmixer;
+
 #ifdef OUTPUT_LOG
 	FILE *_logFile;
 #endif
@@ -62,7 +62,7 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
 	const int channels = _self->deviceFormat.mChannelsPerFrame;
 	const int bytesPerPacket = channels * sizeof(float);
 
-	int amountToRead, amountRead = 0;
+	size_t amountToRead, amountRead = 0;
 
 	amountToRead = inNumberFrames * bytesPerPacket;
 
@@ -82,44 +82,59 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
 		atomic_fetch_add(&_self->bytesRendered, amountToRead);
 		return 0;
 	}
 
-	void * readPtr;
-	int toRead = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
-
-	if (toRead > amountToRead)
-		toRead = amountToRead;
-
-	if (toRead) {
-		fillBuffers(ioData, (float*)readPtr, toRead / bytesPerPacket, 0);
-		amountRead = toRead;
-		[[_self->outputController buffer] didReadLength:toRead];
-		[_self->outputController incrementAmountPlayed:amountRead];
+	AudioChunk * chunk = [[_self->outputController buffer] removeSamples:(amountToRead / bytesPerPacket)];
+
+	size_t frameCount = [chunk frameCount];
+	AudioStreamBasicDescription format = [chunk format];
+
+	if (frameCount) {
+		if (!_self->streamFormatStarted || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
+			_self->streamFormat = format;
+			_self->streamFormatStarted = YES;
+			_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format andOutputFormat:_self->deviceFormat];
+		}
+
+		double chunkDuration = [chunk duration];
+
+		NSData * samples = [chunk removeSamples:frameCount];
+
+		float downmixedData[frameCount * channels];
+		[_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
+
+		fillBuffers(ioData, downmixedData, frameCount, 0);
+		amountRead = frameCount * bytesPerPacket;
+		[_self->outputController incrementAmountPlayed:chunkDuration];
 		atomic_fetch_add(&_self->bytesRendered, amountRead);
 		[_self->writeSemaphore signal];
 	}
-	else
-		[[_self->outputController buffer] didReadLength:0];
 
 	// Try repeatedly! Buffer wraps can cause a slight data shortage, as can
 	// unexpected track changes.
 	while ((amountRead < amountToRead) && [_self->outputController shouldContinue] == YES)
 	{
-		int amountRead2; //Use this since return type of readdata isnt known...may want to fix then can do a simple += to readdata
-		amountRead2 = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
-		if (amountRead2 > (amountToRead - amountRead))
-			amountRead2 = amountToRead - amountRead;
-		if (amountRead2) {
-			atomic_fetch_add(&_self->bytesRendered, amountRead2);
-			fillBuffers(ioData, (float*)readPtr, amountRead2 / bytesPerPacket, amountRead / bytesPerPacket);
-			[[_self->outputController buffer] didReadLength:amountRead2];
-
-			[_self->outputController incrementAmountPlayed:amountRead2];
-
-			amountRead += amountRead2;
+		chunk = [[_self->outputController buffer] removeSamples:((amountToRead - amountRead) / bytesPerPacket)];
+		frameCount = [chunk frameCount];
+		format = [chunk format];
+		if (frameCount) {
+			if (!_self->streamFormatStarted || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
+				_self->streamFormat = format;
+				_self->streamFormatStarted = YES;
+				_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format andOutputFormat:_self->deviceFormat];
+			}
+			atomic_fetch_add(&_self->bytesRendered, frameCount * bytesPerPacket);
+			double chunkDuration = [chunk duration];
+			NSData * samples = [chunk removeSamples:frameCount];
+			float downmixedData[frameCount * channels];
+			[_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
+			fillBuffers(ioData, downmixedData, frameCount, amountRead / bytesPerPacket);
+
+			[_self->outputController incrementAmountPlayed:chunkDuration];
+
+			amountRead += frameCount * bytesPerPacket;
 			[_self->writeSemaphore signal];
 		}
 		else {
-			[[_self->outputController buffer] didReadLength:0];
 			[_self->readSemaphore timedWait:500];
 		}
 	}
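Reviewer note: the render callback now pulls whole chunks, rebuilds the downmixer whenever a chunk arrives in a different format than the last one seen, and downmixes into the device layout before filling the output buffers; playback time is advanced by the chunk's duration rather than a byte count. A condensed, hedged sketch of that per-chunk step follows; DownmixProcessor and the chunk accessors are the ones used above, while fillBuffers stands in for the file-local helper and its prototype here is an assumption.

#import <string.h>
#import <CoreAudio/CoreAudio.h>
#import "AudioChunk.h"
#import "Downmix.h"

// Assumed prototype for the helper used by the callback above: copies
// frameCount interleaved float frames into ioData starting at frameOffset.
void fillBuffers(AudioBufferList *ioData, const float *data,
                 size_t frameCount, size_t frameOffset);

static AudioStreamBasicDescription streamFormat; // last format seen
static BOOL streamFormatStarted = NO;
static DownmixProcessor *downmixer = nil;

// Render one chunk; returns the number of frames written to ioData.
static size_t renderOneChunk(AudioChunk *chunk,
                             AudioStreamBasicDescription deviceFormat,
                             AudioBufferList *ioData, size_t frameOffset) {
	size_t frameCount = [chunk frameCount];
	if (!frameCount) return 0;

	AudioStreamBasicDescription format = [chunk format];
	// Only rebuild the downmixer when the incoming format actually changes.
	if (!streamFormatStarted || memcmp(&streamFormat, &format, sizeof(format)) != 0) {
		streamFormat = format;
		streamFormatStarted = YES;
		downmixer = [[DownmixProcessor alloc] initWithInputFormat:format
		                                          andOutputFormat:deviceFormat];
	}

	NSData *samples = [chunk removeSamples:frameCount]; // raw interleaved input
	float downmixed[frameCount * deviceFormat.mChannelsPerFrame];
	[downmixer process:[samples bytes] frameCount:frameCount output:downmixed];

	fillBuffers(ioData, downmixed, frameCount, frameOffset);
	return frameCount;
}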
@@ -165,6 +180,8 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
 	started = NO;
 	stopNext = NO;
 
+	streamFormatStarted = NO;
+
 	atomic_init(&bytesRendered, 0);
 	atomic_init(&bytesHdcdSustained, 0);
 
@@ -223,7 +240,7 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 			}
 
 			if ([outputController shouldReset]) {
-				[[outputController buffer] empty];
+				[[outputController buffer] reset];
 				[outputController setShouldReset:NO];
 				[delayedEvents removeAllObjects];
 				delayedEventsPopped = YES;
@@ -231,7 +248,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 
 			while ([delayedEvents count]) {
 				size_t localBytesRendered = atomic_load_explicit(&bytesRendered, memory_order_relaxed);
-				if (localBytesRendered >= [[delayedEvents objectAtIndex:0] longValue]) {
+				double secondsRendered = (double)localBytesRendered / (double)(deviceFormat.mBytesPerPacket * deviceFormat.mSampleRate);
+				if (secondsRendered >= [[delayedEvents objectAtIndex:0] doubleValue]) {
 					if ([outputController chainQueueHasTracks])
 						delayedEventsPopped = YES;
 					[self signalEndOfStream];
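Reviewer note: delayed end-of-stream events are now queued in seconds, so the rendered byte tally has to pass through the device format before the comparison. A small self-contained example of that conversion, using the same expression as the hunk above; the 44.1 kHz stereo float figures in the comment are illustrative only.

#import <CoreAudio/CoreAudio.h>

// bytes rendered -> seconds rendered, as in the delayed-event check above.
static double secondsRendered(size_t bytesRendered,
                              const AudioStreamBasicDescription *deviceFormat) {
	return (double)bytesRendered /
	       (double)(deviceFormat->mBytesPerPacket * deviceFormat->mSampleRate);
}

// Example: 44.1 kHz stereo float32 gives mBytesPerPacket = 8, so
// secondsRendered(352800, &fmt) == 1.0.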
@@ -242,22 +260,24 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 
 			if (stopping)
 				break;
 
-			void *writePtr;
-			int toWrite = [[outputController buffer] lengthAvailableToWriteReturningPointer:&writePtr];
-			int bytesRead = 0;
-			if (toWrite > CHUNK_SIZE)
-				toWrite = CHUNK_SIZE;
-			if (toWrite)
-				bytesRead = [outputController readData:writePtr amount:toWrite];
-			[[outputController buffer] didWriteLength:bytesRead];
-			if (bytesRead) {
+			size_t frameCount = 0;
+
+			if (![[outputController buffer] isFull]) {
+				AudioChunk * chunk = [outputController readChunk:512];
+				frameCount = [chunk frameCount];
+				if (frameCount) {
+					[[outputController buffer] addChunk:chunk];
+				}
+			}
+
+			if (frameCount) {
 				[readSemaphore signal];
 				continue;
 			}
 			else if ([outputController shouldContinue] == NO)
 				break;
-			else if (!toWrite) {
+			else if ([[outputController buffer] isFull]) {
 				if (!started) {
 					started = YES;
 					if (!paused) {
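Reviewer note: the producer side of the loop changes from "reserve ring-buffer space, read bytes, commit the length written" to "if the chunk list is not full, pull one chunk of up to 512 frames from the chain and append it". A hedged sketch of one pass of that fill step, reusing the same stand-in ChunkSource protocol as the earlier sketch; the 512-frame request matches the hunk above.

#import <Foundation/Foundation.h>
#import "AudioChunk.h"
#import "ChunkList.h"

// Stand-in for the object the output thread reads from (the outputController
// in the diff); declared here only so the sketch is self-contained.
@protocol ChunkSource <NSObject>
- (AudioChunk *)readChunk:(size_t)amount;
@end

// One pass of the fill step: returns the number of frames appended, 0 if the
// buffer was already full or no data was available yet.
static size_t fillOnce(id<ChunkSource> source, ChunkList *buffer) {
	if ([buffer isFull])
		return 0; // holding the maximum buffered duration already

	AudioChunk *chunk = [source readChunk:512];
	size_t frameCount = [chunk frameCount];
	if (frameCount)
		[buffer addChunk:chunk]; // format and duration travel with the chunk

	return frameCount;
}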
@@ -270,20 +290,21 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 
 			// End of input possibly reached
 			if (delayedEventsPopped && [outputController endOfStream] == YES)
 			{
-				long bytesBuffered = [[outputController buffer] bufferedLength];
-				bytesBuffered += atomic_load_explicit(&bytesRendered, memory_order_relaxed);
+				double secondsBuffered = [[outputController buffer] listDuration];
+				size_t _bytesRendered = atomic_load_explicit(&bytesRendered, memory_order_relaxed);
+				secondsBuffered += (double)_bytesRendered / (double)(deviceFormat.mBytesPerPacket * deviceFormat.mSampleRate);
 				if ([outputController chainQueueHasTracks])
 				{
-					if (bytesBuffered < CHUNK_SIZE / 2)
-						bytesBuffered = 0;
+					if (secondsBuffered <= 0.005)
+						secondsBuffered = 0.0;
 					else
-						bytesBuffered -= CHUNK_SIZE / 2;
+						secondsBuffered -= 0.005;
 				}
 				else {
 					stopNext = YES;
 					break;
 				}
-				[delayedEvents addObject:[NSNumber numberWithLong:bytesBuffered]];
+				[delayedEvents addObject:[NSNumber numberWithDouble:secondsBuffered]];
 				delayedEventsPopped = NO;
 				if (!started) {
 					started = YES;
@@ -477,8 +498,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 	NSError *err;
 	AVAudioFormat *renderFormat;
 
-	[outputController incrementAmountPlayed:[[outputController buffer] bufferedLength]];
-	[[outputController buffer] empty];
+	[outputController incrementAmountPlayed:[[outputController buffer] listDuration]];
+	[[outputController buffer] reset];
 
 	_deviceFormat = format;
 	deviceFormat = *(format.streamDescription);
@@ -562,6 +583,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 	paused = NO;
 	stopNext = NO;
 	outputDeviceID = -1;
 
+	downmixer = nil;
+
 	AudioComponentDescription desc;
 	NSError *err;
@@ -668,6 +691,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 
 		return 0;
 	};
 
+	[_au setMaximumFramesToRender:512];
+
 	UInt32 value;
 	UInt32 size = sizeof(value);
@@ -781,6 +806,10 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 		[outputController endEqualizer:_eq];
 		_eq = NULL;
 	}
+	if (downmixer)
+	{
+		downmixer = nil;
+	}
 #ifdef OUTPUT_LOG
 	if (_logFile)
 	{