Cog Audio: Implement support for channel config

This implements the basic output and mixing support for channel config
bits, optionally set by the input plugin.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
CQTexperiment
Christopher Snowhill 2022-02-07 00:56:05 -08:00
parent b0b1446aa7
commit 1ef8df675f
21 changed files with 590 additions and 194 deletions

View File

@ -83,7 +83,7 @@
bufferChain = [[BufferChain alloc] initWithController:self];
[self notifyStreamChanged:userInfo];
while(![bufferChain open:url withOutputFormat:[output format] withRGInfo:rgi]) {
while(![bufferChain open:url withOutputFormat:[output format] withOutputConfig:[output config] withRGInfo:rgi]) {
bufferChain = nil;
[self requestNextStream:userInfo];
@ -353,7 +353,7 @@
}
if(pathsEqual || ([[nextStream scheme] isEqualToString:[[lastChain streamURL] scheme]] && (([nextStream host] == nil && [[lastChain streamURL] host] == nil) || [[nextStream host] isEqualToString:[[lastChain streamURL] host]]) && [[nextStream path] isEqualToString:[[lastChain streamURL] path]])) {
if([lastChain setTrack:nextStream] && [newChain openWithInput:[lastChain inputNode] withOutputFormat:[output format] withRGInfo:nextStreamRGInfo]) {
if([lastChain setTrack:nextStream] && [newChain openWithInput:[lastChain inputNode] withOutputFormat:[output format] withOutputConfig:[output config] withRGInfo:nextStreamRGInfo]) {
[newChain setStreamURL:nextStream];
[newChain setUserInfo:nextStreamUserInfo];
@ -369,7 +369,7 @@
lastChain = nil;
while(shouldContinue && ![newChain open:nextStream withOutputFormat:[output format] withRGInfo:nextStreamRGInfo]) {
while(shouldContinue && ![newChain open:nextStream withOutputFormat:[output format] withOutputConfig:[output config] withRGInfo:nextStreamRGInfo]) {
if(nextStream == nil) {
newChain = nil;
atomic_fetch_sub(&refCount, 1);

View File

@ -13,16 +13,72 @@
NS_ASSUME_NONNULL_BEGIN
// Speaker-position bit flags, plus common channel-layout presets built
// from them. An input plugin may report any combination of these bits as
// its channel config; 0 means "unknown — derive a layout from the channel
// count" (see +[AudioChunk guessChannelConfig:]).
// NOTE(review): the bit order appears to follow the WAVEFORMATEXTENSIBLE /
// FFmpeg channel-mask convention (FL, FR, FC, LFE, BL, BR, ...) — confirm
// against the decoders that produce these masks.
enum {
// Individual speaker positions — one bit per channel slot.
AudioChannelFrontLeft = 1 << 0,
AudioChannelFrontRight = 1 << 1,
AudioChannelFrontCenter = 1 << 2,
AudioChannelLFE = 1 << 3,
AudioChannelBackLeft = 1 << 4,
AudioChannelBackRight = 1 << 5,
AudioChannelFrontCenterLeft = 1 << 6,
AudioChannelFrontCenterRight = 1 << 7,
AudioChannelBackCenter = 1 << 8,
AudioChannelSideLeft = 1 << 9,
AudioChannelSideRight = 1 << 10,
AudioChannelTopCenter = 1 << 11,
AudioChannelTopFrontLeft = 1 << 12,
AudioChannelTopFrontCenter = 1 << 13,
AudioChannelTopFrontRight = 1 << 14,
AudioChannelTopBackLeft = 1 << 15,
AudioChannelTopBackCenter = 1 << 16,
AudioChannelTopBackRight = 1 << 17,
// Canonical full layouts for common channel counts.
AudioConfigMono = AudioChannelFrontCenter,
AudioConfigStereo = AudioChannelFrontLeft | AudioChannelFrontRight,
AudioConfig3Point0 = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelFrontCenter,
AudioConfig4Point0 = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelBackLeft | AudioChannelBackRight,
AudioConfig5Point0 = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelFrontCenter | AudioChannelBackLeft |
AudioChannelBackRight,
AudioConfig5Point1 = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelFrontCenter | AudioChannelLFE |
AudioChannelBackLeft | AudioChannelBackRight,
// 5.1 variant using the side pair instead of the back pair.
AudioConfig5Point1Side = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelFrontCenter | AudioChannelLFE |
AudioChannelSideLeft | AudioChannelSideRight,
AudioConfig6Point1 = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelFrontCenter | AudioChannelLFE |
AudioChannelBackCenter | AudioChannelSideLeft |
AudioChannelSideRight,
AudioConfig7Point1 = AudioChannelFrontLeft | AudioChannelFrontRight |
AudioChannelFrontCenter | AudioChannelLFE |
AudioChannelBackLeft | AudioChannelBackRight |
AudioChannelSideLeft | AudioChannelSideRight,
// Convenience pairs for mixers that treat L/R pairs as a unit.
AudioChannelsBackLeftRight = AudioChannelBackLeft | AudioChannelBackRight,
AudioChannelsSideLeftRight = AudioChannelSideLeft | AudioChannelSideRight,
};
@interface AudioChunk : NSObject {
AudioStreamBasicDescription format;
NSMutableData *chunkData;
uint32_t channelConfig;
BOOL formatAssigned;
BOOL lossless;
}
@property AudioStreamBasicDescription format;
@property uint32_t channelConfig;
@property BOOL lossless;
+ (uint32_t)guessChannelConfig:(uint32_t)channelCount;
+ (uint32_t)channelIndexFromConfig:(uint32_t)channelConfig forFlag:(uint32_t)flag;
+ (uint32_t)extractChannelFlag:(uint32_t)index fromConfig:(uint32_t)channelConfig;
+ (uint32_t)countChannels:(uint32_t)channelConfig;
+ (uint32_t)findChannelIndex:(uint32_t)flag;
- (id)init;
- (void)assignSamples:(const void *)data frameCount:(size_t)count;

View File

@ -21,6 +21,86 @@
return self;
}
// Default channel mask for each channel count, indexed by count (entry 0
// and entry 9 are 0 — no canonical layout — and guessChannelConfig: falls
// back to a contiguous low-bit mask for them). Entry 10 is 7.1 plus the
// front center-left/right pair.
static const uint32_t AudioChannelConfigTable[] = {
0,
AudioConfigMono,
AudioConfigStereo,
AudioConfig3Point0,
AudioConfig4Point0,
AudioConfig5Point0,
AudioConfig5Point1,
AudioConfig6Point1,
AudioConfig7Point1,
0,
AudioConfig7Point1 | AudioChannelFrontCenterLeft | AudioChannelFrontCenterRight
};
// Derives a default channel bit mask from a bare channel count.
// Known counts map through AudioChannelConfigTable; anything else gets a
// contiguous mask of the low `channelCount` bits. Returns 0 for a count of
// zero or above 32 (unsupported).
+ (uint32_t)guessChannelConfig:(uint32_t)channelCount {
	if(channelCount == 0) return 0;
	if(channelCount > 32) return 0;
	uint32_t ret = 0;
	if(channelCount < (sizeof(AudioChannelConfigTable) / sizeof(AudioChannelConfigTable[0])))
		ret = AudioChannelConfigTable[channelCount];
	if(!ret) {
		// Shift in 64 bits: (1 << n) is undefined for n >= 31 on a 32-bit
		// signed int, and channelCount may legitimately be 31 or 32 here.
		ret = (uint32_t)((1ULL << channelCount) - 1);
	}
	assert([AudioChunk countChannels:ret] == channelCount);
	return ret;
}
// Returns the interleaved-sample index of the channel identified by `flag`
// within the layout `channelConfig` (i.e. how many lower config bits are
// set below the flag's bit), or ~0 if the layout does not contain that
// channel at all.
+ (uint32_t)channelIndexFromConfig:(uint32_t)channelConfig forFlag:(uint32_t)flag {
	// Without this guard, a flag absent from the config still returned the
	// count of lower set bits — callers (e.g. upmix) test for ~0 and would
	// write the sample into another channel's slot, or past the end of the
	// output frame.
	if(!(channelConfig & flag))
		return ~0;
	uint32_t index = 0;
	for(uint32_t walk = 0; walk < 32; ++walk) {
		uint32_t query = 1 << walk;
		if(flag & query) return index;
		if(channelConfig & query) ++index;
	}
	return ~0;
}
// Returns the single-bit flag of the index-th set channel in
// `channelConfig` (counting from the lowest bit), or 0 when `index` is
// out of range for the layout.
+ (uint32_t)extractChannelFlag:(uint32_t)index fromConfig:(uint32_t)channelConfig {
	uint32_t remaining = index;
	for(uint32_t flag = 1; flag != 0; flag <<= 1) {
		// Only set bits count toward the channel index.
		if(!(channelConfig & flag)) continue;
		if(remaining == 0) return flag;
		--remaining;
	}
	// The shift walked off the top of the word without finding the channel.
	return 0;
}
// Number of channels in the layout = number of set bits in the mask.
+ (uint32_t)countChannels:(uint32_t)channelConfig {
	uint32_t total = 0;
	// Kernighan's trick: each step clears the lowest set bit.
	while(channelConfig) {
		channelConfig &= channelConfig - 1;
		++total;
	}
	return total;
}
// Returns the bit position of the lowest set bit of `flag` (0-based), i.e.
// the absolute channel slot a single-bit channel flag names. `flag` must be
// non-zero; in release builds a zero flag yields 31, matching the previous
// hand-rolled implementation's fallthrough.
+ (uint32_t)findChannelIndex:(uint32_t)flag {
	assert(flag != 0);
	if(flag == 0) return 31;
	// Count-trailing-zeros replaces the manual binary search; the file
	// already relies on the companion __builtin_popcount intrinsic.
	return (uint32_t)__builtin_ctz(flag);
}
@synthesize lossless;
- (AudioStreamBasicDescription)format {
@ -30,6 +110,20 @@
// Stores the stream format and marks it assigned. The channel config is
// defaulted from the format's channel count; callers may override it
// afterwards via setChannelConfig:.
- (void)setFormat:(AudioStreamBasicDescription)informat {
	format = informat;
	formatAssigned = YES;
	channelConfig = [AudioChunk guessChannelConfig:format.mChannelsPerFrame];
}
// Channel layout bit mask for this chunk's samples (see the channel flag
// enum); defaulted by setFormat: when not explicitly assigned.
- (uint32_t)channelConfig {
return channelConfig;
}
// Assigns the chunk's channel layout. A config of 0 means "unspecified";
// once a format is known, that is replaced by a layout guessed from the
// format's channel count.
- (void)setChannelConfig:(uint32_t)config {
	if(config == 0 && formatAssigned) {
		config = [AudioChunk guessChannelConfig:format.mChannelsPerFrame];
	}
	channelConfig = config;
}
- (void)assignSamples:(const void *)data frameCount:(size_t)count {

View File

@ -17,6 +17,7 @@
ConverterNode *converterNode;
AudioStreamBasicDescription inputFormat;
uint32_t inputChannelConfig;
NSURL *streamURL;
id userInfo;
@ -30,14 +31,15 @@
- (id)initWithController:(id)c;
- (void)buildChain;
- (BOOL)open:(NSURL *)url withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi;
- (BOOL)open:(NSURL *)url withOutputFormat:(AudioStreamBasicDescription)outputFormat withOutputConfig:(uint32_t)outputConfig withRGInfo:(NSDictionary *)rgi;
// Used when changing tracks to reuse the same decoder
- (BOOL)openWithInput:(InputNode *)i withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi;
- (BOOL)openWithInput:(InputNode *)i withOutputFormat:(AudioStreamBasicDescription)outputFormat withOutputConfig:(uint32_t)outputConfig withRGInfo:(NSDictionary *)rgi;
// Used when resetting the decoder on seek
- (BOOL)openWithDecoder:(id<CogDecoder>)decoder
withOutputFormat:(AudioStreamBasicDescription)outputFormat
withOutputConfig:(uint32_t)outputConfig
withRGInfo:(NSDictionary *)rgi;
- (void)seek:(double)time;
@ -64,7 +66,7 @@
- (BOOL)endOfInputReached;
- (BOOL)setTrack:(NSURL *)track;
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format;
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig;
- (BOOL)isRunning;
@ -72,6 +74,7 @@
- (ConverterNode *)converter;
- (AudioStreamBasicDescription)inputFormat;
- (uint32_t)inputConfig;
- (double)secondsBuffered;

View File

@ -40,7 +40,7 @@
finalNode = converterNode;
}
- (BOOL)open:(NSURL *)url withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi {
- (BOOL)open:(NSURL *)url withOutputFormat:(AudioStreamBasicDescription)outputFormat withOutputConfig:(uint32_t)outputConfig withRGInfo:(NSDictionary *)rgi {
[self setStreamURL:url];
[self buildChain];
@ -61,12 +61,16 @@
NSDictionary *properties = [inputNode properties];
inputFormat = [inputNode nodeFormat];
if([properties valueForKey:@"channelConfig"])
inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
if(![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
outputConfig = inputChannelConfig;
if(![converterNode setupWithInputFormat:inputFormat withInputConfig:inputChannelConfig outputFormat:outputFormat outputConfig:outputConfig isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
return NO;
[self setRGInfo:rgi];
@ -76,7 +80,7 @@
return YES;
}
- (BOOL)openWithInput:(InputNode *)i withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi {
- (BOOL)openWithInput:(InputNode *)i withOutputFormat:(AudioStreamBasicDescription)outputFormat withOutputConfig:(uint32_t)outputConfig withRGInfo:(NSDictionary *)rgi {
DLog(@"New buffer chain!");
[self buildChain];
@ -86,13 +90,17 @@
NSDictionary *properties = [inputNode properties];
inputFormat = [inputNode nodeFormat];
if([properties valueForKey:@"channelConfig"])
inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
outputConfig = inputChannelConfig;
DLog(@"Input Properties: %@", properties);
if(![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
if(![converterNode setupWithInputFormat:inputFormat withInputConfig:inputChannelConfig outputFormat:outputFormat outputConfig:outputConfig isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
return NO;
[self setRGInfo:rgi];
@ -102,6 +110,7 @@
- (BOOL)openWithDecoder:(id<CogDecoder>)decoder
withOutputFormat:(AudioStreamBasicDescription)outputFormat
withOutputConfig:(uint32_t)outputConfig
withRGInfo:(NSDictionary *)rgi;
{
DLog(@"New buffer chain!");
@ -115,12 +124,16 @@
DLog(@"Input Properties: %@", properties);
inputFormat = [inputNode nodeFormat];
if([properties valueForKey:@"channelConfig"])
inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
if(![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
outputConfig = inputChannelConfig;
if(![converterNode setupWithInputFormat:inputFormat withInputConfig:inputChannelConfig outputFormat:outputFormat outputConfig:outputConfig isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
return NO;
[self setRGInfo:rgi];
@ -180,7 +193,7 @@
[controller launchOutputThread];
}
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format {
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig {
DLog(@"FORMAT DID CHANGE!");
}
@ -225,6 +238,10 @@
return inputFormat;
}
- (uint32_t)inputConfig {
return inputChannelConfig;
}
- (double)secondsBuffered {
double duration = 0.0;
OutputNode *outputNode = (OutputNode *)[controller output];

View File

@ -86,6 +86,7 @@
NSData *removedData = [chunk removeSamples:maxFrameCount];
AudioChunk *ret = [[AudioChunk alloc] init];
[ret setFormat:[chunk format]];
[ret setChannelConfig:[chunk channelConfig]];
[ret assignSamples:[removedData bytes] frameCount:maxFrameCount];
listDuration -= [ret duration];
inRemover = NO;

View File

@ -68,8 +68,13 @@
AudioStreamBasicDescription dmFloatFormat; // downmixed/upmixed float format
AudioStreamBasicDescription outputFormat;
uint32_t inputChannelConfig;
uint32_t outputChannelConfig;
AudioStreamBasicDescription previousOutputFormat;
uint32_t previousOutputConfig;
AudioStreamBasicDescription rememberedInputFormat;
uint32_t rememberedInputConfig;
RefillNode *refillNode;
id __weak originalPreviousNode;
@ -82,7 +87,7 @@
- (id)initWithController:(id)c previous:(id)p;
- (BOOL)setupWithInputFormat:(AudioStreamBasicDescription)inputFormat outputFormat:(AudioStreamBasicDescription)outputFormat isLossless:(BOOL)lossless;
- (BOOL)setupWithInputFormat:(AudioStreamBasicDescription)inputFormat withInputConfig:(uint32_t)inputConfig outputFormat:(AudioStreamBasicDescription)outputFormat outputConfig:(uint32_t)outputConfig isLossless:(BOOL)lossless;
- (void)cleanUp;
- (void)process;
@ -90,9 +95,9 @@
- (void)setRGInfo:(NSDictionary *)rgi;
- (void)setOutputFormat:(AudioStreamBasicDescription)format;
- (void)setOutputFormat:(AudioStreamBasicDescription)format outputConfig:(uint32_t)outputConfig;
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format;
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig;
- (void)refreshVolumeScaling;

View File

@ -435,7 +435,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
[self setShouldContinue:YES];
refillNode = nil;
[self cleanUp];
[self setupWithInputFormat:rememberedInputFormat outputFormat:outputFormat isLossless:rememberedLossless];
[self setupWithInputFormat:rememberedInputFormat withInputConfig:rememberedInputConfig outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless];
continue;
} else
break;
@ -843,12 +843,16 @@ static float db_to_scale(float db) {
volumeScale = scale;
}
- (BOOL)setupWithInputFormat:(AudioStreamBasicDescription)inf outputFormat:(AudioStreamBasicDescription)outf isLossless:(BOOL)lossless {
- (BOOL)setupWithInputFormat:(AudioStreamBasicDescription)inf withInputConfig:(uint32_t)inputConfig outputFormat:(AudioStreamBasicDescription)outf outputConfig:(uint32_t)outputConfig isLossless:(BOOL)lossless {
// Make the converter
inputFormat = inf;
outputFormat = outf;
inputChannelConfig = inputConfig;
outputChannelConfig = outputConfig;
nodeFormat = outputFormat;
nodeChannelConfig = outputChannelConfig;
rememberedLossless = lossless;
@ -954,26 +958,31 @@ static float db_to_scale(float db) {
[self cleanUp];
}
- (void)setOutputFormat:(AudioStreamBasicDescription)format {
- (void)setOutputFormat:(AudioStreamBasicDescription)format outputConfig:(uint32_t)outputConfig {
DLog(@"SETTING OUTPUT FORMAT!");
previousOutputFormat = outputFormat;
previousOutputConfig = outputChannelConfig;
outputFormat = format;
outputChannelConfig = outputConfig;
outputFormatChanged = YES;
}
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format {
- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig {
DLog(@"FORMAT CHANGED");
paused = YES;
[self cleanUp];
if(outputFormatChanged && ![buffer isEmpty] &&
memcmp(&outputFormat, &previousOutputFormat, sizeof(outputFormat)) != 0) {
(outputChannelConfig != previousOutputConfig ||
memcmp(&outputFormat, &previousOutputFormat, sizeof(outputFormat)) != 0)) {
// Transfer previously buffered data, remember input format
rememberedInputFormat = format;
rememberedInputConfig = inputChannelConfig;
originalPreviousNode = previousNode;
refillNode = [[RefillNode alloc] initWithController:controller previous:nil];
[self setPreviousNode:refillNode];
[refillNode setFormat:previousOutputFormat];
[refillNode setChannelConfig:previousOutputConfig];
for(;;) {
AudioChunk *chunk = [buffer removeSamples:16384];
@ -985,9 +994,9 @@ static float db_to_scale(float db) {
break;
}
[self setupWithInputFormat:previousOutputFormat outputFormat:outputFormat isLossless:rememberedLossless];
[self setupWithInputFormat:previousOutputFormat withInputConfig:[AudioChunk guessChannelConfig:previousOutputFormat.mChannelsPerFrame] outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless];
} else {
[self setupWithInputFormat:format outputFormat:outputFormat isLossless:rememberedLossless];
[self setupWithInputFormat:format withInputConfig:inputConfig outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless];
}
}

View File

@ -16,9 +16,12 @@
AudioStreamBasicDescription inputFormat;
AudioStreamBasicDescription outputFormat;
uint32_t inConfig;
uint32_t outConfig;
}
- (id)initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf;
- (id)initWithInputFormat:(AudioStreamBasicDescription)inf inputConfig:(uint32_t)iConfig andOutputFormat:(AudioStreamBasicDescription)outf outputConfig:(uint32_t)oConfig;
- (void)process:(const void *)inBuffer frameCount:(size_t)frames output:(void *)outBuffer;

View File

@ -10,74 +10,144 @@
#import "Logging.h"
static const float STEREO_DOWNMIX[8 - 2][8][2] = {
/*3.0*/
{
{ 0.5858F, 0.0F },
{ 0.0F, 0.5858F },
{ 0.4142F, 0.4142F } },
/*quadrophonic*/
{
{ 0.4226F, 0.0F },
{ 0.0F, 0.4226F },
{ 0.366F, 0.2114F },
{ 0.2114F, 0.336F } },
/*5.0*/
{
{ 0.651F, 0.0F },
{ 0.0F, 0.651F },
{ 0.46F, 0.46F },
{ 0.5636F, 0.3254F },
{ 0.3254F, 0.5636F } },
/*5.1*/
{
{ 0.529F, 0.0F },
{ 0.0F, 0.529F },
{ 0.3741F, 0.3741F },
{ 0.3741F, 0.3741F },
{ 0.4582F, 0.2645F },
{ 0.2645F, 0.4582F } },
/*6.1*/
{
{ 0.4553F, 0.0F },
{ 0.0F, 0.4553F },
{ 0.322F, 0.322F },
{ 0.322F, 0.322F },
{ 0.2788F, 0.2788F },
{ 0.3943F, 0.2277F },
{ 0.2277F, 0.3943F } },
/*7.1*/
{
{ 0.3886F, 0.0F },
{ 0.0F, 0.3886F },
{ 0.2748F, 0.2748F },
{ 0.2748F, 0.2748F },
{ 0.3366F, 0.1943F },
{ 0.1943F, 0.3366F },
{ 0.3366F, 0.1943F },
{ 0.1943F, 0.3366F } }
};
#import "AudioChunk.h"
static void downmix_to_stereo(const float *inBuffer, int channels, float *outBuffer, size_t count) {
if(channels >= 3 && channels <= 8)
for(size_t i = 0; i < count; ++i) {
float left = 0, right = 0;
for(int j = 0; j < channels; ++j) {
left += inBuffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][0];
right += inBuffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][1];
// Downmixes `count` interleaved frames of `channels` channels (layout given
// by the `config` bit mask) into interleaved stereo in outBuffer. Mix
// ratios are derived from which channel groups the layout contains: each
// additional group scales the previously chosen ratios down so the summed
// output stays roughly level.
static void downmix_to_stereo(const float *inBuffer, int channels, uint32_t config, float *outBuffer, size_t count) {
// [0] = same-side gain, [1] = opposite-side (cross-feed) gain.
// NOTE(review): FrontRatios[1] is never assigned and stays 0 — front
// left/right never bleed into the opposite output channel here.
float FrontRatios[2] = { 0.0F, 0.0F };
float FrontCenterRatio = 0.0F;
float LFERatio = 0.0F;
float BackRatios[2] = { 0.0F, 0.0F };
float BackCenterRatio = 0.0F;
float SideRatios[2] = { 0.0F, 0.0F };
// Base case: a front pair passes through at unity.
if(config & (AudioChannelFrontLeft | AudioChannelFrontRight)) {
FrontRatios[0] = 1.0F;
}
// A center channel shares level with the front pair (3.0-style mix).
if(config & AudioChannelFrontCenter) {
FrontRatios[0] = 0.5858F;
FrontCenterRatio = 0.4142F;
}
// Back pair: coefficients differ depending on whether a center is present.
if(config & (AudioChannelBackLeft | AudioChannelBackRight)) {
if(config & AudioChannelFrontCenter) {
FrontRatios[0] = 0.651F;
FrontCenterRatio = 0.46F;
BackRatios[0] = 0.5636F;
BackRatios[1] = 0.3254F;
} else {
FrontRatios[0] = 0.4226F;
BackRatios[0] = 0.366F;
BackRatios[1] = 0.2114F;
}
}
// LFE joins at the (scaled) center level; everything else ducks by 0.8.
if(config & AudioChannelLFE) {
FrontRatios[0] *= 0.8F;
FrontCenterRatio *= 0.8F;
LFERatio = FrontCenterRatio;
BackRatios[0] *= 0.8F;
BackRatios[1] *= 0.8F;
}
// Back center joins relative to the center level; all duck by 0.86.
if(config & AudioChannelBackCenter) {
FrontRatios[0] *= 0.86F;
FrontCenterRatio *= 0.86F;
LFERatio *= 0.86F;
BackRatios[0] *= 0.86F;
BackRatios[1] *= 0.86F;
BackCenterRatio = FrontCenterRatio * 0.86F;
}
// Side pair last; the duck factor is gentler when a back center exists.
if(config & (AudioChannelSideLeft | AudioChannelSideRight)) {
float ratio = 0.73F;
if(config & AudioChannelBackCenter) ratio = 0.85F;
FrontRatios[0] *= ratio;
FrontCenterRatio *= ratio;
LFERatio *= ratio;
BackRatios[0] *= ratio;
BackRatios[1] *= ratio;
BackCenterRatio *= ratio;
SideRatios[0] = 0.463882352941176 * ratio;
SideRatios[1] = 0.267882352941176 * ratio;
}
// Map each interleaved slot j to its absolute channel bit position
// (0=FL, 1=FR, 2=FC, 3=LFE, 4=BL, 5=BR, 6/7=FCL/FCR, 8=BC, 9=SL, 10=SR).
// NOTE(review): if channels exceeds the set bits in config,
// extractChannelFlag returns 0 and findChannelIndex asserts — verify
// callers always pass a consistent count/config pair.
int32_t channelIndexes[channels];
for(int i = 0; i < channels; ++i) {
channelIndexes[i] = [AudioChunk findChannelIndex:[AudioChunk extractChannelFlag:i fromConfig:config]];
}
for(size_t i = 0; i < count; ++i) {
float left = 0.0F, right = 0.0F;
for(uint32_t j = 0; j < channels; ++j) {
float inSample = inBuffer[i * channels + j];
switch(channelIndexes[j]) {
case 0:
left += inSample * FrontRatios[0];
right += inSample * FrontRatios[1];
break;
case 1:
left += inSample * FrontRatios[1];
right += inSample * FrontRatios[0];
break;
case 2:
left += inSample * FrontCenterRatio;
right += inSample * FrontCenterRatio;
break;
case 3:
left += inSample * LFERatio;
right += inSample * LFERatio;
break;
case 4:
left += inSample * BackRatios[0];
right += inSample * BackRatios[1];
break;
case 5:
left += inSample * BackRatios[1];
right += inSample * BackRatios[0];
break;
// Front center-left/right are dropped from the stereo mix.
case 6:
case 7:
break;
case 8:
left += inSample * BackCenterRatio;
right += inSample * BackCenterRatio;
break;
case 9:
left += inSample * SideRatios[0];
right += inSample * SideRatios[1];
break;
case 10:
left += inSample * SideRatios[1];
right += inSample * SideRatios[0];
break;
// Top-layer channels are ignored entirely.
case 11:
case 12:
case 13:
case 14:
case 15:
case 16:
case 17:
default:
break;
}
// NOTE(review): these stores sit inside the per-channel loop, so each
// frame is rewritten once per input channel; the final iteration leaves
// the completed sums, so the result is correct but the intermediate
// writes are redundant — likely intended to be one level out (confirm
// against the original, un-flattened source).
outBuffer[i * 2 + 0] = left;
outBuffer[i * 2 + 1] = right;
}
}
}
static void downmix_to_mono(const float *inBuffer, int channels, float *outBuffer, size_t count) {
static void downmix_to_mono(const float *inBuffer, int channels, uint32_t config, float *outBuffer, size_t count) {
float tempBuffer[count * 2];
if(channels >= 3 && channels <= 8) {
downmix_to_stereo(inBuffer, channels, tempBuffer, count);
inBuffer = tempBuffer;
channels = 2;
}
downmix_to_stereo(inBuffer, channels, config, tempBuffer, count);
inBuffer = tempBuffer;
channels = 2;
config = AudioConfigStereo;
float invchannels = 1.0 / (float)channels;
for(size_t i = 0; i < count; ++i) {
float sample = 0;
@ -88,62 +158,100 @@ static void downmix_to_mono(const float *inBuffer, int channels, float *outBuffe
}
}
static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int outchannels, size_t count) {
for(ssize_t i = 0; i < count; ++i) {
if(inchannels == 1 && outchannels == 2) {
static void upmix(const float *inBuffer, int inchannels, uint32_t inconfig, float *outBuffer, int outchannels, uint32_t outconfig, size_t count) {
if(inconfig == AudioConfigMono && outconfig == AudioConfigStereo) {
for(size_t i = 0; i < count; ++i) {
// upmix mono to stereo
float sample = inBuffer[i];
outBuffer[i * 2 + 0] = sample;
outBuffer[i * 2 + 1] = sample;
} else if(inchannels == 1 && outchannels == 4) {
}
} else if(inconfig == AudioConfigMono && outconfig == AudioConfig4Point0) {
for(size_t i = 0; i < count; ++i) {
// upmix mono to quad
float sample = inBuffer[i];
outBuffer[i * 4 + 0] = sample;
outBuffer[i * 4 + 1] = sample;
outBuffer[i * 4 + 2] = 0;
outBuffer[i * 4 + 3] = 0;
} else if(inchannels == 1 && (outchannels == 3 || outchannels >= 5)) {
}
} else if(inconfig == AudioConfigMono && (outconfig & AudioChannelFrontCenter)) {
uint32_t cIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontCenter];
for(size_t i = 0; i < count; ++i) {
// upmix mono to center channel
float sample = inBuffer[i];
outBuffer[i * outchannels + 2] = sample;
for(int j = 0; j < 2; ++j) {
outBuffer[i * outchannels + cIndex] = sample;
for(int j = 0; j < cIndex; ++j) {
outBuffer[i * outchannels + j] = 0;
}
for(int j = 3; j < outchannels; ++j) {
for(int j = cIndex + 1; j < outchannels; ++j) {
outBuffer[i * outchannels + j] = 0;
}
} else if(inchannels == 4 && outchannels >= 5) {
}
} else if(inconfig == AudioConfig4Point0 && outchannels >= 5) {
uint32_t flIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontLeft];
uint32_t frIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontRight];
uint32_t blIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackLeft];
uint32_t brIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackRight];
for(size_t i = 0; i < count; ++i) {
float fl = inBuffer[i * 4 + 0];
float fr = inBuffer[i * 4 + 1];
float bl = inBuffer[i * 4 + 2];
float br = inBuffer[i * 4 + 3];
const int skipclfe = (outchannels == 5) ? 1 : 2;
outBuffer[i * outchannels + 0] = fl;
outBuffer[i * outchannels + 1] = fr;
outBuffer[i * outchannels + skipclfe + 2] = bl;
outBuffer[i * outchannels + skipclfe + 3] = br;
for(int j = 0; j < skipclfe; ++j) {
outBuffer[i * outchannels + 2 + j] = 0;
memset(outBuffer + i * outchannels, 0, sizeof(float) * outchannels);
if(flIndex != ~0) {
outBuffer[i * outchannels + flIndex] = fl;
}
for(int j = 4 + skipclfe; j < outchannels; ++j) {
outBuffer[i * outchannels + j] = 0;
if(frIndex != ~0) {
outBuffer[i * outchannels + frIndex] = fr;
}
} else if(inchannels == 5 && outchannels >= 6) {
if(blIndex != ~0) {
outBuffer[i * outchannels + blIndex] = bl;
}
if(brIndex != ~0) {
outBuffer[i * outchannels + brIndex] = br;
}
}
} else if(inconfig == AudioConfig5Point0 && outchannels >= 6) {
uint32_t flIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontLeft];
uint32_t frIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontRight];
uint32_t cIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontCenter];
uint32_t blIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackLeft];
uint32_t brIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackRight];
for(size_t i = 0; i < count; ++i) {
float fl = inBuffer[i * 5 + 0];
float fr = inBuffer[i * 5 + 1];
float c = inBuffer[i * 5 + 2];
float bl = inBuffer[i * 5 + 3];
float br = inBuffer[i * 5 + 4];
outBuffer[i * outchannels + 0] = fl;
outBuffer[i * outchannels + 1] = fr;
outBuffer[i * outchannels + 2] = c;
outBuffer[i * outchannels + 3] = 0;
outBuffer[i * outchannels + 4] = bl;
outBuffer[i * outchannels + 5] = br;
for(int j = 6; j < outchannels; ++j) {
outBuffer[i * outchannels + j] = 0;
memset(outBuffer + i * outchannels, 0, sizeof(float) * outchannels);
if(flIndex != ~0) {
outBuffer[i * outchannels + flIndex] = fl;
}
} else if(inchannels == 7 && outchannels == 8) {
if(frIndex != ~0) {
outBuffer[i * outchannels + frIndex] = fr;
}
if(cIndex != ~0) {
outBuffer[i * outchannels + cIndex] = c;
}
if(blIndex != ~0) {
outBuffer[i * outchannels + blIndex] = bl;
}
if(brIndex != ~0) {
outBuffer[i * outchannels + brIndex] = br;
}
}
} else if(inconfig == AudioConfig6Point1 && outchannels >= 8) {
uint32_t flIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontLeft];
uint32_t frIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontRight];
uint32_t cIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelFrontCenter];
uint32_t lfeIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelLFE];
uint32_t blIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackLeft];
uint32_t brIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackRight];
uint32_t bcIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelBackCenter];
uint32_t slIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelSideLeft];
uint32_t srIndex = [AudioChunk channelIndexFromConfig:outconfig forFlag:AudioChannelSideRight];
for(size_t i = 0; i < count; ++i) {
float fl = inBuffer[i * 7 + 0];
float fr = inBuffer[i * 7 + 1];
float c = inBuffer[i * 7 + 2];
@ -151,25 +259,48 @@ static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int o
float sl = inBuffer[i * 7 + 4];
float sr = inBuffer[i * 7 + 5];
float bc = inBuffer[i * 7 + 6];
outBuffer[i * 8 + 0] = fl;
outBuffer[i * 8 + 1] = fr;
outBuffer[i * 8 + 2] = c;
outBuffer[i * 8 + 3] = lfe;
outBuffer[i * 8 + 4] = bc;
outBuffer[i * 8 + 5] = bc;
outBuffer[i * 8 + 6] = sl;
outBuffer[i * 8 + 7] = sr;
} else {
memset(outBuffer + i * outchannels, 0, sizeof(float) * outchannels);
if(flIndex != ~0) {
outBuffer[i * outchannels + flIndex] = fl;
}
if(frIndex != ~0) {
outBuffer[i * outchannels + frIndex] = fr;
}
if(cIndex != ~0) {
outBuffer[i * outchannels + cIndex] = c;
}
if(lfeIndex != ~0) {
outBuffer[i * outchannels + lfeIndex] = lfe;
}
if(slIndex != ~0) {
outBuffer[i * outchannels + slIndex] = sl;
}
if(srIndex != ~0) {
outBuffer[i * outchannels + srIndex] = sr;
}
if(bcIndex != ~0) {
outBuffer[i * outchannels + bcIndex] = bc;
} else {
if(blIndex != ~0) {
outBuffer[i * outchannels + blIndex] = bc;
}
if(brIndex != ~0) {
outBuffer[i * outchannels + brIndex] = bc;
}
}
}
} else {
uint32_t outIndexes[inchannels];
for(int i = 0; i < inchannels; ++i) {
uint32_t channelFlag = [AudioChunk extractChannelFlag:i fromConfig:inconfig];
outIndexes[i] = [AudioChunk channelIndexFromConfig:outconfig forFlag:channelFlag];
}
for(size_t i = 0; i < count; ++i) {
// upmix N channels to N channels plus silence the empty channels
float samples[inchannels];
for(int j = 0; j < inchannels; ++j) {
samples[j] = inBuffer[i * inchannels + j];
}
for(int j = 0; j < inchannels; ++j) {
outBuffer[i * outchannels + j] = samples[j];
}
for(int j = inchannels; j < outchannels; ++j) {
outBuffer[i * outchannels + j] = 0;
if(outIndexes[j] != ~0) {
outBuffer[i * outchannels + outIndexes[j]] = inBuffer[i * inchannels + j];
}
}
}
}
@ -177,7 +308,7 @@ static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int o
@implementation DownmixProcessor
- (id)initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf {
- (id)initWithInputFormat:(AudioStreamBasicDescription)inf inputConfig:(uint32_t)iConfig andOutputFormat:(AudioStreamBasicDescription)outf outputConfig:(uint32_t)oConfig {
self = [super init];
if(self) {
@ -198,6 +329,9 @@ static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int o
inputFormat = inf;
outputFormat = outf;
inConfig = iConfig;
outConfig = oConfig;
[self setupVirt];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
@ -221,8 +355,9 @@ static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int o
if(hVirt &&
outputFormat.mChannelsPerFrame == 2 &&
outConfig == AudioConfigStereo &&
inputFormat.mChannelsPerFrame >= 1 &&
inputFormat.mChannelsPerFrame <= 8) {
(inConfig & (AudioConfig7Point1 | AudioChannelBackCenter)) != 0) {
NSString *userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];
NSURL *presetUrl = nil;
@ -241,7 +376,7 @@ static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int o
if(presetUrl) {
@synchronized(hFilter) {
hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame withConfig:inConfig];
}
}
}
@ -267,13 +402,13 @@ static void upmix(const float *inBuffer, int inchannels, float *outBuffer, int o
}
}
if(inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2) {
downmix_to_stereo((const float *)inBuffer, inputFormat.mChannelsPerFrame, (float *)outBuffer, frames);
} else if(inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1) {
downmix_to_mono((const float *)inBuffer, inputFormat.mChannelsPerFrame, (float *)outBuffer, frames);
if(inputFormat.mChannelsPerFrame > 2 && outConfig == AudioConfigStereo) {
downmix_to_stereo((const float *)inBuffer, inputFormat.mChannelsPerFrame, inConfig, (float *)outBuffer, frames);
} else if(inputFormat.mChannelsPerFrame > 1 && outConfig == AudioConfigMono) {
downmix_to_mono((const float *)inBuffer, inputFormat.mChannelsPerFrame, inConfig, (float *)outBuffer, frames);
} else if(inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame) {
upmix((const float *)inBuffer, inputFormat.mChannelsPerFrame, (float *)outBuffer, outputFormat.mChannelsPerFrame, frames);
} else if(inputFormat.mChannelsPerFrame == outputFormat.mChannelsPerFrame) {
upmix((const float *)inBuffer, inputFormat.mChannelsPerFrame, inConfig, (float *)outBuffer, outputFormat.mChannelsPerFrame, outConfig, frames);
} else if(inConfig == outConfig) {
memcpy(outBuffer, inBuffer, frames * outputFormat.mBytesPerPacket);
}
}

View File

@ -42,7 +42,7 @@
+ (BOOL)validateImpulseFile:(NSURL *)url;
- (id)initWithImpulseFile:(NSURL *)url forSampleRate:(double)sampleRate withInputChannels:(size_t)channels;
- (id)initWithImpulseFile:(NSURL *)url forSampleRate:(double)sampleRate withInputChannels:(size_t)channels withConfig:(uint32_t)config;
- (void)process:(const float *)inBuffer sampleCount:(size_t)count toBuffer:(float *)outBuffer;

View File

@ -6,6 +6,7 @@
//
#import "HeadphoneFilter.h"
#import "AudioChunk.h"
#import "AudioDecoder.h"
#import "AudioSource.h"
@ -17,44 +18,61 @@
@implementation HeadphoneFilter
// Symmetrical / no-reverb sets
static const int8_t speakers_to_hesuvi_7[8][2][8] = {
// mono/center
{ { 6 }, { 6 } },
// left/right
{ { 0, 1 }, { 1, 0 } },
// left/right/center
{ { 0, 1, 6 }, { 1, 0, 6 } },
// left/right/back left/back right
{ { 0, 1, 4, 5 }, { 1, 0, 5, 4 } },
// left/right/center/back left/back right
{ { 0, 1, 6, 4, 5 }, { 1, 0, 6, 5, 4 } },
// left/right/center/lfe(center)/back left/back right
{ { 0, 1, 6, 6, 4, 5 }, { 1, 0, 6, 6, 5, 4 } },
// left/right/center/lfe(center)/back center(special)/side left/side right
{ { 0, 1, 6, 6, -1, 2, 3 }, { 1, 0, 6, 6, -1, 3, 2 } },
// left/right/center/lfe(center)/back left/back right/side left/side right
{ { 0, 1, 6, 6, 4, 5, 2, 3 }, { 1, 0, 6, 6, 5, 4, 3, 2 } }
// Sentinel values used in the speakers_to_hesuvi_* routing tables below.
// Regular (non-negative) entries are indexes into the HeSuVi impulse set.
enum {
	// Back center has no dedicated impulse channel; it is synthesized
	// specially by the caller (see the speaker_is_back_center branch
	// in -initWithImpulseFile:...).
	speaker_is_back_center = -1,
	// This speaker position has no mapping in the impulse set; the caller
	// substitutes the null (silent) impulse for it.
	speaker_not_present = -2,
};
// Asymmetrical / reverb sets
static const int8_t speakers_to_hesuvi_14[8][2][8] = {
// mono/center
{ { 6 }, { 13 } },
// left/right
{ { 0, 8 }, { 1, 7 } },
// left/right/center
{ { 0, 8, 6 }, { 1, 7, 13 } },
// left/right/back left/back right
{ { 0, 8, 4, 12 }, { 1, 7, 5, 11 } },
// left/right/center/back left/back right
{ { 0, 8, 6, 4, 12 }, { 1, 7, 13, 5, 11 } },
// left/right/center/lfe(center)/back left/back right
{ { 0, 8, 6, 6, 4, 12 }, { 1, 7, 13, 13, 5, 11 } },
// left/right/center/lfe(center)/back center(special)/side left/side right
{ { 0, 8, 6, 6, -1, 2, 10 }, { 1, 7, 13, 13, -1, 3, 9 } },
// left/right/center/lfe(center)/back left/back right/side left/side right
{ { 0, 8, 6, 6, 4, 12, 2, 10 }, { 1, 7, 13, 13, 5, 11, 3, 9 } }
// Highest canonical speaker index covered by the routing tables below;
// channel indexes above this fall back to speaker_not_present handling.
static const uint32_t max_speaker_index = 10;
// Routing table for 7-channel (symmetrical / no-reverb) HeSuVi impulse sets.
// Indexed by canonical speaker index (0 = front left ... 10 = side right,
// matching the order of the comments below); each entry is the pair of
// impulse channels to convolve for the { left ear, right ear }.
static const int8_t speakers_to_hesuvi_7[11][2] = {
// front left
{ 0, 1 },
// front right
{ 1, 0 },
// front center
{ 6, 6 },
// lfe
{ 6, 6 },
// back left
{ 4, 5 },
// back right
{ 5, 4 },
// front center left
{ speaker_not_present, speaker_not_present },
// front center right
{ speaker_not_present, speaker_not_present },
// back center
{ speaker_is_back_center, speaker_is_back_center },
// side left
{ 2, 3 },
// side right
{ 3, 2 }
};
// Routing table for 14-channel (asymmetrical / reverb) HeSuVi impulse sets.
// Same indexing scheme as speakers_to_hesuvi_7: canonical speaker index ->
// { left ear, right ear } impulse channel pair. Unlike the 7-channel set,
// the left/right ear responses come from distinct impulse channels.
static const int8_t speakers_to_hesuvi_14[11][2] = {
// front left
{ 0, 1 },
// front right
{ 8, 7 },
// front center
{ 6, 13 },
// lfe
{ 6, 13 },
// back left
{ 4, 5 },
// back right
{ 12, 11 },
// front center left
{ speaker_not_present, speaker_not_present },
// front center right
{ speaker_not_present, speaker_not_present },
// back center
{ speaker_is_back_center, speaker_is_back_center },
// side left
{ 2, 3 },
// side right
{ 10, 9 }
};
+ (BOOL)validateImpulseFile:(NSURL *)url {
@ -99,7 +117,7 @@ static const int8_t speakers_to_hesuvi_14[8][2][8] = {
return YES;
}
- (id)initWithImpulseFile:(NSURL *)url forSampleRate:(double)sampleRate withInputChannels:(size_t)channels {
- (id)initWithImpulseFile:(NSURL *)url forSampleRate:(double)sampleRate withInputChannels:(size_t)channels withConfig:(uint32_t)config {
self = [super init];
if(self) {
@ -246,7 +264,7 @@ static const int8_t speakers_to_hesuvi_14[8][2][8] = {
}
fftSize = 2 << pow;
float *deinterleavedImpulseBuffer = (float *)_mm_malloc(fftSize * sizeof(float) * impulseChannels, 16);
float *deinterleavedImpulseBuffer = (float *)_mm_malloc(fftSize * sizeof(float) * (impulseChannels + 1), 16);
if(!deinterleavedImpulseBuffer) {
free(impulseBuffer);
return nil;
@ -259,6 +277,9 @@ static const int8_t speakers_to_hesuvi_14[8][2][8] = {
free(impulseBuffer);
// Null impulse
vDSP_vclr(deinterleavedImpulseBuffer + impulseChannels * fftSize, 1, fftSize);
paddedBufferSize = fftSize;
fftSizeOver2 = (fftSize + 1) / 2;
log2n = log2f(fftSize);
@ -317,18 +338,25 @@ static const int8_t speakers_to_hesuvi_14[8][2][8] = {
return nil;
}
int leftInChannel;
int rightInChannel;
uint32_t channelFlag = [AudioChunk extractChannelFlag:(uint32_t)i fromConfig:config];
uint32_t channelIndex = [AudioChunk findChannelIndex:channelFlag];
int leftInChannel = speaker_not_present;
int rightInChannel = speaker_not_present;
if(impulseChannels == 7) {
leftInChannel = speakers_to_hesuvi_7[channels - 1][0][i];
rightInChannel = speakers_to_hesuvi_7[channels - 1][1][i];
if(channelIndex <= max_speaker_index) {
leftInChannel = speakers_to_hesuvi_7[channelIndex][0];
rightInChannel = speakers_to_hesuvi_7[channelIndex][1];
}
} else {
leftInChannel = speakers_to_hesuvi_14[channels - 1][0][i];
rightInChannel = speakers_to_hesuvi_14[channels - 1][1][i];
if(channelIndex <= max_speaker_index) {
leftInChannel = speakers_to_hesuvi_14[channelIndex][0];
rightInChannel = speakers_to_hesuvi_14[channelIndex][1];
}
}
if(leftInChannel == -1 || rightInChannel == -1) {
if(leftInChannel == speaker_is_back_center || rightInChannel == speaker_is_back_center) {
float *temp;
if(impulseChannels == 7) {
temp = (float *)malloc(sizeof(float) * fftSize);
@ -360,6 +388,9 @@ static const int8_t speakers_to_hesuvi_14[8][2][8] = {
}
free(temp);
} else if(leftInChannel == speaker_not_present || rightInChannel == speaker_not_present) {
vDSP_ctoz((DSPComplex *)(deinterleavedImpulseBuffer + impulseChannels * fftSize), 2, &impulse_responses[i * 2 + 0], 1, fftSizeOver2);
vDSP_ctoz((DSPComplex *)(deinterleavedImpulseBuffer + impulseChannels * fftSize), 2, &impulse_responses[i * 2 + 1], 1, fftSizeOver2);
} else {
vDSP_ctoz((DSPComplex *)(deinterleavedImpulseBuffer + leftInChannel * fftSize), 2, &impulse_responses[i * 2 + 0], 1, fftSizeOver2);
vDSP_ctoz((DSPComplex *)(deinterleavedImpulseBuffer + rightInChannel * fftSize), 2, &impulse_responses[i * 2 + 1], 1, fftSizeOver2);

View File

@ -47,6 +47,8 @@
bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
nodeFormat = propertiesToASBD(properties);
if([properties valueForKey:@"channelConfig"])
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
shouldContinue = YES;
@ -66,6 +68,8 @@
bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
nodeFormat = propertiesToASBD(properties);
if([properties valueForKey:@"channelConfig"])
nodeChannelConfig = [[properties valueForKey:@"channelConfig"] intValue];
nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"];
[self registerObservers];
@ -117,14 +121,15 @@
while([self shouldContinue] == YES && [self endOfStream] == NO) {
if(shouldSeek == YES) {
ConverterNode *converter = [[[controller controller] bufferChain] converter];
BufferChain *bufferChain = [[controller controller] bufferChain];
ConverterNode *converter = [bufferChain converter];
DLog(@"SEEKING! Resetting Buffer");
amountInBuffer = 0;
// This resets the converter's buffer
[self resetBuffer];
[converter resetBuffer];
[converter inputFormatDidChange:[[[controller controller] bufferChain] inputFormat]];
[converter inputFormatDidChange:[bufferChain inputFormat] inputConfig:[bufferChain inputConfig]];
DLog(@"Reset buffer!");

View File

@ -29,6 +29,7 @@
BOOL initialBufferFilled;
AudioStreamBasicDescription nodeFormat;
uint32_t nodeChannelConfig;
BOOL nodeLossless;
}
- (id)initWithController:(id)c previous:(id)p;
@ -54,6 +55,7 @@
- (void)resetBuffer; // WARNING! DANGER WILL ROBINSON!
- (AudioStreamBasicDescription)nodeFormat;
- (uint32_t)nodeChannelConfig;
- (BOOL)nodeLossless;
- (Semaphore *)semaphore;

View File

@ -27,6 +27,7 @@
endOfStream = NO;
shouldContinue = YES;
nodeChannelConfig = 0;
nodeLossless = NO;
[self setPreviousNode:p];
@ -39,6 +40,10 @@
return nodeFormat;
}
/// The channel-config bitmask for this node's stream.
/// Zero when the input plugin did not provide a "channelConfig" property.
- (uint32_t)nodeChannelConfig {
	uint32_t currentConfig = self->nodeChannelConfig;
	return currentConfig;
}
/// Whether this node's stream is flagged as lossless.
- (BOOL)nodeLossless {
	BOOL lossless = self->nodeLossless;
	return lossless;
}
@ -48,6 +53,9 @@
AudioChunk *chunk = [[AudioChunk alloc] init];
[chunk setFormat:nodeFormat];
if(nodeChannelConfig) {
[chunk setChannelConfig:nodeChannelConfig];
}
[chunk setLossless:nodeLossless];
[chunk assignSamples:ptr frameCount:amount / nodeFormat.mBytesPerPacket];

View File

@ -17,6 +17,7 @@
@interface OutputNode : Node {
AudioStreamBasicDescription format;
uint32_t config;
double amountPlayed;
double sampleRatio;
@ -48,8 +49,9 @@
- (AudioChunk *)readChunk:(size_t)amount;
- (void)setFormat:(AudioStreamBasicDescription *)f;
- (void)setFormat:(AudioStreamBasicDescription *)f channelConfig:(uint32_t)channelConfig;
- (AudioStreamBasicDescription)format;
- (uint32_t)config;
- (void)setVolume:(double)v;

View File

@ -91,8 +91,13 @@
return format;
}
- (void)setFormat:(AudioStreamBasicDescription *)f {
/// The channel-config bitmask last applied to this output node
/// via -setFormat:channelConfig:.
- (uint32_t)config {
	uint32_t currentConfig = self->config;
	return currentConfig;
}
- (void)setFormat:(AudioStreamBasicDescription *)f channelConfig:(uint32_t)channelConfig {
format = *f;
config = channelConfig;
// Calculate a ratio and add to double(seconds) instead, as format may change
// double oldSampleRatio = sampleRatio;
sampleRatio = 1.0 / (format.mSampleRate * format.mBytesPerPacket);
@ -110,8 +115,9 @@
if (oldSampleRatio)
amountPlayed += oldSampleRatio * [[converter buffer] bufferedLength];
#endif
[converter setOutputFormat:format];
[converter inputFormatDidChange:[bufferChain inputFormat]];
[converter setOutputFormat:format
outputConfig:channelConfig];
[converter inputFormatDidChange:[bufferChain inputFormat] inputConfig:[bufferChain inputConfig]];
}
}
}

View File

@ -22,5 +22,6 @@
}
- (void)setFormat:(AudioStreamBasicDescription)format;
- (void)setChannelConfig:(uint32_t)config;
@end

View File

@ -42,4 +42,8 @@
nodeFormat = format;
}
/// Records the channel-config bitmask; it is stamped onto the
/// AudioChunk objects this node subsequently produces.
- (void)setChannelConfig:(uint32_t)config {
	self->nodeChannelConfig = config;
}
@end

View File

@ -60,6 +60,9 @@
AudioStreamBasicDescription deviceFormat; // info about the default device
AudioStreamBasicDescription streamFormat; // stream format last seen in render callback
uint32_t deviceChannelConfig;
uint32_t streamChannelConfig;
AUAudioUnit *_au;
size_t _bufferSize;

View File

@ -76,12 +76,14 @@ static OSStatus renderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioAct
size_t frameCount = [chunk frameCount];
AudioStreamBasicDescription format = [chunk format];
uint32_t config = [chunk channelConfig];
if(frameCount) {
if(!_self->streamFormatStarted || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
if(!_self->streamFormatStarted || config != _self->streamChannelConfig || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
_self->streamFormat = format;
_self->streamChannelConfig = config;
_self->streamFormatStarted = YES;
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format andOutputFormat:_self->deviceFormat];
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format inputConfig:config andOutputFormat:_self->deviceFormat outputConfig:_self->deviceChannelConfig];
}
double chunkDuration = [chunk duration];
@ -104,11 +106,12 @@ static OSStatus renderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioAct
chunk = [[_self->outputController buffer] removeSamples:((amountToRead - amountRead) / bytesPerPacket)];
frameCount = [chunk frameCount];
format = [chunk format];
config = [chunk channelConfig];
if(frameCount) {
if(!_self->streamFormatStarted || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
if(!_self->streamFormatStarted || config != _self->streamChannelConfig || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
_self->streamFormat = format;
_self->streamFormatStarted = YES;
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format andOutputFormat:_self->deviceFormat];
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format inputConfig:config andOutputFormat:_self->deviceFormat outputConfig:_self->deviceChannelConfig];
}
atomic_fetch_add(&_self->bytesRendered, frameCount * bytesPerPacket);
double chunkDuration = [chunk duration];
@ -485,27 +488,35 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
switch(deviceFormat.mChannelsPerFrame) {
case 1:
tag = kAudioChannelLayoutTag_Mono;
deviceChannelConfig = AudioConfigMono;
break;
case 2:
tag = kAudioChannelLayoutTag_Stereo;
deviceChannelConfig = AudioConfigStereo;
break;
case 3:
tag = kAudioChannelLayoutTag_DVD_4;
deviceChannelConfig = AudioConfig3Point0;
break;
case 4:
tag = kAudioChannelLayoutTag_Quadraphonic;
deviceChannelConfig = AudioConfig4Point0;
break;
case 5:
tag = kAudioChannelLayoutTag_MPEG_5_0_A;
deviceChannelConfig = AudioConfig5Point0;
break;
case 6:
tag = kAudioChannelLayoutTag_MPEG_5_1_A;
deviceChannelConfig = AudioConfig5Point1;
break;
case 7:
tag = kAudioChannelLayoutTag_MPEG_6_1_A;
deviceChannelConfig = AudioConfig6Point1;
break;
case 8:
tag = kAudioChannelLayoutTag_MPEG_7_1_A;
deviceChannelConfig = AudioConfig7Point1;
break;
}
@ -514,7 +525,7 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
if(err != nil)
return NO;
[outputController setFormat:&deviceFormat];
[outputController setFormat:&deviceFormat channelConfig:deviceChannelConfig];
AudioStreamBasicDescription asbd = deviceFormat;