New feature: Implemented headphone virtualization
This new virtualizer uses the Accelerate framework to process samples. I've bundled a HeSuVi impulse for now, and will add an option to select an impulse in the future. The selection will be validated before it is sent to the actual filter, which fails outright if it receives invalid input. Impulses may eventually come in any format that Cog supports, but let's not go too hog wild for now: the filter requires HeSuVi 14-channel presets.
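
For context, the validation mentioned above amounts to checking the decoded impulse's properties before handing it to the filter. The sketch below is illustrative only: the helper name impulseLooksValid is not part of this commit, but the checks mirror the ones HeadphoneFilter.m performs in its initializer (32-bit float samples, native or little endian, exactly 14 HeSuVi channels).

#import <Foundation/Foundation.h>

// Hedged sketch: impulseLooksValid is an illustrative helper, not part of this
// commit; it mirrors the property checks the filter performs before accepting
// an impulse response.
static BOOL impulseLooksValid(NSDictionary *properties) {
    BOOL isFloat = [[properties objectForKey:@"floatingPoint"] boolValue];
    int bitsPerSample = [[properties objectForKey:@"bitsPerSample"] intValue];
    NSString *endian = [properties objectForKey:@"endian"];
    int channels = [[properties objectForKey:@"channels"] intValue];
    return isFloat && bitsPerSample == 32 &&
           ([endian isEqualToString:@"native"] || [endian isEqualToString:@"little"]) &&
           channels == 14;
}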
parent 4567df54b7
commit e7b78085ca
@ -67,6 +67,7 @@ NSString *CogPlaybackDidStopNotficiation = @"CogPlaybackDidStopNotficiation";
[NSNumber numberWithInt:-1], @"GraphicEQpreset",
[NSNumber numberWithBool:NO], @"GraphicEQtrackgenre",
[NSNumber numberWithBool:YES], @"volumeLimit",
[NSNumber numberWithBool:NO], @"headphoneVirtualization",
nil];

[[NSUserDefaults standardUserDefaults] registerDefaults:defaultsDictionary];
@ -17,6 +17,8 @@
#import "Node.h"
#import "RefillNode.h"

#import "HeadphoneFilter.h"

@interface ConverterNode : Node {
NSDictionary * rgInfo;

@ -75,6 +77,8 @@
NSString *outputResampling;

void *hdcd_decoder;

HeadphoneFilter *hFilter;
}

@property AudioStreamBasicDescription inputFormat;
@ -87,6 +87,7 @@ void PrintStreamDesc (AudioStreamBasicDescription *inDesc)

[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.volumeScaling" options:0 context:nil];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputResampling" options:0 context:nil];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
}

return self;

@ -1045,7 +1046,13 @@ tryagain:

scale_by_volume( (float*) floatBuffer, amountReadFromFC / sizeof(float), volumeScale);

if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
if ( hFilter ) {
int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
[hFilter process:floatBuffer sampleCount:samples toBuffer:floatBuffer + amountReadFromFC];
memmove(floatBuffer, floatBuffer + amountReadFromFC, samples * sizeof(float) * 2);
amountReadFromFC = samples * sizeof(float) * 2;
}
else if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
{
int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
downmix_to_stereo( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );

@ -1090,11 +1097,11 @@ tryagain:
context:(void *)context
{
DLog(@"SOMETHING CHANGED!");
if ([keyPath isEqual:@"values.volumeScaling"]) {
if ([keyPath isEqualToString:@"values.volumeScaling"]) {
//User reset the volume scaling option
[self refreshVolumeScaling];
}
else if ([keyPath isEqual:@"values.outputResampling"]) {
else if ([keyPath isEqualToString:@"values.outputResampling"]) {
// Reset resampler
if (resampler && resampler_data) {
NSString *value = [[NSUserDefaults standardUserDefaults] stringForKey:@"outputResampling"];

@ -1102,6 +1109,14 @@ tryagain:
[self inputFormatDidChange:inputFormat];
}
}
else if ([keyPath isEqualToString:@"values.headphoneVirtualization"]) {
// Reset the converter, without rebuffering
if (outputFormat.mChannelsPerFrame == 2 &&
inputFormat.mChannelsPerFrame >= 1 &&
inputFormat.mChannelsPerFrame <= 8) {
[self inputFormatDidChange:inputFormat];
}
}
}

static float db_to_scale(float db)

@ -1209,6 +1224,19 @@ static float db_to_scale(float db)
dmFloatFormat.mChannelsPerFrame = outputFormat.mChannelsPerFrame;
dmFloatFormat.mBytesPerFrame = (32/8)*dmFloatFormat.mChannelsPerFrame;
dmFloatFormat.mBytesPerPacket = dmFloatFormat.mBytesPerFrame * floatFormat.mFramesPerPacket;

BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];

if (hVirt &&
outputFormat.mChannelsPerFrame == 2 &&
inputFormat.mChannelsPerFrame >= 1 &&
inputFormat.mChannelsPerFrame <= 8) {
CFURLRef appUrlRef = CFBundleCopyResourceURL(CFBundleGetMainBundle(), CFSTR("gsx"), CFSTR("wv"), NULL);

if (appUrlRef) {
hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:(__bridge NSURL *)appUrlRef forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
}
}

convert_s16_to_float_init_simd();
convert_s32_to_float_init_simd();

@ -1276,6 +1304,7 @@ static float db_to_scale(float db)

[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.volumeScaling"];
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.outputResampling"];
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];

paused = NO;
[self cleanUp];

@ -1340,6 +1369,9 @@ static float db_to_scale(float db)
{
usleep(500);
}
if (hFilter) {
hFilter = nil;
}
if (hdcd_decoder)
{
free(hdcd_decoder);
@ -0,0 +1,50 @@
//
// HeadphoneFilter.h
// CogAudio Framework
//
// Created by Christopher Snowhill on 1/24/22.
//

#ifndef HeadphoneFilter_h
#define HeadphoneFilter_h

#import <Cocoa/Cocoa.h>
#import <Accelerate/Accelerate.h>

@interface HeadphoneFilter : NSObject {
FFTSetup fftSetup;

size_t fftSize;
size_t fftSizeOver2;
size_t log2n;
size_t log2nhalf;
size_t bufferSize;
size_t paddedBufferSize;
size_t channelCount;

double sampleRate;

COMPLEX_SPLIT signal_fft;
COMPLEX_SPLIT input_filtered_signal_per_channel[2];
COMPLEX_SPLIT * impulse_responses;

float * left_result;
float * right_result;

float * left_mix_result;
float * right_mix_result;

float * paddedSignal;

float * prevOverlap[2];

int prevOverlapLength;
}

- (id)initWithImpulseFile:(NSURL *)url forSampleRate:(double)sampleRate withInputChannels:(size_t)channels;

- (void)process:(const float*)inBuffer sampleCount:(size_t)count toBuffer:(float *)outBuffer;

@end

#endif /* HeadphoneFilter_h */
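Before the implementation below, here is roughly how the interface above gets driven. This is a hedged usage sketch based on the ConverterNode changes in this commit: impulseURL, inBuffer, outBuffer and frames are placeholder names, the sample rate and channel count are example values, and the initializer returns nil if the impulse fails validation.

// Hedged usage sketch (assumes HeadphoneFilter.h is imported and the caller
// owns interleaved float buffers named inBuffer/outBuffer of `frames` frames).
NSURL *impulseURL = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
HeadphoneFilter *filter = [[HeadphoneFilter alloc] initWithImpulseFile:impulseURL
                                                         forSampleRate:44100.0
                                                     withInputChannels:6];
if (filter) {
    // Renders `frames` interleaved 6-channel samples from inBuffer into
    // `frames` interleaved stereo samples in outBuffer.
    [filter process:inBuffer sampleCount:frames toBuffer:outBuffer];
}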
@ -0,0 +1,295 @@
//
// HeadphoneFilter.m
// CogAudio Framework
//
// Created by Christopher Snowhill on 1/24/22.
//

#import "HeadphoneFilter.h"
#import "AudioSource.h"
#import "AudioDecoder.h"

#import <audio/audio_resampler.h>
#import <memalign.h>

@implementation HeadphoneFilter

- (id)initWithImpulseFile:(NSURL *)url forSampleRate:(double)sampleRate withInputChannels:(size_t)channels {
self = [super init];

if (self) {
id<CogSource> source = [AudioSource audioSourceForURL:url];
if (!source)
return nil;

if (![source open:url])
return nil;

id<CogDecoder> decoder = [AudioDecoder audioDecoderForSource:source];

if (decoder == nil)
return nil;

if (![decoder open:source])
{
return nil;
}

NSDictionary *properties = [decoder properties];

double sampleRateOfSource = [[properties objectForKey:@"sampleRate"] floatValue];

int sampleCount = [[properties objectForKey:@"totalFrames"] intValue];
int impulseChannels = [[properties objectForKey:@"channels"] intValue];

if ([[properties objectForKey:@"floatingPoint"] boolValue] != YES ||
[[properties objectForKey:@"bitsPerSample"] intValue] != 32 ||
!([[properties objectForKey:@"endian"] isEqualToString:@"native"] ||
[[properties objectForKey:@"endian"] isEqualToString:@"little"]) ||
impulseChannels != 14)
return nil;

float * impulseBuffer = calloc(sizeof(float), (sampleCount + 1024) * sizeof(float) * impulseChannels);
[decoder readAudio:impulseBuffer frames:sampleCount];
[decoder close];
decoder = nil;
source = nil;

if (sampleRateOfSource != sampleRate) {
double sampleRatio = sampleRate / sampleRateOfSource;
int resampledCount = (int)ceil((double)sampleCount * sampleRatio);

void *resampler_data = NULL;
const retro_resampler_t *resampler = NULL;

if (!retro_resampler_realloc(&resampler_data, &resampler, "sinc", RESAMPLER_QUALITY_NORMAL, impulseChannels, sampleRatio)) {
free(impulseBuffer);
return nil;
}

int resamplerLatencyIn = (int) resampler->latency(resampler_data);
int resamplerLatencyOut = (int)ceil(resamplerLatencyIn * sampleRatio);

float * resampledImpulse = calloc(sizeof(float), (resampledCount + resamplerLatencyOut * 2 + 128) * sizeof(float) * impulseChannels);

memmove(impulseBuffer + resamplerLatencyIn * impulseChannels, impulseBuffer, sampleCount * sizeof(float) * impulseChannels);
memset(impulseBuffer, 0, resamplerLatencyIn * sizeof(float) * impulseChannels);
memset(impulseBuffer + (resamplerLatencyIn + sampleCount) * impulseChannels, 0, resamplerLatencyIn * sizeof(float) * impulseChannels);

struct resampler_data src_data;
src_data.data_in = impulseBuffer;
src_data.input_frames = sampleCount + resamplerLatencyIn * 2;
src_data.data_out = resampledImpulse;
src_data.output_frames = 0;
src_data.ratio = sampleRatio;

resampler->process(resampler_data, &src_data);

resampler->free(resampler, resampler_data);

src_data.output_frames -= resamplerLatencyOut * 2;

memmove(resampledImpulse, resampledImpulse + resamplerLatencyOut * impulseChannels, src_data.output_frames * sizeof(float) * impulseChannels);

free(impulseBuffer);
impulseBuffer = resampledImpulse;
sampleCount = (int) src_data.output_frames;
}

channelCount = channels;

bufferSize = 512;
fftSize = sampleCount + bufferSize;

int pow = 1;
while (fftSize > 2) { pow++; fftSize /= 2; }
fftSize = 2 << pow;

float * deinterleavedImpulseBuffer = (float *) memalign_calloc(128, sizeof(float), fftSize * impulseChannels);
for (size_t i = 0; i < impulseChannels; ++i) {
for (size_t j = 0; j < sampleCount; ++j) {
deinterleavedImpulseBuffer[i * fftSize + j] = impulseBuffer[i + impulseChannels * j];
}
}

free(impulseBuffer);

paddedBufferSize = fftSize;
fftSizeOver2 = (fftSize + 1) / 2;
log2n = log2f(fftSize);
log2nhalf = log2n / 2;

fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);

paddedSignal = (float *) memalign_calloc(128, sizeof(float), paddedBufferSize);

signal_fft.realp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
signal_fft.imagp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);

input_filtered_signal_per_channel[0].realp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
input_filtered_signal_per_channel[0].imagp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
input_filtered_signal_per_channel[1].realp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
input_filtered_signal_per_channel[1].imagp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);

impulse_responses = (COMPLEX_SPLIT *) calloc(sizeof(COMPLEX_SPLIT), channels * 2);

const int speakers_to_hesuvi[8][2][8] = {
{ { 6, }, { 13, } }, // mono/center
{ { 0, 8 }, { 1, 7 } }, // left/right
{ { 0, 8, 6 }, { 1, 7, 13 } }, // left/right/center
{ { 0, 8, 4, 12 }, { 1, 7, 5, 11 } },// left/right/left back/right back
{ { 0, 8, 6, 4, 12 }, { 1, 7, 13, 5, 11 } }, // left/right/center/back left/back right
{ { 0, 8, 6, 6, 4, 12 }, { 1, 7, 13, 13, 5, 11 } }, // left/right/center/lfe(center)/back left/back right
{ { 0, 8, 6, 6, -1, 2, 10 }, { 1, 7, 13, 13, -1, 3, 9 } }, // left/right/center/lfe(center)/back center(special)/side left/side right
{ { 0, 8, 6, 6, 4, 12, 2, 10 }, { 1, 7, 13, 13, 5, 11, 3, 9 } } // left/right/center/lfe(center)/back left/back right/side left/side right
};

for (size_t i = 0; i < channels; ++i) {
impulse_responses[i * 2 + 0].realp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
impulse_responses[i * 2 + 0].imagp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
impulse_responses[i * 2 + 1].realp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);
impulse_responses[i * 2 + 1].imagp = (float *) memalign_calloc(128, sizeof(float), fftSizeOver2);

int leftInChannel = speakers_to_hesuvi[channels-1][0][i];
int rightInChannel = speakers_to_hesuvi[channels-1][1][i];

if (leftInChannel == -1 || rightInChannel == -1) {
float * temp = calloc(sizeof(float), fftSize * 2);
for (size_t i = 0; i < fftSize; i++) {
temp[i] = deinterleavedImpulseBuffer[i + 2 * fftSize] + deinterleavedImpulseBuffer[i + 9 * fftSize];
temp[i + fftSize] = deinterleavedImpulseBuffer[i + 3 * fftSize] + deinterleavedImpulseBuffer[i + 10 * fftSize];
}

vDSP_ctoz((DSPComplex *)temp, 2, &impulse_responses[i * 2 + 0], 1, fftSizeOver2);
vDSP_ctoz((DSPComplex *)(temp + fftSize), 2, &impulse_responses[i * 2 + 1], 1, fftSizeOver2);

free(temp);
}
else {
vDSP_ctoz((DSPComplex *)(deinterleavedImpulseBuffer + leftInChannel * fftSize), 2, &impulse_responses[i * 2 + 0], 1, fftSizeOver2);
vDSP_ctoz((DSPComplex *)(deinterleavedImpulseBuffer + rightInChannel * fftSize), 2, &impulse_responses[i * 2 + 1], 1, fftSizeOver2);
}

vDSP_fft_zrip(fftSetup, &impulse_responses[i * 2 + 0], 1, log2n, FFT_FORWARD);
vDSP_fft_zrip(fftSetup, &impulse_responses[i * 2 + 1], 1, log2n, FFT_FORWARD);
}

memalign_free(deinterleavedImpulseBuffer);

left_result = (float *) memalign_calloc(128, sizeof(float), fftSize);
right_result = (float *) memalign_calloc(128, sizeof(float), fftSize);

prevOverlap[0] = (float *) memalign_calloc(128, sizeof(float), fftSize);
prevOverlap[1] = (float *) memalign_calloc(128, sizeof(float), fftSize);

left_mix_result = (float *) memalign_calloc(128, sizeof(float), fftSize);
right_mix_result = (float *) memalign_calloc(128, sizeof(float), fftSize);

prevOverlapLength = 0;
}

return self;
}

- (void)dealloc {
if (paddedSignal) memalign_free(paddedSignal);

if (signal_fft.realp) memalign_free(signal_fft.realp);
if (signal_fft.imagp) memalign_free(signal_fft.imagp);

if (input_filtered_signal_per_channel[0].realp) memalign_free(input_filtered_signal_per_channel[0].realp);
if (input_filtered_signal_per_channel[0].imagp) memalign_free(input_filtered_signal_per_channel[0].imagp);
if (input_filtered_signal_per_channel[1].realp) memalign_free(input_filtered_signal_per_channel[1].realp);
if (input_filtered_signal_per_channel[1].imagp) memalign_free(input_filtered_signal_per_channel[1].imagp);

if (impulse_responses) {
for (size_t i = 0; i < channelCount * 2; ++i) {
if (impulse_responses[i].realp) memalign_free(impulse_responses[i].realp);
if (impulse_responses[i].imagp) memalign_free(impulse_responses[i].imagp);
}
free(impulse_responses);
}

memalign_free(left_result);
memalign_free(right_result);

if (prevOverlap[0]) memalign_free(prevOverlap[0]);
if (prevOverlap[1]) memalign_free(prevOverlap[1]);

memalign_free(left_mix_result);
memalign_free(right_mix_result);
}

- (void)process:(const float*)inBuffer sampleCount:(size_t)count toBuffer:(float *)outBuffer {
float scale = 1.0 / (8.0 * (float)fftSize);

while (count > 0) {
size_t countToDo = (count > bufferSize) ? bufferSize : count;

vDSP_vclr(left_mix_result, 1, fftSize);
vDSP_vclr(right_mix_result, 1, fftSize);

for (size_t i = 0; i < channelCount; ++i) {
cblas_scopy((int)countToDo, inBuffer + i, (int)channelCount, paddedSignal, 1);

vDSP_vclr(paddedSignal + countToDo, 1, paddedBufferSize - countToDo);

vDSP_ctoz((DSPComplex *)paddedSignal, 2, &signal_fft, 1, fftSizeOver2);

vDSP_fft_zrip(fftSetup, &signal_fft, 1, log2n, FFT_FORWARD);

// One channel forward, then multiply and back twice

float preserveIRNyq = impulse_responses[i * 2 + 0].imagp[0];
float preserveSigNyq = signal_fft.imagp[0];
impulse_responses[i * 2 + 0].imagp[0] = 0;
signal_fft.imagp[0] = 0;

vDSP_zvmul(&signal_fft, 1, &impulse_responses[i * 2 + 0], 1, &input_filtered_signal_per_channel[0], 1, fftSizeOver2, 1);

input_filtered_signal_per_channel[0].imagp[0] = preserveIRNyq * preserveSigNyq;
impulse_responses[i * 2 + 0].imagp[0] = preserveIRNyq;

preserveIRNyq = impulse_responses[i * 2 + 1].imagp[0];
impulse_responses[i * 2 + 1].imagp[0] = 0;

vDSP_zvmul(&signal_fft, 1, &impulse_responses[i * 2 + 1], 1, &input_filtered_signal_per_channel[1], 1, fftSizeOver2, 1);

input_filtered_signal_per_channel[1].imagp[0] = preserveIRNyq * preserveSigNyq;
impulse_responses[i * 2 + 1].imagp[0] = preserveIRNyq;

vDSP_fft_zrip(fftSetup, &input_filtered_signal_per_channel[0], 1, log2n, FFT_INVERSE);
vDSP_fft_zrip(fftSetup, &input_filtered_signal_per_channel[1], 1, log2n, FFT_INVERSE);

vDSP_ztoc(&input_filtered_signal_per_channel[0], 1, (DSPComplex *)left_result, 2, fftSizeOver2);
vDSP_ztoc(&input_filtered_signal_per_channel[1], 1, (DSPComplex *)right_result, 2, fftSizeOver2);

vDSP_vadd(left_mix_result, 1, left_result, 1, left_mix_result, 1, fftSize);
vDSP_vadd(right_mix_result, 1, right_result, 1, right_mix_result, 1, fftSize);
}

// Integrate previous overlap
if (prevOverlapLength) {
vDSP_vadd(prevOverlap[0], 1, left_mix_result, 1, left_mix_result, 1, prevOverlapLength);
vDSP_vadd(prevOverlap[1], 1, right_mix_result, 1, right_mix_result, 1, prevOverlapLength);
}

prevOverlapLength = (int)(fftSize - countToDo);

cblas_scopy(prevOverlapLength, left_mix_result + countToDo, 1, prevOverlap[0], 1);
cblas_scopy(prevOverlapLength, right_mix_result + countToDo, 1, prevOverlap[1], 1);

vDSP_vsmul(left_mix_result, 1, &scale, left_mix_result, 1, countToDo);
vDSP_vsmul(right_mix_result, 1, &scale, right_mix_result, 1, countToDo);

cblas_scopy((int)countToDo, left_mix_result, 1, outBuffer + 0, 2);
cblas_scopy((int)countToDo, right_mix_result, 1, outBuffer + 1, 2);

inBuffer += countToDo * channelCount;
outBuffer += countToDo * 2;

count -= countToDo;
}
}

@end
@ -53,6 +53,9 @@
835C88AD2797DA5800E28EAE /* util.h in Headers */ = {isa = PBXBuildFile; fileRef = 835C88AC2797DA5800E28EAE /* util.h */; };
835C88B1279811A500E28EAE /* hdcd_decode2.h in Headers */ = {isa = PBXBuildFile; fileRef = 835C88AF279811A500E28EAE /* hdcd_decode2.h */; };
835C88B2279811A500E28EAE /* hdcd_decode2.c in Sources */ = {isa = PBXBuildFile; fileRef = 835C88B0279811A500E28EAE /* hdcd_decode2.c */; };
835EDD7B279FE23A001EDCCE /* HeadphoneFilter.m in Sources */ = {isa = PBXBuildFile; fileRef = 835EDD7A279FE23A001EDCCE /* HeadphoneFilter.m */; };
835EDD7D279FE307001EDCCE /* HeadphoneFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 835EDD7C279FE307001EDCCE /* HeadphoneFilter.h */; };
835EDD7F27A00089001EDCCE /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 835EDD7E27A00089001EDCCE /* Accelerate.framework */; };
8384912718080FF100E7332D /* Logging.h in Headers */ = {isa = PBXBuildFile; fileRef = 8384912618080FF100E7332D /* Logging.h */; };
8389F270278E64590074164C /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = 8389F225278E64590074164C /* config.h */; };
8389F279278E64590074164C /* utf.h in Headers */ = {isa = PBXBuildFile; fileRef = 8389F236278E64590074164C /* utf.h */; };

@ -168,6 +171,9 @@
835C88AC2797DA5800E28EAE /* util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = util.h; path = ThirdParty/lvqcl/util.h; sourceTree = SOURCE_ROOT; };
835C88AF279811A500E28EAE /* hdcd_decode2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hdcd_decode2.h; sourceTree = "<group>"; };
835C88B0279811A500E28EAE /* hdcd_decode2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = hdcd_decode2.c; sourceTree = "<group>"; };
835EDD7A279FE23A001EDCCE /* HeadphoneFilter.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = HeadphoneFilter.m; sourceTree = "<group>"; };
835EDD7C279FE307001EDCCE /* HeadphoneFilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = HeadphoneFilter.h; sourceTree = "<group>"; };
835EDD7E27A00089001EDCCE /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; };
8384912618080FF100E7332D /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Logging.h; path = ../../Utils/Logging.h; sourceTree = "<group>"; };
8389F225278E64590074164C /* config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = config.h; sourceTree = "<group>"; };
8389F228278E64590074164C /* encoding_utf.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = encoding_utf.c; sourceTree = "<group>"; };

@ -237,6 +243,7 @@
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
835EDD7F27A00089001EDCCE /* Accelerate.framework in Frameworks */,
832BEF04278DD06D005E1BC4 /* AVFoundation.framework in Frameworks */,
8DC2EF570486A6940098B216 /* Cocoa.framework in Frameworks */,
17D21DAD0B8BE76800D1EBDE /* AudioToolbox.framework in Frameworks */,

@ -357,6 +364,8 @@
17D21C7D0B8BE4BA00D1EBDE /* Node.m */,
17D21C7E0B8BE4BA00D1EBDE /* OutputNode.h */,
17D21C7F0B8BE4BA00D1EBDE /* OutputNode.m */,
835EDD7C279FE307001EDCCE /* HeadphoneFilter.h */,
835EDD7A279FE23A001EDCCE /* HeadphoneFilter.m */,
);
path = Chain;
sourceTree = "<group>";

@ -423,6 +432,7 @@
832BEF02278DD06D005E1BC4 /* Frameworks */ = {
isa = PBXGroup;
children = (
835EDD7E27A00089001EDCCE /* Accelerate.framework */,
832BEF03278DD06D005E1BC4 /* AVFoundation.framework */,
);
name = Frameworks;

@ -776,6 +786,7 @@
17C940230B900909008627D6 /* AudioMetadataReader.h in Headers */,
8389F294278E64590074164C /* libretro.h in Headers */,
17B619300B909BC300BC003F /* AudioPropertiesReader.h in Headers */,
835EDD7D279FE307001EDCCE /* HeadphoneFilter.h in Headers */,
8389F286278E64590074164C /* config_file_userdata.h in Headers */,
839366671815923C006DD712 /* CogPluginMulti.h in Headers */,
17ADB13C0B97926D00257CA2 /* AudioSource.h in Headers */,

@ -869,6 +880,7 @@
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
835EDD7B279FE23A001EDCCE /* HeadphoneFilter.m in Sources */,
17D21CA20B8BE4BA00D1EBDE /* BufferChain.m in Sources */,
17D21CA60B8BE4BA00D1EBDE /* InputNode.m in Sources */,
17D21CA80B8BE4BA00D1EBDE /* Node.m in Sources */,
@ -31,6 +31,8 @@ RETRO_BEGIN_DECLS

void *memalign_alloc(size_t boundary, size_t size);

void *memalign_calloc(size_t boundary, size_t unit, size_t size);

void *memalign_alloc_aligned(size_t size);

void memalign_free(void *ptr);
@ -41,6 +41,23 @@ void *memalign_alloc(size_t boundary, size_t size)
return (void*)addr;
}

void *memalign_calloc(size_t boundary, size_t unit, size_t size)
{
size *= unit;

void **place = NULL;
uintptr_t addr = 0;
void *ptr = (void*)calloc(1, boundary + size + sizeof(uintptr_t));
if (!ptr)
return NULL;

addr = ((uintptr_t)ptr + sizeof(uintptr_t) + boundary)
& ~(boundary - 1);
place = (void**)addr;
place[-1] = ptr;

return (void*)addr;
}
void memalign_free(void *ptr)
{
void **p = NULL;
@ -113,6 +113,7 @@
8355D6B8180613FB00D05687 /* Security.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8355D6B7180613FB00D05687 /* Security.framework */; };
8359009D17FF06570060F3ED /* ArchiveSource.bundle in CopyFiles */ = {isa = PBXBuildFile; fileRef = 8359FF3117FEF35D0060F3ED /* ArchiveSource.bundle */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; };
835A8FD327957310005B3C39 /* json.c in Sources */ = {isa = PBXBuildFile; fileRef = 835A8FC627957310005B3C39 /* json.c */; };
835EDD8A27A000E8001EDCCE /* gsx.wv in Resources */ = {isa = PBXBuildFile; fileRef = 835EDD8027A000E8001EDCCE /* gsx.wv */; };
835F00BB279BD1CD00055FCF /* SecondsFormatter.m in Sources */ = {isa = PBXBuildFile; fileRef = 835F00B8279BD1CD00055FCF /* SecondsFormatter.m */; };
8360EF6D17F92E56005208A4 /* HighlyComplete.bundle in CopyFiles */ = {isa = PBXBuildFile; fileRef = 8360EF0517F92B24005208A4 /* HighlyComplete.bundle */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; };
836D28A818086386005B7299 /* MiniModeMenuTitleTransformer.m in Sources */ = {isa = PBXBuildFile; fileRef = 836D28A718086386005B7299 /* MiniModeMenuTitleTransformer.m */; };

@ -924,6 +925,7 @@
835C888B22CC1881001B4B3F /* en */ = {isa = PBXFileReference; lastKnownFileType = text.html; name = en; path = en.lproj/Credits.html; sourceTree = "<group>"; };
835C888C22CC1882001B4B3F /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; };
835C888D22CC1882001B4B3F /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/Localizable.strings; sourceTree = "<group>"; };
835EDD8027A000E8001EDCCE /* gsx.wv */ = {isa = PBXFileReference; lastKnownFileType = file; path = gsx.wv; sourceTree = "<group>"; };
835F00B4279BD1CD00055FCF /* SecondsFormatter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SecondsFormatter.h; sourceTree = "<group>"; };
835F00B8279BD1CD00055FCF /* SecondsFormatter.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SecondsFormatter.m; sourceTree = "<group>"; };
8360EF0017F92B23005208A4 /* HighlyComplete.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = HighlyComplete.xcodeproj; path = Plugins/HighlyComplete/HighlyComplete.xcodeproj; sourceTree = "<group>"; };

@ -1467,6 +1469,7 @@
1791005C0CB44D6D0070BC5C /* Cog.scriptSuite */,
1791005D0CB44D6D0070BC5C /* Cog.scriptTerminology */,
832923AE279FAC400048201E /* Cog.q1.json */,
835EDD8027A000E8001EDCCE /* gsx.wv */,
17D1B0D00F6320EA00694C57 /* InfoInspector.xib */,
17342A980D5FD20B00E8D854 /* MainMenu.xib */,
17342ABD0D5FD36400E8D854 /* OpenURLPanel.xib */,

@ -2367,6 +2370,7 @@
17818A950C0B27AC001C4916 /* aiff.icns in Resources */,
8384915D18083EAB00E7332D /* pauseDockBadge.png in Resources */,
8384916818083EAB00E7332D /* shuffleOffTemplate.pdf in Resources */,
835EDD8A27A000E8001EDCCE /* gsx.wv in Resources */,
8384915F18083EAB00E7332D /* playDockBadge.png in Resources */,
17818A960C0B27AC001C4916 /* ape.icns in Resources */,
17818A970C0B27AC001C4916 /* m3u.icns in Resources */,
@ -349,6 +349,17 @@
<binding destination="52" name="value" keyPath="values.volumeLimit" id="7Sl-LJ-ljd"/>
</connections>
</button>
<button verticalHuggingPriority="750" fixedFrame="YES" translatesAutoresizingMaskIntoConstraints="NO" id="bQz-MX-59V">
<rect key="frame" x="251" y="121" width="227" height="18"/>
<autoresizingMask key="autoresizingMask" flexibleMaxX="YES" flexibleMinY="YES"/>
<buttonCell key="cell" type="check" title="Enable headphone virtualization" bezelStyle="regularSquare" imagePosition="left" state="on" inset="2" id="W0w-wC-1ug">
<behavior key="behavior" changeContents="YES" doesNotDimImage="YES" lightByContents="YES"/>
<font key="font" metaFont="system"/>
</buttonCell>
<connections>
<binding destination="52" name="value" keyPath="values.headphoneVirtualization" id="wwT-OG-ulF"/>
</connections>
</button>
</subviews>
<point key="canvasLocation" x="-151" y="318"/>
</customView>