Updated modplay and DUMB, with no major effects on playback

CQTexperiment
Chris Moeller 2014-03-25 20:37:11 -07:00
parent d1afe662b5
commit d00e1d227d
8 changed files with 614 additions and 388 deletions

View File

@ -379,6 +379,15 @@ DUH_SIGRENDERER *dumb_it_start_at_order(DUH *duh, int n_channels, int startorder
void dumb_it_set_resampling_quality(DUMB_IT_SIGRENDERER * sigrenderer, int quality);
enum
{
DUMB_IT_RAMP_NONE = 0,
DUMB_IT_RAMP_ONOFF_ONLY = 1,
DUMB_IT_RAMP_FULL = 2
};
void dumb_it_set_ramp_style(DUMB_IT_SIGRENDERER * sigrenderer, int ramp_style);
void dumb_it_set_loop_callback(DUMB_IT_SIGRENDERER *sigrenderer, int (*callback)(void *data), void *data);
void dumb_it_set_xm_speed_zero_callback(DUMB_IT_SIGRENDERER *sigrenderer, int (*callback)(void *data), void *data);
void dumb_it_set_midi_callback(DUMB_IT_SIGRENDERER *sigrenderer, int (*callback)(void *data, int channel, unsigned char midi_byte), void *data);
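
For context, the new ramp-style setter is intended to be called on the module renderer before any audio is generated. A minimal sketch, assuming a DUH handle named duh and the stock duh_get_it_sigrenderer() accessor from DUMB; none of this appears in the commit itself:

/* select on/off-only declicking; 0 = none, 1 = on/off only, 2 = full (the default set in init_sigrenderer) */
DUH_SIGRENDERER *sr = dumb_it_start_at_order(duh, 2, 0);   /* 2 channels, start at order 0 */
DUMB_IT_SIGRENDERER *itsr = duh_get_it_sigrenderer(sr);
if (itsr)
    dumb_it_set_ramp_style(itsr, DUMB_IT_RAMP_ONOFF_ONLY);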

View File

@ -743,6 +743,8 @@ struct DUMB_IT_SIGRENDERER
long gvz_time;
int gvz_sub_time;
int ramp_style;
//int max_output;
};

View File

@ -3922,6 +3922,7 @@ static void playing_volume_setup(DUMB_IT_SIGRENDERER * sigrenderer, IT_PLAYING *
int pan;
float vol, span;
float rampScale;
int ramp_style = sigrenderer->ramp_style;
pan = apply_pan_envelope(playing);
@ -3944,23 +3945,39 @@ static void playing_volume_setup(DUMB_IT_SIGRENDERER * sigrenderer, IT_PLAYING *
playing->float_volume[0] *= vol;
playing->float_volume[1] *= vol;
rampScale = 4;
if (playing->declick_stage == 0) {
playing->ramp_volume[0] = 0;
playing->ramp_volume[1] = 0;
rampScale = 48;
playing->declick_stage++;
} else if (playing->declick_stage == 1) {
rampScale = 48;
} else if (playing->declick_stage >= 3) {
playing->float_volume[0] = 0;
playing->float_volume[1] = 0;
if (playing->declick_stage == 3)
if (ramp_style == 0 || (ramp_style < 2 && playing->declick_stage == 2)) {
if (playing->declick_stage < 2) {
playing->ramp_volume[0] = playing->float_volume[0];
playing->ramp_volume[1] = playing->float_volume[1];
playing->declick_stage = 2;
} else if (playing->declick_stage > 2) {
playing->float_volume[0] = 0;
playing->float_volume[1] = 0;
playing->ramp_volume[0] = 0;
playing->ramp_volume[1] = 0;
playing->declick_stage = 4;
}
playing->ramp_delta[0] = 0;
playing->ramp_delta[1] = 0;
} else {
rampScale = 4;
if (playing->declick_stage == 0) {
playing->ramp_volume[0] = 0;
playing->ramp_volume[1] = 0;
rampScale = 48;
playing->declick_stage++;
rampScale = 48;
} else if (playing->declick_stage == 1) {
rampScale = 48;
} else if (playing->declick_stage >= 3) {
playing->float_volume[0] = 0;
playing->float_volume[1] = 0;
if (playing->declick_stage == 3)
playing->declick_stage++;
rampScale = 48;
}
playing->ramp_delta[0] = rampScale * invt2g * (playing->float_volume[0] - playing->ramp_volume[0]);
playing->ramp_delta[1] = rampScale * invt2g * (playing->float_volume[1] - playing->ramp_volume[1]);
}
playing->ramp_delta[0] = rampScale * invt2g * (playing->float_volume[0] - playing->ramp_volume[0]);
playing->ramp_delta[1] = rampScale * invt2g * (playing->float_volume[1] - playing->ramp_volume[1]);
}
static void process_playing(DUMB_IT_SIGRENDERER *sigrenderer, IT_PLAYING *playing, float invt2g)
@ -5154,6 +5171,7 @@ static DUMB_IT_SIGRENDERER *init_sigrenderer(DUMB_IT_SIGDATA *sigdata, int n_cha
sigrenderer->sigdata = sigdata;
sigrenderer->n_channels = n_channels;
sigrenderer->resampling_quality = dumb_resampling_quality;
sigrenderer->ramp_style = DUMB_IT_RAMP_FULL;
sigrenderer->globalvolume = sigdata->global_volume;
sigrenderer->tempo = sigdata->tempo;
@ -5341,6 +5359,13 @@ void dumb_it_set_resampling_quality(DUMB_IT_SIGRENDERER * sigrenderer, int quali
}
void dumb_it_set_ramp_style(DUMB_IT_SIGRENDERER * sigrenderer, int ramp_style) {
if (sigrenderer && ramp_style >= 0 && ramp_style <= 2) {
sigrenderer->ramp_style = ramp_style;
}
}
void dumb_it_set_loop_callback(DUMB_IT_SIGRENDERER *sigrenderer, int (*callback)(void *data), void *data)
{
if (sigrenderer) {

View File

@ -358,6 +358,9 @@ typedef struct
// pre-initialized variables
int8_t samplingInterpolation;// = 1;
#ifdef USE_VOL_RAMP
int8_t rampStyle;
#endif
float *masterBufferL;// = NULL;
float *masterBufferR;// = NULL;
int32_t samplesLeft;// = 0; // must be signed
@ -1984,7 +1987,7 @@ static void MainPlayer(PLAYER *p) // periodically called from mixer
ch = &p->Stm[i];
#ifdef USE_VOL_RAMP
if ((ch->Status & (IS_Vol | IS_NyTon)) == IS_Vol)
if ((ch->Status & (IS_Vol | (p->rampStyle > 0 ? IS_NyTon : 0))) == IS_Vol)
#else
if (ch->Status & IS_Vol)
#endif
@ -1996,11 +1999,14 @@ static void MainPlayer(PLAYER *p) // periodically called from mixer
if (ch->Status & IS_NyTon)
{
#ifdef USE_VOL_RAMP
p->voice[ch->Nr + 127] = p->voice[ch->Nr];
voiceSetVolume(p, ch->Nr, ch->FinalVol, ch->FinalPan, 1);
voiceSetVolume(p, ch->Nr + 127, 0, ch->FinalPan, 1);
resampler_dup_inplace(p->resampler[ch->Nr + 127], p->resampler[ch->Nr]);
resampler_dup_inplace(p->resampler[ch->Nr + 127 + 254], p->resampler[ch->Nr + 254]);
if (p->rampStyle > 0)
{
p->voice[ch->Nr + 127] = p->voice[ch->Nr];
voiceSetVolume(p, ch->Nr, ch->FinalVol, ch->FinalPan, 1);
voiceSetVolume(p, ch->Nr + 127, 0, ch->FinalPan, 1);
resampler_dup_inplace(p->resampler[ch->Nr + 127], p->resampler[ch->Nr]);
resampler_dup_inplace(p->resampler[ch->Nr + 127 + 254], p->resampler[ch->Nr + 254]);
}
#endif
s = ch->InstrOfs;
@ -2715,21 +2721,34 @@ void voiceSetSamplePosition(PLAYER *p, uint8_t i, uint16_t value)
void voiceSetVolume(PLAYER *p, uint8_t i, float vol, uint8_t pan, uint8_t sharp)
{
#ifdef USE_VOL_RAMP
const float rampRate = sharp ? p->f_samplesPerFrameSharp : p->f_samplesPerFrame;
if (sharp)
if (p->rampStyle > 1 || (p->rampStyle > 0 && sharp))
{
if (vol)
const float rampRate = sharp ? p->f_samplesPerFrameSharp : p->f_samplesPerFrame;
if (sharp)
{
p->voice[i].volumeL = 0.0f;
p->voice[i].volumeR = 0.0f;
if (vol)
{
p->voice[i].volumeL = 0.0f;
p->voice[i].volumeR = 0.0f;
}
else
p->voice[i].rampTerminates = 1;
}
else
p->voice[i].rampTerminates = 1;
p->voice[i].targetVolL = vol * p->PanningTab[256 - pan];
p->voice[i].targetVolR = vol * p->PanningTab[ pan];
p->voice[i].volDeltaL = (p->voice[i].targetVolL - p->voice[i].volumeL) * rampRate;
p->voice[i].volDeltaR = (p->voice[i].targetVolR - p->voice[i].volumeR) * rampRate;
}
else
{
p->voice[i].volumeL = vol * p->PanningTab[256 - pan];
p->voice[i].volumeR = vol * p->PanningTab[ pan];
p->voice[i].targetVolL = p->voice[i].volumeL;
p->voice[i].targetVolR = p->voice[i].volumeR;
p->voice[i].volDeltaL = 0;
p->voice[i].volDeltaR = 0;
}
p->voice[i].targetVolL = vol * p->PanningTab[256 - pan];
p->voice[i].targetVolR = vol * p->PanningTab[ pan];
p->voice[i].volDeltaL = (p->voice[i].targetVolL - p->voice[i].volumeL) * rampRate;
p->voice[i].volDeltaR = (p->voice[i].targetVolR - p->voice[i].volumeR) * rampRate;
#else
p->voice[i].volumeL = vol * p->PanningTab[256 - pan];
p->voice[i].volumeR = vol * p->PanningTab[ pan];
@ -2763,6 +2782,10 @@ static inline void mix8b(PLAYER *p, uint32_t ch, uint32_t samples)
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle = p->rampStyle;
#endif
void * resampler;
sampleLength = p->voice[ch].sampleLength;
@ -2852,37 +2875,40 @@ static inline void mix8b(PLAYER *p, uint32_t ch, uint32_t samples)
sampleR = (sample * p->voice[ch].volumeR);
#ifdef USE_VOL_RAMP
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (rampStyle > 0)
{
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
}
}
#endif
@ -2912,6 +2938,10 @@ static inline void mix8bstereo(PLAYER *p, uint32_t ch, uint32_t samples)
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle = p->rampStyle;
#endif
void * resampler[2];
sampleLength = p->voice[ch].sampleLength;
@ -3010,37 +3040,40 @@ static inline void mix8bstereo(PLAYER *p, uint32_t ch, uint32_t samples)
sampleR = (sampleR * p->voice[ch].volumeR);
#ifdef USE_VOL_RAMP
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (rampStyle > 0)
{
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
}
}
#endif
@ -3071,6 +3104,10 @@ static inline void mix16b(PLAYER *p, uint32_t ch, uint32_t samples)
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle = p->rampStyle;
#endif
void * resampler;
sampleLength = p->voice[ch].sampleLength;
@ -3160,37 +3197,40 @@ static inline void mix16b(PLAYER *p, uint32_t ch, uint32_t samples)
sampleR = (sample * p->voice[ch].volumeR);
#ifdef USE_VOL_RAMP
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (rampStyle > 0)
{
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
}
}
#endif
@ -3220,6 +3260,10 @@ static inline void mix16bstereo(PLAYER *p, uint32_t ch, uint32_t samples)
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle = p->rampStyle;
#endif
void * resampler[2];
sampleLength = p->voice[ch].sampleLength;
@ -3318,37 +3362,40 @@ static inline void mix16bstereo(PLAYER *p, uint32_t ch, uint32_t samples)
sampleR = (sampleR * p->voice[ch].volumeR);
#ifdef USE_VOL_RAMP
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (rampStyle > 0)
{
p->voice[ch].volumeL += p->voice[ch].volDeltaL;
p->voice[ch].volumeR += p->voice[ch].volDeltaR;
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaL >= 0.0f)
{
if (p->voice[ch].volumeL > p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
else
{
if (p->voice[ch].volumeL < p->voice[ch].targetVolL)
p->voice[ch].volumeL = p->voice[ch].targetVolL;
}
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volDeltaR >= 0.0f)
{
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].volumeR > p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
else
{
if (p->voice[ch].volumeR < p->voice[ch].targetVolR)
p->voice[ch].volumeR = p->voice[ch].targetVolR;
}
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
if (p->voice[ch].rampTerminates && !p->voice[ch].volumeL && !p->voice[ch].volumeR)
{
p->voice[ch].sampleData = NULL;
p->voice[ch].samplePosition = 0;
p->voice[ch].busy = 0;
}
}
#endif
@ -3379,6 +3426,9 @@ static void mixSampleBlock(PLAYER *p, float *outputStream, uint32_t sampleBlockL
{
float *streamPointer;
uint32_t i;
#ifdef USE_VOL_RAMP
int32_t rampStyle = p->rampStyle;
#endif
float outL;
float outR;
@ -3394,7 +3444,8 @@ static void mixSampleBlock(PLAYER *p, float *outputStream, uint32_t sampleBlockL
continue;
mixChannel(p, i, sampleBlockLength);
#ifdef USE_VOL_RAMP
mixChannel(p, i + 127, sampleBlockLength);
if (rampStyle > 0)
mixChannel(p, i + 127, sampleBlockLength);
#endif
}
@ -3485,7 +3536,7 @@ void ft2play_RenderFixed16(void *_p, int16_t *buffer, int32_t count, int8_t dept
}
}
void * ft2play_Alloc(uint32_t _samplingFrequency, int8_t interpolation)
void * ft2play_Alloc(uint32_t _samplingFrequency, int8_t interpolation, int8_t ramp_style)
{
uint8_t j;
uint16_t i;
@ -3510,6 +3561,9 @@ void * ft2play_Alloc(uint32_t _samplingFrequency, int8_t interpolation)
goto error;
p->samplingInterpolation = interpolation;
#ifdef USE_VOL_RAMP
p->rampStyle = ramp_style;
#endif
resampler_init();

View File

@ -7,7 +7,22 @@
extern "C" {
#endif
void * ft2play_Alloc(uint32_t _samplingFrequency, int8_t interpolation);
enum
{
FT2_RAMP_NONE = 0,
FT2_RAMP_ONOFF_ONLY = 1,
FT2_RAMP_FULL = 2
};
enum
{
FT2_INTERPOLATE_ZOH = 0,
FT2_INTERPOLATE_LINEAR = 1,
FT2_INTERPOLATE_CUBIC = 2,
FT2_INTERPOLATE_SINC = 3
};
void * ft2play_Alloc(uint32_t _samplingFrequency, int8_t interpolation, int8_t ramp_style);
void ft2play_Free(void *);
int8_t ft2play_LoadModule(void *, const uint8_t *buffer, size_t size);
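
A hedged usage sketch of the updated allocator with the new enums; buffer and size stand for an XM module already loaded into memory and are not part of this commit:

/* 44.1 kHz output, cubic interpolation, full volume ramping */
void *xm = ft2play_Alloc(44100, FT2_INTERPOLATE_CUBIC, FT2_RAMP_FULL);
if (xm && ft2play_LoadModule(xm, buffer, size))
{
    /* render audio here, e.g. via ft2play_RenderFixed16() as seen above */
}
ft2play_Free(xm);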

View File

@ -209,6 +209,9 @@ typedef struct
// pre-initialized variables
int8_t samplingInterpolation;// = 1;
#ifdef USE_VOL_RAMP
int8_t rampStyle;
#endif
float *masterBufferL;// = NULL;
float *masterBufferR;// = NULL;
int32_t samplesLeft;// = 0; // must be signed
@ -318,6 +321,9 @@ static const int16_t vibramp[64] =
static void setSamplesPerFrame(PLAYER *, uint32_t val);
static void setSamplingInterpolation(PLAYER *, int8_t value);
#ifdef USE_VOL_RAMP
static void setRampStyle(PLAYER *, int8_t value);
#endif
static void setStereoMode(PLAYER *, int8_t value);
static void setMasterVolume(PLAYER *, uint8_t value);
static void voiceSetSource(PLAYER *, uint8_t voiceNumber, const int8_t *sampleData,
@ -475,7 +481,7 @@ static effect_routine sotherjmp[27] =
// CODE START
void * st3play_Alloc(uint32_t outputFreq, int8_t interpolation)
void * st3play_Alloc(uint32_t outputFreq, int8_t interpolation, int8_t ramp_style)
{
int i;
@ -520,6 +526,9 @@ void * st3play_Alloc(uint32_t outputFreq, int8_t interpolation)
p->f_outputFreq = (float)outputFreq;
setSamplingInterpolation(p, interpolation);
#ifdef USE_VOL_RAMP
setRampStyle(p, ramp_style);
#endif
setSamplesPerFrame(p, ((outputFreq * 5UL) / 2 / 125));
return p;
@ -926,26 +935,29 @@ static inline void doamiga(PLAYER *p, uint8_t ch)
loop = 1;
#ifdef USE_VOL_RAMP
p->voice[ch + 32] = p->voice[ch];
setvol(p, ch, 2);
resampler_dup_inplace(p->resampler[ch + 32], p->resampler[ch]);
resampler_dup_inplace(p->resampler[ch + 32 + 64], p->resampler[ch + 64]);
if (p->chn[ch].vol != 255)
if (p->rampStyle > 0)
{
if (p->chn[ch].vol <= 64)
p->voice[ch + 32] = p->voice[ch];
setvol(p, ch, 2);
resampler_dup_inplace(p->resampler[ch + 32], p->resampler[ch]);
resampler_dup_inplace(p->resampler[ch + 32 + 64], p->resampler[ch + 64]);
if (p->chn[ch].vol != 255)
{
p->chn[ch].avol = p->chn[ch].vol;
p->chn[ch].aorgvol = p->chn[ch].vol;
}
else
// NON-ST3
if ((p->chn[ch].vol >= 128) && (p->chn[ch].vol <= 192))
{
p->chn[ch].apanpos = (p->chn[ch].vol - 128) << 2;
setpan(p, ch);
if (p->chn[ch].vol <= 64)
{
p->chn[ch].avol = p->chn[ch].vol;
p->chn[ch].aorgvol = p->chn[ch].vol;
}
else
// NON-ST3
if ((p->chn[ch].vol >= 128) && (p->chn[ch].vol <= 192))
{
p->chn[ch].apanpos = (p->chn[ch].vol - 128) << 2;
setpan(p, ch);
}
}
volassigned = 1;
}
volassigned = 1;
#endif
setvol(p, ch, 1);
@ -1014,7 +1026,7 @@ static inline void doamiga(PLAYER *p, uint8_t ch)
}
#ifdef USE_VOL_RAMP
if (p->chn[ch].vol != 255 && !volassigned)
if (p->chn[ch].vol != 255 && (p->rampStyle < 1 || !volassigned))
#else
if (p->chn[ch].vol != 255)
#endif
@ -2585,6 +2597,13 @@ void setSamplingInterpolation(PLAYER *p, int8_t value)
p->samplingInterpolation = value;
}
#ifdef USE_VOL_RAMP
void setRampStyle(PLAYER *p, int8_t value)
{
p->rampStyle = value;
}
#endif
void setStereoMode(PLAYER *p, int8_t value)
{
p->stereomode = value;
@ -2664,16 +2683,25 @@ void voiceSetSamplePosition(PLAYER *p, uint8_t voiceNumber, uint16_t value)
void voiceSetVolume(PLAYER *p, uint8_t voiceNumber, float volume, uint8_t sharp)
{
#ifdef USE_VOL_RAMP
const float rampRate = sharp ? p->f_samplesPerFrameSharp : p->f_samplesPerFrame;
if (sharp)
if (p->rampStyle > 1 || (p->rampStyle > 0 && sharp != 0))
{
if (volume)
p->voice[voiceNumber].volume = 0.0f;
else
p->voice[voiceNumber].rampTerminates = 1;
const float rampRate = sharp ? p->f_samplesPerFrameSharp : p->f_samplesPerFrame;
if (sharp)
{
if (volume)
p->voice[voiceNumber].volume = 0.0f;
else
p->voice[voiceNumber].rampTerminates = 1;
}
p->voice[voiceNumber].targetVol = volume;
p->voice[voiceNumber].volDelta = (p->voice[voiceNumber].targetVol - p->voice[voiceNumber].volume) * rampRate;
}
else
{
p->voice[voiceNumber].volume = volume;
p->voice[voiceNumber].targetVol = volume;
p->voice[voiceNumber].volDelta = 0;
}
p->voice[voiceNumber].targetVol = volume;
p->voice[voiceNumber].volDelta = (p->voice[voiceNumber].targetVol - p->voice[voiceNumber].volume) * rampRate;
#else
p->voice[voiceNumber].volume = volume;
#endif
@ -2682,12 +2710,24 @@ void voiceSetVolume(PLAYER *p, uint8_t voiceNumber, float volume, uint8_t sharp)
void voiceSetSurround(PLAYER *p, uint8_t voiceNumber, int8_t surround)
{
#ifdef USE_VOL_RAMP
const float rampRate = p->f_samplesPerFrameSharp;
if (surround)
p->voice[voiceNumber].targetPanR = -p->voice[voiceNumber].orgPanR;
if (p->rampStyle > 1)
{
const float rampRate = p->f_samplesPerFrameSharp;
if (surround)
p->voice[voiceNumber].targetPanR = -p->voice[voiceNumber].orgPanR;
else
p->voice[voiceNumber].targetPanR = p->voice[voiceNumber].orgPanR;
p->voice[voiceNumber].panDeltaR = (p->voice[voiceNumber].targetPanR - p->voice[voiceNumber].panningR) * rampRate;
}
else
p->voice[voiceNumber].targetPanR = p->voice[voiceNumber].orgPanR;
p->voice[voiceNumber].panDeltaR = (p->voice[voiceNumber].targetPanR - p->voice[voiceNumber].panningR) * rampRate;
{
if (surround)
p->voice[voiceNumber].panningR = -p->voice[voiceNumber].orgPanR;
else
p->voice[voiceNumber].panningR = p->voice[voiceNumber].orgPanR;
p->voice[voiceNumber].targetPanR = p->voice[voiceNumber].panningR;
p->voice[voiceNumber].panDeltaR = 0;
}
#else
if (surround)
p->voice[voiceNumber].panningR = -p->voice[voiceNumber].orgPanR;
@ -2707,10 +2747,22 @@ void voiceSetPanning(PLAYER *p, uint8_t voiceNumber, uint16_t pan)
pf = (float)(pan) / 256.0f;
#ifdef USE_VOL_RAMP
p->voice[voiceNumber].targetPanL = 1.0f - pf;
p->voice[voiceNumber].targetPanR = pf;
p->voice[voiceNumber].panDeltaL = (p->voice[voiceNumber].targetPanL - p->voice[voiceNumber].panningL) * rampRate;
p->voice[voiceNumber].panDeltaR = (p->voice[voiceNumber].targetPanR - p->voice[voiceNumber].panningR) * rampRate;
if (p->rampStyle > 1)
{
p->voice[voiceNumber].targetPanL = 1.0f - pf;
p->voice[voiceNumber].targetPanR = pf;
p->voice[voiceNumber].panDeltaL = (p->voice[voiceNumber].targetPanL - p->voice[voiceNumber].panningL) * rampRate;
p->voice[voiceNumber].panDeltaR = (p->voice[voiceNumber].targetPanR - p->voice[voiceNumber].panningR) * rampRate;
}
else
{
p->voice[voiceNumber].panningL = 1.0f - pf;
p->voice[voiceNumber].targetPanL = 1.0f - pf;
p->voice[voiceNumber].panningR = pf;
p->voice[voiceNumber].targetPanR = pf;
p->voice[voiceNumber].panDeltaL = 0;
p->voice[voiceNumber].panDeltaR = 0;
}
#else
p->voice[voiceNumber].panningL = 1.0f - pf;
p->voice[voiceNumber].panningR = pf;
@ -2732,6 +2784,9 @@ static inline void mix8b(PLAYER *p, uint8_t ch, uint32_t samples)
int32_t sampleLoopLength;
int32_t samplePosition;
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle;
#endif
uint32_t j;
float volume;
float sample;
@ -2739,6 +2794,10 @@ static inline void mix8b(PLAYER *p, uint8_t ch, uint32_t samples)
float panningR;
void *resampler;
#ifdef USE_VOL_RAMP
rampStyle = p->rampStyle;
#endif
sampleLength = p->voice[ch].sampleLength;
sampleLoopLength = p->voice[ch].sampleLoopLength;
sampleLoopEnd = p->voice[ch].sampleLoopEnd;
@ -2797,47 +2856,50 @@ static inline void mix8b(PLAYER *p, uint8_t ch, uint32_t samples)
p->masterBufferR[j] += (sample * panningR);
#ifdef USE_VOL_RAMP
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (rampStyle > 0)
{
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
}
}
#endif
}
@ -2857,6 +2919,9 @@ static inline void mix8bstereo(PLAYER *p, uint8_t ch, uint32_t samples)
int32_t sampleLoopLength;
int32_t samplePosition;
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle;
#endif
uint32_t j;
float volume;
float sampleL;
@ -2865,6 +2930,10 @@ static inline void mix8bstereo(PLAYER *p, uint8_t ch, uint32_t samples)
float panningR;
void *resampler[2];
#ifdef USE_VOL_RAMP
rampStyle = p->rampStyle;
#endif
sampleLength = p->voice[ch].sampleLength;
sampleLoopLength = p->voice[ch].sampleLoopLength;
sampleLoopEnd = p->voice[ch].sampleLoopEnd;
@ -2933,47 +3002,50 @@ static inline void mix8bstereo(PLAYER *p, uint8_t ch, uint32_t samples)
p->masterBufferR[j] += (sampleR * panningR);
#ifdef USE_VOL_RAMP
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (rampStyle > 0)
{
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
}
}
#endif
}
@ -2993,6 +3065,9 @@ static inline void mix16b(PLAYER *p, uint8_t ch, uint32_t samples)
int32_t sampleLoopLength;
int32_t samplePosition;
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle;
#endif
uint32_t j;
float volume;
float sample;
@ -3000,6 +3075,10 @@ static inline void mix16b(PLAYER *p, uint8_t ch, uint32_t samples)
float panningR;
void *resampler;
#ifdef USE_VOL_RAMP
rampStyle = p->rampStyle;
#endif
sampleLength = p->voice[ch].sampleLength;
sampleLoopLength = p->voice[ch].sampleLoopLength;
sampleLoopEnd = p->voice[ch].sampleLoopEnd;
@ -3058,47 +3137,50 @@ static inline void mix16b(PLAYER *p, uint8_t ch, uint32_t samples)
p->masterBufferR[j] += (sample * panningR);
#ifdef USE_VOL_RAMP
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (rampStyle > 0)
{
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
}
}
#endif
}
@ -3118,6 +3200,9 @@ static inline void mix16bstereo(PLAYER *p, uint8_t ch, uint32_t samples)
int32_t sampleLoopLength;
int32_t samplePosition;
int32_t interpolating;
#ifdef USE_VOL_RAMP
int32_t rampStyle;
#endif
uint32_t j;
float volume;
float sampleL;
@ -3126,6 +3211,10 @@ static inline void mix16bstereo(PLAYER *p, uint8_t ch, uint32_t samples)
float panningR;
void *resampler[2];
#ifdef USE_VOL_RAMP
rampStyle = p->rampStyle;
#endif
sampleLength = p->voice[ch].sampleLength;
sampleLoopLength = p->voice[ch].sampleLoopLength;
sampleLoopEnd = p->voice[ch].sampleLoopEnd;
@ -3194,47 +3283,50 @@ static inline void mix16bstereo(PLAYER *p, uint8_t ch, uint32_t samples)
p->masterBufferR[j] += (sampleR * panningR);
#ifdef USE_VOL_RAMP
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (rampStyle > 0)
{
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
}
}
#endif
}
@ -3261,6 +3353,9 @@ static inline void mixadpcm(PLAYER *p, uint8_t ch, uint32_t samples)
int32_t sampleLoopEnd;
int32_t sampleLoopLength;
int32_t samplePosition;
#ifdef USE_VOL_RAMP
int32_t rampStyle;
#endif
int8_t lastDelta;
int32_t interpolating;
uint32_t j;
@ -3270,6 +3365,10 @@ static inline void mixadpcm(PLAYER *p, uint8_t ch, uint32_t samples)
float panningR;
void *resampler;
#ifdef USE_VOL_RAMP
rampStyle = p->rampStyle;
#endif
sampleLength = p->voice[ch].sampleLength;
sampleLoopLength = p->voice[ch].sampleLoopLength;
sampleLoopEnd = p->voice[ch].sampleLoopEnd;
@ -3351,47 +3450,50 @@ static inline void mixadpcm(PLAYER *p, uint8_t ch, uint32_t samples)
p->masterBufferR[j] += (sample * panningR);
#ifdef USE_VOL_RAMP
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (rampStyle > 0)
{
volume += p->voice[ch].volDelta;
panningL += p->voice[ch].panDeltaL;
panningR += p->voice[ch].panDeltaR;
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].volDelta >= 0.0f)
{
if (volume > p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
else
{
if (volume < p->voice[ch].targetVol)
volume = p->voice[ch].targetVol;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaL >= 0.0f)
{
if (panningL > p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
else
{
if (panningL < p->voice[ch].targetPanL)
panningL = p->voice[ch].targetPanL;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].panDeltaR >= 0.0f)
{
if (panningR > p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
else
{
if (panningR < p->voice[ch].targetPanR)
panningR = p->voice[ch].targetPanR;
}
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
if (p->voice[ch].rampTerminates && !volume)
{
p->voice[ch].mixing = 0;
break;
}
}
#endif
}
@ -3430,6 +3532,9 @@ void mixSampleBlock(PLAYER *p, float *outputStream, uint32_t sampleBlockLength)
float *streamPointer;
uint8_t i;
uint32_t j;
#ifdef USE_VOL_RAMP
int32_t rampStyle = p->rampStyle;
#endif
float outL;
float outR;
@ -3445,7 +3550,8 @@ void mixSampleBlock(PLAYER *p, float *outputStream, uint32_t sampleBlockLength)
continue;
mixChannel(p, i, sampleBlockLength);
#ifdef USE_VOL_RAMP
mixChannel(p, i + 32, sampleBlockLength);
if (rampStyle > 0)
mixChannel(p, i + 32, sampleBlockLength);
#endif
}

View File

@ -7,7 +7,22 @@
extern "C" {
#endif
void * st3play_Alloc(uint32_t outputFreq, int8_t interpolation);
enum
{
ST3_RAMP_NONE = 0,
ST3_RAMP_ONOFF_ONLY = 1,
ST3_RAMP_FULL = 2
};
enum
{
ST3_INTERPOLATE_ZOH = 0,
ST3_INTERPOLATE_LINEAR = 1,
ST3_INTERPOLATE_CUBIC = 2,
ST3_INTERPOLATE_SINC = 3
};
void * st3play_Alloc(uint32_t outputFreq, int8_t interpolation, int8_t ramp_style);
void st3play_Free(void *);
int8_t st3play_LoadModule(void *, const uint8_t *module, size_t size);
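
Correspondingly for the ScreamTracker 3 player, a minimal sketch; module and size are assumed to hold the S3M data and are not part of this commit:

/* 44.1 kHz output, linear interpolation, ramping only on note on/off */
void *s3m = st3play_Alloc(44100, ST3_INTERPOLATE_LINEAR, ST3_RAMP_ONOFF_ONLY);
if (s3m && st3play_LoadModule(s3m, module, size))
{
    /* render audio here */
}
st3play_Free(s3m);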

View File

@ -14,7 +14,7 @@
BOOL s3m_probe_length( unsigned long * intro_length, unsigned long * loop_length, const void * src, unsigned long size, unsigned int subsong )
{
void * st3play = st3play_Alloc( 44100, 1 );
void * st3play = st3play_Alloc( 44100, 1, 2 );
if ( !st3play ) return NO;
if ( !st3play_LoadModule( st3play, src, size ) )
@ -66,7 +66,7 @@ BOOL s3m_probe_length( unsigned long * intro_length, unsigned long * loop_length
BOOL xm_probe_length( unsigned long * intro_length, unsigned long * loop_length, const void * src, unsigned long size, unsigned int subsong )
{
void * ft2play = ft2play_Alloc( 44100, 1 );
void * ft2play = ft2play_Alloc( 44100, 1, 2 );
if ( !ft2play ) return NO;
if ( !ft2play_LoadModule( ft2play, src, size ) )
@ -184,7 +184,7 @@ BOOL xm_probe_length( unsigned long * intro_length, unsigned long * loop_length,
{
if ( type == TYPE_S3M )
{
player = st3play_Alloc( 44100, 1 );
player = st3play_Alloc( 44100, 1, 2 );
if ( !player )
return NO;
@ -195,7 +195,7 @@ BOOL xm_probe_length( unsigned long * intro_length, unsigned long * loop_length,
}
else if ( type == TYPE_XM )
{
player = ft2play_Alloc( 44100, 1 );
player = ft2play_Alloc( 44100, 1, 2 );
if ( !player )
return NO;
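
The literal arguments passed in these probe calls map onto the new enums (1 = linear interpolation, 2 = full ramping); an equivalent form using the named constants, assuming the updated headers are included, would be:

player = st3play_Alloc( 44100, ST3_INTERPOLATE_LINEAR, ST3_RAMP_FULL );  /* same as ( 44100, 1, 2 ) */
player = ft2play_Alloc( 44100, FT2_INTERPOLATE_LINEAR, FT2_RAMP_FULL ); /* same as ( 44100, 1, 2 ) */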