parent
5a43e34b15
commit
5466e69d17
@@ -0,0 +1,118 @@
/*
 * AudioCalcEnvelope_F32
 *
 * Created: Chip Audette, Feb 2017
 * Purpose: This module extracts the envelope of the audio signal.
 * Derived From: Core envelope extraction algorithm is from "smooth_env" within
 *   WDRC_circuit from CHAPRO from BTNRH: https://github.com/BTNRH/chapro
 *   As of Feb 2017, the CHAPRO license is listed as "Creative Commons?"
 *
 * This processes a single stream of audio data (i.e., it is mono).
 *
 * MIT License.  Use at your own risk.
 */

#ifndef _AudioCalcEnvelope_F32_h
#define _AudioCalcEnvelope_F32_h

#include <arm_math.h>        //ARM DSP extensions, for speed!
#include <AudioStream_F32.h>

class AudioCalcEnvelope_F32 : public AudioStream_F32
{
  //GUI: inputs:1, outputs:1  //this line used for automatic generation of GUI node
  //GUI: shortName:calc_envelope
  public:
    //default constructor
    AudioCalcEnvelope_F32(void) : AudioStream_F32(1, inputQueueArray_f32),
        sample_rate_Hz(AUDIO_SAMPLE_RATE) { setDefaultValues(); };
    AudioCalcEnvelope_F32(const AudioSettings_F32 &settings) : AudioStream_F32(1, inputQueueArray_f32),
        sample_rate_Hz(settings.sample_rate_Hz) { setDefaultValues(); };

    //here's the method that does all the work
    void update(void) {

      //get the input audio data block
      audio_block_f32_t *in_block = AudioStream_F32::receiveReadOnly_f32();
      if (!in_block) return;

      //check format
      if (in_block->fs_Hz != sample_rate_Hz) {
        Serial.println("AudioCalcEnvelope_F32: *** WARNING ***: Data sample rate does not match expected.");
        Serial.println("AudioCalcEnvelope_F32: Changing sample rate.");
        setSampleRate_Hz(in_block->fs_Hz);
      }

      //prepare an output data block
      audio_block_f32_t *out_block = AudioStream_F32::allocate_f32();
      if (!out_block) { AudioStream_F32::release(in_block); return; }  //release the input block to avoid leaking it

      // //////////////////////add your processing here!
      smooth_env(in_block->data, out_block->data, in_block->length);
      out_block->length = in_block->length;  out_block->fs_Hz = in_block->fs_Hz;

      //transmit the block and be done
      AudioStream_F32::transmit(out_block);
      AudioStream_F32::release(out_block);
      AudioStream_F32::release(in_block);
    }

    //compute the smoothed signal envelope
    //(the envelope of the signal itself, not of the signal power)
    void smooth_env(float x[], float y[], const int n) {
      float xab, xpk;
      int k;

      // find envelope of x and return as y
      //xpk = *ppk;  // start with previous xpk
      xpk = state_ppk;
      for (k = 0; k < n; k++) {
        xab = (x[k] >= 0.0f) ? x[k] : -x[k];  //absolute value of the current sample
        if (xab >= xpk) {
          xpk = alfa * xpk + (1.f - alfa) * xab;  //attack: rise toward the new peak
        } else {
          xpk = beta * xpk;                       //release: decay toward zero
        }
        y[k] = xpk;
      }
      //*ppk = xpk;  // save xpk for next time
      state_ppk = xpk;
    }

    //convert time constants from seconds to unitless parameters, from CHAPRO, agc_prepare.c
    void setAttackRelease_msec(const float atk_msec, const float rel_msec) {
      given_attack_msec = atk_msec;
      given_release_msec = rel_msec;

      // convert ANSI attack & release times to filter time constants
      float ansi_atk = 0.001f * atk_msec * sample_rate_Hz / 2.425f;
      float ansi_rel = 0.001f * rel_msec * sample_rate_Hz / 1.782f;
      alfa = (float) (ansi_atk / (1.0f + ansi_atk));
      beta = (float) (ansi_rel / (10.f + ansi_rel));
    }

    void setDefaultValues(void) {
      float32_t attack_msec = 5.0f;
      float32_t release_msec = 50.0f;
      setAttackRelease_msec(attack_msec, release_msec);
      state_ppk = 0;  //initialize
    }

    void setSampleRate_Hz(const float &fs_Hz) {
      //change params that follow the sample rate
      sample_rate_Hz = fs_Hz;
      setAttackRelease_msec(given_attack_msec, given_release_msec);  //recompute alfa and beta for the new rate
    }

    void resetStates(void) { state_ppk = 1.0; }
    float getCurrentLevel(void) { return state_ppk; }

  private:
    audio_block_f32_t *inputQueueArray_f32[1];  //memory pointer for the input to this module
    float32_t sample_rate_Hz;
    float32_t given_attack_msec, given_release_msec;
    float32_t alfa, beta;  //time constants, but in terms of samples, not seconds
    float32_t state_ppk = 1.0f;
};

#endif

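
As a rough illustration of how the attack/release settings turn into the per-sample coefficients used by smooth_env(), here is a minimal standalone sketch (plain C++, no audio library) that repeats the same arithmetic. The 5 ms / 50 ms values match setDefaultValues(), but the 44100 Hz rate and the square-burst input are just example inputs, not values mandated by the class.

// Standalone illustration of the envelope math above (not part of the library).
#include <cstdio>

int main() {
  const float fs_Hz = 44100.0f;                   // example sample rate (assumption)
  const float atk_msec = 5.0f, rel_msec = 50.0f;  // same defaults as setDefaultValues()

  // same conversion as setAttackRelease_msec()
  float ansi_atk = 0.001f * atk_msec * fs_Hz / 2.425f;
  float ansi_rel = 0.001f * rel_msec * fs_Hz / 1.782f;
  float alfa = ansi_atk / (1.0f + ansi_atk);      // ~0.989 for these example inputs
  float beta = ansi_rel / (10.0f + ansi_rel);     // ~0.992 for these example inputs
  std::printf("alfa = %f, beta = %f\n", alfa, beta);

  // feed a short burst through the same smoothing recursion as smooth_env()
  float xpk = 0.0f;
  for (int k = 0; k < 20; k++) {
    float x = (k < 10) ? 1.0f : 0.0f;             // 10 samples on, 10 samples off
    float xab = (x >= 0.0f) ? x : -x;
    xpk = (xab >= xpk) ? (alfa * xpk + (1.0f - alfa) * xab) : (beta * xpk);
    std::printf("%2d: env = %f\n", k, xpk);
  }
  return 0;
}
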
@@ -0,0 +1,173 @@
/*
 * AudioCalcGainWDRC_F32
 *
 * Created: Chip Audette, Feb 2017
 * Purpose: This module calculates the gain needed for wide dynamic range compression.
 * Derived From: Core algorithm is from "WDRC_circuit" within
 *   CHAPRO from BTNRH: https://github.com/BTNRH/chapro
 *   As of Feb 2017, the CHAPRO license is listed as "Creative Commons?"
 *
 * This processes a single stream of audio data (i.e., it is mono).
 *
 * MIT License.  Use at your own risk.
 */

#ifndef _AudioCalcGainWDRC_F32_h
#define _AudioCalcGainWDRC_F32_h

#include <arm_math.h>        //ARM DSP extensions, for speed!
#include <AudioStream_F32.h>

typedef struct {
  float attack;   // attack time (ms), unused in this class
  float release;  // release time (ms), unused in this class
  float fs;       // sampling rate (Hz), set through other means in this class
  float maxdB;    // maximum signal (dB SPL)...I think this is the SPL corresponding to a signal with an rms of 1.0
  float tkgain;   // compression-start gain
  float tk;       // compression-start kneepoint
  float cr;       // compression ratio
  float bolt;     // broadband output limiting threshold
} CHA_WDRC;

class AudioCalcGainWDRC_F32 : public AudioStream_F32
{
  //GUI: inputs:1, outputs:1  //this line used for automatic generation of GUI node
  //GUI: shortName:calc_WDRCGain
  public:
    //default constructor
    AudioCalcGainWDRC_F32(void) : AudioStream_F32(1, inputQueueArray_f32) { setDefaultValues(); };

    //here's the method that does all the work
    void update(void) {

      //get the input audio data block
      audio_block_f32_t *in_block = AudioStream_F32::receiveReadOnly_f32();  // must be the envelope!
      if (!in_block) return;

      //prepare an output data block
      audio_block_f32_t *out_block = AudioStream_F32::allocate_f32();
      if (!out_block) { AudioStream_F32::release(in_block); return; }  //release the input block to avoid leaking it

      // //////////////////////add your processing here!
      calcGainFromEnvelope(in_block->data, out_block->data, in_block->length);
      out_block->length = in_block->length;  out_block->fs_Hz = in_block->fs_Hz;

      //transmit the block and be done
      AudioStream_F32::transmit(out_block);
      AudioStream_F32::release(out_block);
      AudioStream_F32::release(in_block);
    }

    void calcGainFromEnvelope(float *env, float *gain_out, const int n) {
      //env = input, signal envelope (not the envelope of the power, but the envelope of the signal itself)
      //gain_out = output, the gain in natural units (not power, not dB)
      //n = input, number of samples to process in each vector

      //prepare an intermediate data block
      audio_block_f32_t *env_dB_block = AudioStream_F32::allocate_f32();
      if (!env_dB_block) return;

      //convert to dB
      for (int k = 0; k < n; k++) env_dB_block->data[k] = maxdB + db2(env[k]);  //maxdB is in the private section

      // apply wide dynamic range compression
      WDRC_circuit_gain(env_dB_block->data, gain_out, n, tkgn, tk, cr, bolt);
      AudioStream_F32::release(env_dB_block);
    }

    //original call to WDRC_circuit:
    //  void WDRC_circuit(float *x, float *y, float *pdb, int n, float tkgn, float tk, float cr, float bolt)
    //  void WDRC_circuit(float *orig_signal, float *signal_out, float *env_dB, int n, float tkgn, float tk, float cr, float bolt)
    //modified to output the gain instead of the fully processed signal
    void WDRC_circuit_gain(float *env_dB, float *gain_out, const int n,
        const float tkgn, const float tk, const float cr, const float bolt) {

      float gdb, tkgo, pblt;
      int k;
      float *pdb = env_dB;  //just rename it to keep the code below unchanged
      float tk_tmp = tk;

      if ((tk_tmp + tkgn) > bolt) {
        tk_tmp = bolt - tkgn;  //limit the kneepoint so linear gain never exceeds the output limit
      }
      tkgo = tkgn + tk_tmp * (1.0f - 1.0f / cr);
      pblt = cr * (bolt - tkgo);
      const float cr_const = ((1.0f / cr) - 1.0f);
      for (k = 0; k < n; k++) {
        if ((pdb[k] < tk_tmp) && (cr >= 1.0f)) {
          gdb = tkgn;                                       //below the kneepoint: linear gain
        } else if (pdb[k] > pblt) {
          gdb = bolt + ((pdb[k] - pblt) / 10.0f) - pdb[k];  //above the limiter threshold: 10:1 limiting
        } else {
          gdb = cr_const * pdb[k] + tkgo;                   //in between: compression at the given ratio
        }
        gain_out[k] = undb2(gdb);  //convert the gain from dB to linear units
        //y[k] = x[k] * undb2(gdb);  //apply the gain
      }
    }

    void setDefaultValues(void) {
      CHA_WDRC gha = {1.0f, // attack time (ms), IGNORED HERE
        50.0f,    // release time (ms), IGNORED HERE
        24000.0f, // fs, sampling rate (Hz), IGNORED HERE
        119.0f,   // maxdB, maximum signal (dB SPL)
        0.0f,     // tkgain, compression-start gain
        105.0f,   // tk, compression-start kneepoint
        10.0f,    // cr, compression ratio
        105.0f    // bolt, broadband output limiting threshold
      };
      //setParams(gha.maxdB, gha.tkgain, gha.cr, gha.tk, gha.bolt);  //also sets calcEnvelope
      setParams_from_CHA_WDRC(&gha);
    }
    void setParams_from_CHA_WDRC(CHA_WDRC *gha) {
      setParams(gha->maxdB, gha->tkgain, gha->cr, gha->tk, gha->bolt);  //also sets calcEnvelope
    }
    void setParams(float _maxdB, float _tkgain, float _cr, float _tk, float _bolt) {
      maxdB = _maxdB;
      tkgn = _tkgain;
      tk = _tk;
      cr = _cr;
      bolt = _bolt;
    }

    static float undb2(const float &x) { return expf(0.11512925464970228420089957273422f * x); }  //faster: exp(log(10.0f)*x/20); this is exact
    static float db2(const float &x) { return 6.020599913279623f * log2f_approx(x); }  //faster: 20*log2_approx(x)/log2(10); this is approximate

    /* ----------------------------------------------------------------------
    ** Fast approximation to the log2() function.  It uses a two step
    ** process.  First, it decomposes the floating-point number into
    ** a fractional component F and an exponent E.  The fractional component
    ** is used in a polynomial approximation and then the exponent is added
    ** to the result.  A 3rd order polynomial is used and the result
    ** when computing db20() is accurate to 7.984884e-003 dB.
    ** ------------------------------------------------------------------- */
    //https://community.arm.com/tools/f/discussions/4292/cmsis-dsp-new-functionality-proposal/22621#22621
    static float log2f_approx(float X) {
      //float *C = &log2f_approx_coeff[0];
      float Y;
      float F;
      int E;

      // This is the approximation to log2()
      F = frexpf(fabsf(X), &E);
      // Y = C[0]*F*F*F + C[1]*F*F + C[2]*F + C[3] + E;
      Y = 1.23149591368684f;    //C[0]
      Y *= F;
      Y += -4.11852516267426f;  //C[1]
      Y *= F;
      Y += 6.02197014179219f;   //C[2]
      Y *= F;
      Y += -3.13396450166353f;  //C[3]
      Y += E;

      return (Y);
    }

  private:
    audio_block_f32_t *inputQueueArray_f32[1];  //memory pointer for the input to this module
    float maxdB, tkgn, tk, cr, bolt;
};

#endif

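
To make the three gain regions of WDRC_circuit_gain() concrete, here is a small standalone sketch that duplicates the same formulas (it does not call the class) and evaluates the gain in dB for a few envelope levels. The parameter values and the envelope levels are hypothetical illustration values chosen so all three regions are exercised; they are not the library defaults.

// Standalone sketch of the three WDRC gain regions (illustration only, not library code).
#include <cstdio>

int main() {
  // hypothetical example parameters (NOT the defaults from setDefaultValues())
  float tkgn = 20.0f, tk = 60.0f, cr = 3.0f, bolt = 95.0f;

  float tk_tmp = ((tk + tkgn) > bolt) ? (bolt - tkgn) : tk;  // here: 60
  float tkgo = tkgn + tk_tmp * (1.0f - 1.0f / cr);           // 20 + 60*(2/3) = 60
  float pblt = cr * (bolt - tkgo);                           // 3 * 35 = 105
  float cr_const = (1.0f / cr) - 1.0f;                       // -2/3

  float levels_dB[] = {40.0f, 80.0f, 120.0f};                // example envelope levels (dB SPL)
  for (float pdb : levels_dB) {
    float gdb;
    if ((pdb < tk_tmp) && (cr >= 1.0f)) gdb = tkgn;                                // linear region: +20 dB
    else if (pdb > pblt)                gdb = bolt + ((pdb - pblt) / 10.0f) - pdb; // limiting region: -23.5 dB at 120
    else                                gdb = cr_const * pdb + tkgo;               // compression region: ~+6.7 dB at 80
    std::printf("env = %5.1f dB SPL -> gain = %6.2f dB\n", pdb, gdb);
  }
  return 0;
}
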
@@ -0,0 +1,274 @@
/*
 * fir_filterbank.h
 *
 * Created: Chip Audette, Creare LLC, Feb 2017
 * Primarily built upon CHAPRO "Generic Hearing Aid" from
 *   Boys Town National Research Hospital (BTNRH): https://github.com/BTNRH/chapro
 *
 * License: MIT License.  Use at your own risk.
 *
 */

#ifndef AudioConfigFIRFilterBank_F32_h
#define AudioConfigFIRFilterBank_F32_h

#include "utility/rfft.c"

#define fmove(x,y,n) memmove(x,y,(n)*sizeof(float))
#define fcopy(x,y,n) memcpy(x,y,(n)*sizeof(float))
#define fzero(x,n)   memset(x,0,(n)*sizeof(float))

class AudioConfigFIRFilterBank_F32 {
  //GUI: inputs:0, outputs:0  //this line used for automatic generation of GUI node
  //GUI: shortName:config_FIRbank
  public:
    AudioConfigFIRFilterBank_F32(void) {
    }
    AudioConfigFIRFilterBank_F32(const int n_chan, const int n_fir, const float sample_rate_Hz, float *corner_freq, float *filter_coeff) {
      createFilterCoeff(n_chan, n_fir, sample_rate_Hz, corner_freq, filter_coeff);
    }

    //createFilterCoeff:
    //  Purpose: create all of the FIR filter coefficients for the FIR filterbank
    //  Syntax: createFilterCoeff(n_chan, n_fir, sample_rate_Hz, corner_freq, filter_coeff)
    //    int n_chan (input): number of channels (number of filters) you desire.  Must be 2 or greater.
    //    int n_fir (input): length of each FIR filter (should probably be 8 or greater)
    //    float sample_rate_Hz (input): sample rate of your system (used to scale the corner_freq values)
    //    float *corner_freq (input): array of frequencies (Hz) separating each band in your filter bank.
    //      It should contain n_chan-1 values because it should exclude the bottom (0 Hz) and the top
    //      (Nyquist), as those values are already assumed by this routine.  A valid example is below:
    //        int n_chan = 8;  float cf[] = {317.1666, 502.9734, 797.6319, 1264.9, 2005.9, 3181.1, 5044.7};
    //    float *filter_coeff (output): array of FIR filter coefficients that are computed by this
    //      routine.  You must have pre-allocated the array, such as:  float filter_coeff[N_CHAN][N_FIR];
    //  Optional Usage: if you want 8 default filters spaced logarithmically, use: float *corner_freq = NULL
    //  (see the usage sketch after this class for a worked example)
    void createFilterCoeff(const int n_chan, const int n_fir, const float sample_rate_Hz, float *corner_freq, float *filter_coeff) {
      float *cf = corner_freq;
      int flag__free_cf = 0;
      if (cf == NULL) {
        //compute corner frequencies that are logarithmically spaced
        cf = (float *) calloc(n_chan, sizeof(float));
        flag__free_cf = 1;
        computeLogSpacedCornerFreqs(n_chan, sample_rate_Hz, cf);
      }
      const int window_type = 0;  //0 = Hamming
      fir_filterbank(filter_coeff, cf, n_chan, n_fir, window_type, sample_rate_Hz);
      if (flag__free_cf) free(cf);
    }

    //compute frequencies that span zero to Nyquist.  Leave zero off, because it is assumed to exist in the later code.
    //example of an *8* channel set of frequencies: cf = {317.1666, 502.9734, 797.6319, 1264.9, 2005.9, 3181.1, 5044.7}
    void computeLogSpacedCornerFreqs(const int n_chan, const float sample_rate_Hz, float *cf) {
      float cf_8_band[] = {317.1666, 502.9734, 797.6319, 1264.9, 2005.9, 3181.1, 5044.7};
      float scale_fac = expf(logf(cf_8_band[6] / cf_8_band[0]) / ((float)(n_chan - 2)));
      //Serial.print("MakeFIRFilterBank: computeEvenlySpacedCornerFreqs: scale_fac = "); Serial.println(scale_fac);
      cf[0] = cf_8_band[0];
      //Serial.println("MakeFIRFilterBank: computeEvenlySpacedCornerFreqs: cf = "); Serial.print(cf[0]); Serial.print(", ");
      for (int i = 1; i < n_chan - 1; i++) {
        cf[i] = cf[i - 1] * scale_fac;
        //Serial.print(cf[i]); Serial.print(", ");
      }
      //Serial.println();
    }
  private:

    int nextPowerOfTwo(int n) {
      const int n_out_vals = 8;
      int out_vals[n_out_vals] = {8, 16, 32, 64, 128, 256, 512, 1024};
      if (n < out_vals[0]) return out_vals[0];
      for (int i = 1; i < n_out_vals; i++) {
        if ((n > out_vals[i - 1]) && (n <= out_vals[i])) {  //logical AND (not bitwise) for the range test
          return out_vals[i];
        }
      }
      return n;
    }

    void fir_filterbank(float *bb, float *cf, const int nc, const int nw_orig, const int wt, const float sr)
    {
      double p, w, a = 0.16, sm = 0;
      float *ww, *bk, *xx, *yy;
      int j, k, kk, nt, nf, ns, *be;

      int nw = nextPowerOfTwo(nw_orig);
      Serial.print("fir_filterbank: nw_orig = "); Serial.print(nw_orig);
      Serial.print(", nw = "); Serial.println(nw);

      nt = nw * 2;
      nf = nw + 1;
      ns = nf * 2;
      be = (int *) calloc(nc + 1, sizeof(int));
      ww = (float *) calloc(nw, sizeof(float));
      xx = (float *) calloc(ns, sizeof(float));
      yy = (float *) calloc(ns, sizeof(float));

      // window
      for (j = 0; j < nw; j++) ww[j] = 0.0f;  //clear
      for (j = 0; j < nw_orig; j++) {
        p = M_PI * (2.0 * j - nw_orig) / nw_orig;
        if (wt == 0) {
          w = 0.54 + 0.46 * cos(p);                   // Hamming
        } else {
          w = (1 - a + cos(p) + a * cos(2 * p)) / 2;  // Blackman
        }
        sm += w;
        ww[j] = (float) w;
      }

      // frequency bands...add the DC-facing band and add the Nyquist-facing band
      be[0] = 0;
      for (k = 1; k < nc; k++) {
        kk = round(nf * cf[k - 1] * (2 / sr));
        be[k] = (kk > nf) ? nf : kk;
      }
      be[nc] = nf;

      // channel transfer functions
      fzero(xx, ns);
      xx[nw_orig / 2] = 1;  //make a single-sample impulse centered on our eventual window
      cha_fft_rc(xx, nt);
      for (k = 0; k < nc; k++) {
        fzero(yy, ns);  //zero the temporary output
        //int nbins = (be[k + 1] - be[k]) * 2; Serial.print("fir_filterbank: chan "); Serial.print(k); Serial.print(", nbins = "); Serial.println(nbins);
        fcopy(yy + be[k] * 2, xx + be[k] * 2, (be[k + 1] - be[k]) * 2);  //copy just our passband
        cha_fft_cr(yy, nt);  //IFFT back into the time domain

        // apply window to IFFT of bandpass
        for (j = 0; j < nw; j++) {
          yy[j] *= ww[j];
        }

        bk = bb + k * nw_orig;   //pointer to the location in the output array
        fcopy(bk, yy, nw_orig);  //copy the filter coefficients to the output array

        //print out the coefficients
        //for (int i=0; i<nw; i++) { Serial.print(yy[i]*1000.0f); Serial.print(" "); };  Serial.println();
      }
      free(be);
      free(ww);
      free(xx);
      free(yy);
    }
};
#endif
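
Below is a minimal hedged usage sketch for createFilterCoeff(), following the doc comment above: 8 channels, 96-tap filters, and the NULL corner_freq option for log-spaced defaults. The 24000 Hz rate is just an example (it matches the CHAPRO reference code below), and the umbrella include name is an assumption about the surrounding Arduino library, not something defined in this diff.

// Hedged usage sketch (assumes the Arduino / OpenAudio_F32 environment; values are examples only).
#include <OpenAudio_ArduinoLibrary.h>   // assumed umbrella include for this library

#define N_CHAN 8
#define N_FIR  96
float filter_coeff[N_CHAN][N_FIR];      // pre-allocated, as required by createFilterCoeff()

AudioConfigFIRFilterBank_F32 filterBankConfig;

void setup() {
  Serial.begin(115200);
  float sample_rate_Hz = 24000.0f;      // example rate
  // NULL corner_freq -> use the 8 default log-spaced corner frequencies
  filterBankConfig.createFilterCoeff(N_CHAN, N_FIR, sample_rate_Hz, NULL, (float *)filter_coeff);
  // filter_coeff[i] now holds the N_FIR taps for channel i (e.g., to load into a FIR filter object)
}

void loop() {}
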

// static CHA_DSL dsl = {5, 50, 119, 0, 8,
//     {317.1666,502.9734,797.6319,1264.9,2005.9,3181.1,5044.7},   //log spaced frequencies
//     {-13.5942,-16.5909,-3.7978,6.6176,11.3050,23.7183,35.8586,37.3885},
//     {0.7,0.9,1,1.1,1.2,1.4,1.6,1.7},
//     {32.2,26.5,26.7,26.7,29.8,33.6,34.3,32.7},
//     {78.7667,88.2,90.7,92.8333,98.2,103.3,101.9,99.8}
// };

// //x is the input waveform
// //y is the processed waveform
// //n is the length of the waveform
// //fs is the sample rate...24000 Hz
// //dsl are the settings for each band
// t1 = amplify(x, y, n, fs, &dsl);

//amplify(float *x, float *y, int n, double fs, CHA_DSL *dsl)
//{
//    int nc;
//    static int nw = 256;        // window size
//    static int cs = 32;         // chunk size
//    static int wt = 0;          // window type: 0=Hamming, 1=Blackman
//    static void *cp[NPTR] = {0};
//    static CHA_WDRC gha = {1, 50, 24000, 119, 0, 105, 10, 105};
//
//    nc = dsl->nchannel;  //8?
//    cha_firfb_prepare(cp, dsl->cross_freq, nc, fs, nw, wt, cs);
//    cha_agc_prepare(cp, dsl, &gha);
//    sp_tic();
//    WDRC(cp, x, y, n, nc);
//    return (sp_toc());
//}

//FUNC(int)
//cha_firfb_prepare(CHA_PTR cp, double *cf, int nc, double fs,
//    int nw, int wt, int cs)
//{
//    float *bb;
//    int ns, nt;
//
//    if (cs <= 0) {
//        return (1);
//    }
//    cha_prepare(cp);
//    CHA_IVAR[_cs] = cs;   //cs = 32
//    CHA_DVAR[_fs] = fs;   //fs = 24000
//    // allocate window buffers
//    CHA_IVAR[_nw] = nw;   //nw = 256
//    CHA_IVAR[_nc] = nc;   //nc = 32
//    nt = nw * 2;          //nt = 256*2 = 512
//    ns = nt + 2;          //ns = 512+2 = 514
//    cha_allocate(cp, ns, sizeof(float), _ffxx);               //allocate for input
//    cha_allocate(cp, ns, sizeof(float), _ffyy);               //allocate for output
//    cha_allocate(cp, nc * (nw + cs), sizeof(float), _ffzz);   //allocate per channel
//    // compute FIR-filterbank coefficients
//    bb = calloc(nc * nw, sizeof(float));     //allocate for filter coeff (256 long, 8 channels)
//    fir_filterbank(bb, cf, nc, nw, wt, fs);  //make the FIR filter bank
//    // Fourier-transform FIR coefficients
//    if (cs < nw) { // short chunk
//        fir_transform_sc(cp, bb, nc, nw, cs);
//    } else {       // long chunk
//        fir_transform_lc(cp, bb, nc, nw, cs);
//    }
//    free(bb);
//
//    return (0);
//}

// fir_filterbank( float *bb,    double *cf,   int nc, int nw, int wt, double sr)
//                 filter coeff, corner freqs, 8,      256,    0,      24000)
//{
//    double p, w, a = 0.16, sm = 0;
//    float *ww, *bk, *xx, *yy;
//    int j, k, kk, nt, nf, ns, *be;
//
//    nt = nw * 2;   //nt = 256*2 = 512
//    nf = nw + 1;   //Nyquist frequency bin is 256+1 = 257
//    ns = nf * 2;   //when complex, the number of values to carry is Nyquist * 2 = 514
//    be = (int *) calloc(nc + 1, sizeof(int));
//    ww = (float *) calloc(nw, sizeof(float));  //window is 256 long
//    xx = (float *) calloc(ns, sizeof(float));  //input data is 514 points long
//    yy = (float *) calloc(ns, sizeof(float));  //output data is 514 points long
//    // window
//    for (j = 0; j < nw; j++) {            //nw = 256
//        p = M_PI * (2.0 * j - nw) / nw;   //phase for computing window, radians
//        if (wt == 0) {                    //wt is zero
//            w = 0.54 + 0.46 * cos(p);                   // Hamming
//        } else {
//            w = (1 - a + cos(p) + a * cos(2 * p)) / 2;  // Blackman
//        }
//        sm += w;            //sum the window values.  Doesn't appear to be used anywhere.
//        ww[j] = (float) w;  //save the windowing coefficient...there are 256 of them
//    }
//    // frequency bands
//    be[0] = 0;                   //first channel is the DC bin
//    for (k = 1; k < nc; k++) {   //loop over the rest of the 8 channels
//        kk = round(nf * cf[k - 1] * (2 / sr));  //get the bin of the channel's (upper?) corner frequency...assumes factor-of-two zero-padding?
//        be[k] = (kk > nf) ? nf : kk;            //make sure we don't go above the Nyquist bin (bin 257, assuming a 512 FFT)
//    }
//    be[nc] = nf;   //the last one is the Nyquist frequency
//    // channel transfer functions
//    fzero(xx, ns);       //zero the xx vector
//    xx[nw / 2] = 1;      //create an impulse in the middle of the (non-overlapped part of the) time domain...sample 129
//    cha_fft_rc(xx, nt);  //convert to the frequency domain...512 points long
//    for (k = 0; k < nc; k++) {   //loop over each channel
//        bk = bb + k * nw;        //bin index for this channel
//        fzero(yy, ns);           //zero out the output bins
//        fcopy(yy + be[k] * 2, xx + be[k] * 2, (be[k + 1] - be[k]) * 2);  //copy just the desired frequency bins in our passband
//        cha_fft_cr(yy, nt);      //convert back to the time domain
//        // apply window to iFFT of bandpass
//        for (j = 0; j < nw; j++) {
//            yy[j] *= ww[j];
//        }
//        fcopy(bk, yy, nw);  //copy the output into the output filter...just the 256 points
//    }
//    free(be);
//    free(ww);
//    free(xx);
//    free(yy);
//}

@@ -0,0 +1,278 @@
/*
 * AudioEffectCompWDRC_F32: Wide Dynamic Range Compressor
 *
 * Created: Chip Audette (OpenAudio) Feb 2017
 * Derived From: WDRC_circuit from CHAPRO from BTNRH: https://github.com/BTNRH/chapro
 *   As of Feb 2017, the CHAPRO license is listed as "Creative Commons?"
 *
 * MIT License.  Use at your own risk.
 *
 */

#ifndef _AudioEffectCompWDRC_F32
#define _AudioEffectCompWDRC_F32

#include <Arduino.h>
#include <AudioStream_F32.h>
#include <arm_math.h>
#include <AudioCalcEnvelope_F32.h>
#include "AudioCalcGainWDRC_F32.h"  //has the definition of CHA_WDRC
#include "utility/textAndStringUtils.h"

// from CHAPRO cha_ff.h
#define DSL_MXCH 32
//class CHA_DSL {
typedef struct {
  //public:
  //CHA_DSL(void) {};
  //static const int DSL_MXCH = 32;   // maximum number of channels
  float attack;                // attack time (ms)
  float release;               // release time (ms)
  float maxdB;                 // maximum signal (dB SPL)
  int ear;                     // 0=left, 1=right
  int nchannel;                // number of channels
  float cross_freq[DSL_MXCH];  // cross frequencies (Hz)
  float tkgain[DSL_MXCH];      // compression-start gain
  float cr[DSL_MXCH];          // compression ratio
  float tk[DSL_MXCH];          // compression-start kneepoint
  float bolt[DSL_MXCH];        // broadband output limiting threshold
} CHA_DSL;

/*  int parseStringIntoDSL(String &text_buffer) {
      int position = 0;
      float foo_val;
      const bool print_debug = false;

      if (print_debug) Serial.println("parseTextAsDSL: values from file:");

      position = parseNextNumberFromString(text_buffer, position, foo_val);
      attack = foo_val;
      if (print_debug) { Serial.print("  attack: "); Serial.println(attack); }

      position = parseNextNumberFromString(text_buffer, position, foo_val);
      release = foo_val;
      if (print_debug) { Serial.print("  release: "); Serial.println(release); }

      position = parseNextNumberFromString(text_buffer, position, foo_val);
      maxdB = foo_val;
      if (print_debug) { Serial.print("  maxdB: "); Serial.println(maxdB); }

      position = parseNextNumberFromString(text_buffer, position, foo_val);
      ear = int(foo_val + 0.5);  //round
      if (print_debug) { Serial.print("  ear: "); Serial.println(ear); }

      position = parseNextNumberFromString(text_buffer, position, foo_val);
      nchannel = int(foo_val + 0.5);  //round
      if (print_debug) { Serial.print("  nchannel: "); Serial.println(nchannel); }

      //check to see if the number of channels is acceptable
      if ((nchannel < 0) || (nchannel > DSL_MXCH)) {
        if (print_debug) Serial.print("  : channel number is too big (or negative). stopping.");
        return -1;
      }

      //read the cross-over frequencies.  There should be nchan-1 of them (0 and Nyquist are assumed)
      if (print_debug) Serial.print("  cross_freq: ");
      for (int i=0; i < (nchannel-1); i++) {
        position = parseNextNumberFromString(text_buffer, position, foo_val);
        cross_freq[i] = foo_val;
        if (print_debug) { Serial.print(cross_freq[i]); Serial.print(", "); }
      }
      if (print_debug) Serial.println();

      //read the tkgain values.  There should be nchan of them
      if (print_debug) Serial.print("  tkgain: ");
      for (int i=0; i < nchannel; i++) {
        position = parseNextNumberFromString(text_buffer, position, foo_val);
        tkgain[i] = foo_val;
        if (print_debug) { Serial.print(tkgain[i]); Serial.print(", "); }
      }
      if (print_debug) Serial.println();

      //read the cr values.  There should be nchan of them
      if (print_debug) Serial.print("  cr: ");
      for (int i=0; i < nchannel; i++) {
        position = parseNextNumberFromString(text_buffer, position, foo_val);
        cr[i] = foo_val;
        if (print_debug) { Serial.print(cr[i]); Serial.print(", "); }
      }
      if (print_debug) Serial.println();

      //read the tk values.  There should be nchan of them
      if (print_debug) Serial.print("  tk: ");
      for (int i=0; i < nchannel; i++) {
        position = parseNextNumberFromString(text_buffer, position, foo_val);
        tk[i] = foo_val;
        if (print_debug) { Serial.print(tk[i]); Serial.print(", "); }
      }
      if (print_debug) Serial.println();

      //read the bolt values.  There should be nchan of them
      if (print_debug) Serial.print("  bolt: ");
      for (int i=0; i < nchannel; i++) {
        position = parseNextNumberFromString(text_buffer, position, foo_val);
        bolt[i] = foo_val;
        if (print_debug) { Serial.print(bolt[i]); Serial.print(", "); }
      }
      if (print_debug) Serial.println();

      return 0;
    }

    void printToStream(Stream *s) {
      s->print("CHA_DSL: attack (ms) = "); s->println(attack);
      s->print("    : release (ms) = "); s->println(release);
      s->print("    : maxdB (dB SPL) = "); s->println(maxdB);
      s->print("    : ear (0 = left, 1 = right) "); s->println(ear);
      s->print("    : nchannel = "); s->println(nchannel);
      s->print("    : cross_freq (Hz) = ");
      for (int i=0; i<nchannel-1; i++) { s->print(cross_freq[i]); s->print(", "); };  s->println();
      s->print("    : tkgain = ");
      for (int i=0; i<nchannel; i++) { s->print(tkgain[i]); s->print(", "); };  s->println();
      s->print("    : cr = ");
      for (int i=0; i<nchannel; i++) { s->print(cr[i]); s->print(", "); };  s->println();
      s->print("    : tk = ");
      for (int i=0; i<nchannel; i++) { s->print(tk[i]); s->print(", "); };  s->println();
      s->print("    : bolt = ");
      for (int i=0; i<nchannel; i++) { s->print(bolt[i]); s->print(", "); };  s->println();
    }
}; */

typedef struct {
  float alfa;    // attack constant (not time)
  float beta;    // release constant (not time)
  float fs;      // sampling rate (Hz)
  float maxdB;   // maximum signal (dB SPL)
  float tkgain;  // compression-start gain
  float tk;      // compression-start kneepoint
  float cr;      // compression ratio
  float bolt;    // broadband output limiting threshold
} CHA_DVAR_t;

class AudioEffectCompWDRC_F32 : public AudioStream_F32
{
  //GUI: inputs:1, outputs:1  //this line used for automatic generation of GUI node
  //GUI: shortName: CompWDRC
  public:
    AudioEffectCompWDRC_F32(void): AudioStream_F32(1, inputQueueArray) {  //need to modify this for the user to set the sample rate
      setSampleRate_Hz(AUDIO_SAMPLE_RATE);
      setDefaultValues();
    }

    AudioEffectCompWDRC_F32(AudioSettings_F32 settings): AudioStream_F32(1, inputQueueArray) {  //need to modify this for the user to set the sample rate
      setSampleRate_Hz(settings.sample_rate_Hz);
      setDefaultValues();
    }

    //here is the method called automatically by the audio library
    void update(void) {
      //receive the input audio data
      audio_block_f32_t *block = AudioStream_F32::receiveReadOnly_f32();
      if (!block) return;

      //allocate memory for the output of our algorithm
      audio_block_f32_t *out_block = AudioStream_F32::allocate_f32();
      if (!out_block) { AudioStream_F32::release(block); return; }  //release the input block to avoid leaking it

      //do the algorithm
      cha_agc_channel(block->data, out_block->data, block->length);

      // transmit the block and release memory
      AudioStream_F32::transmit(out_block);  // send the compressed output
      AudioStream_F32::release(out_block);
      AudioStream_F32::release(block);
    }

    //here is the function that does all the work
    void cha_agc_channel(float *input, float *output, int cs) {
      //compress(input, output, cs, &prev_env,
      //    CHA_DVAR.alfa, CHA_DVAR.beta, CHA_DVAR.tkgain, CHA_DVAR.tk, CHA_DVAR.cr, CHA_DVAR.bolt, CHA_DVAR.maxdB);
      compress(input, output, cs);
    }

    //void compress(float *x, float *y, int n, float *prev_env,
    //    float &alfa, float &beta, float &tkgn, float &tk, float &cr, float &bolt, float &mxdB)
    void compress(float *x, float *y, int n)
    //x, input, audio waveform data
    //y, output, audio waveform data after compression
    //n, input, number of samples in this audio block
    {
      // find the smoothed envelope
      audio_block_f32_t *envelope_block = AudioStream_F32::allocate_f32();
      if (!envelope_block) return;
      calcEnvelope.smooth_env(x, envelope_block->data, n);
      //float *xpk = envelope_block->data;  //get pointer to the array of (empty) data values

      //calculate the gain
      audio_block_f32_t *gain_block = AudioStream_F32::allocate_f32();
      if (!gain_block) { AudioStream_F32::release(envelope_block); return; }  //release the envelope block to avoid leaking it
      calcGain.calcGainFromEnvelope(envelope_block->data, gain_block->data, n);

      //apply the gain
      arm_mult_f32(x, gain_block->data, y, n);

      // release memory
      AudioStream_F32::release(envelope_block);
      AudioStream_F32::release(gain_block);
    }

    void setDefaultValues(void) {
      //set default values...taken from CHAPRO, GHA_Demo.c, from "amplify()"...ignores the given sample rate
      //assumes that the sample rate has already been set!!!
      CHA_WDRC gha = {1.0f,  // attack time (ms)
        50.0f,    // release time (ms)
        24000.0f, // fs, sampling rate (Hz), THIS IS IGNORED!
        119.0f,   // maxdB, maximum signal (dB SPL)
        0.0f,     // tkgain, compression-start gain
        105.0f,   // tk, compression-start kneepoint
        10.0f,    // cr, compression ratio
        105.0f    // bolt, broadband output limiting threshold
      };
      setParams_from_CHA_WDRC(&gha);
    }

    //set all of the parameters for the compressor using the CHA_WDRC structure
    //assumes that the sample rate has already been set!!!
    void setParams_from_CHA_WDRC(CHA_WDRC *gha) {
      //configure the envelope calculator...assumes that the sample rate has already been set!
      calcEnvelope.setAttackRelease_msec(gha->attack, gha->release);  //these are in milliseconds

      //configure the compressor
      calcGain.setParams_from_CHA_WDRC(gha);
    }

    //set all of the user parameters for the compressor
    //assumes that the sample rate has already been set!!!
    void setParams(float attack_ms, float release_ms, float maxdB, float tkgain, float comp_ratio, float tk, float bolt) {

      //configure the envelope calculator...assumes that the sample rate has already been set!
      calcEnvelope.setAttackRelease_msec(attack_ms, release_ms);

      //configure the WDRC gains
      calcGain.setParams(maxdB, tkgain, comp_ratio, tk, bolt);
    }

    void setSampleRate_Hz(const float _fs_Hz) {
      //pass this data on to its components that care
      given_sample_rate_Hz = _fs_Hz;
      calcEnvelope.setSampleRate_Hz(_fs_Hz);
    }

    float getCurrentLevel_dB(void) { return AudioCalcGainWDRC_F32::db2(calcEnvelope.getCurrentLevel()); }  //this is 20*log10(abs(signal)) after the envelope smoothing

    AudioCalcEnvelope_F32 calcEnvelope;
    AudioCalcGainWDRC_F32 calcGain;

  private:
    audio_block_f32_t *inputQueueArray[1];
    float given_sample_rate_Hz;
};

#endif

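
For context, here is a minimal hedged configuration sketch for the compressor class above. The umbrella include name is an assumption about the surrounding library, and the parameter values are examples only; wiring the object into an audio connection graph (which is what causes update() to be called) is not shown.

// Hedged configuration sketch (assumes the OpenAudio_F32 / Teensy Audio environment; values are examples).
#include <OpenAudio_ArduinoLibrary.h>   // assumed umbrella include for this library

AudioEffectCompWDRC_F32 comp;           // the compressor defined above

void setup() {
  comp.setSampleRate_Hz(44100.0f);      // must be set before the parameters (see the comments above)

  CHA_WDRC gha = {
    5.0f,      // attack (ms)
    50.0f,     // release (ms)
    44100.0f,  // fs (Hz) -- ignored by this class; the rate set above is what counts
    115.0f,    // maxdB -- example value
    10.0f,     // tkgain (dB) -- example value
    55.0f,     // tk (dB SPL) -- example value
    2.0f,      // cr -- example value
    100.0f     // bolt (dB SPL) -- example value
  };
  comp.setParams_from_CHA_WDRC(&gha);

  // comp.update() is then called automatically by the audio library once the
  // object is wired into an audio connection graph (not shown here).
}

void loop() {}
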
@@ -0,0 +1,49 @@
/*
 * AudioEffectEmpty_F32
 *
 * Created: Chip Audette, Feb 2017
 * Purpose: This module does nothing.  It is an empty algorithm that one
 *   can build from to make their own algorithm.
 *
 * This processes a single stream of audio data (i.e., it is mono).
 *
 * MIT License.  Use at your own risk.
 */

#ifndef _AudioEffectEmpty_F32_h
#define _AudioEffectEmpty_F32_h

#include <arm_math.h>        //ARM DSP extensions, for speed!
#include <AudioStream_F32.h>

class AudioEffectEmpty_F32 : public AudioStream_F32
{
  //GUI: inputs:1, outputs:1  //this line used for automatic generation of GUI node
  //GUI: shortName:empty
  public:
    //constructor
    AudioEffectEmpty_F32(void) : AudioStream_F32(1, inputQueueArray_f32) {};

    //here's the method that does all the work
    void update(void) {

      //Serial.println("AudioEffectEmpty_F32: updating.");  //for debugging
      audio_block_f32_t *block;
      block = AudioStream_F32::receiveWritable_f32();
      if (!block) return;

      //add your processing here!

      //transmit the block and be done
      AudioStream_F32::transmit(block);
      AudioStream_F32::release(block);
    }

  private:
    audio_block_f32_t *inputQueueArray_f32[1];  //memory pointer for the input to this module

};

#endif
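
Since this class is meant as a template, here is a hedged sketch of what the "add your processing here!" slot might contain for the simplest possible algorithm: a fixed gain applied in place. It is a fragment intended to drop into the update() method shown above (the block was received writable, so in-place edits are allowed); the 0.5f gain is just an example value.

      // Example body for the "add your processing here!" slot: scale every sample by a fixed gain.
      const float32_t gain = 0.5f;          // example value (about -6 dB)
      for (int i = 0; i < block->length; i++) {
        block->data[i] *= gain;             // in-place, since the block was received writable
      }
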
@@ -0,0 +1,60 @@
#include "AudioMixer_F32.h"

void AudioMixer4_F32::update(void) {
  audio_block_f32_t *in, *out = NULL;

  out = receiveWritable_f32(0);
  if (!out) return;

  arm_scale_f32(out->data, multiplier[0], out->data, out->length);

  for (int channel = 1; channel < 4; channel++) {
    in = receiveReadOnly_f32(channel);
    if (!in) {
      continue;
    }

    audio_block_f32_t *tmp = allocate_f32();
    if (!tmp) { AudioStream_F32::release(in); continue; }  //skip this channel if no memory is available

    arm_scale_f32(in->data, multiplier[channel], tmp->data, tmp->length);
    arm_add_f32(out->data, tmp->data, out->data, tmp->length);

    AudioStream_F32::release(tmp);
    AudioStream_F32::release(in);
  }

  if (out) {
    AudioStream_F32::transmit(out);
    AudioStream_F32::release(out);
  }
}

void AudioMixer8_F32::update(void) {
  audio_block_f32_t *in, *out = NULL;

  out = receiveWritable_f32(0);  //try to get the first input channel
  if (!out) return;              //if it's not there, return immediately

  arm_scale_f32(out->data, multiplier[0], out->data, out->length);  //scale the first input channel

  //load and process the rest of the channels
  for (int channel = 1; channel < 8; channel++) {
    in = receiveReadOnly_f32(channel);
    if (!in) {
      continue;
    }

    audio_block_f32_t *tmp = allocate_f32();
    if (!tmp) { AudioStream_F32::release(in); continue; }  //skip this channel if no memory is available

    arm_scale_f32(in->data, multiplier[channel], tmp->data, tmp->length);
    arm_add_f32(out->data, tmp->data, out->data, tmp->length);

    AudioStream_F32::release(tmp);
    AudioStream_F32::release(in);
  }

  if (out) {
    AudioStream_F32::transmit(out);
    AudioStream_F32::release(out);
  }
}
@@ -0,0 +1,63 @@
/*
 * AudioMixer
 *
 * AudioMixer4
 * Created: Patrick Radius, December 2016
 * Purpose: Mix up to 4 audio channels with individual gain controls.
 *   Assumes floating-point data.
 *
 * This produces a single stream of audio data (i.e., the output is mono).
 *
 * Extended to AudioMixer8
 * By: Chip Audette, OpenAudio, Feb 2017
 *
 * MIT License.  Use at your own risk.
 */

#ifndef AUDIOMIXER_F32_H
#define AUDIOMIXER_F32_H

#include <arm_math.h>
#include <AudioStream_F32.h>

class AudioMixer4_F32 : public AudioStream_F32 {
  //GUI: inputs:4, outputs:1  //this line used for automatic generation of GUI node
  //GUI: shortName:Mixer4
  public:
    AudioMixer4_F32() : AudioStream_F32(4, inputQueueArray) {
      for (int i = 0; i < 4; i++) multiplier[i] = 1.0;
    }

    virtual void update(void);

    void gain(unsigned int channel, float gain) {
      if (channel >= 4) return;  //channel is unsigned, so it cannot be negative
      multiplier[channel] = gain;
    }

  private:
    audio_block_f32_t *inputQueueArray[4];
    float multiplier[4];
};

class AudioMixer8_F32 : public AudioStream_F32 {
  //GUI: inputs:8, outputs:1  //this line used for automatic generation of GUI node
  //GUI: shortName:Mixer8
  public:
    AudioMixer8_F32() : AudioStream_F32(8, inputQueueArray) {
      for (int i = 0; i < 8; i++) multiplier[i] = 1.0;
    }

    virtual void update(void);

    void gain(unsigned int channel, float gain) {
      if (channel >= 8) return;  //channel is unsigned, so it cannot be negative
      multiplier[channel] = gain;
    }

  private:
    audio_block_f32_t *inputQueueArray[8];
    float multiplier[8];
};

#endif
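
A brief hedged usage sketch for the mixer classes above. Only the gain() calls use APIs defined in this diff; the audio wiring is assumed to be done elsewhere with the surrounding library's connection objects, and the gain values are examples only.

// Hedged usage sketch (assumes the OpenAudio_F32 environment; gains and wiring are examples only).
AudioMixer4_F32 mixer4;

void configureMixer() {
  // channel 0 is received writable and scaled in place; channels 1..3 are scaled and added in
  mixer4.gain(0, 1.0f);   // pass channel 0 at unity
  mixer4.gain(1, 0.5f);   // attenuate channel 1 by about 6 dB
  mixer4.gain(2, 0.0f);   // mute channel 2
  mixer4.gain(3, 2.0f);   // boost channel 3 by about 6 dB
  // inputs are wired to the mixer with the library's audio connection objects elsewhere (not shown)
}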