Add files from Tympan_Library

Branch: pull/5/head
Author: Chip Audette, 8 years ago
Parent: 5a43e34b15
Commit: 5466e69d17
29 files changed (changed lines in parentheses):
  1. AudioCalcEnvelope_F32.h (118)
  2. AudioCalcGainWDRC_F32.h (173)
  3. AudioConfigFIRFilterBank_F32.h (274)
  4. AudioConvert_F32.h (4)
  5. AudioEffectCompWDRC_F32.h (278)
  6. AudioEffectEmpty_F32.h (49)
  7. AudioEffectGain_F32.h (24)
  8. AudioFilterFIR_F32.h (101)
  9. AudioFilterIIR_F32.h (8)
  10. AudioMixer_F32.cpp (60)
  11. AudioMixer_F32.h (63)
  12. AudioStream_F32.cpp (9)
  13. AudioStream_F32.h (69)
  14. OpenAudio_ArduinoLibrary.h (7)
  15. control_tlv320aic3206.cpp (11)
  16. control_tlv320aic3206.h (27)
  17. input_i2s_f32.cpp (53)
  18. input_i2s_f32.h (11)
  19. output_i2s_f32.cpp (140)
  20. output_i2s_f32.h (16)
  21. synth_pinknoise_f32.cpp (167)
  22. synth_pinknoise_f32.h (99)
  23. synth_sine_f32.cpp (60)
  24. synth_sine_f32.h (39)
  25. synth_waveform_F32.h (35)
  26. synth_whitenoise_f32.cpp (125)
  27. synth_whitenoise_f32.h (89)
  28. utility/rfft.c (384)
  29. utility/textAndStringUtils.h (19)

@ -0,0 +1,118 @@
/*
* AudioCalcEnvelope_F32
*
* Created: Chip Audette, Feb 2017
* Purpose: This module extracts the envelope of the audio signal.
* Derived From: Core envelope extraction algorithm is from "smooth_env"
* WDRC_circuit from CHAPRO from BTNRH: https://github.com/BTNRH/chapro
* As of Feb 2017, the CHAPRO license is listed as "Creative Commons?"
*
* This processes a single stream of audio data (i.e., it is mono)
*
* MIT License. Use at your own risk.
*/
#ifndef _AudioCalcEnvelope_F32_h
#define _AudioCalcEnvelope_F32_h
#include <arm_math.h> //ARM DSP extensions. for speed!
#include <AudioStream_F32.h>
class AudioCalcEnvelope_F32 : public AudioStream_F32
{
//GUI: inputs:1, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:calc_envelope
public:
//default constructor
AudioCalcEnvelope_F32(void) : AudioStream_F32(1, inputQueueArray_f32),
sample_rate_Hz(AUDIO_SAMPLE_RATE) { setDefaultValues(); };
AudioCalcEnvelope_F32(const AudioSettings_F32 &settings) : AudioStream_F32(1, inputQueueArray_f32),
sample_rate_Hz(settings.sample_rate_Hz) { setDefaultValues(); };
//here's the method that does all the work
void update(void) {
//get the input audio data block
audio_block_f32_t *in_block = AudioStream_F32::receiveReadOnly_f32();
if (!in_block) return;
//check format
if (in_block->fs_Hz != sample_rate_Hz) {
Serial.println("AudioComputeEnvelope_F32: *** WARNING ***: Data sample rate does not match expected.");
Serial.println("AudioComputeEnvelope_F32: Changing sample rate.");
setSampleRate_Hz(in_block->fs_Hz);
}
//prepare an output data block
audio_block_f32_t *out_block = AudioStream_F32::allocate_f32();
if (!out_block) { AudioStream_F32::release(in_block); return; } //release the input block if allocation fails
// //////////////////////add your processing here!
smooth_env(in_block->data, out_block->data, in_block->length);
out_block->length = in_block->length; out_block->fs_Hz = in_block->fs_Hz;
//transmit the block and be done
AudioStream_F32::transmit(out_block);
AudioStream_F32::release(out_block);
AudioStream_F32::release(in_block);
}
//compute the smoothed envelope of the signal itself (not of the signal power)
void smooth_env(float x[], float y[], const int n) {
float xab, xpk;
int k;
// find envelope of x and return as y
//xpk = *ppk; // start with previous xpk
xpk = state_ppk;
for (k = 0; k < n; k++) {
xab = (x[k] >= 0.0f) ? x[k] : -x[k];
if (xab >= xpk) {
xpk = alfa * xpk + (1.f-alfa) * xab;
} else {
xpk = beta * xpk;
}
y[k] = xpk;
}
//*ppk = xpk; // save xpk for next time
state_ppk = xpk;
}
//convert time constants from seconds to unitless parameters, from CHAPRO, agc_prepare.c
void setAttackRelease_msec(const float atk_msec, const float rel_msec) {
given_attack_msec = atk_msec;
given_release_msec = rel_msec;
// convert ANSI attack & release times to filter time constants
float ansi_atk = 0.001f * atk_msec * sample_rate_Hz / 2.425f;
float ansi_rel = 0.001f * rel_msec * sample_rate_Hz / 1.782f;
alfa = (float) (ansi_atk / (1.0f + ansi_atk));
beta = (float) (ansi_rel / (10.f + ansi_rel));
}
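//Example with illustrative numbers (assuming fs = 44117.647 Hz and the default 5 ms / 50 ms values):
//  ansi_atk = 0.001*5*44117.647/2.425  = 90.96,  so alfa = 90.96/91.96    = 0.989
//  ansi_rel = 0.001*50*44117.647/1.782 = 1237.9, so beta = 1237.9/1247.9  = 0.992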
void setDefaultValues(void) {
float32_t attack_msec = 5.0f;
float32_t release_msec = 50.0f;
setAttackRelease_msec(attack_msec, release_msec);
state_ppk = 0; //initialize
}
void setSampleRate_Hz(const float &fs_Hz) {
//change params that follow sample rate
sample_rate_Hz = fs_Hz;
}
void resetStates(void) { state_ppk = 1.0; }
float getCurrentLevel(void) { return state_ppk; }
private:
audio_block_f32_t *inputQueueArray_f32[1]; //memory pointer for the input to this module
float32_t sample_rate_Hz;
float32_t given_attack_msec, given_release_msec;
float32_t alfa, beta; //time constants, but in terms of samples, not seconds
float32_t state_ppk = 1.0f;
};
#endif
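A minimal usage sketch for the envelope tracker above (illustrative only; the 128-sample buffer and the attack/release values are assumptions, not part of this commit):

  AudioCalcEnvelope_F32 env;                 //default constructor assumes AUDIO_SAMPLE_RATE
  env.setAttackRelease_msec(5.0f, 50.0f);    //same values used by setDefaultValues()
  float32_t x[128], e[128];
  // ... fill x[] with audio samples ...
  env.smooth_env(x, e, 128);                 //e[] now holds the smoothed envelope of |x|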

@ -0,0 +1,173 @@
/*
* AudioCalcGainWDRC_F32
*
* Created: Chip Audette, Feb 2017
* Purpose: This module calculates the gain needed for wide dynamic range compression.
* Derived From: Core algorithm is from "WDRC_circuit"
* WDRC_circuit from CHAPRO from BTNRH: https://github.com/BTNRH/chapro
* As of Feb 2017, the CHAPRO license is listed as "Creative Commons?"
*
* This processes a single stream of audio data (i.e., it is mono)
*
* MIT License. Use at your own risk.
*/
#ifndef _AudioCalcGainWDRC_F32_h
#define _AudioCalcGainWDRC_F32_h
#include <arm_math.h> //ARM DSP extensions. for speed!
#include <AudioStream_F32.h>
typedef struct {
float attack; // attack time (ms), unused in this class
float release; // release time (ms), unused in this class
float fs; // sampling rate (Hz), set through other means in this class
float maxdB; // maximum signal (dB SPL)...I think this is the SPL corresponding to a signal with an rms of 1.0
float tkgain; // compression-start gain
float tk; // compression-start kneepoint
float cr; // compression ratio
float bolt; // broadband output limiting threshold
} CHA_WDRC;
class AudioCalcGainWDRC_F32 : public AudioStream_F32
{
//GUI: inputs:1, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:calc_WDRCGain
public:
//default constructor
AudioCalcGainWDRC_F32(void) : AudioStream_F32(1, inputQueueArray_f32) { setDefaultValues(); };
//here's the method that does all the work
void update(void) {
//get the input audio data block
audio_block_f32_t *in_block = AudioStream_F32::receiveReadOnly_f32(); // must be the envelope!
if (!in_block) return;
//prepare an output data block
audio_block_f32_t *out_block = AudioStream_F32::allocate_f32();
if (!out_block) { AudioStream_F32::release(in_block); return; } //release the input block if allocation fails
// //////////////////////add your processing here!
calcGainFromEnvelope(in_block->data, out_block->data, in_block->length);
out_block->length = in_block->length; out_block->fs_Hz = in_block->fs_Hz;
//transmit the block and be done
AudioStream_F32::transmit(out_block);
AudioStream_F32::release(out_block);
AudioStream_F32::release(in_block);
}
void calcGainFromEnvelope(float *env, float *gain_out, const int n) {
//env = input, signal envelope (not the envelope of the power, but the envelope of the signal itself)
//gain = output, the gain in natural units (not power, not dB)
//n = input, number of samples to process in each vector
//prepare intermediate data block
audio_block_f32_t *env_dB_block = AudioStream_F32::allocate_f32();
if (!env_dB_block) return;
//convert to dB
for (int k=0; k < n; k++) env_dB_block->data[k] = maxdB + db2(env[k]); //maxdb in the private section
// apply wide-dynamic range compression
WDRC_circuit_gain(env_dB_block->data, gain_out, n, tkgn, tk, cr, bolt);
AudioStream_F32::release(env_dB_block);
}
//original call to WDRC_circuit
//void WDRC_circuit(float *x, float *y, float *pdb, int n, float tkgn, float tk, float cr, float bolt)
//void WDRC_circuit(float *orig_signal, float *signal_out, float *env_dB, int n, float tkgn, float tk, float cr, float bolt)
//modified to output the gain instead of the fully processed signal
void WDRC_circuit_gain(float *env_dB, float *gain_out, const int n,
const float tkgn, const float tk, const float cr, const float bolt) {
float gdb, tkgo, pblt;
int k;
float *pdb = env_dB; //just rename it to keep the code below unchanged
float tk_tmp = tk;
if ((tk_tmp + tkgn) > bolt) {
tk_tmp = bolt - tkgn;
}
tkgo = tkgn + tk_tmp * (1.0f - 1.0f / cr);
pblt = cr * (bolt - tkgo);
const float cr_const = ((1.0f / cr) - 1.0f);
for (k = 0; k < n; k++) {
if ((pdb[k] < tk_tmp) && (cr >= 1.0f)) {
gdb = tkgn;
} else if (pdb[k] > pblt) {
gdb = bolt + ((pdb[k] - pblt) / 10.0f) - pdb[k];
} else {
gdb = cr_const * pdb[k] + tkgo;
}
gain_out[k] = undb2(gdb);
//y[k] = x[k] * undb2(gdb); //apply the gain
}
}
void setDefaultValues(void) {
CHA_WDRC gha = {1.0f, // attack time (ms), IGNORED HERE
50.0f, // release time (ms), IGNORED HERE
24000.0f, // fs, sampling rate (Hz), IGNORED HERE
119.0f, // maxdB, maximum signal (dB SPL)
0.0f, // tkgain, compression-start gain
105.0f, // tk, compression-start kneepoint
10.0f, // cr, compression ratio
105.0f // bolt, broadband output limiting threshold
};
//setParams(gha.maxdB, gha.tkgain, gha.cr, gha.tk, gha.bolt); //also sets calcEnvelope
setParams_from_CHA_WDRC(&gha);
}
void setParams_from_CHA_WDRC(CHA_WDRC *gha) {
setParams(gha->maxdB, gha->tkgain, gha->cr, gha->tk, gha->bolt); //also sets calcEnvelope
}
void setParams(float _maxdB, float _tkgain, float _cr, float _tk, float _bolt) {
maxdB = _maxdB;
tkgn = _tkgain;
tk = _tk;
cr = _cr;
bolt = _bolt;
}
static float undb2(const float &x) { return expf(0.11512925464970228420089957273422f*x); } //faster: exp(log(10.0f)*x/20); this is exact
static float db2(const float &x) { return 6.020599913279623f*log2f_approx(x); } //faster: 20*log2_approx(x)/log2(10); this is approximate
/* ----------------------------------------------------------------------
** Fast approximation to the log2() function. It uses a two step
** process. First, it decomposes the floating-point number into
** a fractional component F and an exponent E. The fraction component
** is used in a polynomial approximation and then the exponent added
** to the result. A 3rd order polynomial is used and the result
** when computing db20() is accurate to 7.984884e-003 dB.
** ------------------------------------------------------------------- */
//https://community.arm.com/tools/f/discussions/4292/cmsis-dsp-new-functionality-proposal/22621#22621
static float log2f_approx(float X) {
//float *C = &log2f_approx_coeff[0];
float Y;
float F;
int E;
// This is the approximation to log2()
F = frexpf(fabsf(X), &E);
// Y = C[0]*F*F*F + C[1]*F*F + C[2]*F + C[3] + E;
Y = 1.23149591368684f; //C[0]
Y *= F;
Y += -4.11852516267426f; //C[1]
Y *= F;
Y += 6.02197014179219f; //C[2]
Y *= F;
Y += -3.13396450166353f; //C[3]
Y += E;
return(Y);
}
private:
audio_block_f32_t *inputQueueArray_f32[1]; //memory pointer for the input to this module
float maxdB, tkgn, tk, cr, bolt;
};
#endif
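To make the gain rule in WDRC_circuit_gain() concrete, here is the arithmetic that falls out of the default parameters (maxdB=119, tkgain=0, tk=105, cr=10, bolt=105); these numbers are only a worked example:

  tk_tmp = 105 (tk + tkgain does not exceed bolt), tkgo = 0 + 105*(1 - 1/10) = 94.5, pblt = 10*(105 - 94.5) = 105
  envelope at 100 dB SPL: below tk_tmp, so gdb = tkgain = 0 dB (linear region)
  envelope at 110 dB SPL: above pblt, so gdb = 105 + (110 - 105)/10 - 110 = -4.5 dB (10:1 output limiting)
  with these defaults tk_tmp equals pblt, so the compression region collapses and the block acts as a limiter above 105 dB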

@ -0,0 +1,274 @@
/*
* fir_filterbank.h
*
* Created: Chip Audette, Creare LLC, Feb 2017
* Primarily built upon CHAPRO "Generic Hearing Aid" from
* Boys Town National Research Hospital (BTNRH): https://github.com/BTNRH/chapro
*
* License: MIT License. Use at your own risk.
*
*/
#ifndef AudioConfigFIRFilterBank_F32_h
#define AudioConfigFIRFilterBank_F32_h
#include "utility/rfft.c"
#define fmove(x,y,n) memmove(x,y,(n)*sizeof(float))
#define fcopy(x,y,n) memcpy(x,y,(n)*sizeof(float))
#define fzero(x,n) memset(x,0,(n)*sizeof(float))
class AudioConfigFIRFilterBank_F32 {
//GUI: inputs:0, outputs:0 //this line used for automatic generation of GUI node
//GUI: shortName:config_FIRbank
public:
AudioConfigFIRFilterBank_F32(void) {
}
AudioConfigFIRFilterBank_F32(const int n_chan, const int n_fir, const float sample_rate_Hz, float *corner_freq, float *filter_coeff) {
createFilterCoeff(n_chan, n_fir, sample_rate_Hz, corner_freq, filter_coeff);
}
//createFilterCoeff:
// Purpose: create all of the FIR filter coefficients for the FIR filterbank
// Syntax: createFilterCoeff(n_chan, n_fir, sample_rate_Hz, corner_freq, filter_coeff)
// int n_chan (input): number of channels (number of filters) you desire. Must be 2 or greater
// int n_fir (input): length of each FIR filter (should probably be 8 or greater)
// float sample_rate_Hz (input): sample rate of your system (used to scale the corner_freq values)
// float *corner_freq (input): array of frequencies (Hz) separating each band in your filter bank.
// It should contain n_chan-1 values because it excludes the bottom (0 Hz) and the top
// (Nyquist), as those values are already assumed by this routine. A valid example is below:
// int n_chan = 8; float cf[] = {317.1666, 502.9734, 797.6319, 1264.9, 2005.9, 3181.1, 5044.7};
// float *filter_coeff (output): array of FIR filter coefficients that are computed by this
// routine. You must have pre-allocated the array such as: float filter_coeff[N_CHAN][N_FIR];
//Optional Usage: if you want 8 default filters spaced logarithmically, use: float *corner_freq = NULL
void createFilterCoeff(const int n_chan, const int n_fir, const float sample_rate_Hz, float *corner_freq, float *filter_coeff) {
float *cf = corner_freq;
int flag__free_cf = 0;
if (cf == NULL) {
//compute corner frequencies that are logarithmically spaced
cf = (float *) calloc(n_chan, sizeof(float));
flag__free_cf = 1;
computeLogSpacedCornerFreqs(n_chan, sample_rate_Hz, cf);
}
const int window_type = 0; //0 = Hamming
fir_filterbank(filter_coeff, cf, n_chan, n_fir, window_type, sample_rate_Hz);
if (flag__free_cf) free(cf);
}
//compute corner frequencies that span from zero to Nyquist. Zero itself is left off because the later code assumes it.
//example of an *8* channel set of frequencies: cf = {317.1666, 502.9734, 797.6319, 1264.9, 2005.9, 3181.1, 5044.7}
void computeLogSpacedCornerFreqs(const int n_chan, const float sample_rate_Hz, float *cf) {
float cf_8_band[] = {317.1666, 502.9734, 797.6319, 1264.9, 2005.9, 3181.1, 5044.7};
float scale_fac = expf(logf(cf_8_band[6]/cf_8_band[0]) / ((float)(n_chan-2)));
//Serial.print("MakeFIRFilterBank: computeEvenlySpacedCornerFreqs: scale_fac = "); Serial.println(scale_fac);
cf[0] = cf_8_band[0];
//Serial.println("MakeFIRFilterBank: computeEvenlySpacedCornerFreqs: cf = ");Serial.print(cf[0]); Serial.print(", ");
for (int i=1; i < n_chan-1; i++) {
cf[i] = cf[i-1]*scale_fac;
//Serial.print(cf[i]); Serial.print(", ");
}
//Serial.println();
}
private:
int nextPowerOfTwo(int n) {
const int n_out_vals = 8;
int out_vals[n_out_vals] = {8, 16, 32, 64, 128, 256, 512, 1024};
if (n < out_vals[0]) return out_vals[0];
for (int i=1;i<n_out_vals; i++) {
if ((n > out_vals[i-1]) & (n <= out_vals[i])) {
return out_vals[i];
}
}
return n;
}
void fir_filterbank(float *bb, float *cf, const int nc, const int nw_orig, const int wt, const float sr)
{
double p, w, a = 0.16, sm = 0;
float *ww, *bk, *xx, *yy;
int j, k, kk, nt, nf, ns, *be;
int nw = nextPowerOfTwo(nw_orig);
Serial.print("fir_filterbank: nw_orig = "); Serial.print(nw_orig);
Serial.print(", nw = "); Serial.println(nw);
nt = nw * 2;
nf = nw + 1;
ns = nf * 2;
be = (int *) calloc(nc + 1, sizeof(int));
ww = (float *) calloc(nw, sizeof(float));
xx = (float *) calloc(ns, sizeof(float));
yy = (float *) calloc(ns, sizeof(float));
// window
for (j = 0; j < nw; j++) ww[j]=0.0f; //clear
for (j = 0; j < nw_orig; j++) {
p = M_PI * (2.0 * j - nw_orig) / nw_orig;
if (wt == 0) {
w = 0.54 + 0.46 * cos(p); // Hamming
} else {
w = (1 - a + cos(p) + a * cos(2 * p)) / 2; // Blackman
}
sm += w;
ww[j] = (float) w;
}
// frequency bands...add the DC-facing band and add the Nyquist-facing band
be[0] = 0;
for (k = 1; k < nc; k++) {
kk = round(nf * cf[k - 1] * (2 / sr));
be[k] = (kk > nf) ? nf : kk;
}
be[nc] = nf;
// channel transfer functions
fzero(xx, ns);
xx[nw_orig / 2] = 1; //make a single-sample impulse centered on our eventual window
cha_fft_rc(xx, nt);
for (k = 0; k < nc; k++) {
fzero(yy, ns); //zero the temporary output
//int nbins = (be[k + 1] - be[k]) * 2; Serial.print("fir_filterbank: chan ");Serial.print(k); Serial.print(", nbins = ");Serial.println(nbins);
fcopy(yy + be[k] * 2, xx + be[k] * 2, (be[k + 1] - be[k]) * 2); //copy just our passband
cha_fft_cr(yy, nt); //IFFT back into the time domain
// apply window to iFFT of bandpass
for (j = 0; j < nw; j++) {
yy[j] *= ww[j];
}
bk = bb + k * nw_orig; //pointer to location in output array
fcopy(bk, yy, nw_orig); //copy the filter coefficients to the output array
//print out the coefficients
//for (int i=0; i<nw; i++) { Serial.print(yy[i]*1000.0f);Serial.print(" "); }; Serial.println();
}
free(be);
free(ww);
free(xx);
free(yy);
}
};
#endif
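A minimal usage sketch following the createFilterCoeff() comments above (the 24 kHz rate and N_FIR of 96 are example values, not defaults from this commit):

  #define N_CHAN 8
  #define N_FIR  96
  float filter_coeff[N_CHAN][N_FIR];
  AudioConfigFIRFilterBank_F32 filterbank;
  filterbank.createFilterCoeff(N_CHAN, N_FIR, 24000.0f, NULL, &filter_coeff[0][0]); //NULL -> default log-spaced corner frequencies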
// static CHA_DSL dsl = {5, 50, 119, 0, 8,
// {317.1666,502.9734,797.6319,1264.9,2005.9,3181.1,5044.7}, //log spaced frequencies.
// {-13.5942,-16.5909,-3.7978,6.6176,11.3050,23.7183,35.8586,37.3885},
// {0.7,0.9,1,1.1,1.2,1.4,1.6,1.7},
// {32.2,26.5,26.7,26.7,29.8,33.6,34.3,32.7},
// {78.7667,88.2,90.7,92.8333,98.2,103.3,101.9,99.8}
// };
// //x is the input waveform
// //y is the processed waveform
// //n is the length of the waveform
// //fs is the sample rate...24000 Hz
// //dsl are the settings for each band
// t1 = amplify(x, y, n, fs, &dsl);
//amplify(float *x, float *y, int n, double fs, CHA_DSL *dsl)
//{
// int nc;
// static int nw = 256; // window size
// static int cs = 32; // chunk size
// static int wt = 0; // window type: 0=Hamming, 1=Blackman
// static void *cp[NPTR] = {0};
// static CHA_WDRC gha = {1, 50, 24000, 119, 0, 105, 10, 105};
//
// nc = dsl->nchannel; //8?
// cha_firfb_prepare(cp, dsl->cross_freq, nc, fs, nw, wt, cs);
// cha_agc_prepare(cp, dsl, &gha);
// sp_tic();
// WDRC(cp, x, y, n, nc);
// return (sp_toc());
//}
//FUNC(int)
//cha_firfb_prepare(CHA_PTR cp, double *cf, int nc, double fs,
// int nw, int wt, int cs)
//{
// float *bb;
// int ns, nt;
//
// if (cs <= 0) {
// return (1);
// }
// cha_prepare(cp);
// CHA_IVAR[_cs] = cs; //cs = 32
// CHA_DVAR[_fs] = fs; //fs = 24000
// // allocate window buffers
// CHA_IVAR[_nw] = nw; //nw = 256
// CHA_IVAR[_nc] = nc; //nc = 32
// nt = nw * 2; //nt = 256*2 = 512
// ns = nt + 2; //ns = 512+2 = 514
// cha_allocate(cp, ns, sizeof(float), _ffxx); //allocate for input
// cha_allocate(cp, ns, sizeof(float), _ffyy); //allocate for output
// cha_allocate(cp, nc * (nw + cs), sizeof(float), _ffzz); //allocate per channel
// // compute FIR-filterbank coefficients
// bb = calloc(nc * nw, sizeof(float)); //allocate for filter coeff (256 long, 8 channels)
// fir_filterbank(bb, cf, nc, nw, wt, fs); //make the fir filter bank
// // Fourier-transform FIR coefficients
// if (cs < nw) { // short chunk
// fir_transform_sc(cp, bb, nc, nw, cs);
// } else { // long chunk
// fir_transform_lc(cp, bb, nc, nw, cs);
// }
// free(bb);
//
// return (0);
//}
// fir_filterbank( float *bb, double *cf, int nc, int nw, int wt, double sr)
// filter coeff, corner freqs, 8, 256, 0, 24000)
//{
// double p, w, a = 0.16, sm = 0;
// float *ww, *bk, *xx, *yy;
// int j, k, kk, nt, nf, ns, *be;
//
// nt = nw * 2; //nt = 256*2 = 512
// nf = nw + 1; //nyquist frequency bin is 256+1 = 257
// ns = nf * 2; //when complex, number values to carry is nyquist * 2 = 514
// be = (int *) calloc(nc + 1, sizeof(int));
// ww = (float *) calloc(nw, sizeof(float)); //window is 256 long
// xx = (float *) calloc(ns, sizeof(float)); //input data is 514 points long
// yy = (float *) calloc(ns, sizeof(float)); //output data is 514 points long
// // window
// for (j = 0; j < nw; j++) { //nw = 256
// p = M_PI * (2.0 * j - nw) / nw; //phase for computing window, radians
// if (wt == 0) { //wt is zero
// w = 0.54 + 0.46 * cos(p); // Hamming
// } else {
// w = (1 - a + cos(p) + a * cos(2 * p)) / 2; // Blackman
// }
// sm += w; //sum the window value. Doesn't appear to be used anywhere
// ww[j] = (float) w; //save the windowing coefficient...there are 256 of them
// }
// // frequency bands
// be[0] = 0; //first channel is DC bin
// for (k = 1; k < nc; k++) { //loop over the rest of the 8 channels
// kk = round(nf * cf[k - 1] * (2 / sr)); //get bin of the channel (upper?) corner frequency...assumes factor of two zero-padding?
// be[k] = (kk > nf) ? nf : kk; //make sure we don't go above the nyquist bin (bin 257, assuming a 512 FFT)
// }
// be[nc] = nf; //the last one is the nyquist frequency
// // channel transfer functions
// fzero(xx, ns); //zero the xx vector
// xx[nw / 2] = 1; //create an impulse in the middle of the (non-overlapped part of the) time-domain...sample 129
// cha_fft_rc(xx, nt); //convert to frequency domain..512 points long
// for (k = 0; k < nc; k++) { //loop over each channel
// bk = bb + k * nw; //bin index for this channel
// fzero(yy, ns); //zero out the output bins
// fcopy(yy + be[k] * 2, xx + be[k] * 2, (be[k + 1] - be[k]) * 2); //copy just the desired frequency bins in our passband
// cha_fft_cr(yy, nt); //convert back to time domain
// // apply window to iFFT of bandpass
// for (j = 0; j < nw; j++) {
// yy[j] *= ww[j];
// }
// fcopy(bk, yy, nw); //copy output into the output filter...just the 256 points
// }
// free(be);
// free(ww);
// free(xx);
// free(yy);
//}

@ -24,7 +24,7 @@ class AudioConvert_I16toF32 : public AudioStream_F32 //receive Int and transmits
}
//convert to float
convertAudio_I16toF32(int_block, float_block, AUDIO_BLOCK_SAMPLES);
convertAudio_I16toF32(int_block, float_block, float_block->length);
//transmit the audio and return it to the system
AudioStream_F32::transmit(float_block,0);
@ -65,7 +65,7 @@ class AudioConvert_F32toI16 : public AudioStream_F32 //receive Float and transmi
}
//convert back to int16
convertAudio_F32toI16(float_block, int_block, AUDIO_BLOCK_SAMPLES);
convertAudio_F32toI16(float_block, int_block, float_block->length);
//return audio to the system
AudioStream::transmit(int_block);

@ -0,0 +1,278 @@
/*
* AudioEffectCompWDRC_F32: Wide Dynamic Range Compressor
*
* Created: Chip Audette (OpenAudio) Feb 2017
* Derived From: WDRC_circuit from CHAPRO from BTNRH: https://github.com/BTNRH/chapro
* As of Feb 2017, the CHAPRO license is listed as "Creative Commons?"
*
* MIT License. Use at your own risk.
*
*/
#ifndef _AudioEffectCompWDRC_F32
#define _AudioEffectCompWDRC_F32
#include <Arduino.h>
#include <AudioStream_F32.h>
#include <arm_math.h>
#include <AudioCalcEnvelope_F32.h>
#include "AudioCalcGainWDRC_F32.h" //has definition of CHA_WDRC
#include "utility/textAndStringUtils.h"
// from CHAPRO cha_ff.h
#define DSL_MXCH 32
//class CHA_DSL {
typedef struct {
//public:
//CHA_DSL(void) {};
//static const int DSL_MXCH = 32; // maximum number of channels
float attack; // attack time (ms)
float release; // release time (ms)
float maxdB; // maximum signal (dB SPL)
int ear; // 0=left, 1=right
int nchannel; // number of channels
float cross_freq[DSL_MXCH]; // cross frequencies (Hz)
float tkgain[DSL_MXCH]; // compression-start gain
float cr[DSL_MXCH]; // compression ratio
float tk[DSL_MXCH]; // compression-start kneepoint
float bolt[DSL_MXCH]; // broadband output limiting threshold
} CHA_DSL;
/* int parseStringIntoDSL(String &text_buffer) {
int position = 0;
float foo_val;
const bool print_debug = false;
if (print_debug) Serial.println("parseTextAsDSL: values from file:");
position = parseNextNumberFromString(text_buffer, position, foo_val);
attack = foo_val;
if (print_debug) { Serial.print(" attack: "); Serial.println(attack); }
position = parseNextNumberFromString(text_buffer, position, foo_val);
release = foo_val;
if (print_debug) { Serial.print(" release: "); Serial.println(release); }
position = parseNextNumberFromString(text_buffer, position, foo_val);
maxdB = foo_val;
if (print_debug) { Serial.print(" maxdB: "); Serial.println(maxdB); }
position = parseNextNumberFromString(text_buffer, position, foo_val);
ear = int(foo_val + 0.5); //round
if (print_debug) { Serial.print(" ear: "); Serial.println(ear); }
position = parseNextNumberFromString(text_buffer, position, foo_val);
nchannel = int(foo_val + 0.5); //round
if (print_debug) { Serial.print(" nchannel: "); Serial.println(nchannel); }
//check to see if the number of channels is acceptable.
if ((nchannel < 0) || (nchannel > DSL_MXCH)) {
if (print_debug) Serial.print(" : channel number is too big (or negative). stopping.");
return -1;
}
//read the cross-over frequencies. There should be nchan-1 of them (0 and Nyquist are assumed)
if (print_debug) Serial.print(" cross_freq: ");
for (int i=0; i < (nchannel-1); i++) {
position = parseNextNumberFromString(text_buffer, position, foo_val);
cross_freq[i] = foo_val;
if (print_debug) { Serial.print(cross_freq[i]); Serial.print(", ");}
}
if (print_debug) Serial.println();
//read the tkgain values. There should be nchan of them
if (print_debug) Serial.print(" tkgain: ");
for (int i=0; i < nchannel; i++) {
position = parseNextNumberFromString(text_buffer, position, foo_val);
tkgain[i] = foo_val;
if (print_debug) { Serial.print(tkgain[i]); Serial.print(", ");}
}
if (print_debug) Serial.println();
//read the cr values. There should be nchan of them
if (print_debug) Serial.print(" cr: ");
for (int i=0; i < nchannel; i++) {
position = parseNextNumberFromString(text_buffer, position, foo_val);
cr[i] = foo_val;
if (print_debug) { Serial.print(cr[i]); Serial.print(", ");}
}
if (print_debug) Serial.println();
//read the tk values. There should be nchan of them
if (print_debug) Serial.print(" tk: ");
for (int i=0; i < nchannel; i++) {
position = parseNextNumberFromString(text_buffer, position, foo_val);
tk[i] = foo_val;
if (print_debug) { Serial.print(tk[i]); Serial.print(", ");}
}
if (print_debug) Serial.println();
//read the bolt values. There should be nchan of them
if (print_debug) Serial.print(" bolt: ");
for (int i=0; i < nchannel; i++) {
position = parseNextNumberFromString(text_buffer, position, foo_val);
bolt[i] = foo_val;
if (print_debug) { Serial.print(bolt[i]); Serial.print(", ");}
}
if (print_debug) Serial.println();
return 0;
}
void printToStream(Stream *s) {
s->print("CHA_DSL: attack (ms) = "); s->println(attack);
s->print(" : release (ms) = "); s->println(release);
s->print(" : maxdB (dB SPL) = "); s->println(maxdB);
s->print(" : ear (0 = left, 1 = right) "); s->println(ear);
s->print(" : nchannel = "); s->println(nchannel);
s->print(" : cross_freq (Hz) = ");
for (int i=0; i<nchannel-1;i++) { s->print(cross_freq[i]); s->print(", ");}; s->println();
s->print(" : tkgain = ");
for (int i=0; i<nchannel;i++) { s->print(tkgain[i]); s->print(", ");}; s->println();
s->print(" : cr = ");
for (int i=0; i<nchannel;i++) { s->print(cr[i]); s->print(", ");}; s->println();
s->print(" : tk = ");
for (int i=0; i<nchannel;i++) { s->print(tk[i]); s->print(", ");}; s->println();
s->print(" : bolt = ");
for (int i=0; i<nchannel;i++) { s->print(bolt[i]); s->print(", ");}; s->println();
}
} ; */
typedef struct {
float alfa; // attack constant (not time)
float beta; // release constant (not time)
float fs; // sampling rate (Hz)
float maxdB; // maximum signal (dB SPL)
float tkgain; // compression-start gain
float tk; // compression-start kneepoint
float cr; // compression ratio
float bolt; // broadband output limiting threshold
} CHA_DVAR_t;
class AudioEffectCompWDRC_F32 : public AudioStream_F32
{
//GUI: inputs:1, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName: CompWDRC
public:
AudioEffectCompWDRC_F32(void): AudioStream_F32(1,inputQueueArray) { //need to modify this for user to set sample rate
setSampleRate_Hz(AUDIO_SAMPLE_RATE);
setDefaultValues();
}
AudioEffectCompWDRC_F32(AudioSettings_F32 settings): AudioStream_F32(1,inputQueueArray) { //need to modify this for user to set sample rate
setSampleRate_Hz(settings.sample_rate_Hz);
setDefaultValues();
}
//here is the method called automatically by the audio library
void update(void) {
//receive the input audio data
audio_block_f32_t *block = AudioStream_F32::receiveReadOnly_f32();
if (!block) return;
//allocate memory for the output of our algorithm
audio_block_f32_t *out_block = AudioStream_F32::allocate_f32();
if (!out_block) { AudioStream_F32::release(block); return; } //release the input block if allocation fails
//do the algorithm
cha_agc_channel(block->data, out_block->data, block->length);
// transmit the block and release memory
AudioStream_F32::transmit(out_block); // send the FIR output
AudioStream_F32::release(out_block);
AudioStream_F32::release(block);
}
//here is the function that does all the work
void cha_agc_channel(float *input, float *output, int cs) {
//compress(input, output, cs, &prev_env,
// CHA_DVAR.alfa, CHA_DVAR.beta, CHA_DVAR.tkgain, CHA_DVAR.tk, CHA_DVAR.cr, CHA_DVAR.bolt, CHA_DVAR.maxdB);
compress(input, output, cs);
}
//void compress(float *x, float *y, int n, float *prev_env,
// float &alfa, float &beta, float &tkgn, float &tk, float &cr, float &bolt, float &mxdB)
void compress(float *x, float *y, int n)
//x, input, audio waveform data
//y, output, audio waveform data after compression
//n, input, number of samples in this audio block
{
// find smoothed envelope
audio_block_f32_t *envelope_block = AudioStream_F32::allocate_f32();
if (!envelope_block) return;
calcEnvelope.smooth_env(x, envelope_block->data, n);
//float *xpk = envelope_block->data; //get pointer to the array of (empty) data values
//calculate gain
audio_block_f32_t *gain_block = AudioStream_F32::allocate_f32();
if (!gain_block) { AudioStream_F32::release(envelope_block); return; } //release the envelope block if allocation fails
calcGain.calcGainFromEnvelope(envelope_block->data, gain_block->data, n);
//apply gain
arm_mult_f32(x, gain_block->data, y, n);
// release memory
AudioStream_F32::release(envelope_block);
AudioStream_F32::release(gain_block);
}
void setDefaultValues(void) {
//set default values...taken from CHAPRO, GHA_Demo.c from "amplify()"...ignores given sample rate
//assumes that the sample rate has already been set!!!!
CHA_WDRC gha = {1.0f, // attack time (ms)
50.0f, // release time (ms)
24000.0f, // fs, sampling rate (Hz), THIS IS IGNORED!
119.0f, // maxdB, maximum signal (dB SPL)
0.0f, // tkgain, compression-start gain
105.0f, // tk, compression-start kneepoint
10.0f, // cr, compression ratio
105.0f // bolt, broadband output limiting threshold
};
setParams_from_CHA_WDRC(&gha);
}
//set all of the parameters for the compressor using the CHA_WDRC structure
//assumes that the sample rate has already been set!!!
void setParams_from_CHA_WDRC(CHA_WDRC *gha) {
//configure the envelope calculator...assumes that the sample rate has already been set!
calcEnvelope.setAttackRelease_msec(gha->attack,gha->release); //these are in milliseconds
//configure the compressor
calcGain.setParams_from_CHA_WDRC(gha);
}
//set all of the user parameters for the compressor
//assumes that the sample rate has already been set!!!
void setParams(float attack_ms, float release_ms, float maxdB, float tkgain, float comp_ratio, float tk, float bolt) {
//configure the envelope calculator...assumes that the sample rate has already been set!
calcEnvelope.setAttackRelease_msec(attack_ms,release_ms);
//configure the WDRC gains
calcGain.setParams(maxdB, tkgain, comp_ratio, tk, bolt);
}
void setSampleRate_Hz(const float _fs_Hz) {
//pass this data on to its components that care
given_sample_rate_Hz = _fs_Hz;
calcEnvelope.setSampleRate_Hz(_fs_Hz);
}
float getCurrentLevel_dB(void) { return AudioCalcGainWDRC_F32::db2(calcEnvelope.getCurrentLevel()); } //this is 20*log10(abs(signal)) after the envelope smoothing
AudioCalcEnvelope_F32 calcEnvelope;
AudioCalcGainWDRC_F32 calcGain;
private:
audio_block_f32_t *inputQueueArray[1];
float given_sample_rate_Hz;
};
#endif
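A minimal usage sketch for the compressor (the parameter values repeat the defaults above; the I2S objects and the AudioConnection_F32 wiring are assumptions that mirror the usual Teensy Audio Library pattern):

  AudioEffectCompWDRC_F32 comp;
  comp.setSampleRate_Hz(24000.0f);   //set the sample rate before the time constants
  comp.setParams(1.0f, 50.0f, 119.0f, 0.0f, 10.0f, 105.0f, 105.0f); //attack, release, maxdB, tkgain, cr, tk, bolt
  //AudioConnection_F32 c1(i2s_in, 0, comp, 0);   AudioConnection_F32 c2(comp, 0, i2s_out, 0);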

@ -0,0 +1,49 @@
/*
* AudioEffectEmpty_F32
*
* Created: Chip Audette, Feb 2017
* Purpose: This module does nothing. It is an empty algorithm that one
* can build from to make their own algorithm.
*
* This processes a single stream of audio data (i.e., it is mono)
*
* MIT License. Use at your own risk.
*/
#ifndef _AudioEffectEmpty_F32_h
#define _AudioEffectEmpty_F32_h
#include <arm_math.h> //ARM DSP extensions. for speed!
#include <AudioStream_F32.h>
class AudioEffectEmpty_F32 : public AudioStream_F32
{
//GUI: inputs:1, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:empty
public:
//constructor
AudioEffectEmpty_F32(void) : AudioStream_F32(1, inputQueueArray_f32) {};
//here's the method that does all the work
void update(void) {
//Serial.println("AudioEffectEmpty_F32: updating."); //for debugging.
audio_block_f32_t *block;
block = AudioStream_F32::receiveWritable_f32();
if (!block) return;
//add your processing here!
//transmit the block and be done
AudioStream_F32::transmit(block);
AudioStream_F32::release(block);
}
private:
audio_block_f32_t *inputQueueArray_f32[1]; //memory pointer for the input to this module
};
#endif

@ -24,18 +24,18 @@ class AudioEffectGain_F32 : public AudioStream_F32
//here's the method that does all the work
void update(void) {
//Serial.println("AudioEffectGain_F32: updating."); //for debugging.
audio_block_f32_t *block;
block = AudioStream_F32::receiveWritable_f32();
if (!block) return;
//apply the gain
//for (int i = 0; i < AUDIO_BLOCK_SAMPLES; i++) block->data[i] = gain * (block->data[i]); //non DSP way to do it
arm_scale_f32(block->data, gain, block->data, block->length); //use ARM DSP for speed!
//transmit the block and be done
AudioStream_F32::transmit(block);
AudioStream_F32::release(block);
//Serial.println("AudioEffectGain_F32: updating."); //for debugging.
audio_block_f32_t *block;
block = AudioStream_F32::receiveWritable_f32();
if (!block) return;
//apply the gain
//for (int i = 0; i < AUDIO_BLOCK_SAMPLES; i++) block->data[i] = gain * (block->data[i]); //non DSP way to do it
arm_scale_f32(block->data, gain, block->data, block->length); //use ARM DSP for speed!
//transmit the block and be done
AudioStream_F32::transmit(block);
AudioStream_F32::release(block);
}
//methods to set parameters of this module

@ -22,44 +22,50 @@
class AudioFilterFIR_F32 : public AudioStream_F32
{
//GUI: inputs:1, outputs:1 //this line used for automatic generation of GUI node
public:
AudioFilterFIR_F32(void): AudioStream_F32(1,inputQueueArray), coeff_p(FIR_F32_PASSTHRU) {
}
void begin(const float32_t *cp, int n_coeffs) {
coeff_p = cp;
// Initialize FIR instance (ARM DSP Math Library)
if (coeff_p && (coeff_p != FIR_F32_PASSTHRU) && n_coeffs <= FIR_MAX_COEFFS) {
arm_fir_init_f32(&fir_inst, n_coeffs, (float32_t *)coeff_p, &StateF32[0], AUDIO_BLOCK_SAMPLES);
//if (arm_fir_init_f32(&fir_inst, n_coeffs, (float32_t *)coeff_p, &StateF32[0], AUDIO_BLOCK_SAMPLES) != ARM_MATH_SUCCESS) {
// n_coeffs must be an even number, 4 or larger
//coeff_p = NULL;
//}
}
}
void end(void) {
coeff_p = NULL;
}
virtual void update(void);
void setBlockDC(void) {
//helper function that sets this up for a first-order HP filter at 20Hz
}
private:
audio_block_f32_t *inputQueueArray[1];
// pointer to current coefficients or NULL or FIR_PASSTHRU
const float32_t *coeff_p;
// ARM DSP Math library filter instance
arm_fir_instance_f32 fir_inst;
float32_t StateF32[AUDIO_BLOCK_SAMPLES + FIR_MAX_COEFFS];
public:
AudioFilterFIR_F32(void): AudioStream_F32(1,inputQueueArray),
coeff_p(FIR_F32_PASSTHRU), n_coeffs(1), configured_block_size(0) { }
//initialize the FIR filter by giving it the filter coefficients
void begin(const float32_t *cp, const int _n_coeffs) { begin(cp, _n_coeffs, AUDIO_BLOCK_SAMPLES); } //assume that the block size is the maximum
void begin(const float32_t *cp, const int _n_coeffs, const int block_size) { //or, you can provide it with the block size
coeff_p = cp;
n_coeffs = _n_coeffs;
// Initialize FIR instance (ARM DSP Math Library)
if (coeff_p && (coeff_p != FIR_F32_PASSTHRU) && n_coeffs <= FIR_MAX_COEFFS) {
arm_fir_init_f32(&fir_inst, n_coeffs, (float32_t *)coeff_p, &StateF32[0], block_size);
configured_block_size = block_size;
Serial.print("AudioFilterFIR_F32: FIR is initialized. N_FIR = "); Serial.print(n_coeffs);
Serial.print(", Block Size = "); Serial.println(block_size);
//} else {
// Serial.print("AudioFilterFIR_F32: *** ERROR ***: Cound not initialize. N_FIR = "); Serial.print(n_coeffs);
// Serial.print(", Block Size = "); Serial.println(block_size);
// coeff_p = NULL;
}
}
void end(void) { coeff_p = NULL; }
virtual void update(void);
//void setBlockDC(void) {} //helper function that sets this up for a first-order HP filter at 20Hz
private:
audio_block_f32_t *inputQueueArray[1];
// pointer to current coefficients or NULL or FIR_PASSTHRU
const float32_t *coeff_p;
int n_coeffs;
int configured_block_size;
// ARM DSP Math library filter instance
arm_fir_instance_f32 fir_inst;
float32_t StateF32[AUDIO_BLOCK_SAMPLES + FIR_MAX_COEFFS];
};
void AudioFilterFIR_F32::update(void)
{
audio_block_f32_t *block, *b_new;
audio_block_f32_t *block, *block_new;
block = AudioStream_F32::receiveReadOnly_f32();
if (!block) return;
@ -75,17 +81,30 @@ void AudioFilterFIR_F32::update(void)
// Just passthrough
AudioStream_F32::transmit(block);
AudioStream_F32::release(block);
//Serial.println("AudioFilterFIR_F32: update(): PASSTHRU.");
return;
}
// get a block for the FIR output
b_new = AudioStream_F32::allocate_f32();
if (b_new) {
arm_fir_f32(&fir_inst, (float32_t *)block->data, (float32_t *)b_new->data, block->length);
AudioStream_F32::transmit(b_new); // send the FIR output
AudioStream_F32::release(b_new);
}
AudioStream_F32::release(block);
// get a block for the FIR output
block_new = AudioStream_F32::allocate_f32();
if (block_new) {
//check to make sure our FIR instance has the right size
if (block->length != configured_block_size) {
//doesn't match. re-initialize
Serial.println("AudioFilterFIR_F32: block size doesn't match. Re-initializing FIR.");
begin(coeff_p, n_coeffs, block->length); //initialize with same coefficients, just a new block length
}
//apply the FIR
arm_fir_f32(&fir_inst, block->data, block_new->data, block->length);
block_new->length = block->length;
//transmit the data
AudioStream_F32::transmit(block_new); // send the FIR output
AudioStream_F32::release(block_new);
}
AudioStream_F32::release(block);
}
#endif
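A minimal usage sketch for the FIR module above, e.g. applying one row of coefficients from the filterbank example earlier in this commit (the names filter_coeff and N_FIR come from that example, not from this file):

  AudioFilterFIR_F32 fir1;
  fir1.begin(filter_coeff[0], N_FIR);  //two-argument form assumes AUDIO_BLOCK_SAMPLES as the block size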

@ -1,8 +1,9 @@
/*
* AudioFilterFIR_F32
* AudioFilterIIR_F32
*
* Created: Chip Audette (OpenAudio) Feb 2017
* - Building from AudioFilterFIR from Teensy Audio Library (AudioFilterFIR credited to Pete (El Supremo))
*
* License: MIT License. Use at your own risk.
*
*/
@ -22,6 +23,7 @@
class AudioFilterIIR_F32 : public AudioStream_F32
{
//GUI: inputs:1, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:IIR
public:
AudioFilterIIR_F32(void): AudioStream_F32(1,inputQueueArray), coeff_p(FIR_F32_PASSTHRU) {
}
@ -92,6 +94,8 @@ void AudioFilterIIR_F32::update(void)
// do IIR
arm_biquad_cascade_df1_f32(&iir_inst, block->data, block->data, block->length);
//transmit the data
AudioStream_F32::transmit(block); // send the IIR output
AudioStream_F32::release(block);
}

@ -0,0 +1,60 @@
#include "AudioMixer_F32.h"
void AudioMixer4_F32::update(void) {
audio_block_f32_t *in, *out=NULL;
out = receiveWritable_f32(0);
if (!out) return;
arm_scale_f32(out->data, multiplier[0], out->data, out->length);
for (int channel=1; channel < 4; channel++) {
in = receiveReadOnly_f32(channel);
if (!in) {
continue;
}
audio_block_f32_t *tmp = allocate_f32();
if (!tmp) { AudioStream_F32::release(in); continue; } //skip this channel if no block is available
arm_scale_f32(in->data, multiplier[channel], tmp->data, in->length);
arm_add_f32(out->data, tmp->data, out->data, in->length);
AudioStream_F32::release(tmp);
AudioStream_F32::release(in);
}
if (out) {
AudioStream_F32::transmit(out);
AudioStream_F32::release(out);
}
}
void AudioMixer8_F32::update(void) {
audio_block_f32_t *in, *out=NULL;
out = receiveWritable_f32(0); //try to get the first input channel
if (!out) return; //if it's not there, return immediately
arm_scale_f32(out->data, multiplier[0], out->data, out->length); //scale the first input channel
//load and process the rest of the channels
for (int channel=1; channel < 8; channel++) {
in = receiveReadOnly_f32(channel);
if (!in) {
continue;
}
audio_block_f32_t *tmp = allocate_f32();
if (!tmp) { AudioStream_F32::release(in); continue; } //skip this channel if no block is available
arm_scale_f32(in->data, multiplier[channel], tmp->data, in->length);
arm_add_f32(out->data, tmp->data, out->data, in->length);
AudioStream_F32::release(tmp);
AudioStream_F32::release(in);
}
if (out) {
AudioStream_F32::transmit(out);
AudioStream_F32::release(out);
}
}

@ -0,0 +1,63 @@
/*
* AudioMixer
*
* AudioMixer4
* Created: Patrick Radius, December 2016
* Purpose: Mix up to 4 audio channels with individual gain controls.
* Assumes floating-point data.
*
* Each input is a single (mono) stream of audio data; the mixed output is also mono.
*
* Extended to AudioMixer8
* By: Chip Audette, OpenAudio, Feb 2017
*
* MIT License. use at your own risk.
*/
#ifndef AUDIOMIXER_F32_H
#define AUDIOMIXER_F32_H
#include <arm_math.h>
#include <AudioStream_F32.h>
class AudioMixer4_F32 : public AudioStream_F32 {
//GUI: inputs:4, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:Mixer4
public:
AudioMixer4_F32() : AudioStream_F32(4, inputQueueArray) {
for (int i=0; i<4; i++) multiplier[i] = 1.0;
}
virtual void update(void);
void gain(unsigned int channel, float gain) {
if (channel >= 4) return; //channel is unsigned, so only the upper bound needs checking
multiplier[channel] = gain;
}
private:
audio_block_f32_t *inputQueueArray[4];
float multiplier[4];
};
class AudioMixer8_F32 : public AudioStream_F32 {
//GUI: inputs:8, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:Mixer8
public:
AudioMixer8_F32() : AudioStream_F32(8, inputQueueArray) {
for (int i=0; i<8; i++) multiplier[i] = 1.0;
}
virtual void update(void);
void gain(unsigned int channel, float gain) {
if (channel >= 8) return; //channel is unsigned, so only the upper bound needs checking
multiplier[channel] = gain;
}
private:
audio_block_f32_t *inputQueueArray[8];
float multiplier[8];
};
#endif
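A minimal usage sketch for the mixer (the channel gains are illustrative):

  AudioMixer4_F32 mixer1;
  mixer1.gain(0, 0.5f);   //attenuate channel 0 by half
  mixer1.gain(1, 2.0f);   //boost channel 1; channels 2 and 3 stay at the default gain of 1.0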

@ -30,6 +30,14 @@ void AudioStream_F32::initialize_f32_memory(audio_block_f32_t *data, unsigned in
__enable_irq();
} // end initialize_memory
void AudioStream_F32::initialize_f32_memory(audio_block_f32_t *data, unsigned int num, const AudioSettings_F32 &settings)
{
initialize_f32_memory(data,num);
for (unsigned int i=0; i < num; i++) {
data[i].fs_Hz = settings.sample_rate_Hz;
data[i].length = settings.audio_block_samples;
}
} // end initialize_memory
// Allocate 1 audio data block. If successful
// the caller is the only owner of this new block
@ -160,3 +168,4 @@ void AudioConnection_F32::connect(void) {
dst.active = true;
__enable_irq();
}

@ -10,31 +10,63 @@
* MIT License. use at your own risk.
*/
#ifndef _OpenAudio_ArduinoLibrary
#define _OpenAudio_ArduinoLibrary
#ifndef _AudioStream_F32_h
#define _AudioStream_F32_h
#include <arm_math.h> //ARM DSP extensions. for speed!
#include <Audio.h> //Teensy Audio Library
class AudioStream_F32;
class AudioConnection_F32;
class AudioSettings_F32;
class AudioSettings_F32 {
public:
AudioSettings_F32(float fs_Hz, int block_size) :
sample_rate_Hz(fs_Hz), audio_block_samples(block_size) {}
const float sample_rate_Hz;
const int audio_block_samples;
float cpu_load_percent(const int n) { //n is the number of cycles
#define CYCLE_COUNTER_APPROX_PERCENT(n) (((n) + (F_CPU / 32 / AUDIO_SAMPLE_RATE * AUDIO_BLOCK_SAMPLES / 100)) / (F_CPU / 16 / AUDIO_SAMPLE_RATE * AUDIO_BLOCK_SAMPLES / 100))
float foo1 = ((float)(F_CPU / 32))/sample_rate_Hz;
foo1 *= ((float)audio_block_samples);
foo1 /= 100.f;
foo1 += (float)n;
float foo2 = (float)(F_CPU / 16)/sample_rate_Hz;
foo2 *= ((float)audio_block_samples);
foo2 /= 100.f;
return foo1 / foo2;
//return (((n) + (F_CPU / 32 / sample_rate_Hz * audio_block_samples / 100)) / (F_CPU / 16 / sample_rate_Hz * audio_block_samples / 100));
}
float processorUsage(void) { return cpu_load_percent(AudioStream::cpu_cycles_total); };
float processorUsageMax(void) { return cpu_load_percent(AudioStream::cpu_cycles_total_max); }
void processorUsageMaxReset(void) { AudioStream::cpu_cycles_total_max = AudioStream::cpu_cycles_total; }
};
//create a new structure to hold audio as floating point values.
//modeled on the existing teensy audio block struct, which uses Int16
//https://github.com/PaulStoffregen/cores/blob/268848cdb0121f26b7ef6b82b4fb54abbe465427/teensy3/AudioStream.h
typedef struct audio_block_f32_struct {
unsigned char ref_count;
unsigned char memory_pool_index;
unsigned char reserved1;
unsigned char reserved2;
#if AUDIO_BLOCK_SAMPLES < 128
float32_t data[128]; //limit array size to be no smaller than 128. unstable otherwise?
#else
float32_t data[AUDIO_BLOCK_SAMPLES]; // AUDIO_BLOCK_SAMPLES is 128, from AudioStream.h
#endif
int length = AUDIO_BLOCK_SAMPLES; // AUDIO_BLOCK_SAMPLES is 128, from AudioStream.h
float fs_Hz = AUDIO_SAMPLE_RATE; // AUDIO_SAMPLE_RATE is 44117.64706 from AudioStream.h
} audio_block_f32_t;
class audio_block_f32_t {
public:
audio_block_f32_t(void) {};
audio_block_f32_t(const AudioSettings_F32 &settings) {
fs_Hz = settings.sample_rate_Hz;
length = settings.audio_block_samples;
};
unsigned char ref_count;
unsigned char memory_pool_index;
unsigned char reserved1;
unsigned char reserved2;
float32_t data[AUDIO_BLOCK_SAMPLES]; // AUDIO_BLOCK_SAMPLES is 128, from AudioStream.h
const int full_length = AUDIO_BLOCK_SAMPLES;
int length = AUDIO_BLOCK_SAMPLES; // AUDIO_BLOCK_SAMPLES is 128, from AudioStream.h
float fs_Hz = AUDIO_SAMPLE_RATE; // AUDIO_SAMPLE_RATE is 44117.64706 from AudioStream.h
};
class AudioConnection_F32
{
@ -64,6 +96,11 @@ class AudioConnection_F32
AudioStream_F32::initialize_f32_memory(data_f32, num); \
})
#define AudioMemory_F32_wSettings(num,settings) ({ \
static audio_block_f32_t data_f32[num]; \
AudioStream_F32::initialize_f32_memory(data_f32, num, settings); \
})
#define AudioMemoryUsage_F32() (AudioStream_F32::f32_memory_used)
#define AudioMemoryUsageMax_F32() (AudioStream_F32::f32_memory_used_max)
@ -80,6 +117,7 @@ class AudioStream_F32 : public AudioStream {
}
};
static void initialize_f32_memory(audio_block_f32_t *data, unsigned int num);
static void initialize_f32_memory(audio_block_f32_t *data, unsigned int num, const AudioSettings_F32 &settings);
//virtual void update(audio_block_f32_t *) = 0;
static uint8_t f32_memory_used;
static uint8_t f32_memory_used_max;
@ -93,6 +131,7 @@ class AudioStream_F32 : public AudioStream {
audio_block_f32_t * receiveReadOnly_f32(unsigned int index = 0);
audio_block_f32_t * receiveWritable_f32(unsigned int index = 0);
friend class AudioConnection_F32;
private:
AudioConnection_F32 *destination_list_f32;
audio_block_f32_t **inputQueue_f32;
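A minimal usage sketch for the new AudioSettings_F32 path (the 24 kHz / 32-sample values are examples; only AudioInputI2S_F32 is shown because its settings-aware constructor appears later in this commit):

  AudioSettings_F32 audio_settings(24000.0f, 32);   //sample rate (Hz), samples per block
  AudioInputI2S_F32 i2s_in(audio_settings);
  // ... other modules and AudioConnection_F32 patch cords ...
  AudioMemory_F32_wSettings(20, audio_settings);    //allocate 20 float32 blocks sized per the settings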

@ -2,12 +2,17 @@
#include <AudioStream_F32.h>
#include <AudioControlSGTL5000_Extended.h>
#include <control_tlv320aic3206.h>
#include "AudioCalcEnvelope_F32.h"
#include "AudioCalcGainWDRC_F32.h"
#include "AudioConfigFIRFilterBank_F32.h"
#include <AudioConvert_F32.h>
#include "AudioEffectCompWDRC_F32.h"
#include "AudioEffectEmpty_F32.h"
#include <AudioEffectGain_F32.h>
#include <AudioEffectCompressor_F32.h>
#include <AudioFilterFIR_F32.h>
#include <AudioFilterIIR_F32.h>
#include <AudioMixer4_F32.h>
#include <AudioMixer_F32.h>
#include <AudioMultiply_F32.h>
#include "input_i2s_f32.h"
#include "play_queue_f32.h"

@ -1,7 +1,10 @@
/* Extension control files for TLV320AIC3206
* Copyright (c) 2016, Creare, bpf@creare.com
*
*
/*
control_tlv320aic3206
Created: Brendan Flynn (http://www.flexvoltbiosensor.com/) for Tympan, Jan-Feb 2017
Purpose: Control module for Texas Instruments TLV320AIC3206 compatible with Teensy Audio Library
License: MIT License. Use at your own risk.
*/
#include "control_tlv320aic3206.h"

@ -1,9 +1,8 @@
/*
control_tlv320aic3206
Created: BPF@creare.com Jan-Feb 2017
Purpose: Control module for TLV320AIC3206 compatible with Teensy Audio Library
Created: Brendan Flynn (http://www.flexvoltbiosensor.com/) for Tympan, Jan-Feb 2017
Purpose: Control module for Texas Instruments TLV320AIC3206 compatible with Teensy Audio Library
License: MIT License. Use at your own risk.
*/
@ -40,16 +39,18 @@ private:
#define TYMPAN_OUTPUT_HEADPHONE_JACK_OUT 1
#define TYMPAN_INPUT_LINE_IN 0
#define TYMPAN_INPUT_JACK_AS_MIC 1
#define TYMPAN_INPUT_JACK_AS_LINEIN 2
#define TYMPAN_INPUT_ON_BOARD_MIC 3
#define TYMPAN_MIC_BIAS_OFF 0
#define TYMPAN_MIC_BIAS_1_25 1
#define TYMPAN_MIC_BIAS_1_7 2
#define TYMPAN_MIC_BIAS_2_5 3
#define TYMPAN_MIC_BIAS_VSUPPLY 4
//convenience names to use with inputSelect() to set which analog inputs to use
#define TYMPAN_INPUT_LINE_IN 1 //uses IN1
#define TYMPAN_INPUT_ON_BOARD_MIC 2 //uses IN2 analog inputs
#define TYMPAN_INPUT_JACK_AS_LINEIN 3 //uses IN3 analog inputs
#define TYMPAN_INPUT_JACK_AS_MIC 4 //uses IN3 analog inputs *and* enables mic bias
//names to use with setMicBias() to set the amount of bias voltage to use
#define TYMPAN_MIC_BIAS_OFF 0
#define TYMPAN_MIC_BIAS_1_25 1
#define TYMPAN_MIC_BIAS_1_7 2
#define TYMPAN_MIC_BIAS_2_5 3
#define TYMPAN_MIC_BIAS_VSUPPLY 4
#define TYMPAN_DEFAULT_MIC_BIAS TYMPAN_MIC_BIAS_2_5
#endif
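A minimal usage sketch for these defines (the object name audioHardware is an assumption; inputSelect() and setMicBias() are the methods named in the comments above):

  audioHardware.inputSelect(TYMPAN_INPUT_JACK_AS_MIC);  //use IN3 and enable the mic bias
  audioHardware.setMicBias(TYMPAN_DEFAULT_MIC_BIAS);    //2.5 V mic bias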

@ -36,6 +36,11 @@ bool AudioInputI2S_F32::update_responsibility = false;
DMAChannel AudioInputI2S_F32::dma(false);
float AudioInputI2S_F32::sample_rate_Hz = AUDIO_SAMPLE_RATE;
int AudioInputI2S_F32::audio_block_samples = AUDIO_BLOCK_SAMPLES;
#define I2S_BUFFER_TO_USE_BYTES (AudioOutputI2S_F32::audio_block_samples*sizeof(i2s_rx_buffer[0]))
void AudioInputI2S_F32::begin(void)
{
dma.begin(true); // Allocate the DMA channel first
@ -44,6 +49,8 @@ void AudioInputI2S_F32::begin(void)
//block_right_1st = NULL;
// TODO: should we set & clear the I2S_RCSR_SR bit here?
AudioOutputI2S_F32::sample_rate_Hz = sample_rate_Hz;
AudioOutputI2S_F32::audio_block_samples = audio_block_samples;
AudioOutputI2S_F32::config_i2s();
CORE_PIN13_CONFIG = PORT_PCR_MUX(4); // pin 13, PTC5, I2S0_RXD0
@ -55,9 +62,12 @@ void AudioInputI2S_F32::begin(void)
dma.TCD->SLAST = 0;
dma.TCD->DADDR = i2s_rx_buffer;
dma.TCD->DOFF = 2;
dma.TCD->CITER_ELINKNO = sizeof(i2s_rx_buffer) / 2;
dma.TCD->DLASTSGA = -sizeof(i2s_rx_buffer);
dma.TCD->BITER_ELINKNO = sizeof(i2s_rx_buffer) / 2;
//dma.TCD->CITER_ELINKNO = sizeof(i2s_rx_buffer) / 2; //original
dma.TCD->CITER_ELINKNO = I2S_BUFFER_TO_USE_BYTES / 2;
//dma.TCD->DLASTSGA = -sizeof(i2s_rx_buffer); //original
dma.TCD->DLASTSGA = -I2S_BUFFER_TO_USE_BYTES;
//dma.TCD->BITER_ELINKNO = sizeof(i2s_rx_buffer) / 2; //original
dma.TCD->BITER_ELINKNO = I2S_BUFFER_TO_USE_BYTES / 2;
dma.TCD->CSR = DMA_TCD_CSR_INTHALF | DMA_TCD_CSR_INTMAJOR;
#endif
dma.triggerAtHardwareEvent(DMAMUX_SOURCE_I2S0_RX);
@ -67,7 +77,9 @@ void AudioInputI2S_F32::begin(void)
I2S0_RCSR |= I2S_RCSR_RE | I2S_RCSR_BCE | I2S_RCSR_FRDE | I2S_RCSR_FR;
I2S0_TCSR |= I2S_TCSR_TE | I2S_TCSR_BCE; // TX clock enable, because sync'd to TX
dma.attachInterrupt(isr);
}
};
void AudioInputI2S_F32::isr(void)
{
@ -82,26 +94,33 @@ void AudioInputI2S_F32::isr(void)
#endif
dma.clearInterrupt();
if (daddr < (uint32_t)i2s_rx_buffer + sizeof(i2s_rx_buffer) / 2) {
//if (daddr < (uint32_t)i2s_rx_buffer + sizeof(i2s_rx_buffer) / 2) {
if (daddr < (uint32_t)i2s_rx_buffer + I2S_BUFFER_TO_USE_BYTES / 2) {
// DMA is receiving to the first half of the buffer
// need to remove data from the second half
src = (int16_t *)&i2s_rx_buffer[AUDIO_BLOCK_SAMPLES/2];
end = (int16_t *)&i2s_rx_buffer[AUDIO_BLOCK_SAMPLES];
//src = (int16_t *)&i2s_rx_buffer[AUDIO_BLOCK_SAMPLES/2]; //original
//end = (int16_t *)&i2s_rx_buffer[AUDIO_BLOCK_SAMPLES]; //original
src = (int16_t *)&i2s_rx_buffer[audio_block_samples/2];
end = (int16_t *)&i2s_rx_buffer[audio_block_samples];
if (AudioInputI2S_F32::update_responsibility) AudioStream_F32::update_all();
} else {
// DMA is receiving to the second half of the buffer
// need to remove data from the first half
src = (int16_t *)&i2s_rx_buffer[0];
end = (int16_t *)&i2s_rx_buffer[AUDIO_BLOCK_SAMPLES/2];
//end = (int16_t *)&i2s_rx_buffer[AUDIO_BLOCK_SAMPLES/2]; //original
end = (int16_t *)&i2s_rx_buffer[audio_block_samples/2];
}
left = AudioInputI2S_F32::block_left;
right = AudioInputI2S_F32::block_right;
if (left != NULL && right != NULL) {
offset = AudioInputI2S_F32::block_offset;
if (offset <= AUDIO_BLOCK_SAMPLES/2) {
//if (offset <= AUDIO_BLOCK_SAMPLES/2) { //original
if (offset <= ((uint32_t) audio_block_samples/2)) {
dest_left = &(left->data[offset]);
dest_right = &(right->data[offset]);
AudioInputI2S_F32::block_offset = offset + AUDIO_BLOCK_SAMPLES/2;
//AudioInputI2S_F32::block_offset = offset + AUDIO_BLOCK_SAMPLES/2; //original
AudioInputI2S_F32::block_offset = offset + audio_block_samples/2;
do {
//n = *src++;
//*dest_left++ = (int16_t)n;
@ -114,7 +133,10 @@ void AudioInputI2S_F32::isr(void)
//digitalWriteFast(3, LOW);
}
#define I16_TO_F32_NORM_FACTOR (3.051757812500000E-05) //which is 1/32768
void AudioInputI2S_F32::convert_i16_to_f32( int16_t *p_i16, float32_t *p_f32, int len) {
for (int i=0; i<len; i++) { *p_f32++ = ((float32_t)(*p_i16++)) * I16_TO_F32_NORM_FACTOR; }
}
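//For reference: in convert_i16_to_f32(), a full-scale +32767 maps to 32767/32768 = 0.99997f and -32768 maps to exactly -1.0f,
//so the resulting float samples stay within +/-1.0.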
void AudioInputI2S_F32::update(void)
{
@ -130,7 +152,8 @@ void AudioInputI2S_F32::update(void)
}
}
__disable_irq();
if (block_offset >= AUDIO_BLOCK_SAMPLES) {
//if (block_offset >= AUDIO_BLOCK_SAMPLES) { //original
if (block_offset >= audio_block_samples) {
// the DMA filled 2 blocks, so grab them and get the
// 2 new blocks to the DMA, as quickly as possible
out_left = block_left;
@ -153,9 +176,9 @@ void AudioInputI2S_F32::update(void)
}
}
if (out_left_f32 != NULL) {
//convert to f32
arm_q15_to_float((q15_t *)out_left->data, (float32_t *)out_left_f32->data, AUDIO_BLOCK_SAMPLES);
arm_q15_to_float((q15_t *)out_right->data, (float32_t *)out_right_f32->data, AUDIO_BLOCK_SAMPLES);
//convert int16 to float 32
convert_i16_to_f32(out_left->data, out_left_f32->data, audio_block_samples);
convert_i16_to_f32(out_right->data, out_right_f32->data, audio_block_samples);
//transmit the f32 data!
AudioStream_F32::transmit(out_left_f32,0);

@ -36,9 +36,16 @@ class AudioInputI2S_F32 : public AudioStream_F32
{
//GUI: inputs:0, outputs:2 //this line used for automatic generation of GUI nodes
public:
AudioInputI2S_F32(void) : AudioStream_F32(0, NULL) { begin(); }
AudioInputI2S_F32(void) : AudioStream_F32(0, NULL) { begin(); } //uses default AUDIO_SAMPLE_RATE and BLOCK_SIZE_SAMPLES from AudioStream.h
AudioInputI2S_F32(const AudioSettings_F32 &settings) : AudioStream_F32(0, NULL) {
sample_rate_Hz = settings.sample_rate_Hz;
audio_block_samples = settings.audio_block_samples;
begin();
}
virtual void update(void);
static void convert_i16_to_f32( int16_t *p_i16, float32_t *p_f32, int len) ;
void begin(void);
//friend class AudioOutputI2S_F32;
protected:
AudioInputI2S_F32(int dummy): AudioStream_F32(0, NULL) {} // to be used only inside AudioInputI2Sslave !!
static bool update_responsibility;
@ -47,6 +54,8 @@ protected:
private:
static audio_block_t *block_left;
static audio_block_t *block_right;
static float sample_rate_Hz;
static int audio_block_samples;
static uint16_t block_offset;
};

@ -25,9 +25,54 @@
*/
#include "output_i2s_f32.h"
#include "memcpy_audio.h"
//#include "input_i2s_f32.h"
//include "memcpy_audio.h"
//#include "memcpy_interleave.h"
#include <arm_math.h>
//Here's the function to change the sample rate of the system (via changing the clocking of the I2S bus)
//https://forum.pjrc.com/threads/38753-Discussion-about-a-simple-way-to-change-the-sample-rate?p=121365&viewfull=1#post121365
float setI2SFreq(const float freq_Hz) {
int freq = (int)freq_Hz;
typedef struct {
uint8_t mult;
uint16_t div;
} __attribute__((__packed__)) tmclk;
const int numfreqs = 16;
const int samplefreqs[numfreqs] = { 2000, 8000, 11025, 16000, 22050, 24000, 32000, 44100, 44117.64706 , 48000, 88200, 44117.64706 * 2, 96000, 176400, 44117.64706 * 4, 192000};
#if (F_PLL==16000000)
const tmclk clkArr[numfreqs] = {{4, 125}, {16, 125}, {148, 839}, {32, 125}, {145, 411}, {48, 125}, {64, 125}, {151, 214}, {12, 17}, {96, 125}, {151, 107}, {24, 17}, {192, 125}, {127, 45}, {48, 17}, {255, 83} };
#elif (F_PLL==72000000)
const tmclk clkArr[numfreqs] = {{832, 1125}, {32, 1125}, {49, 1250}, {64, 1125}, {49, 625}, {32, 375}, {128, 1125}, {98, 625}, {8, 51}, {64, 375}, {196, 625}, {16, 51}, {128, 375}, {249, 397}, {32, 51}, {185, 271} };
#elif (F_PLL==96000000)
const tmclk clkArr[numfreqs] = {{2, 375},{8, 375}, {73, 2483}, {16, 375}, {147, 2500}, {8, 125}, {32, 375}, {147, 1250}, {2, 17}, {16, 125}, {147, 625}, {4, 17}, {32, 125}, {151, 321}, {8, 17}, {64, 125} };
#elif (F_PLL==120000000)
const tmclk clkArr[numfreqs] = {{8, 1875},{32, 1875}, {89, 3784}, {64, 1875}, {147, 3125}, {32, 625}, {128, 1875}, {205, 2179}, {8, 85}, {64, 625}, {89, 473}, {16, 85}, {128, 625}, {178, 473}, {32, 85}, {145, 354} };
#elif (F_PLL==144000000)
const tmclk clkArr[numfreqs] = {{4, 1125},{16, 1125}, {49, 2500}, {32, 1125}, {49, 1250}, {16, 375}, {64, 1125}, {49, 625}, {4, 51}, {32, 375}, {98, 625}, {8, 51}, {64, 375}, {196, 625}, {16, 51}, {128, 375} };
#elif (F_PLL==180000000)
const tmclk clkArr[numfreqs] = {{23, 8086}, {46, 4043}, {49, 3125}, {73, 3208}, {98, 3125}, {37, 1084}, {183, 4021}, {196, 3125}, {16, 255}, {128, 1875}, {107, 853}, {32, 255}, {219, 1604}, {214, 853}, {64, 255}, {219, 802} };
#elif (F_PLL==192000000)
const tmclk clkArr[numfreqs] = {{1, 375}, {4, 375}, {37, 2517}, {8, 375}, {73, 2483}, {4, 125}, {16, 375}, {147, 2500}, {1, 17}, {8, 125}, {147, 1250}, {2, 17}, {16, 125}, {147, 625}, {4, 17}, {32, 125} };
#elif (F_PLL==216000000)
const tmclk clkArr[numfreqs] = {{8, 3375}, {32, 3375}, {49, 3750}, {64, 3375}, {49, 1875}, {32, 1125}, {128, 3375}, {98, 1875}, {8, 153}, {64, 1125}, {196, 1875}, {16, 153}, {128, 1125}, {226, 1081}, {32, 153}, {147, 646} };
#elif (F_PLL==240000000)
const tmclk clkArr[numfreqs] = {{4, 1875}, {16, 1875}, {29, 2466}, {32, 1875}, {89, 3784}, {16, 625}, {64, 1875}, {147, 3125}, {4, 85}, {32, 625}, {205, 2179}, {8, 85}, {64, 625}, {89, 473}, {16, 85}, {128, 625} };
#endif
for (int f = 0; f < numfreqs; f++) {
if ( freq == samplefreqs[f] ) {
while (I2S0_MCR & I2S_MCR_DUF) ;
I2S0_MDR = I2S_MDR_FRACT((clkArr[f].mult - 1)) | I2S_MDR_DIVIDE((clkArr[f].div - 1));
return (float)(F_PLL / 256 * clkArr[f].mult / clkArr[f].div);
}
}
return 0.0f;
}
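// --- Hedged example (not part of this commit): a host-side check of the relationship the clkArr
// tables encode, namely MCLK = F_PLL * mult / div and fs = MCLK / 256, which is roughly what
// setI2SFreq() returns. The two {mult, div} pairs below are copied from the F_PLL==96000000 table.
#include <cstdio>

int main() {
	const double F_PLL_Hz = 96000000.0;                  // assumes F_PLL == 96 MHz
	const struct { int mult; int div; double fs_expected; } entries[] = {
		{2, 17, 44117.64706},                            // Teensy "44.1 kHz-ish" rate
		{16, 125, 48000.0},
	};
	for (const auto &e : entries) {
		double mclk = F_PLL_Hz * e.mult / e.div;         // I2S master clock from the fractional divider
		double fs = mclk / 256.0;                        // LRCLK (sample rate) = MCLK / 256
		std::printf("mult=%d div=%d -> fs=%.3f Hz (expected %.3f)\n", e.mult, e.div, fs, e.fs_expected);
	}
	return 0;
}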
audio_block_t * AudioOutputI2S_F32::block_left_1st = NULL;
audio_block_t * AudioOutputI2S_F32::block_right_1st = NULL;
audio_block_t * AudioOutputI2S_F32::block_left_2nd = NULL;
@ -35,9 +80,15 @@ audio_block_t * AudioOutputI2S_F32::block_right_2nd = NULL;
uint16_t AudioOutputI2S_F32::block_left_offset = 0;
uint16_t AudioOutputI2S_F32::block_right_offset = 0;
bool AudioOutputI2S_F32::update_responsibility = false;
DMAMEM static uint32_t i2s_tx_buffer[AUDIO_BLOCK_SAMPLES];
DMAMEM static uint32_t i2s_tx_buffer[AUDIO_BLOCK_SAMPLES]; //local audio_block_samples should be no larger than global AUDIO_BLOCK_SAMPLES
DMAChannel AudioOutputI2S_F32::dma(false);
float AudioOutputI2S_F32::sample_rate_Hz = AUDIO_SAMPLE_RATE;
int AudioOutputI2S_F32::audio_block_samples = AUDIO_BLOCK_SAMPLES;
#define I2S_BUFFER_TO_USE_BYTES (AudioOutputI2S_F32::audio_block_samples*sizeof(i2s_tx_buffer[0]))
void AudioOutputI2S_F32::begin(void)
{
dma.begin(true); // Allocate the DMA channel first
@ -47,6 +98,7 @@ void AudioOutputI2S_F32::begin(void)
// TODO: should we set & clear the I2S_TCSR_SR bit here?
config_i2s();
CORE_PIN22_CONFIG = PORT_PCR_MUX(6); // pin 22, PTC1, I2S0_TXD0
#if defined(KINETISK)
@ -54,12 +106,15 @@ void AudioOutputI2S_F32::begin(void)
dma.TCD->SOFF = 2;
dma.TCD->ATTR = DMA_TCD_ATTR_SSIZE(1) | DMA_TCD_ATTR_DSIZE(1);
dma.TCD->NBYTES_MLNO = 2;
dma.TCD->SLAST = -sizeof(i2s_tx_buffer);
//dma.TCD->SLAST = -sizeof(i2s_tx_buffer); //original
dma.TCD->SLAST = -I2S_BUFFER_TO_USE_BYTES;
dma.TCD->DADDR = &I2S0_TDR0;
dma.TCD->DOFF = 0;
dma.TCD->CITER_ELINKNO = sizeof(i2s_tx_buffer) / 2;
//dma.TCD->CITER_ELINKNO = sizeof(i2s_tx_buffer) / 2; //original
dma.TCD->CITER_ELINKNO = I2S_BUFFER_TO_USE_BYTES / 2;
dma.TCD->DLASTSGA = 0;
dma.TCD->BITER_ELINKNO = sizeof(i2s_tx_buffer) / 2;
//dma.TCD->BITER_ELINKNO = sizeof(i2s_tx_buffer) / 2; //original
dma.TCD->BITER_ELINKNO = I2S_BUFFER_TO_USE_BYTES / 2;
dma.TCD->CSR = DMA_TCD_CSR_INTHALF | DMA_TCD_CSR_INTMAJOR;
#endif
dma.triggerAtHardwareEvent(DMAMUX_SOURCE_I2S0_TX);
@ -69,6 +124,13 @@ void AudioOutputI2S_F32::begin(void)
I2S0_TCSR = I2S_TCSR_SR;
I2S0_TCSR = I2S_TCSR_TE | I2S_TCSR_BCE | I2S_TCSR_FRDE;
dma.attachInterrupt(isr);
// change the I2S frequencies to make the requested sample rate
setI2SFreq(AudioOutputI2S_F32::sample_rate_Hz);
enabled = 1;
//AudioInputI2S_F32::begin_guts();
}
@ -81,10 +143,12 @@ void AudioOutputI2S_F32::isr(void)
saddr = (uint32_t)(dma.TCD->SADDR);
dma.clearInterrupt();
if (saddr < (uint32_t)i2s_tx_buffer + sizeof(i2s_tx_buffer) / 2) {
//if (saddr < (uint32_t)i2s_tx_buffer + sizeof(i2s_tx_buffer) / 2) { //original
if (saddr < (uint32_t)i2s_tx_buffer + I2S_BUFFER_TO_USE_BYTES / 2) {
// DMA is transmitting the first half of the buffer
// so we must fill the second half
dest = (int16_t *)&i2s_tx_buffer[AUDIO_BLOCK_SAMPLES/2];
//dest = (int16_t *)&i2s_tx_buffer[AUDIO_BLOCK_SAMPLES/2]; //original
dest = (int16_t *)&i2s_tx_buffer[audio_block_samples/2];
if (AudioOutputI2S_F32::update_responsibility) AudioStream_F32::update_all();
} else {
// DMA is transmitting the second half of the buffer
@ -97,6 +161,7 @@ void AudioOutputI2S_F32::isr(void)
offsetL = AudioOutputI2S_F32::block_left_offset;
offsetR = AudioOutputI2S_F32::block_right_offset;
/* Original
if (blockL && blockR) {
memcpy_tointerleaveLR(dest, blockL->data + offsetL, blockR->data + offsetR);
offsetL += AUDIO_BLOCK_SAMPLES / 2;
@ -111,8 +176,34 @@ void AudioOutputI2S_F32::isr(void)
memset(dest,0,AUDIO_BLOCK_SAMPLES * 2);
return;
}
*/
int16_t *d = dest;
if (blockL && blockR) {
//memcpy_tointerleaveLR(dest, blockL->data + offsetL, blockR->data + offsetR);
//memcpy_tointerleaveLRwLen(dest, blockL->data + offsetL, blockR->data + offsetR, audio_block_samples/2);
int16_t *pL = blockL->data + offsetL;
int16_t *pR = blockR->data + offsetR;
for (int i=0; i < audio_block_samples/2; i++) { *d++ = *pL++; *d++ = *pR++; } //interleave
offsetL += audio_block_samples / 2;
offsetR += audio_block_samples / 2;
} else if (blockL) {
//memcpy_tointerleaveLR(dest, blockL->data + offsetL, blockR->data + offsetR);
int16_t *pL = blockL->data + offsetL;
for (int i=0; i < audio_block_samples / 2 * 2; i+=2) { *(d+i) = *pL++; *(d+i+1) = 0; } //interleave left with silence on the right
offsetL += audio_block_samples / 2;
} else if (blockR) {
int16_t *pR = blockR->data + offsetR;
for (int i=0; i < audio_block_samples / 2 * 2; i+=2) { *(d+i) = 0; *(d+i+1) = *pR++; } //interleave right with silence on the left
offsetR += audio_block_samples / 2;
} else {
//memset(dest,0,AUDIO_BLOCK_SAMPLES * 2);
memset(dest,0,audio_block_samples * 2);
return;
}
if (offsetL < AUDIO_BLOCK_SAMPLES) {
//if (offsetL < AUDIO_BLOCK_SAMPLES) { //original
if (offsetL < (uint16_t)audio_block_samples) {
AudioOutputI2S_F32::block_left_offset = offsetL;
} else {
AudioOutputI2S_F32::block_left_offset = 0;
@ -120,7 +211,8 @@ void AudioOutputI2S_F32::isr(void)
AudioOutputI2S_F32::block_left_1st = AudioOutputI2S_F32::block_left_2nd;
AudioOutputI2S_F32::block_left_2nd = NULL;
}
if (offsetR < AUDIO_BLOCK_SAMPLES) {
//if (offsetR < AUDIO_BLOCK_SAMPLES) {
if (offsetR < (uint16_t)audio_block_samples) {
AudioOutputI2S_F32::block_right_offset = offsetR;
} else {
AudioOutputI2S_F32::block_right_offset = 0;
@ -199,8 +291,9 @@ void AudioOutputI2S_F32::isr(void)
#endif
}
void AudioOutputI2S_F32::convert_f32_to_i16(float32_t *p_f32, int16_t *p_i16, int len) {
	for (int i=0; i<len; i++) { *p_i16++ = (int16_t)max(-32768.f, min(32767.f, (*p_f32++) * 32768.f)); } //clamp in float before casting so full-scale values cannot wrap around
}
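// --- Hedged example (not part of this commit): the scaling convention that convert_f32_to_i16()
// implements, i.e. full-scale float (+/-1.0) maps to full-scale int16, with the clamp applied in
// float before the cast so that +1.0 cannot wrap around to -32768.
#include <cstdio>
#include <cstdint>

static int16_t f32_to_i16_sample(float v) {
	float scaled = v * 32768.0f;
	if (scaled >  32767.0f) scaled =  32767.0f;   // clamp before casting
	if (scaled < -32768.0f) scaled = -32768.0f;
	return (int16_t)scaled;
}

int main() {
	const float in[4] = {0.0f, 0.5f, 1.0f, -1.0f};
	for (int i = 0; i < 4; i++)
		std::printf("%+.2f -> %d\n", in[i], (int)f32_to_i16_sample(in[i]));  // 0, 16384, 32767, -32768
	return 0;
}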
void AudioOutputI2S_F32::update(void)
{
@ -213,10 +306,18 @@ void AudioOutputI2S_F32::update(void)
audio_block_f32_t *block_f32;
block_f32 = receiveReadOnly_f32(0); // input 0 = left channel
if (block_f32) {
if (block_f32->length != audio_block_samples) {
Serial.print("AudioOutputI2S_F32: *** WARNING ***: audio_block says len = ");
Serial.print(block_f32->length);
Serial.print(", but I2S settings want it to be = ");
Serial.println(audio_block_samples);
}
//Serial.print("AudioOutputI2S_F32: audio_block_samples = ");
//Serial.println(audio_block_samples);
//convert F32 to Int16
block = AudioStream::allocate();
arm_float_to_q15((float32_t *)(block_f32->data),(q15_t *)(block->data), AUDIO_BLOCK_SAMPLES);
convert_f32_to_i16(block_f32->data, block->data, audio_block_samples);
AudioStream_F32::release(block_f32);
//now process the data blocks
@ -242,7 +343,7 @@ void AudioOutputI2S_F32::update(void)
if (block_f32) {
//convert F32 to Int16
block = AudioStream::allocate();
arm_float_to_q15((float32_t *)(block_f32->data),(q15_t *)(block->data), AUDIO_BLOCK_SAMPLES);
convert_f32_to_i16(block_f32->data, block->data, audio_block_samples);
AudioStream_F32::release(block_f32);
__disable_irq();
@ -305,11 +406,11 @@ void AudioOutputI2S_F32::update(void)
#endif
#ifndef MCLK_SRC
#if F_CPU >= 20000000
#define MCLK_SRC 3 // the PLL
#else
#define MCLK_SRC 0 // system clock
#endif
#if (F_CPU >= 20000000)
#define MCLK_SRC 3 // the PLL
#else
#define MCLK_SRC 0 // system clock
#endif
#endif
void AudioOutputI2S_F32::config_i2s(void)
@ -432,4 +533,5 @@ void AudioOutputI2Sslave::config_i2s(void)
CORE_PIN9_CONFIG = PORT_PCR_MUX(6); // pin 9, PTC3, I2S0_TX_BCLK
CORE_PIN11_CONFIG = PORT_PCR_MUX(6); // pin 11, PTC6, I2S0_MCLK
}
*/
*/

@ -32,16 +32,25 @@
#include "AudioStream.h"
#include "DMAChannel.h"
class AudioOutputI2S_F32 : public AudioStream_F32
{
//GUI: inputs:2, outputs:0 //this line used for automatic generation of GUI node
public:
AudioOutputI2S_F32(void) : AudioStream_F32(2, inputQueueArray) { begin(); }
AudioOutputI2S_F32(void) : AudioStream_F32(2, inputQueueArray) { begin(); } //uses default AUDIO_SAMPLE_RATE and AUDIO_BLOCK_SAMPLES from AudioStream.h
AudioOutputI2S_F32(const AudioSettings_F32 &settings) : AudioStream_F32(2, inputQueueArray)
{
sample_rate_Hz = settings.sample_rate_Hz;
audio_block_samples = settings.audio_block_samples;
begin();
}
virtual void update(void);
void begin(void);
friend class AudioInputI2S_F32;
static void convert_f32_to_i16( float32_t *p_f32, int16_t *p_i16, int len) ;
protected:
AudioOutputI2S_F32(int dummy): AudioStream_F32(2, inputQueueArray) {} // to be used only inside AudioOutputI2Sslave !!
//AudioOutputI2S_F32(const AudioSettings &settings): AudioStream_F32(2, inputQueueArray) {} // to be used only inside AudioOutputI2Sslave !!
static void config_i2s(void);
static audio_block_t *block_left_1st;
static audio_block_t *block_right_1st;
@ -54,6 +63,9 @@ private:
static uint16_t block_left_offset;
static uint16_t block_right_offset;
audio_block_f32_t *inputQueueArray[2];
static float sample_rate_Hz;
static int audio_block_samples;
volatile uint8_t enabled = 1;
};

@ -0,0 +1,167 @@
/*
Extended to f32 data
Created: Chip Audette, OpenAudio, Feb 2017
License: MIT License. Use at your own risk.
*/
/* Audio Library for Teensy 3.X
* Copyright (c) 2014, Paul Stoffregen, paul@pjrc.com
*
* Development of this audio library was funded by PJRC.COM, LLC by sales of
* Teensy and Audio Adaptor boards. Please support PJRC's efforts to develop
* open source software by purchasing Teensy or other PJRC products.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice, development funding notice, and this permission
* notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
// http://stenzel.waldorfmusic.de/post/pink/
// https://github.com/Stenzel/newshadeofpink
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// New Shade of Pink
// (c) 2014 Stefan Stenzel
// stefan at waldorfmusic.de
//
// Terms of use:
// Use for any purpose. If used in a commercial product, you should give me one.
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#include "synth_pinknoise_f32.h"
#include "input_i2s_f32.h" //for the audio_convert_i16_to_f32 routine
int16_t AudioSynthNoisePink_F32::instance_cnt = 0;
// Let preprocessor and compiler calculate two lookup tables for 12-tap FIR Filter
// with these coefficients: 1.190566, 0.162580, 0.002208, 0.025475, -0.001522,
// 0.007322, 0.001774, 0.004529, -0.001561, 0.000776, -0.000486, 0.002017
#define Fn(cf,m,shift) (2048*cf*(2*((m)>>shift&1)-1))
#define FA(n) (int32_t)(Fn(1.190566,n,0)+Fn(0.162580,n,1)+Fn(0.002208,n,2)+\
Fn(0.025475,n,3)+Fn(-0.001522,n,4)+Fn(0.007322,n,5))
#define FB(n) (int32_t)(Fn(0.001774,n,0)+Fn(0.004529,n,1)+Fn(-0.001561,n,2)+\
Fn(0.000776,n,3)+Fn(-0.000486,n,4)+Fn(0.002017,n,5))
#define FA8(n) FA(n),FA(n+1),FA(n+2),FA(n+3),FA(n+4),FA(n+5),FA(n+6),FA(n+7)
#define FB8(n) FB(n),FB(n+1),FB(n+2),FB(n+3),FB(n+4),FB(n+5),FB(n+6),FB(n+7)
const int32_t AudioSynthNoisePink_F32::pfira[64] = // 1st FIR lookup table
{FA8(0),FA8(8),FA8(16),FA8(24),FA8(32),FA8(40),FA8(48),FA8(56)};
const int32_t AudioSynthNoisePink_F32::pfirb[64] = // 2nd FIR lookup table
{FB8(0),FB8(8),FB8(16),FB8(24),FB8(32),FB8(40),FB8(48),FB8(56)};
// bitreversed lookup table
#define PM16(n) n,0x80,0x40,0x80,0x20,0x80,0x40,0x80,0x10,0x80,0x40,0x80,0x20,0x80,0x40,0x80
const uint8_t AudioSynthNoisePink_F32::pnmask[256] = {
PM16(0),PM16(8),PM16(4),PM16(8),PM16(2),PM16(8),PM16(4),PM16(8),
PM16(1),PM16(8),PM16(4),PM16(8),PM16(2),PM16(8),PM16(4),PM16(8)
};
#define PINT(bitmask, out) /* macro for processing: */\
bit = lfsr >> 31; /* spill random to all bits */\
dec &= ~bitmask; /* blank old decrement bit */\
lfsr <<= 1; /* shift lfsr */\
dec |= inc & bitmask; /* copy increment to decrement bit */\
inc ^= bit & bitmask; /* new random bit */\
accu += inc - dec; /* integrate */\
lfsr ^= bit & taps; /* update lfsr */\
out = accu + /* save output */\
pfira[lfsr & 0x3F] + /* add 1st half precalculated FIR */\
pfirb[lfsr >> 6 & 0x3F] /* add 2nd half, also corrects bias */
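// --- Hedged example (not part of this commit): recomputes a couple of pfira[] entries on a host
// PC to show what the FA()/Fn() macros do: each bit of the 6-bit index picks the sign of one FIR
// coefficient, and the table stores the pre-summed, 2048-scaled result. At run time the 12-tap
// FIR therefore costs only the two table lookups seen in the PINT() macro above.
#include <cstdio>
#include <cstdint>

static int32_t fir_sum_for_index(const double *coef, int ncoef, int idx) {
	double sum = 0.0;
	for (int k = 0; k < ncoef; k++) {
		int sign = ((idx >> k) & 1) ? +1 : -1;   // bit k of the index selects the sign of coef k
		sum += 2048.0 * coef[k] * sign;          // same 2048 scaling as Fn()
	}
	return (int32_t)sum;                         // same truncation as the (int32_t) cast in FA()
}

int main() {
	const double ca[6] = {1.190566, 0.162580, 0.002208, 0.025475, -0.001522, 0.007322};
	std::printf("FA(0)  recomputed = %d\n", (int)fir_sum_for_index(ca, 6, 0));    // all signs negative
	std::printf("FA(63) recomputed = %d\n", (int)fir_sum_for_index(ca, 6, 63));   // all signs positive
	return 0;
}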
void AudioSynthNoisePink_F32::update(void)
{
audio_block_t *block;
audio_block_f32_t *block_f32;
uint32_t *p, *end;
int32_t n1, n2;
int32_t gain;
int32_t inc, dec, accu, bit, lfsr;
int32_t taps;
if (!enabled) return;
gain = level;
if (gain == 0) return;
block = AudioStream::allocate();
block_f32 = AudioStream_F32::allocate_f32();
	if (!block || !block_f32) { //logical OR; also release whichever block did get allocated
		if (block) AudioStream::release(block);
		if (block_f32) AudioStream_F32::release(block_f32);
		return;
	}
p = (uint32_t *)(block->data);
//end = p + AUDIO_BLOCK_SAMPLES/2;
end = p + (block_f32->length)/2;
taps = 0x46000001;
inc = pinc;
dec = pdec;
accu = paccu;
lfsr = plfsr;
do {
int32_t mask = pnmask[pncnt++];
PINT(mask, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0400, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0200, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0400, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0100, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0400, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0200, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
PINT(0x0400, n1);
n1 = signed_multiply_32x16b(gain, n1);
PINT(0x0800, n2);
n2 = signed_multiply_32x16b(gain, n2);
*p++ = pack_16b_16b(n2, n1);
} while (p < end);
pinc = inc;
pdec = dec;
paccu = accu;
plfsr = lfsr;
//convert int16 to f32
AudioInputI2S_F32::convert_i16_to_f32(block->data,block_f32->data,block_f32->length);
AudioStream_F32::transmit(block_f32);
AudioStream_F32::release(block_f32);
AudioStream::release(block);
}

@ -1,17 +1,41 @@
/*
* AudioSynthNoiseWhite_F32
*
* Created: Chip Audette (OpenAudio), Feb 2017
* Extended from on Teensy Audio Library
*
* License: MIT License. Use at your own risk.
/*
Extended to f32 data
Created: Chip Audette, OpenAudio, Feb 2017
License: MIT License. Use at your own risk.
*/
/* Audio Library for Teensy 3.X
* Copyright (c) 2014, Paul Stoffregen, paul@pjrc.com
*
* Development of this audio library was funded by PJRC.COM, LLC by sales of
* Teensy and Audio Adaptor boards. Please support PJRC's efforts to develop
* open source software by purchasing Teensy or other PJRC products.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice, development funding notice, and this permission
* notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef synth_pinknoise_f32_h_
#define synth_pinknoise_f32_h_
#include "Arduino.h"
#include "AudioStream.h"
#include "AudioStream_F32.h"
#include <Audio.h>
#include "utility/dspinst.h"
class AudioSynthNoisePink_F32 : public AudioStream_F32
@ -20,41 +44,32 @@ class AudioSynthNoisePink_F32 : public AudioStream_F32
//GUI: shortName:pinknoise //this line used for automatic generation of GUI node
public:
AudioSynthNoisePink_F32() : AudioStream_F32(0, NULL) {
output_queue.begin();
plfsr = 0x5EED41F5 + instance_cnt++;
paccu = 0;
pncnt = 0;
pinc = 0x0CCC;
pdec = 0x0CCC;
patchCord100 = new AudioConnection(noise, 0, i16_to_f32, 0); //noise is an Int16 audio object. So, convert it!
patchCord101 = new AudioConnection_F32(i16_to_f32, 0, output_queue, 0);
enabled = 1;
}
void amplitude(float n) {
if (n < 0.0) n = 0.0;
else if (n > 1.0) n = 1.0;
level = (int32_t)(n * 65536.0);
}
//define audio processing stack right here.
AudioSynthNoisePink noise;
AudioConvert_I16toF32 i16_to_f32;
AudioRecordQueue_F32 output_queue;
AudioConnection *patchCord100;
AudioConnection_F32 *patchCord101;
void update(void) {
output_queue.clear();
//manually update audio blocks in the desired order
noise.update(); //the output should be routed directly via the AudioConnection
i16_to_f32.update(); // output is routed via the AudioConnection
output_queue.update();
//get the output
audio_block_f32_t *block = output_queue.getAudioBlock();
if (block == NULL) return;
//transmit the block, and release memory
AudioStream_F32::transmit(block);
output_queue.freeAudioBlock();
}
void amplitude(float n) {
noise.amplitude(n);
}
private:
virtual void update(void);
int enabled = 0;
private:
static const uint8_t pnmask[256];
static const int32_t pfira[64];
static const int32_t pfirb[64];
static int16_t instance_cnt;
int32_t plfsr; // linear feedback shift register
int32_t pinc; // increment for all noise sources (bits)
int32_t pdec; // decrement for all noise sources
int32_t paccu; // accumulator
uint8_t pncnt; // overflowing counter as index to pnmask[]
int32_t level; // 0=off, 65536=max
};
#endif
#endif

@ -24,38 +24,42 @@ void AudioSynthWaveformSine_F32::update(void)
audio_block_f32_t *block;
uint32_t i, ph, inc, index, scale;
int32_t val1, val2;
if (magnitude) {
block = allocate_f32();
if (block) {
ph = phase_accumulator;
inc = phase_increment;
for (i=0; i < AUDIO_BLOCK_SAMPLES; i++) {
index = ph >> 24;
val1 = AudioWaveformSine[index];
val2 = AudioWaveformSine[index+1];
scale = (ph >> 8) & 0xFFFF;
val2 *= scale;
val1 *= 0x10000 - scale;
#if defined(KINETISK)
block->data[i] = (float) multiply_32x32_rshift32(val1 + val2, magnitude);
#elif defined(KINETISL)
block->data[i] = (float) ((((val1 + val2) >> 16) * magnitude) >> 16);
#endif
ph += inc;
static uint32_t block_length = 0;
if (enabled) {
if (magnitude) {
block = allocate_f32();
if (block) {
block_length = (uint32_t)block->length;
ph = phase_accumulator;
inc = phase_increment;
for (i=0; i < block_length; i++) {
index = ph >> 24;
val1 = AudioWaveformSine[index];
val2 = AudioWaveformSine[index+1];
scale = (ph >> 8) & 0xFFFF;
val2 *= scale;
val1 *= 0x10000 - scale;
#if defined(KINETISK)
block->data[i] = (float) multiply_32x32_rshift32(val1 + val2, magnitude);
#elif defined(KINETISL)
block->data[i] = (float) ((((val1 + val2) >> 16) * magnitude) >> 16);
#endif
ph += inc;
block->data[i] = block->data[i] / 32768.0f; // scale to float
}
phase_accumulator = ph;
block->data[i] = block->data[i] / 32768.0f; // scale to float
AudioStream_F32::transmit(block);
AudioStream_F32::release(block);
return;
}
phase_accumulator = ph;
AudioStream_F32::transmit(block);
AudioStream_F32::release(block);
return;
}
phase_accumulator += phase_increment * block_length;
}
phase_accumulator += phase_increment * AUDIO_BLOCK_SAMPLES;
}

@ -21,31 +21,44 @@
class AudioSynthWaveformSine_F32 : public AudioStream_F32
{
//GUI: inputs:0, outputs:1 //this line used for automatic generation of GUI node
//GUI: shortName:sine //this line used for automatic generation of GUI node
public:
AudioSynthWaveformSine_F32() : AudioStream_F32(0, NULL), magnitude(16384) {}
AudioSynthWaveformSine_F32() : AudioStream_F32(0, NULL), magnitude(16384) { } //uses default AUDIO_SAMPLE_RATE from AudioStream.h
AudioSynthWaveformSine_F32(const AudioSettings_F32 &settings) : AudioStream_F32(0, NULL), magnitude(16384) {
setSampleRate_Hz(settings.sample_rate_Hz);
}
void frequency(float freq) {
if (freq < 0.0) freq = 0.0;
else if (freq > AUDIO_SAMPLE_RATE_EXACT/2) freq = AUDIO_SAMPLE_RATE_EXACT/2;
phase_increment = freq * (4294967296.0 / AUDIO_SAMPLE_RATE_EXACT);
else if (freq > sample_rate_Hz/2.f) freq = sample_rate_Hz/2.f;
phase_increment = freq * (4294967296.0 / sample_rate_Hz);
}
void phase(float angle) {
if (angle < 0.0) angle = 0.0;
else if (angle > 360.0) {
angle = angle - 360.0;
if (angle >= 360.0) return;
if (angle < 0.0f) angle = 0.0f;
else if (angle > 360.0f) {
angle = angle - 360.0f;
if (angle >= 360.0f) return;
}
phase_accumulator = angle * (4294967296.0 / 360.0);
phase_accumulator = angle * (4294967296.0f / 360.0f);
}
void amplitude(float n) {
if (n < 0) n = 0;
else if (n > 1.0) n = 1.0;
magnitude = n * 65536.0;
else if (n > 1.0f) n = 1.0f;
magnitude = n * 65536.0f;
}
void setSampleRate_Hz(const float &fs_Hz) {
phase_increment *= sample_rate_Hz / fs_Hz; //rescale the phase increment so the output frequency is unchanged at the new sample rate
sample_rate_Hz = fs_Hz;
}
void begin(void) { enabled = true; }
void end(void) { enabled = false; }
virtual void update(void);
private:
uint32_t phase_accumulator;
uint32_t phase_increment;
int32_t magnitude;
uint32_t phase_accumulator = 0;
uint32_t phase_increment = 0;
int32_t magnitude = 0;
float sample_rate_Hz = AUDIO_SAMPLE_RATE;
volatile uint8_t enabled = 1;
};
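// --- Hedged example (not part of this commit): the 32-bit phase-accumulator (DDS) arithmetic
// behind frequency() and update(). The accumulator wraps every 2^32 counts; the top 8 bits select
// a sine-table entry and the next 16 bits (the "scale" value) interpolate toward the next entry.
#include <cstdio>
#include <cstdint>

int main() {
	const double fs = 44100.0, freq = 1000.0;                            // example rates (assumed)
	uint32_t phase_increment = (uint32_t)(freq * (4294967296.0 / fs));   // freq * 2^32 / fs
	std::printf("phase_increment = %u\n", (unsigned)phase_increment);
	std::printf("samples per cycle = %.3f\n", 4294967296.0 / phase_increment);  // ~ fs/freq = 44.1
	return 0;
}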

@ -25,9 +25,29 @@ class AudioSynthWaveform_F32 : public AudioStream_F32
OSCILLATOR_MODE_TRIANGLE
};
AudioSynthWaveform_F32(void) : AudioStream_F32(1, inputQueueArray_f32),
AudioSynthWaveform_F32(const AudioSettings_F32 &settings) : AudioStream_F32(1, inputQueueArray_f32),
_PI(2*acos(0.0f)),
twoPI(2 * _PI),
sample_rate_Hz(AUDIO_SAMPLE_RATE_EXACT),
_OscillatorMode(OSCILLATOR_MODE_SINE),
_Frequency(440.0f),
_Phase(0.0f),
_PhaseIncrement(0.0f),
_PitchModAmt(0.0f),
_PortamentoIncrement(0.0f),
_PortamentoSamples(0),
_CurrentPortamentoSample(0),
_NotesPlaying(0)
{
setSampleRate(settings.sample_rate_Hz);
}
AudioSynthWaveform_F32(void) : AudioStream_F32(1, inputQueueArray_f32), //uses default AUDIO_SAMPLE_RATE from AudioStream.h
_PI(2*acos(0.0f)),
twoPI(2 * _PI),
sample_rate_Hz(AUDIO_SAMPLE_RATE_EXACT),
_OscillatorMode(OSCILLATOR_MODE_SINE),
_Frequency(440.0f),
_Phase(0.0f),
@ -39,7 +59,7 @@ class AudioSynthWaveform_F32 : public AudioStream_F32
_NotesPlaying(0) {};
void frequency(float32_t freq) {
float32_t nyquist = AUDIO_SAMPLE_RATE_EXACT/2;
float32_t nyquist = sample_rate_Hz/2.f;
if (freq < 0.0) freq = 0.0;
else if (freq > nyquist) freq = nyquist;
@ -51,7 +71,7 @@ class AudioSynthWaveform_F32 : public AudioStream_F32
_Frequency = freq;
}
_PhaseIncrement = _Frequency * twoPI / AUDIO_SAMPLE_RATE_EXACT;
_PhaseIncrement = _Frequency * twoPI / sample_rate_Hz;
}
void amplitude(float32_t n) {
@ -80,7 +100,7 @@ class AudioSynthWaveform_F32 : public AudioStream_F32
void portamentoTime(float32_t slidetime) {
_PortamentoTime = slidetime;
_PortamentoSamples = floorf(slidetime * AUDIO_SAMPLE_RATE_ROUNDED);
_PortamentoSamples = floorf(slidetime * sample_rate_Hz);
}
@ -95,10 +115,17 @@ class AudioSynthWaveform_F32 : public AudioStream_F32
}
void update(void);
void setSampleRate(const float32_t fs_Hz)
{
_PhaseIncrement = _PhaseIncrement*sample_rate_Hz / fs_Hz;
_PortamentoSamples = floorf( ((float)_PortamentoSamples) * fs_Hz / sample_rate_Hz );
sample_rate_Hz = fs_Hz;
}
private:
inline float32_t applyMod(uint32_t sample, audio_block_f32_t *lfo);
const float32_t _PI;
float32_t twoPI;
float32_t sample_rate_Hz;
OscillatorMode _OscillatorMode;
float32_t _Frequency;

@ -0,0 +1,125 @@
/*
Extended to F32
Created: Chip Audette, OpenAudio, Feb 2017
License: MIT License. Use at your own risk.
*/
/* Audio Library for Teensy 3.X
* Copyright (c) 2014, Paul Stoffregen, paul@pjrc.com
*
* Development of this audio library was funded by PJRC.COM, LLC by sales of
* Teensy and Audio Adaptor boards. Please support PJRC's efforts to develop
* open source software by purchasing Teensy or other PJRC products.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice, development funding notice, and this permission
* notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "synth_whitenoise_f32.h"
#include "input_i2s_f32.h" //for the audio_convert_i16_to_f32 routine
// Park-Miller-Carta Pseudo-Random Number Generator
// http://www.firstpr.com.au/dsp/rand31/
void AudioSynthNoiseWhite_F32::update(void)
{
audio_block_t *block;
audio_block_f32_t *block_f32;
uint32_t *p, *end;
int32_t n1, n2, gain;
uint32_t lo, hi, val1, val2;
//Serial.println("synth_whitenoise: update()");
gain = level;
if (gain == 0) {
//Serial.println(": Gain = 0, returning.");
return;
}
block = AudioStream::allocate();
block_f32 = AudioStream_F32::allocate_f32();
	if (!block || !block_f32) { //logical OR; also release whichever block did get allocated
		//Serial.println(": NULL block. returning.");
		if (block) AudioStream::release(block);
		if (block_f32) AudioStream_F32::release(block_f32);
		return;
	}
p = (uint32_t *)(block->data);
//end = p + AUDIO_BLOCK_SAMPLES/2;
end = p + (block_f32->length)/2;
lo = seed;
do {
#if defined(KINETISK)
hi = multiply_16bx16t(16807, lo); // 16807 * (lo >> 16)
lo = 16807 * (lo & 0xFFFF);
lo += (hi & 0x7FFF) << 16;
lo += hi >> 15;
lo = (lo & 0x7FFFFFFF) + (lo >> 31);
n1 = signed_multiply_32x16b(gain, lo);
hi = multiply_16bx16t(16807, lo); // 16807 * (lo >> 16)
lo = 16807 * (lo & 0xFFFF);
lo += (hi & 0x7FFF) << 16;
lo += hi >> 15;
lo = (lo & 0x7FFFFFFF) + (lo >> 31);
n2 = signed_multiply_32x16b(gain, lo);
val1 = pack_16b_16b(n2, n1);
hi = multiply_16bx16t(16807, lo); // 16807 * (lo >> 16)
lo = 16807 * (lo & 0xFFFF);
lo += (hi & 0x7FFF) << 16;
lo += hi >> 15;
lo = (lo & 0x7FFFFFFF) + (lo >> 31);
n1 = signed_multiply_32x16b(gain, lo);
hi = multiply_16bx16t(16807, lo); // 16807 * (lo >> 16)
lo = 16807 * (lo & 0xFFFF);
lo += (hi & 0x7FFF) << 16;
lo += hi >> 15;
lo = (lo & 0x7FFFFFFF) + (lo >> 31);
n2 = signed_multiply_32x16b(gain, lo);
val2 = pack_16b_16b(n2, n1);
*p++ = val1;
*p++ = val2;
#elif defined(KINETISL)
hi = 16807 * (lo >> 16);
lo = 16807 * (lo & 0xFFFF);
lo += (hi & 0x7FFF) << 16;
lo += hi >> 15;
lo = (lo & 0x7FFFFFFF) + (lo >> 31);
n1 = signed_multiply_32x16b(gain, lo);
hi = 16807 * (lo >> 16);
lo = 16807 * (lo & 0xFFFF);
lo += (hi & 0x7FFF) << 16;
lo += hi >> 15;
lo = (lo & 0x7FFFFFFF) + (lo >> 31);
n2 = signed_multiply_32x16b(gain, lo);
val1 = pack_16b_16b(n2, n1);
*p++ = val1;
#endif
} while (p < end);
seed = lo;
//convert int16 to f32
AudioInputI2S_F32::convert_i16_to_f32(block->data,block_f32->data,block_f32->length);
AudioStream_F32::transmit(block_f32);
AudioStream_F32::release(block_f32);
AudioStream::release(block);
//Serial.println(" Done.");
}
uint16_t AudioSynthNoiseWhite_F32::instance_count = 0;
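// --- Hedged example (not part of this commit): a standalone version of the Park-Miller-Carta
// step that the KINETISK branch above implements with 16x16-bit partial products, i.e.
// seed_next = (16807 * seed) mod (2^31 - 1), with the seed starting at 1.
#include <cstdio>
#include <cstdint>

static uint32_t park_miller_next(uint32_t lo) {
	uint32_t hi = 16807 * (lo >> 16);          // high 16-bit partial product
	lo = 16807 * (lo & 0xFFFF);                // low 16-bit partial product
	lo += (hi & 0x7FFF) << 16;                 // recombine the partial products...
	lo += hi >> 15;
	lo = (lo & 0x7FFFFFFF) + (lo >> 31);       // ...and reduce modulo 2^31 - 1
	return lo;
}

int main() {
	uint32_t seed = 1;                         // "must start at 1", per the header comment
	for (int i = 0; i < 3; i++) {
		seed = park_miller_next(seed);
		std::printf("%u\n", (unsigned)seed);   // first output is 16807
	}
	return 0;
}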

@ -1,17 +1,42 @@
/*
* AudioSynthNoiseWhite_F32
*
* Created: Chip Audette (OpenAudio), Feb 2017
* Extended from on Teensy Audio Library
*
* License: MIT License. Use at your own risk.
/*
synth_whitenoise_F32
Extended by: Chip Audette, OpenAudio, Feb 2017
License: MIT License. Use at your own risk.
*/
/* Audio Library for Teensy 3.X
* Copyright (c) 2014, Paul Stoffregen, paul@pjrc.com
*
* Development of this audio library was funded by PJRC.COM, LLC by sales of
* Teensy and Audio Adaptor boards. Please support PJRC's efforts to develop
* open source software by purchasing Teensy or other PJRC products.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice, development funding notice, and this permission
* notice shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef synth_whitenoise_f32_h_
#define synth_whitenoise_f32_h_
#include "Arduino.h"
#include "AudioStream.h"
#include "AudioStream_F32.h"
#include <Audio.h>
#include "utility/dspinst.h"
class AudioSynthNoiseWhite_F32 : public AudioStream_F32
@ -20,41 +45,19 @@ class AudioSynthNoiseWhite_F32 : public AudioStream_F32
//GUI: shortName:whitenoise //this line used for automatic generation of GUI node
public:
AudioSynthNoiseWhite_F32() : AudioStream_F32(0, NULL) {
output_queue.begin();
patchCord100 = new AudioConnection(noise, 0, i16_to_f32, 0); //noise is an Int16 audio object. So, convert it!
patchCord101 = new AudioConnection_F32(i16_to_f32, 0, output_queue, 0);
level = 0;
seed = 1 + instance_count++;
}
//define audio processing stack right here.
AudioSynthNoiseWhite noise;
AudioConvert_I16toF32 i16_to_f32;
AudioRecordQueue_F32 output_queue;
AudioConnection *patchCord100;
AudioConnection_F32 *patchCord101;
void update(void) {
output_queue.clear();
//manually update audio blocks in the desired order
noise.update(); //the output should be routed directly via the AudioConnection
i16_to_f32.update(); // output is routed via the AudioConnection
output_queue.update();
//get the output
audio_block_f32_t *block = output_queue.getAudioBlock();
if (block == NULL) return;
//transmit the block, and release memory
AudioStream_F32::transmit(block);
output_queue.freeAudioBlock();
}
void amplitude(float n) {
noise.amplitude(n);
}
private:
void amplitude(float n) {
if (n < 0.0) n = 0.0;
else if (n > 1.0) n = 1.0;
level = (int32_t)(n * 65536.0);
}
virtual void update(void);
private:
int32_t level; // 0=off, 65536=max
uint32_t seed; // must start at 1
static uint16_t instance_count;
};
#endif
#endif

@ -0,0 +1,384 @@
#include <math.h>
//#include "chapro.h"
//#include "cha_ff.h"
/***********************************************************/
// FFT functions adapted from G. D. Bergland, "Subroutines FAST and FSST," (1979).
// In IEEE Acoustics, Speech, and Signal Processing Society.
// "Programs for Digital Signal Processing," IEEE Press, New York,
static __inline int
ilog2(int n)
{
int m;
for (m = 1; m < 32; m++)
if (n == (1 << m))
return (m);
return (-1);
}
static __inline int
bitrev(int ii, int m)
{
register int jj;
jj = ii & 1;
--m;
while (--m > 0) {
ii >>= 1;
jj <<= 1;
jj |= ii & 1;
}
return (jj);
}
static __inline void
rad2(int ii, float *x0, float *x1)
{
int k;
float t;
for (k = 0; k < ii; k++) {
t = x0[k] + x1[k];
x1[k] = x0[k] - x1[k];
x0[k] = t;
}
}
static __inline void
reorder1(int m, float *x)
{
int j, k, kl, n;
float t;
k = 4;
kl = 2;
n = 1 << m;
for (j = 4; j <= n; j += 2) {
if (k > j) {
t = x[j - 1];
x[j - 1] = x[k - 1];
x[k - 1] = t;
}
k -= 2;
if (k <= kl) {
k = 2 * j;
kl = j;
}
}
}
static __inline void
reorder2(int m, float *x)
{
int ji, ij, n;
float t;
n = 1 << m;
for (ij = 0; ij <= (n - 2); ij += 2) {
ji = bitrev(ij >> 1, m) << 1;
if (ij < ji) {
t = x[ij];
x[ij] = x[ji];
x[ji] = t;
t = x[ij + 1];
x[ij + 1] = x[ji + 1];
x[ji + 1] = t;
}
}
}
/***********************************************************/
// rcfft
static void
rcrad4(int ii, int nn,
float *x0, float *x1, float *x2, float *x3,
float *x4, float *x5, float *x6, float *x7)
{
double arg, tpiovn;
float c1, c2, c3, s1, s2, s3, pr, pi, r1, r5;
float t0, t1, t2, t3, t4, t5, t6, t7;
int i0, i4, j, j0, ji, jl, jr, jlast, k, k0, kl, m, n, ni;
n = nn / 4;
for (m = 1; (1 << m) < n; m++)
continue;
tpiovn = 2 * M_PI / nn;
ji = 3;
jl = 2;
jr = 2;
ni = (n + 1) / 2;
for (i0 = 0; i0 < ni; i0++) {
if (i0 == 0) {
for (k = 0; k < ii; k++) {
t0 = x0[k] + x2[k];
t1 = x1[k] + x3[k];
x2[k] = x0[k] - x2[k];
x3[k] = x1[k] - x3[k];
x0[k] = t0 + t1;
x1[k] = t0 - t1;
}
if (nn > 4) {
k0 = ii * 4;
kl = k0 + ii;
for (k = k0; k < kl; k++) {
pr = (float) (M_SQRT1_2 * (x1[k] - x3[k]));
pi = (float) (M_SQRT1_2 * (x1[k] + x3[k]));
x3[k] = x2[k] + pi;
x1[k] = pi - x2[k];
x2[k] = x0[k] - pr;
x0[k] += pr;
}
}
} else {
arg = tpiovn * bitrev(i0, m);
c1 = cosf(arg);
s1 = sinf(arg);
c2 = c1 * c1 - s1 * s1;
s2 = c1 * s1 + c1 * s1;
c3 = c1 * c2 - s1 * s2;
s3 = c2 * s1 + s2 * c1;
i4 = ii * 4;
j0 = jr * i4;
k0 = ji * i4;
jlast = j0 + ii;
for (j = j0; j < jlast; j++) {
k = k0 + j - j0;
r1 = x1[j] * c1 - x5[k] * s1;
r5 = x1[j] * s1 + x5[k] * c1;
t2 = x2[j] * c2 - x6[k] * s2;
t6 = x2[j] * s2 + x6[k] * c2;
t3 = x3[j] * c3 - x7[k] * s3;
t7 = x3[j] * s3 + x7[k] * c3;
t0 = x0[j] + t2;
t4 = x4[k] + t6;
t2 = x0[j] - t2;
t6 = x4[k] - t6;
t1 = r1 + t3;
t5 = r5 + t7;
t3 = r1 - t3;
t7 = r5 - t7;
x0[j] = t0 + t1;
x7[k] = t4 + t5;
x6[k] = t0 - t1;
x1[j] = t5 - t4;
x2[j] = t2 - t7;
x5[k] = t6 + t3;
x4[k] = t2 + t7;
x3[j] = t3 - t6;
}
jr += 2;
ji -= 2;
if (ji <= jl) {
ji = 2 * jr - 1;
jl = jr;
}
}
}
}
//-----------------------------------------------------------
static int
rcfft2(float *x, int m)
{
int ii, nn, m2, it, n;
n = 1 << m;
m2 = m / 2;
// radix 2
if (m <= m2 * 2) {
nn = 1;
} else {
nn = 2;
ii = n / nn;
rad2(ii, x, x + ii);
}
// radix 4
if (m2 != 0) {
for (it = 0; it < m2; it++) {
nn = nn * 4;
ii = n / nn;
rcrad4(ii, nn, x, x + ii, x + 2 * ii, x + 3 * ii,
x, x + ii, x + 2 * ii, x + 3 * ii);
}
}
// re-order
reorder1(m, x);
reorder2(m, x);
for (it = 3; it < n; it += 2)
x[it] = -x[it];
x[n] = x[1];
x[1] = 0.0;
x[n + 1] = 0.0;
return (0);
}
/***********************************************************/
// rcfft
static void
crrad4(int jj, int nn,
float *x0, float *x1, float *x2, float *x3,
float *x4, float *x5, float *x6, float *x7)
{
double arg, tpiovn;
float c1, c2, c3, s1, s2, s3;
float t0, t1, t2, t3, t4, t5, t6, t7;
int ii, j, j0, ji, jr, jl, jlast, j4, k, k0, kl, m, n, ni;
tpiovn = 2 * M_PI / nn;
ji = 3;
jl = 2;
jr = 2;
n = nn / 4;
for (m = 1; (1 << m) < n; m++)
continue;
ni = (n + 1) / 2;
for (ii = 0; ii < ni; ii++) {
if (ii == 0) {
for (k = 0; k < jj; k++) {
t0 = x0[k] + x1[k];
t1 = x0[k] - x1[k];
t2 = x2[k] * 2;
t3 = x3[k] * 2;
x0[k] = t0 + t2;
x2[k] = t0 - t2;
x1[k] = t1 + t3;
x3[k] = t1 - t3;
}
if (nn > 4) {
k0 = jj * 4;
kl = k0 + jj;
for (k = k0; k < kl; k++) {
t2 = x0[k] - x2[k];
t3 = x1[k] + x3[k];
x0[k] = (x0[k] + x2[k]) * 2;
x2[k] = (x3[k] - x1[k]) * 2;
x1[k] = (float) ((t2 + t3) * M_SQRT2);
x3[k] = (float) ((t3 - t2) * M_SQRT2);
}
}
} else {
arg = tpiovn * bitrev(ii, m);
c1 = cosf(arg);
s1 = -sinf(arg);
c2 = c1 * c1 - s1 * s1;
s2 = c1 * s1 + c1 * s1;
c3 = c1 * c2 - s1 * s2;
s3 = c2 * s1 + s2 * c1;
j4 = jj * 4;
j0 = jr * j4;
k0 = ji * j4;
jlast = j0 + jj;
for (j = j0; j < jlast; j++) {
k = k0 + j - j0;
t0 = x0[j] + x6[k];
t1 = x7[k] - x1[j];
t2 = x0[j] - x6[k];
t3 = x7[k] + x1[j];
t4 = x2[j] + x4[k];
t5 = x5[k] - x3[j];
t6 = x5[k] + x3[j];
t7 = x4[k] - x2[j];
x0[j] = t0 + t4;
x4[k] = t1 + t5;
x1[j] = (t2 + t6) * c1 - (t3 + t7) * s1;
x5[k] = (t2 + t6) * s1 + (t3 + t7) * c1;
x2[j] = (t0 - t4) * c2 - (t1 - t5) * s2;
x6[k] = (t0 - t4) * s2 + (t1 - t5) * c2;
x3[j] = (t2 - t6) * c3 - (t3 - t7) * s3;
x7[k] = (t2 - t6) * s3 + (t3 - t7) * c3;
}
jr += 2;
ji -= 2;
if (ji <= jl) {
ji = 2 * jr - 1;
jl = jr;
}
}
}
}
//-----------------------------------------------------------
static int
crfft2(float *x, int m)
{
int n, i, it, nn, jj, m2;
n = 1 << m;
x[1] = x[n];
m2 = m / 2;
// re-order
for (i = 3; i < n; i += 2)
x[i] = -x[i];
reorder2(m, x);
reorder1(m, x);
// radix 4
if (m2 != 0) {
nn = 4 * n;
for (it = 0; it < m2; it++) {
nn = nn / 4;
jj = n / nn;
crrad4(jj, nn, x, x + jj, x + 2 * jj, x + 3 * jj,
x, x + jj, x + 2 * jj, x + 3 * jj);
}
}
// radix 2
if (m > m2 * 2) {
jj = n / 2;
rad2(jj, x, x + jj);
}
return (0);
}
/***********************************************************/
// real-to-complex FFT
//FUNC(void)
void cha_fft_rc(float *x, int n)
{
int m;
// assume n is a power of two
m = ilog2(n);
rcfft2(x, m);
}
// complex-to-real inverse FFT
//FUNC(void)
void cha_fft_cr(float *x, int n)
{
int i, m;
// assume n is a power of two
m = ilog2(n);
crfft2(x, m);
// scale inverse by 1/n
for (i = 0; i < n; i++) {
x[i] /= n;
}
}
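// --- Hedged usage sketch (not part of CHAPRO or this commit): a round trip through
// cha_fft_rc()/cha_fft_cr() on a short constant signal. It assumes rfft.c is compiled as C and
// linked in (hence the extern "C" prototypes, normally supplied by chapro.h), and that the buffer
// holds n+2 floats so the packed DC and Nyquist terms fit.
#include <cstdio>

extern "C" void cha_fft_rc(float *x, int n);  // real-to-complex FFT (prototype assumed)
extern "C" void cha_fft_cr(float *x, int n);  // complex-to-real inverse FFT, includes 1/n scaling

int main() {
	const int N = 8;
	float x[N + 2];                            // N real samples + 2 extra floats for the packed spectrum
	for (int i = 0; i < N; i++) x[i] = 1.0f;   // constant (DC-only) test signal
	x[N] = x[N + 1] = 0.0f;

	cha_fft_rc(x, N);                          // forward: DC bin x[0] should now be N * 1.0 = 8
	std::printf("DC bin = %g\n", x[0]);

	cha_fft_cr(x, N);                          // inverse with 1/N scaling
	std::printf("x[0] after round trip = %g\n", x[0]);   // expect ~1.0
	return 0;
}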

@ -0,0 +1,19 @@
bool isNumberRelatedChar(char c) {
return (isDigit(c) || (c == '.') || (c == '+') || (c == '-'));
}
int parseNextNumberFromString(String text_buffer, int start_ind, float &value) {
	int len = text_buffer.length();
	//find start of number (stop at the end of the string if no number-related character is left)
	while ((start_ind < len) && !isNumberRelatedChar(text_buffer[start_ind])) start_ind++;
	if (start_ind >= len) return start_ind;
	//find end of number
	int end_ind = start_ind;
	while ((end_ind < len) && isNumberRelatedChar(text_buffer[end_ind])) end_ind++;
	//extract number
	value = text_buffer.substring(start_ind, end_ind).toFloat();
	return end_ind;
}
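// --- Hedged usage example (Arduino-style, not part of this commit): pulls two numbers out of a
// hypothetical command string using parseNextNumberFromString() defined above.
#include <Arduino.h>

void setup() {
	Serial.begin(115200);
	String cmd = "gain 0.5 freq 440";
	float gain = 0.0f, freq = 0.0f;
	int ind = parseNextNumberFromString(cmd, 0, gain);   // returns the index just past "0.5"
	parseNextNumberFromString(cmd, ind, freq);
	Serial.print("gain = "); Serial.println(gain);       // prints 0.50
	Serial.print("freq = "); Serial.println(freq);       // prints 440.00
}

void loop() {}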