Refactor um_data variables for audio reactive

- change sample to sampleRaw
- add volumeSmth, volumeRaw, my_magnitude and calculate in agcAvg
- remove sampleAvg, soundAgc, sampleAgc, sampleRaw, rawSampleAgc, FFT_Magnitude, multAgc, sampleReal, sampleGain, (myVals), soundSquelch from um_data interface
- refactor all effects using above variables
This commit is contained in:
ewowi 2022-07-29 15:24:04 +02:00
parent bc67bf6826
commit c1f9445e9d
4 changed files with 154 additions and 279 deletions

View File

@@ -404,7 +404,7 @@ class AudioReactive : public Usermod {
struct audioSyncPacket { struct audioSyncPacket {
char header[6]; char header[6];
int sampleAgc; // 04 Bytes int sampleAgc; // 04 Bytes
int sample; // 04 Bytes int sampleRaw; // 04 Bytes
float sampleAvg; // 04 Bytes float sampleAvg; // 04 Bytes
bool samplePeak; // 01 Bytes bool samplePeak; // 01 Bytes
uint8_t fftResult[16]; // 16 Bytes uint8_t fftResult[16]; // 16 Bytes
@@ -423,8 +423,8 @@ class AudioReactive : public Usermod {
uint8_t maxVol = 10; // Reasonable value for constant volume for 'peak detector', as it won't always trigger uint8_t maxVol = 10; // Reasonable value for constant volume for 'peak detector', as it won't always trigger
uint8_t binNum = 8; // Used to select the bin for FFT based beat detection. uint8_t binNum = 8; // Used to select the bin for FFT based beat detection.
bool samplePeak = 0; // Boolean flag for peak. Responding routine must reset this flag bool samplePeak = 0; // Boolean flag for peak. Responding routine must reset this flag
int16_t sample; // either sampleRaw or rawSampleAgc depending on soundAgc float volumeSmth; // either sampleAvg or sampleAgc depending on soundAgc; smoothed sample
float sampleSmth; // either sampleAvg or sampleAgc depending on soundAgc; smoothed sample int16_t volumeRaw; // either sampleRaw or rawSampleAgc depending on soundAgc
#ifdef MIC_SAMPLING_LOG #ifdef MIC_SAMPLING_LOG
uint8_t targetAgc = 60; // This is our setPoint at 20% of max for the adjusted output (used only in logAudio()) uint8_t targetAgc = 60; // This is our setPoint at 20% of max for the adjusted output (used only in logAudio())
@@ -461,6 +461,7 @@ class AudioReactive : public Usermod {
static const char _digitalmic[]; static const char _digitalmic[];
static const char UDP_SYNC_HEADER[]; static const char UDP_SYNC_HEADER[];
float my_magnitude;
// private methods // private methods
void logAudio() void logAudio()
@@ -641,6 +642,14 @@ class AudioReactive : public Usermod {
//if (userVar0 > 255) userVar0 = 255; //if (userVar0 > 255) userVar0 = 255;
last_soundAgc = soundAgc; last_soundAgc = soundAgc;
volumeSmth = (soundAgc) ? sampleAgc:sampleAvg;
volumeRaw = (soundAgc) ? rawSampleAgc : sampleRaw;
my_magnitude = FFT_Magnitude; // / 16.0f, 8.0f, 4.0f done in effects
if (soundAgc) my_magnitude *= multAgc;
if (volumeSmth < 1 ) my_magnitude = 0.001f; // noise gate closed - mute
} // agcAvg() } // agcAvg()
@@ -741,7 +750,7 @@ class AudioReactive : public Usermod {
strncpy_P(transmitData.header, PSTR(UDP_SYNC_HEADER), 6); strncpy_P(transmitData.header, PSTR(UDP_SYNC_HEADER), 6);
transmitData.sampleAgc = sampleAgc; transmitData.sampleAgc = sampleAgc;
transmitData.sample = sampleRaw; transmitData.sampleRaw = sampleRaw;
transmitData.sampleAvg = sampleAvg; transmitData.sampleAvg = sampleAvg;
transmitData.samplePeak = udpSamplePeak; transmitData.samplePeak = udpSamplePeak;
udpSamplePeak = 0; // Reset udpSamplePeak after we've transmitted it udpSamplePeak = 0; // Reset udpSamplePeak after we've transmitted it
@@ -782,7 +791,7 @@ class AudioReactive : public Usermod {
sampleAgc = receivedPacket->sampleAgc; sampleAgc = receivedPacket->sampleAgc;
rawSampleAgc = receivedPacket->sampleAgc; rawSampleAgc = receivedPacket->sampleAgc;
sampleRaw = receivedPacket->sample; sampleRaw = receivedPacket->sampleRaw;
sampleAvg = receivedPacket->sampleAvg; sampleAvg = receivedPacket->sampleAvg;
// Only change samplePeak IF it's currently false. // Only change samplePeak IF it's currently false.
@@ -815,45 +824,29 @@
// usermod exchangeable data // usermod exchangeable data
// we will assign all usermod exportable data here as pointers to original variables or arrays and allocate memory for pointers // we will assign all usermod exportable data here as pointers to original variables or arrays and allocate memory for pointers
um_data = new um_data_t; um_data = new um_data_t;
um_data->u_size = 18; um_data->u_size = 10;
um_data->u_type = new um_types_t[um_data->u_size]; um_data->u_type = new um_types_t[um_data->u_size];
um_data->u_data = new void*[um_data->u_size]; um_data->u_data = new void*[um_data->u_size];
um_data->u_data[ 0] = &sampleAvg; //*used (2D Swirl, 2D Waverly, Gravcenter, Gravcentric, Gravimeter, Midnoise, Noisefire, Noisemeter, Plasmoid, Binmap, Freqmap, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall) um_data->u_data[0] = &volumeSmth; //*used (New)
um_data->u_type[ 0] = UMT_FLOAT; um_data->u_type[0] = UMT_FLOAT;
um_data->u_data[ 1] = &soundAgc; //*used (2D Swirl, 2D Waverly, Gravcenter, Gravcentric, Gravimeter, Matripix, Midnoise, Noisefire, Noisemeter, Pixelwave, Plasmoid, Puddles, Binmap, Freqmap, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall) um_data->u_data[1] = &volumeRaw; // used (New)
um_data->u_type[ 1] = UMT_BYTE; um_data->u_type[1] = UMT_UINT16;
um_data->u_data[ 2] = &sampleAgc; //*used (can be calculated as: sampleReal * multAgc) (..., Juggles, ..., Pixels, Puddlepeak, Freqmatrix) um_data->u_data[2] = fftResult; //*used (Blurz, DJ Light, Noisemove, GEQ_base, 2D Funky Plank, Akemi)
um_data->u_type[ 2] = UMT_FLOAT; um_data->u_type[2] = UMT_BYTE_ARR;
um_data->u_data[ 3] = &sampleRaw; //*used (Matripix, Noisemeter, Pixelwave, Puddles, 2D Swirl, for debugging Gravimeter) um_data->u_data[3] = &samplePeak; //*used (Puddlepeak, Ripplepeak, Waterfall)
um_data->u_type[ 3] = UMT_INT16; um_data->u_type[3] = UMT_BYTE;
um_data->u_data[ 4] = &rawSampleAgc; //*used (Matripix, Noisemeter, Pixelwave, Puddles, 2D Swirl) um_data->u_data[4] = &FFT_MajorPeak; //*used (Ripplepeak, Freqmap, Freqmatrix, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall)
um_data->u_type[ 4] = UMT_INT16; um_data->u_type[4] = UMT_FLOAT;
um_data->u_data[ 5] = &samplePeak; //*used (Puddlepeak, Ripplepeak, Waterfall) um_data->u_data[5] = &my_magnitude; // used (New)
um_data->u_type[ 5] = UMT_BYTE; um_data->u_type[5] = UMT_FLOAT;
um_data->u_data[ 6] = &FFT_MajorPeak; //*used (Ripplepeak, Freqmap, Freqmatrix, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall) um_data->u_data[6] = &maxVol; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
um_data->u_type[ 6] = UMT_FLOAT; um_data->u_type[6] = UMT_BYTE;
um_data->u_data[ 7] = &FFT_Magnitude; //*used (Binmap, Freqmap, Freqpixels, Rocktaves, Waterfall) um_data->u_data[7] = &binNum; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
um_data->u_type[ 7] = UMT_FLOAT; um_data->u_type[7] = UMT_BYTE;
um_data->u_data[ 8] = fftResult; //*used (Blurz, DJ Light, Noisemove, GEQ_base, 2D Funky Plank, Akemi) um_data->u_data[8] = fftBin; //*used (for debugging) (only once, Binmap)
um_data->u_type[ 8] = UMT_BYTE_ARR; um_data->u_type[8] = UMT_FLOAT_ARR;
um_data->u_data[ 9] = &maxVol; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall) um_data->u_data[9] = &inputLevel; // global UI element!!! (Gravimeter, Binmap)
um_data->u_type[ 9] = UMT_BYTE; um_data->u_type[9] = UMT_BYTE;
um_data->u_data[10] = &binNum; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
um_data->u_type[10] = UMT_BYTE;
um_data->u_data[11] = &multAgc; //*used (for debugging) (Gravimeter, Binmap, Freqmap, Freqpixels, Rocktaves, Waterfall,)
um_data->u_type[11] = UMT_FLOAT;
um_data->u_data[12] = &sampleReal; //*used (for debugging) (Gravimeter)
um_data->u_type[12] = UMT_FLOAT;
um_data->u_data[13] = &sampleGain; //*used (for debugging) (Gravimeter, Binmap)
um_data->u_type[13] = UMT_FLOAT;
um_data->u_data[14] = 0; //*free (used for myVals / Pixels before)
um_data->u_type[14] = UMT_BYTE;
um_data->u_data[15] = &soundSquelch; //*used (for debugging) (only once, Binmap)
um_data->u_type[15] = UMT_BYTE;
um_data->u_data[16] = fftBin; //*used (for debugging) (only once, Binmap)
um_data->u_type[16] = UMT_FLOAT_ARR;
um_data->u_data[17] = &inputLevel; // global UI element!!! (Gravimeter, Binmap)
um_data->u_type[17] = UMT_BYTE;
} }
// Reset I2S peripheral for good measure // Reset I2S peripheral for good measure

View File

@@ -5966,32 +5966,21 @@ static const char *_data_FX_MODE_2DDRIFTROSE PROGMEM = "2D Drift Rose@Fade,Blur;
/* use the following code to pass AudioReactive usermod variables to effect /* use the following code to pass AudioReactive usermod variables to effect
uint8_t *binNum = (uint8_t*)&SEGENV.aux1, *maxVol = (uint8_t*)(&SEGENV.aux1+1); // just in case assignment uint8_t *binNum = (uint8_t*)&SEGENV.aux1, *maxVol = (uint8_t*)(&SEGENV.aux1+1); // just in case assignment
uint16_t sample = 0;
uint8_t soundAgc = 0, soundSquelch = 10;
bool samplePeak = false; bool samplePeak = false;
float sampleAgc = 0.0f, sampleAgv = 0.0f, multAgc = 0.0f, sampleReal = 0.0f; float FFT_MajorPeak = 0.0;
float FFT_MajorPeak = 0.0, FFT_Magnitude = 0.0;
uint8_t *fftResult = nullptr; uint8_t *fftResult = nullptr;
float *fftBin = nullptr; float *fftBin = nullptr;
um_data_t *um_data; um_data_t *um_data;
if (usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) { if (usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
sampleAvg = *(float*) um_data->u_data[ 0]; samplePeak = *(uint8_t*) um_data->u_data[3];
soundAgc = *(uint8_t*) um_data->u_data[ 1]; FFT_MajorPeak = *(float*) um_data->u_data[4];
sampleAgc = *(float*) um_data->u_data[ 2]; fftResult = (uint8_t*) um_data->u_data[2];
sample = *(uint16_t*)um_data->u_data[ 3]; maxVol = (uint8_t*) um_data->u_data[6]; // requires UI element (SEGMENT.customX?), changes source element
rawSampleAgc = *(uint16_t*)um_data->u_data[ 4]; binNum = (uint8_t*) um_data->u_data[7]; // requires UI element (SEGMENT.customX?), changes source element
samplePeak = *(uint8_t*) um_data->u_data[ 5]; volumeSmth = *(float*) um_data->u_data[0];
FFT_MajorPeak = *(float*) um_data->u_data[ 6]; fftBin = (float*) um_data->u_data[8];
FFT_Magnitude = *(float*) um_data->u_data[ 7]; inputLevel = (uint8_t*) um_data->u_data[9]; // requires UI element (SEGMENT.customX?), changes source element
fftResult = (uint8_t*) um_data->u_data[ 8]; volumeRaw = *(float*) um_data->u_data[1];
maxVol = (uint8_t*) um_data->u_data[ 9]; // requires UI element (SEGMENT.customX?), changes source element
binNum = (uint8_t*) um_data->u_data[10]; // requires UI element (SEGMENT.customX?), changes source element
multAgc = *(float*) um_data->u_data[11];
sampleReal = *(float*) um_data->u_data[12];
sampleGain = *(float*) um_data->u_data[13];
soundSquelch = *(uint8_t*) um_data->u_data[15];
fftBin = (float*) um_data->u_data[16];
inputLevel = (uint8_t*) um_data->u_data[17]; // requires UI element (SEGMENT.customX?), changes source element
} else { } else {
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
@@ -6016,10 +6005,10 @@ uint16_t mode_ripplepeak(void) { // * Ripple peak. By Andrew Tuli
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t samplePeak = *(uint8_t*)um_data->u_data[5]; uint8_t samplePeak = *(uint8_t*)um_data->u_data[3];
float FFT_MajorPeak = *(float*) um_data->u_data[6]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t *maxVol = (uint8_t*)um_data->u_data[9]; uint8_t *maxVol = (uint8_t*)um_data->u_data[6];
uint8_t *binNum = (uint8_t*)um_data->u_data[10]; uint8_t *binNum = (uint8_t*)um_data->u_data[7];
// printUmData(); // printUmData();
@@ -6106,21 +6095,17 @@ uint16_t mode_2DSwirl(void) {
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0]; //ewowi: use instead of sampleAvg???
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; int16_t volumeRaw = *(int16_t*) um_data->u_data[1];
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
// printUmData(); // printUmData();
float tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw; leds[XY( i, j)] += ColorFromPalette(strip.currentPalette, (ms / 11 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 11, 200, 255);
leds[XY( j, i)] += ColorFromPalette(strip.currentPalette, (ms / 13 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 13, 200, 255);
leds[XY( i, j)] += ColorFromPalette(strip.currentPalette, (ms / 11 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 11, 200, 255); leds[XY(ni, nj)] += ColorFromPalette(strip.currentPalette, (ms / 17 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 17, 200, 255);
leds[XY( j, i)] += ColorFromPalette(strip.currentPalette, (ms / 13 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 13, 200, 255); leds[XY(nj, ni)] += ColorFromPalette(strip.currentPalette, (ms / 29 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 29, 200, 255);
leds[XY(ni, nj)] += ColorFromPalette(strip.currentPalette, (ms / 17 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 17, 200, 255); leds[XY( i, nj)] += ColorFromPalette(strip.currentPalette, (ms / 37 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 37, 200, 255);
leds[XY(nj, ni)] += ColorFromPalette(strip.currentPalette, (ms / 29 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 29, 200, 255); leds[XY(ni, j)] += ColorFromPalette(strip.currentPalette, (ms / 41 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 41, 200, 255);
leds[XY( i, nj)] += ColorFromPalette(strip.currentPalette, (ms / 37 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 37, 200, 255);
leds[XY(ni, j)] += ColorFromPalette(strip.currentPalette, (ms / 41 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 41, 200, 255);
SEGMENT.setPixels(leds); SEGMENT.setPixels(leds);
return FRAMETIME; return FRAMETIME;
@@ -6151,9 +6136,7 @@ uint16_t mode_2DWaverly(void) {
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
SEGMENT.fadeToBlackBy(leds, SEGMENT.speed); SEGMENT.fadeToBlackBy(leds, SEGMENT.speed);
@@ -6163,7 +6146,7 @@ uint16_t mode_2DWaverly(void) {
// use audio if available // use audio if available
if (um_data) { if (um_data) {
thisVal /= 32; // reduce intensity of inoise8() thisVal /= 32; // reduce intensity of inoise8()
thisVal *= (soundAgc) ? sampleAgc : sampleAvg; thisVal *= volumeSmth;
} }
uint16_t thisMax = map(thisVal, 0, 512, 0, rows); uint16_t thisMax = map(thisVal, 0, 512, 0, rows);
@@ -6206,15 +6189,11 @@ uint16_t mode_gravcenter(void) { // Gravcenter. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
SEGMENT.fade_out(240); SEGMENT.fade_out(240);
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0f; float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0f;
segmentSampleAvg *= 0.125; // divide by 8, to compensate for later "sensitivty" upscaling segmentSampleAvg *= 0.125; // divide by 8, to compensate for later "sensitivty" upscaling
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0, 32, 0, (float)SEGLEN/2.0); // map to pixels available in current segment float mySampleAvg = mapf(segmentSampleAvg*2.0, 0, 32, 0, (float)SEGLEN/2.0); // map to pixels available in current segment
@@ -6257,18 +6236,14 @@ uint16_t mode_gravcentric(void) { // Gravcentric. By Andrew
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
// printUmData(); // printUmData();
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
SEGMENT.fade_out(240); SEGMENT.fade_out(240);
SEGMENT.fade_out(240); // twice? really? SEGMENT.fade_out(240); // twice? really?
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0; float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0;
segmentSampleAvg *= 0.125f; // divide by 8, to compensate for later "sensitivty" upscaling segmentSampleAvg *= 0.125f; // divide by 8, to compensate for later "sensitivty" upscaling
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0.0f, 32.0f, 0.0f, (float)SEGLEN/2.0); // map to pixels availeable in current segment float mySampleAvg = mapf(segmentSampleAvg*2.0, 0.0f, 32.0f, 0.0f, (float)SEGLEN/2.0); // map to pixels availeable in current segment
@@ -6311,15 +6286,11 @@ uint16_t mode_gravimeter(void) { // Gravmeter. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
SEGMENT.fade_out(240); SEGMENT.fade_out(240);
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0; float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0;
segmentSampleAvg *= 0.25; // divide by 4, to compensate for later "sensitivty" upscaling segmentSampleAvg *= 0.25; // divide by 4, to compensate for later "sensitivty" upscaling
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0, 64, 0, (SEGLEN-1)); // map to pixels availeable in current segment float mySampleAvg = mapf(segmentSampleAvg*2.0, 0, 64, 0, (SEGLEN-1)); // map to pixels availeable in current segment
@@ -6355,10 +6326,10 @@ uint16_t mode_juggles(void) { // Juggles. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAgc = *(float*)um_data->u_data[2]; float volumeSmth = *(float*) um_data->u_data[0];
SEGMENT.fade_out(224); SEGMENT.fade_out(224);
uint16_t my_sampleAgc = fmax(fmin(sampleAgc, 255.0), 0); uint16_t my_sampleAgc = fmax(fmin(volumeSmth, 255.0), 0);
for (size_t i=0; i<SEGMENT.intensity/32+1U; i++) { for (size_t i=0; i<SEGMENT.intensity/32+1U; i++) {
SEGMENT.setPixelColor(beatsin16(SEGMENT.speed/4+i*2,0,SEGLEN-1), color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis()/4+i*2, false, PALETTE_SOLID_WRAP, 0), my_sampleAgc)); SEGMENT.setPixelColor(beatsin16(SEGMENT.speed/4+i*2,0,SEGLEN-1), color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis()/4+i*2, false, PALETTE_SOLID_WRAP, 0), my_sampleAgc));
@@ -6378,9 +6349,7 @@ uint16_t mode_matripix(void) { // Matripix. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
if (SEGENV.call == 0) SEGMENT.fill(BLACK); if (SEGENV.call == 0) SEGMENT.fill(BLACK);
@@ -6388,8 +6357,7 @@ uint16_t mode_matripix(void) { // Matripix. By Andrew Tuline.
if(SEGENV.aux0 != secondHand) { if(SEGENV.aux0 != secondHand) {
SEGENV.aux0 = secondHand; SEGENV.aux0 = secondHand;
uint8_t tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw; int pixBri = volumeRaw * SEGMENT.intensity / 64;
int pixBri = tmpSound * SEGMENT.intensity / 64;
for (uint16_t i=0; i<SEGLEN-1; i++) SEGMENT.setPixelColor(i, SEGMENT.getPixelColor(i+1)); // shift left for (uint16_t i=0; i<SEGLEN-1; i++) SEGMENT.setPixelColor(i, SEGMENT.getPixelColor(i+1)); // shift left
SEGMENT.setPixelColor(SEGLEN-1, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri)); SEGMENT.setPixelColor(SEGLEN-1, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri));
} }
@@ -6410,22 +6378,19 @@ uint16_t mode_midnoise(void) { // Midnoise. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
SEGMENT.fade_out(SEGMENT.speed); SEGMENT.fade_out(SEGMENT.speed);
SEGMENT.fade_out(SEGMENT.speed); SEGMENT.fade_out(SEGMENT.speed);
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg; float tmpSound2 = volumeSmth * (float)SEGMENT.intensity / 256.0; // Too sensitive.
float tmpSound2 = tmpSound * (float)SEGMENT.intensity / 256.0; // Too sensitive.
tmpSound2 *= (float)SEGMENT.intensity / 128.0; // Reduce sensitity/length. tmpSound2 *= (float)SEGMENT.intensity / 128.0; // Reduce sensitity/length.
int maxLen = mapf(tmpSound2, 0, 127, 0, SEGLEN/2); int maxLen = mapf(tmpSound2, 0, 127, 0, SEGLEN/2);
if (maxLen >SEGLEN/2) maxLen = SEGLEN/2; if (maxLen >SEGLEN/2) maxLen = SEGLEN/2;
for (int i=(SEGLEN/2-maxLen); i<(SEGLEN/2+maxLen); i++) { for (int i=(SEGLEN/2-maxLen); i<(SEGLEN/2+maxLen); i++) {
uint8_t index = inoise8(i*tmpSound+SEGENV.aux0, SEGENV.aux1+i*tmpSound); // Get a value from the noise function. I'm using both x and y axis. uint8_t index = inoise8(i*volumeSmth+SEGENV.aux0, SEGENV.aux1+i*volumeSmth); // Get a value from the noise function. I'm using both x and y axis.
SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(index, false, PALETTE_SOLID_WRAP, 0)); SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(index, false, PALETTE_SOLID_WRAP, 0));
} }
@@ -6452,17 +6417,13 @@ uint16_t mode_noisefire(void) { // Noisefire. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
for (uint16_t i = 0; i < SEGLEN; i++) { for (uint16_t i = 0; i < SEGLEN; i++) {
uint16_t index = inoise8(i*SEGMENT.speed/64,millis()*SEGMENT.speed/64*SEGLEN/255); // X location is constant, but we move along the Y at the rate of millis(). By Andrew Tuline. uint16_t index = inoise8(i*SEGMENT.speed/64,millis()*SEGMENT.speed/64*SEGLEN/255); // X location is constant, but we move along the Y at the rate of millis(). By Andrew Tuline.
index = (255 - i*256/SEGLEN) * index/(256-SEGMENT.intensity); // Now we need to scale index so that it gets blacker as we get close to one of the ends. index = (255 - i*256/SEGLEN) * index/(256-SEGMENT.intensity); // Now we need to scale index so that it gets blacker as we get close to one of the ends.
// This is a simple y=mx+b equation that's been scaled. index/128 is another scaling. // This is a simple y=mx+b equation that's been scaled. index/128 is another scaling.
uint8_t tmpSound = (soundAgc) ? sampleAgc : sampleAvg; CRGB color = ColorFromPalette(strip.currentPalette, index, volumeSmth*2, LINEARBLEND); // Use the my own palette.
CRGB color = ColorFromPalette(strip.currentPalette, index, tmpSound*2, LINEARBLEND); // Use the my own palette.
SEGMENT.setPixelColor(i, color); SEGMENT.setPixelColor(i, color);
} }
@@ -6481,23 +6442,18 @@ uint16_t mode_noisemeter(void) { // Noisemeter. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
uint8_t fadeRate = map(SEGMENT.speed,0,255,224,255); uint8_t fadeRate = map(SEGMENT.speed,0,255,224,255);
SEGMENT.fade_out(fadeRate); SEGMENT.fade_out(fadeRate);
float tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw; float tmpSound2 = volumeRaw * 2.0 * (float)SEGMENT.intensity / 255.0;
float tmpSound2 = tmpSound * 2.0 * (float)SEGMENT.intensity / 255.0;
int maxLen = mapf(tmpSound2, 0, 255, 0, SEGLEN); // map to pixels availeable in current segment // Still a bit too sensitive. int maxLen = mapf(tmpSound2, 0, 255, 0, SEGLEN); // map to pixels availeable in current segment // Still a bit too sensitive.
if (maxLen >SEGLEN) maxLen = SEGLEN; if (maxLen >SEGLEN) maxLen = SEGLEN;
tmpSound = soundAgc ? sampleAgc : sampleAvg; // now use smoothed value (sampleAvg or sampleAgc)
for (int i=0; i<maxLen; i++) { // The louder the sound, the wider the soundbar. By Andrew Tuline. for (int i=0; i<maxLen; i++) { // The louder the sound, the wider the soundbar. By Andrew Tuline.
uint8_t index = inoise8(i*tmpSound+SEGENV.aux0, SEGENV.aux1+i*tmpSound); // Get a value from the noise function. I'm using both x and y axis. uint8_t index = inoise8(i*volumeSmth+SEGENV.aux0, SEGENV.aux1+i*volumeSmth); // Get a value from the noise function. I'm using both x and y axis.
SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(index, false, PALETTE_SOLID_WRAP, 0)); SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(index, false, PALETTE_SOLID_WRAP, 0));
} }
@@ -6520,16 +6476,13 @@ uint16_t mode_pixelwave(void) { // Pixelwave. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 16; uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 16;
if(SEGENV.aux0 != secondHand) { if(SEGENV.aux0 != secondHand) {
SEGENV.aux0 = secondHand; SEGENV.aux0 = secondHand;
uint8_t tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw; int pixBri = volumeRaw * SEGMENT.intensity / 64;
int pixBri = tmpSound * SEGMENT.intensity / 64;
SEGMENT.setPixelColor(SEGLEN/2, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri)); SEGMENT.setPixelColor(SEGLEN/2, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri));
@@ -6560,9 +6513,7 @@ uint16_t mode_plasmoid(void) { // Plasmoid. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
SEGMENT.fade_out(64); SEGMENT.fade_out(64);
@@ -6575,8 +6526,7 @@ uint16_t mode_plasmoid(void) { // Plasmoid. By Andrew Tuline.
thisbright += cos8(((i*(97 +(5*SEGMENT.speed/32)))+plasmoip->thatphase) & 0xFF)/2; // Let's munge the brightness a bit and animate it all with the phases. thisbright += cos8(((i*(97 +(5*SEGMENT.speed/32)))+plasmoip->thatphase) & 0xFF)/2; // Let's munge the brightness a bit and animate it all with the phases.
uint8_t colorIndex=thisbright; uint8_t colorIndex=thisbright;
int tmpSound = (soundAgc) ? sampleAgc : sampleAvg; if (volumeSmth * SEGMENT.intensity / 64 < thisbright) {thisbright = 0;}
if (tmpSound * SEGMENT.intensity / 64 < thisbright) {thisbright = 0;}
SEGMENT.setPixelColor(i, color_add(SEGMENT.getPixelColor(i), color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(colorIndex, false, PALETTE_SOLID_WRAP, 0), thisbright))); SEGMENT.setPixelColor(i, color_add(SEGMENT.getPixelColor(i), color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(colorIndex, false, PALETTE_SOLID_WRAP, 0), thisbright)));
} }
@@ -6601,10 +6551,10 @@ uint16_t mode_puddlepeak(void) { // Puddlepeak. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAgc = *(float*) um_data->u_data[2]; uint8_t samplePeak = *(uint8_t*)um_data->u_data[3];
uint8_t samplePeak = *(uint8_t*)um_data->u_data[5]; uint8_t *maxVol = (uint8_t*)um_data->u_data[6];
uint8_t *maxVol = (uint8_t*)um_data->u_data[9]; uint8_t *binNum = (uint8_t*)um_data->u_data[7];
uint8_t *binNum = (uint8_t*)um_data->u_data[10]; float volumeSmth = *(float*) um_data->u_data[0];
if (SEGENV.call == 0) { if (SEGENV.call == 0) {
SEGMENT.custom2 = *binNum; SEGMENT.custom2 = *binNum;
@@ -6617,7 +6567,7 @@ uint16_t mode_puddlepeak(void) { // Puddlepeak. By Andrew Tuline.
SEGMENT.fade_out(fadeVal); SEGMENT.fade_out(fadeVal);
if (samplePeak == 1) { if (samplePeak == 1) {
size = sampleAgc * SEGMENT.intensity /256 /4 + 1; // Determine size of the flash based on the volume. size = volumeSmth * SEGMENT.intensity /256 /4 + 1; // Determine size of the flash based on the volume.
if (pos+size>= SEGLEN) size = SEGLEN - pos; if (pos+size>= SEGLEN) size = SEGLEN - pos;
} }
@@ -6645,14 +6595,10 @@ uint16_t mode_puddles(void) { // Puddles. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
uint16_t tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw; if (volumeRaw > 1) {
size = volumeRaw * SEGMENT.intensity /256 /8 + 1; // Determine size of the flash based on the volume.
if (tmpSound > 1) {
size = tmpSound * SEGMENT.intensity /256 /8 + 1; // Determine size of the flash based on the volume.
if (pos+size >= SEGLEN) size = SEGLEN - pos; if (pos+size >= SEGLEN) size = SEGLEN - pos;
} }
@@ -6682,14 +6628,15 @@ uint16_t mode_pixels(void) { // Pixels. By Andrew Tuline.
if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) { if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAgc = *(float*) um_data->u_data[2]; float volumeSmth = *(float*) um_data->u_data[0];
myVals[millis()%32] = sampleAgc; // filling values semi randomly
myVals[millis()%32] = volumeSmth; // filling values semi randomly
SEGMENT.fade_out(64+(SEGMENT.speed>>1)); SEGMENT.fade_out(64+(SEGMENT.speed>>1));
for (uint16_t i=0; i <SEGMENT.intensity/8; i++) { for (uint16_t i=0; i <SEGMENT.intensity/8; i++) {
uint16_t segLoc = random16(SEGLEN); // 16 bit for larger strands of LED's. uint16_t segLoc = random16(SEGLEN); // 16 bit for larger strands of LED's.
SEGMENT.setPixelColor(segLoc, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(myVals[i%32]+i*4, false, PALETTE_SOLID_WRAP, 0), sampleAgc)); SEGMENT.setPixelColor(segLoc, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(myVals[i%32]+i*4, false, PALETTE_SOLID_WRAP, 0), volumeSmth));
} }
return FRAMETIME; return FRAMETIME;
@ -6714,24 +6661,15 @@ uint16_t WS2812FX::mode_binmap(void) {
#ifdef SR_DEBUG #ifdef SR_DEBUG
uint8_t *maxVol; uint8_t *maxVol;
#endif #endif
uint8_t soundAgc = 0;
float sampleAvg = 0.0f;
float *fftBin = nullptr; float *fftBin = nullptr;
float multAgc, sampleGain;
uint8_t soundSquelch;
uint8_t *inputLevel = (uint8_t*)(&SEGENV.aux1+1); uint8_t *inputLevel = (uint8_t*)(&SEGENV.aux1+1);
um_data_t *um_data; um_data_t *um_data;
if (usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) { if (usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
#ifdef SR_DEBUG #ifdef SR_DEBUG
maxVol = (uint8_t*)um_data->u_data[9]; maxVol = (uint8_t*)um_data->u_data[6];
#endif #endif
sampleAvg = *(float*) um_data->u_data[0]; fftBin = (float*) um_data->u_data[8];
soundAgc = *(uint8_t*)um_data->u_data[1]; inputLevel = (uint8_t*)um_data->u_data[9];
multAgc = *(float*) um_data->u_data[11];
sampleGain = *(float*) um_data->u_data[13];
soundSquelch = *(uint8_t*)um_data->u_data[15];
fftBin = (float*) um_data->u_data[16];
inputLevel = (uint8_t*)um_data->u_data[17];
} }
if (!fftBin) return mode_static(); if (!fftBin) return mode_static();
@ -6798,7 +6736,7 @@ uint16_t mode_blurz(void) { // Blurz. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t *fftResult = (uint8_t*)um_data->u_data[8]; uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static(); if (!fftResult) return mode_static();
if (SEGENV.call == 0) { if (SEGENV.call == 0) {
@ -6835,7 +6773,7 @@ uint16_t mode_DJLight(void) { // Written by ??? Adapted by Wil
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t *fftResult = (uint8_t*)um_data->u_data[8]; uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static(); if (!fftResult) return mode_static();
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 64; uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 64;
@ -6868,15 +6806,8 @@ uint16_t mode_freqmap(void) { // Map FFT_MajorPeak to SEGLEN.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; float my_magnitude = *(float*) um_data->u_data[5] / 4.0f;
float FFT_MajorPeak = *(float*) um_data->u_data[6];
float FFT_Magnitude = *(float*) um_data->u_data[7];
float multAgc = *(float*) um_data->u_data[11];
float my_magnitude = FFT_Magnitude / 4.0;
if (soundAgc) my_magnitude *= multAgc;
if (sampleAvg < 1 ) my_magnitude = 0.001; // noise gate closed - mute
SEGMENT.fade_out(SEGMENT.speed); SEGMENT.fade_out(SEGMENT.speed);
@ -6902,15 +6833,15 @@ uint16_t mode_freqmatrix(void) { // Freqmatrix. By Andreas Plesch
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAgc = *(float*)um_data->u_data[2]; float FFT_MajorPeak = *(float*)um_data->u_data[4];
float FFT_MajorPeak = *(float*)um_data->u_data[6]; float volumeSmth = *(float*) um_data->u_data[0];
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500 % 16; uint8_t secondHand = micros()/(256-SEGMENT.speed)/500 % 16;
if(SEGENV.aux0 != secondHand) { if(SEGENV.aux0 != secondHand) {
SEGENV.aux0 = secondHand; SEGENV.aux0 = secondHand;
uint8_t sensitivity = map(SEGMENT.custom3, 0, 255, 1, 10); uint8_t sensitivity = map(SEGMENT.custom3, 0, 255, 1, 10);
int pixVal = (sampleAgc * SEGMENT.intensity * sensitivity) / 256.0f; int pixVal = (volumeSmth * SEGMENT.intensity * sensitivity) / 256.0f;
if (pixVal > 255) pixVal = 255; if (pixVal > 255) pixVal = 255;
float intensity = map(pixVal, 0, 255, 0, 100) / 100.0f; // make a brightness from the last avg float intensity = map(pixVal, 0, 255, 0, 100) / 100.0f; // make a brightness from the last avg
@ -6956,15 +6887,8 @@ uint16_t mode_freqpixels(void) { // Freqpixel. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; float my_magnitude = *(float*) um_data->u_data[5] / 16.0f;
float FFT_MajorPeak = *(float*) um_data->u_data[6];
float FFT_Magnitude = *(float*) um_data->u_data[7];
float multAgc = *(float*) um_data->u_data[11];
float my_magnitude = FFT_Magnitude / 16.0;
if (soundAgc) my_magnitude *= multAgc;
if (sampleAvg < 1 ) my_magnitude = 0.001; // noise gate closed - mute
uint16_t fadeRate = 2*SEGMENT.speed - SEGMENT.speed*SEGMENT.speed/255; // Get to 255 as quick as you can. uint16_t fadeRate = 2*SEGMENT.speed - SEGMENT.speed*SEGMENT.speed/255; // Get to 255 as quick as you can.
SEGMENT.fade_out(fadeRate); SEGMENT.fade_out(fadeRate);
@ -7001,10 +6925,8 @@ uint16_t mode_freqwave(void) { // Freqwave. By Andreas Pleschun
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; float volumeSmth = *(float*) um_data->u_data[0];
float sampleAgc = *(float*) um_data->u_data[2];
float FFT_MajorPeak = *(float*) um_data->u_data[6];
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500 % 16; uint8_t secondHand = micros()/(256-SEGMENT.speed)/500 % 16;
if(SEGENV.aux0 != secondHand) { if(SEGENV.aux0 != secondHand) {
@ -7013,10 +6935,8 @@ uint16_t mode_freqwave(void) { // Freqwave. By Andreas Pleschun
//uint8_t fade = SEGMENT.custom3; //uint8_t fade = SEGMENT.custom3;
//uint8_t fadeval; //uint8_t fadeval;
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
float sensitivity = mapf(SEGMENT.custom3, 1, 255, 1, 10); float sensitivity = mapf(SEGMENT.custom3, 1, 255, 1, 10);
float pixVal = tmpSound * (float)SEGMENT.intensity / 256.0f * sensitivity; float pixVal = volumeSmth * (float)SEGMENT.intensity / 256.0f * sensitivity;
if (pixVal > 255) pixVal = 255; if (pixVal > 255) pixVal = 255;
float intensity = mapf(pixVal, 0, 255, 0, 100) / 100.0f; // make a brightness from the last avg float intensity = mapf(pixVal, 0, 255, 0, 100) / 100.0f; // make a brightness from the last avg
@ -7065,15 +6985,12 @@ uint16_t mode_gravfreq(void) { // Gravfreq. By Andrew Tuline.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; float volumeSmth = *(float*) um_data->u_data[0];
float sampleAgc = *(float*) um_data->u_data[2];
float FFT_MajorPeak = *(float*) um_data->u_data[6];
SEGMENT.fade_out(240); SEGMENT.fade_out(240);
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg; float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0;
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0;
segmentSampleAvg *= 0.125; // divide by 8, to compensate for later "sensitivty" upscaling segmentSampleAvg *= 0.125; // divide by 8, to compensate for later "sensitivty" upscaling
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0,32, 0, (float)SEGLEN/2.0); // map to pixels availeable in current segment float mySampleAvg = mapf(segmentSampleAvg*2.0, 0,32, 0, (float)SEGLEN/2.0); // map to pixels availeable in current segment
@ -7113,7 +7030,7 @@ uint16_t mode_noisemove(void) { // Noisemove. By: Andrew Tuli
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t *fftResult = (uint8_t*)um_data->u_data[8]; uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static(); if (!fftResult) return mode_static();
SEGMENT.fade_out(224); // Just in case something doesn't get faded. SEGMENT.fade_out(224); // Just in case something doesn't get faded.
@ -7139,11 +7056,8 @@ uint16_t mode_rocktaves(void) { // Rocktaves. Same note from eac
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; float my_magnitude = *(float*) um_data->u_data[5] / 16.0f;
float FFT_MajorPeak = *(float*) um_data->u_data[6];
float FFT_Magnitude = *(float*) um_data->u_data[7];
float multAgc = *(float*) um_data->u_data[11];
SEGMENT.fade_out(128); // Just in case something doesn't get faded. SEGMENT.fade_out(128); // Just in case something doesn't get faded.
@ -7151,10 +7065,6 @@ uint16_t mode_rocktaves(void) { // Rocktaves. Same note from eac
uint8_t octCount = 0; // Octave counter. uint8_t octCount = 0; // Octave counter.
uint8_t volTemp = 0; uint8_t volTemp = 0;
float my_magnitude = FFT_Magnitude / 16.0; // scale magnitude to be aligned with scaling of FFT bins
if (soundAgc) my_magnitude *= multAgc; // apply gain
if (sampleAvg < 1 ) my_magnitude = 0.001; // mute
if (my_magnitude > 32) volTemp = 255; // We need to squelch out the background noise. if (my_magnitude > 32) volTemp = 255; // We need to squelch out the background noise.
while ( frTemp > 249 ) { while ( frTemp > 249 ) {
@ -7186,14 +7096,11 @@ uint16_t mode_waterfall(void) { // Waterfall. By: Andrew Tulin
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
float sampleAvg = *(float*) um_data->u_data[0]; uint8_t samplePeak = *(uint8_t*)um_data->u_data[3];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1]; float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t samplePeak = *(uint8_t*)um_data->u_data[5]; uint8_t *maxVol = (uint8_t*)um_data->u_data[6];
float FFT_MajorPeak = *(float*) um_data->u_data[6]; uint8_t *binNum = (uint8_t*)um_data->u_data[7];
float FFT_Magnitude = *(float*) um_data->u_data[7]; float my_magnitude = *(float*) um_data->u_data[5] / 8.0f;
uint8_t *maxVol = (uint8_t*)um_data->u_data[9];
uint8_t *binNum = (uint8_t*)um_data->u_data[10];
float multAgc = *(float*) um_data->u_data[11];
if (SEGENV.call == 0) { if (SEGENV.call == 0) {
SEGENV.aux0 = 255; SEGENV.aux0 = 255;
@ -7208,10 +7115,6 @@ uint16_t mode_waterfall(void) { // Waterfall. By: Andrew Tulin
if (SEGENV.aux0 != secondHand) { // Triggered millis timing. if (SEGENV.aux0 != secondHand) { // Triggered millis timing.
SEGENV.aux0 = secondHand; SEGENV.aux0 = secondHand;
float my_magnitude = FFT_Magnitude / 8.0f;
if (soundAgc) my_magnitude *= multAgc;
if (sampleAvg < 1 ) my_magnitude = 0.001f; // noise gate closed - mute
uint8_t pixCol = (log10f((float)FFT_MajorPeak) - 2.26f) * 177; // log10 frequency range is from 2.26 to 3.7. Let's scale accordingly. uint8_t pixCol = (log10f((float)FFT_MajorPeak) - 2.26f) * 177; // log10 frequency range is from 2.26 to 3.7. Let's scale accordingly.
if (samplePeak) { if (samplePeak) {
@ -7246,7 +7149,7 @@ uint16_t mode_2DGEQ(void) { // By Will Tatam. Code reduction by Ewoud Wijma.
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t *fftResult = (uint8_t*)um_data->u_data[8]; uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static(); if (!fftResult) return mode_static();
if (SEGENV.call == 0) for (int i=0; i<cols; i++) previousBarHeight[i] = 0; if (SEGENV.call == 0) for (int i=0; i<cols; i++) previousBarHeight[i] = 0;
@ -7310,7 +7213,7 @@ uint16_t mode_2DFunkyPlank(void) { // Written by ??? Adapted by Wil
// add support for no audio data // add support for no audio data
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t *fftResult = (uint8_t*)um_data->u_data[8]; uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static(); if (!fftResult) return mode_static();
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 64; uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 64;
@ -7405,7 +7308,7 @@ uint16_t mode_2DAkemi(void) {
if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) { if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
um_data = simulateSound(SEGMENT.soundSim); um_data = simulateSound(SEGMENT.soundSim);
} }
uint8_t *fftResult = (uint8_t*)um_data->u_data[8]; uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
float base = fftResult[0]/255.0f; float base = fftResult[0]/255.0f;
//draw and color Akemi //draw and color Akemi

View File

@ -238,7 +238,7 @@ typedef enum UM_Data_Types {
typedef struct UM_Exchange_Data { typedef struct UM_Exchange_Data {
// should just use: size_t arr_size, void **arr_ptr, byte *ptr_type // should just use: size_t arr_size, void **arr_ptr, byte *ptr_type
size_t u_size; // size of u_data array size_t u_size; // size of u_data array
um_types_t *u_type; // array of data types um_types_t *u_type; // array of data types ewowi: not used???
void **u_data; // array of pointers to data void **u_data; // array of pointers to data
UM_Exchange_Data() { UM_Exchange_Data() {
u_size = 0; u_size = 0;

View File

@ -397,22 +397,17 @@ typedef enum UM_SoundSimulations {
// this is still work in progress // this is still work in progress
um_data_t* simulateSound(uint8_t simulationId) um_data_t* simulateSound(uint8_t simulationId)
{ {
static float sampleAvg;
static uint8_t soundAgc;
static float sampleAgc;
static int16_t sampleRaw;
static int16_t rawSampleAgc;
static uint8_t samplePeak; static uint8_t samplePeak;
static float FFT_MajorPeak; static float FFT_MajorPeak;
static float FFT_Magnitude;
static uint8_t maxVol; static uint8_t maxVol;
static uint8_t binNum; static uint8_t binNum;
static float multAgc;
float sampleGain;
uint8_t soundSquelch;
uint8_t inputLevel; uint8_t inputLevel;
static float volumeSmth;
static uint16_t volumeRaw;
static float my_magnitude;
//arrays //arrays
uint8_t *fftResult; uint8_t *fftResult;
float *fftBin; float *fftBin;
@ -428,30 +423,23 @@ um_data_t* simulateSound(uint8_t simulationId)
// NOTE!!! // NOTE!!!
// This may change as AudioReactive usermod may change // This may change as AudioReactive usermod may change
um_data = new um_data_t; um_data = new um_data_t;
um_data->u_size = 18; um_data->u_size = 10;
um_data->u_type = new um_types_t[um_data->u_size]; um_data->u_type = new um_types_t[um_data->u_size];
um_data->u_data = new void*[um_data->u_size]; um_data->u_data = new void*[um_data->u_size];
um_data->u_data[ 0] = &sampleAvg; um_data->u_data[0] = &volumeSmth;
um_data->u_data[ 1] = &soundAgc; um_data->u_data[1] = &volumeRaw;
um_data->u_data[ 2] = &sampleAgc; um_data->u_data[2] = fftResult;
um_data->u_data[ 3] = &sampleRaw; um_data->u_data[3] = &samplePeak;
um_data->u_data[ 4] = &rawSampleAgc; um_data->u_data[4] = &FFT_MajorPeak;
um_data->u_data[ 5] = &samplePeak; um_data->u_data[5] = &my_magnitude;
um_data->u_data[ 6] = &FFT_MajorPeak; um_data->u_data[6] = &maxVol;
um_data->u_data[ 7] = &FFT_Magnitude; um_data->u_data[7] = &binNum;
um_data->u_data[ 8] = fftResult; um_data->u_data[8] = fftBin; //only used in binmap
um_data->u_data[ 9] = &maxVol; um_data->u_data[9] = &inputLevel;
um_data->u_data[10] = &binNum;
um_data->u_data[11] = &multAgc;
um_data->u_data[14] = 0; //free
um_data->u_data[13] = &sampleGain;
um_data->u_data[15] = &soundSquelch;
um_data->u_data[16] = fftBin; //only used in binmap
um_data->u_data[17] = &inputLevel;
} else { } else {
// get arrays from um_data // get arrays from um_data
fftResult = (uint8_t*)um_data->u_data[8]; fftResult = (uint8_t*)um_data->u_data[2];
fftBin = (float*)um_data->u_data[16]; fftBin = (float*)um_data->u_data[8];
} }
uint32_t ms = millis(); uint32_t ms = millis();
@ -462,36 +450,36 @@ um_data_t* simulateSound(uint8_t simulationId)
for (int i = 0; i<16; i++) for (int i = 0; i<16; i++)
fftResult[i] = beatsin8(120 / (i+1), 0, 255); fftResult[i] = beatsin8(120 / (i+1), 0, 255);
// fftResult[i] = (beatsin8(120, 0, 255) + (256/16 * i)) % 256; // fftResult[i] = (beatsin8(120, 0, 255) + (256/16 * i)) % 256;
sampleAvg = fftResult[8]; volumeSmth = fftResult[8];
break; break;
case UMS_WeWillRockYou: case UMS_WeWillRockYou:
if (ms%2000 < 200) { if (ms%2000 < 200) {
sampleAvg = random8(255); volumeSmth = random8(255);
for (int i = 0; i<5; i++) for (int i = 0; i<5; i++)
fftResult[i] = random8(255); fftResult[i] = random8(255);
} }
else if (ms%2000 < 400) { else if (ms%2000 < 400) {
sampleAvg = 0; volumeSmth = 0;
for (int i = 0; i<16; i++) for (int i = 0; i<16; i++)
fftResult[i] = 0; fftResult[i] = 0;
} }
else if (ms%2000 < 600) { else if (ms%2000 < 600) {
sampleAvg = random8(255); volumeSmth = random8(255);
for (int i = 5; i<11; i++) for (int i = 5; i<11; i++)
fftResult[i] = random8(255); fftResult[i] = random8(255);
} }
else if (ms%2000 < 800) { else if (ms%2000 < 800) {
sampleAvg = 0; volumeSmth = 0;
for (int i = 0; i<16; i++) for (int i = 0; i<16; i++)
fftResult[i] = 0; fftResult[i] = 0;
} }
else if (ms%2000 < 1000) { else if (ms%2000 < 1000) {
sampleAvg = random8(255); volumeSmth = random8(255);
for (int i = 11; i<16; i++) for (int i = 11; i<16; i++)
fftResult[i] = random8(255); fftResult[i] = random8(255);
} }
else { else {
sampleAvg = 0; volumeSmth = 0;
for (int i = 0; i<16; i++) for (int i = 0; i<16; i++)
fftResult[i] = 0; fftResult[i] = 0;
} }
@ -499,32 +487,23 @@ um_data_t* simulateSound(uint8_t simulationId)
case UMS_10_3: case UMS_10_3:
for (int i = 0; i<16; i++) for (int i = 0; i<16; i++)
fftResult[i] = inoise8(beatsin8(90 / (i+1), 0, 200)*15 + (ms>>10), ms>>3); fftResult[i] = inoise8(beatsin8(90 / (i+1), 0, 200)*15 + (ms>>10), ms>>3);
sampleAvg = fftResult[8]; volumeSmth = fftResult[8];
break; break;
case UMS_14_3: case UMS_14_3:
for (int i = 0; i<16; i++) for (int i = 0; i<16; i++)
fftResult[i] = inoise8(beatsin8(120 / (i+1), 10, 30)*10 + (ms>>14), ms>>3); fftResult[i] = inoise8(beatsin8(120 / (i+1), 10, 30)*10 + (ms>>14), ms>>3);
sampleAvg = fftResult[8]; volumeSmth = fftResult[8];
break; break;
} }
//derive other vars from sampleAvg
//sampleAvg = mapf(sampleAvg, 0, 255, 0, 255); // help me out here
soundAgc = 0; //only avg in simulations
sampleAgc = sampleAvg;
sampleRaw = sampleAvg;
sampleRaw = map(sampleRaw, 50, 190, 0, 224);
rawSampleAgc = sampleAvg;
samplePeak = random8() > 250; samplePeak = random8() > 250;
FFT_MajorPeak = sampleAvg; FFT_MajorPeak = volumeSmth;
FFT_Magnitude = sampleAvg;
multAgc = sampleAvg;
sampleGain = 40;
soundSquelch = 10;
maxVol = 10; // this gets feedback fro UI maxVol = 10; // this gets feedback fro UI
binNum = 8; // this gets feedback fro UI binNum = 8; // this gets feedback fro UI
inputLevel = 128; // this gets feedback fro UI inputLevel = 128; // this gets feedback fro UI
volumeRaw = volumeSmth;
my_magnitude = 10000.0 / 8.0f; //no idea if 10000 is a good value for FFT_Magnitude ???
if (volumeSmth < 1 ) my_magnitude = 0.001f; // noise gate closed - mute
return um_data; return um_data;
} }