Merge branch 'audioreactive-prototype' into segment-api
commit eb8710df81

@ -2,11 +2,16 @@

#include "wled.h"
#include <driver/i2s.h>
#include <driver/adc.h>

#ifndef ESP32
#error This audio reactive usermod does not support the ESP8266.
#endif

#ifdef WLED_DEBUG
#include <esp_timer.h>
#endif

/*
* Usermods allow you to add own functionality to WLED more easily
* See: https://github.com/Aircoookie/WLED/wiki/Add-own-functionality
@ -31,7 +36,8 @@

constexpr i2s_port_t I2S_PORT = I2S_NUM_0;
constexpr int BLOCK_SIZE = 128;
constexpr int SAMPLE_RATE = 20480;            // Base sample rate in Hz
constexpr int SAMPLE_RATE = 20480;            // Base sample rate in Hz - 20 kHz is experimental
//constexpr int SAMPLE_RATE = 10240;          // Base sample rate in Hz - standard

// #define MIC_LOGGER
// #define MIC_SAMPLING_LOG
@ -42,7 +48,7 @@ constexpr int SAMPLE_RATE = 20480; // Base sample rate in Hz

// globals
static uint8_t inputLevel = 128; // UI slider value
static uint8_t soundSquelch = 10; // squelch value for volume reactive routines (config value)
static uint8_t sampleGain = 1; // sample gain (config value)
static uint8_t sampleGain = 60; // sample gain (config value)
static uint8_t soundAgc = 0; // Automagic gain control: 0 - none, 1 - normal, 2 - vivid, 3 - lazy (config value)
static uint8_t audioSyncEnabled = 0; // bit field: bit 0 - send, bit 1 - receive (config value)

@ -51,21 +57,22 @@ static uint8_t audioSyncEnabled = 0; // bit field: bit 0 - send, bit 1

// Note: in C++, "const" implies "static" - no need to explicitly declare everything as "static const"
//
#define AGC_NUM_PRESETS 3 // AGC presets: normal, vivid, lazy
const float agcSampleDecay[AGC_NUM_PRESETS] = { 0.9994f, 0.9985f, 0.9997f}; // decay factor for sampleMax, in case the current sample is below sampleMax
const double agcSampleDecay[AGC_NUM_PRESETS] = { 0.9994f, 0.9985f, 0.9997f}; // decay factor for sampleMax, in case the current sample is below sampleMax
const float agcZoneLow[AGC_NUM_PRESETS] = { 32, 28, 36}; // low volume emergency zone
const float agcZoneHigh[AGC_NUM_PRESETS] = { 240, 240, 248}; // high volume emergency zone
const float agcZoneStop[AGC_NUM_PRESETS] = { 336, 448, 304}; // disable AGC integrator if we get above this level
const float agcTarget0[AGC_NUM_PRESETS] = { 112, 144, 164}; // first AGC setPoint -> between 40% and 65%
const float agcTarget0Up[AGC_NUM_PRESETS] = { 88, 64, 116}; // setpoint switching value (a poor man's bang-bang)
const float agcTarget1[AGC_NUM_PRESETS] = { 220, 224, 216}; // second AGC setPoint -> around 85%
const float agcFollowFast[AGC_NUM_PRESETS] = { 1/192.f, 1/128.f, 1/256.f}; // quickly follow setpoint - ~0.15 sec
const float agcFollowSlow[AGC_NUM_PRESETS] = {1/6144.f,1/4096.f,1/8192.f}; // slowly follow setpoint - ~2-15 secs
const float agcControlKp[AGC_NUM_PRESETS] = { 0.6f, 1.5f, 0.65f}; // AGC - PI control, proportional gain parameter
const float agcControlKi[AGC_NUM_PRESETS] = { 1.7f, 1.85f, 1.2f}; // AGC - PI control, integral gain parameter
const double agcFollowFast[AGC_NUM_PRESETS] = { 1/192.f, 1/128.f, 1/256.f}; // quickly follow setpoint - ~0.15 sec
const double agcFollowSlow[AGC_NUM_PRESETS] = {1/6144.f,1/4096.f,1/8192.f}; // slowly follow setpoint - ~2-15 secs
const double agcControlKp[AGC_NUM_PRESETS] = { 0.6f, 1.5f, 0.65f}; // AGC - PI control, proportional gain parameter
const double agcControlKi[AGC_NUM_PRESETS] = { 1.7f, 1.85f, 1.2f}; // AGC - PI control, integral gain parameter
const float agcSampleSmooth[AGC_NUM_PRESETS] = { 1/12.f, 1/6.f, 1/16.f}; // smoothing factor for sampleAgc (use rawSampleAgc if you want the non-smoothed value)
// AGC presets end
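// The Kp/Ki pairs above drive the PI gain update in agcAvg() further below, and the follow-rate constants pace how
// fast the gain multAgc may move. Schematically, one update step looks like this (illustration only - the exact
// formula in agcAvg() is not fully visible in this diff, and "setpoint"/"closeToTarget" stand in for the
// agcTarget0/agcTarget1 selection shown later):
//   float control_error = setpoint - multAgc * sampleMax;          // distance of the scaled signal from its target
//   control_integrated += control_error * 0.002 * 0.25;            // accumulate error (2 ms step, 0.25 damping)
//   float followRate = closeToTarget ? agcFollowSlow[AGC_preset] : agcFollowFast[AGC_preset];
//   multAgc += followRate * (agcControlKp[AGC_preset] * control_error + agcControlKi[AGC_preset] * control_integrated);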

static AudioSource *audioSource = nullptr;
static volatile bool disableSoundProcessing = false; // if true, sound processing (FFT, filters, AGC) will be suspended. "volatile" as it's shared between tasks.

//static uint16_t micData; // Analog input for FFT
static uint16_t micDataSm; // Smoothed mic data, as it's a bit twitchy
@ -75,6 +82,12 @@ static float multAgc = 1.0f; // sample * multAgc = sampleAgc.
|
||||
////////////////////
|
||||
// Begin FFT Code //
|
||||
////////////////////
|
||||
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
|
||||
// lib_deps += https://github.com/kosme/arduinoFFT#develop @ 1.9.2
|
||||
#define FFT_SPEED_OVER_PRECISION // enables use of reciprocals (1/x etc), and an a few other speedups
|
||||
#define FFT_SQRT_APPROXIMATION // enables "quake3" style inverse sqrt
|
||||
//#define sqrt(x) sqrtf(x) // little hack that reduces FFT time by 50% on ESP32 (as alternative to FFT_SQRT_APPROXIMATION)
|
||||
#endif
|
||||
#include "arduinoFFT.h"
|
||||
|
||||
// FFT Variables
|
||||
@ -88,6 +101,10 @@ static float vReal[samplesFFT];
|
||||
static float vImag[samplesFFT];
|
||||
static float fftBin[samplesFFT];
|
||||
|
||||
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
|
||||
static float windowWeighingFactors[samplesFFT];
|
||||
#endif
|
||||
|
||||
// Try and normalize fftBin values to a max of 4096, so that 4096/16 = 256.
|
||||
// Oh, and bins 0,1,2 are no good, so we'll zero them out.
|
||||
static float fftCalc[16];
|
||||
@ -109,7 +126,12 @@ static uint8_t linearNoise[16] = { 34, 28, 26, 25, 20, 12, 9, 6, 4, 4, 3, 2, 2,
|
||||
static float fftResultPink[16] = { 1.70f, 1.71f, 1.73f, 1.78f, 1.68f, 1.56f, 1.55f, 1.63f, 1.79f, 1.62f, 1.80f, 2.06f, 2.47f, 3.35f, 6.83f, 9.55f };
|
||||
|
||||
// Create FFT object
|
||||
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
|
||||
static ArduinoFFT<float> FFT = ArduinoFFT<float>( vReal, vImag, samplesFFT, SAMPLE_RATE, windowWeighingFactors);
|
||||
#else
|
||||
static arduinoFFT FFT = arduinoFFT(vReal, vImag, samplesFFT, SAMPLE_RATE);
|
||||
#endif
|
||||
|
||||
static TaskHandle_t FFT_Task = nullptr;
|
||||
|
||||
float fftAddAvg(int from, int to) {
|
||||
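The hunk cuts off the body of fftAddAvg() here; judging by its name and the fftBin[] array declared above, it presumably averages the raw FFT bins over an inclusive range. A minimal sketch under that assumption, not the committed implementation:

    // hypothetical body: average of fftBin[from..to], inclusive
    float fftAddAvg(int from, int to) {
      float result = 0.0f;
      for (int i = from; i <= to; i++) result += fftBin[i];
      return result / float(to - from + 1);
    }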
@ -132,17 +154,25 @@ void FFTcode(void * parameter)

delay(1); // DO NOT DELETE THIS LINE! It is needed to give the IDLE(0) task enough time and to keep the watchdog happy.
// taskYIELD(), yield(), vTaskDelay() and esp_task_wdt_feed() didn't seem to work.

// Only run the FFT computing code if we're not in Receive mode
if (audioSyncEnabled & 0x02) continue;
// Only run the FFT computing code if we're not in Receive mode and not in realtime mode
if (disableSoundProcessing || (audioSyncEnabled & 0x02)) {
delay(7); // release CPU - delay is implemented using vTaskDelay(). cannot use yield() because we are out of arduino loop context
continue;
}
|
||||
#ifdef WLED_DEBUG
|
||||
unsigned long start = millis();
|
||||
// unsigned long start = millis();
|
||||
uint64_t start = esp_timer_get_time();
|
||||
#endif
|
||||
|
||||
if (audioSource) audioSource->getSamples(vReal, samplesFFT);
|
||||
|
||||
#ifdef WLED_DEBUG
|
||||
sampleTime = ((millis() - start)*3 + sampleTime*7)/10; // smooth
|
||||
//sampleTime = ((millis() - start)*3 + sampleTime*7)/10; // smooth
|
||||
if (start < esp_timer_get_time()) { // filter out overflows
|
||||
unsigned long sampleTimeInMillis = (esp_timer_get_time() - start +500ULL) / 1000ULL; // "+500" to ensure proper rounding
|
||||
sampleTime = (sampleTimeInMillis*3 + sampleTime*7)/10; // smooth
|
||||
}
|
||||
#endif
|
||||
|
||||
// old code - Last sample in vReal is our current mic sample
|
||||
@ -170,6 +200,12 @@ void FFTcode(void * parameter)
|
||||
micDataSm = (uint16_t)maxSample1;
|
||||
micDataReal = maxSample1;
|
||||
|
||||
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
|
||||
FFT.dcRemoval(); // remove DC offset
|
||||
FFT.windowing( FFTWindow::Flat_top, FFTDirection::Forward); // Weigh data
|
||||
FFT.compute( FFTDirection::Forward ); // Compute FFT
|
||||
FFT.complexToMagnitude(); // Compute magnitudes
|
||||
#else
|
||||
FFT.DCRemoval(); // let FFT lib remove DC component, so we don't need to care about this in getSamples()
|
||||
|
||||
//FFT.Windowing( FFT_WIN_TYP_HAMMING, FFT_FORWARD ); // Weigh data - standard Hamming window
|
||||
@ -178,7 +214,7 @@ void FFTcode(void * parameter)
|
||||
FFT.Windowing( FFT_WIN_TYP_FLT_TOP, FFT_FORWARD ); // Flat Top Window - better amplitude accuracy
|
||||
FFT.Compute( FFT_FORWARD ); // Compute FFT
|
||||
FFT.ComplexToMagnitude(); // Compute magnitudes
|
||||
|
||||
#endif
|
||||
//
// vReal[3 .. 255] contain useful data, each a 20Hz interval (60Hz - 5120Hz).
// There could be interesting data at bins 0 to 2, but there are too many artifacts.
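// The 20 Hz figure follows from bin width = SAMPLE_RATE / samplesFFT: assuming the usual 512-sample FFT
// (samplesFFT itself is defined outside this hunk), 10240 Hz / 512 = 20 Hz per bin, which is where the
// 60 Hz - 5120 Hz span for bins 3..255 comes from. At the experimental 20480 Hz rate the same arithmetic
// gives 40 Hz per bin, so these ranges would double unless the bin mapping is adjusted.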
@ -212,7 +248,11 @@ void FFTcode(void * parameter)
|
||||
xtemp[23] = vReal[samplesFFT-1]; vReal[samplesFFT-1] = 0.0f;
|
||||
#endif
|
||||
|
||||
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
|
||||
FFT.majorPeak(FFT_MajorPeak, FFT_Magnitude); // let the effects know which freq was most dominant
|
||||
#else
|
||||
FFT.MajorPeak(&FFT_MajorPeak, &FFT_Magnitude); // let the effects know which freq was most dominant
|
||||
#endif
|
||||
|
||||
#ifdef MAJORPEAK_SUPPRESS_NOISE
|
||||
// dirty hack: limit suppressed channel intensities to FFT_Magnitude
|
||||
@ -297,7 +337,11 @@ void FFTcode(void * parameter)
|
||||
micDataReal = maxSample2;
|
||||
|
||||
#ifdef WLED_DEBUG
|
||||
fftTime = ((millis() - start)*3 + fftTime*7)/10;
|
||||
//fftTime = ((millis() - start)*3 + fftTime*7)/10;
|
||||
if (start < esp_timer_get_time()) { // filter out overflows
|
||||
unsigned long fftTimeInMillis = ((esp_timer_get_time() - start) +500ULL) / 1000ULL; // "+500" to ensure proper rounding
|
||||
fftTime = (fftTimeInMillis*3 + fftTime*7)/10; // smooth
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef SR_DEBUG
|
||||
@ -322,7 +366,7 @@ class AudioReactive : public Usermod {
|
||||
int8_t audioPin = AUDIOPIN;
|
||||
#endif
|
||||
#ifndef DMTYPE // I2S mic type
|
||||
uint8_t dmType = 0; // none/disabled/analog
|
||||
uint8_t dmType = 1; // 0=none/disabled/analog; 1=generic I2S
|
||||
#else
|
||||
uint8_t dmType = DMTYPE;
|
||||
#endif
|
||||
@ -359,9 +403,8 @@ class AudioReactive : public Usermod {

struct audioSyncPacket {
char header[6];
uint8_t myVals[32]; // 32 Bytes
int sampleAgc; // 04 Bytes
int sample; // 04 Bytes
int sampleRaw; // 04 Bytes
float sampleAvg; // 04 Bytes
bool samplePeak; // 01 Bytes
uint8_t fftResult[16]; // 16 Bytes
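This struct is copied verbatim into a UDP packet, and the receiver further below only accepts packets whose size equals sizeof(audioSyncPacket), so dropping the 32-byte myVals field silently breaks sync with peers still running the old layout. A hedged guard one could add (the byte count only covers the fields visible in this hunk, so it is a lower bound rather than the exact size):

    // hypothetical guard, not part of this commit
    static_assert(sizeof(audioSyncPacket) >= 6 + 4 + 4 + 4 + 1 + 16,
                  "audioSyncPacket layout changed - keep senders and receivers on the same build");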
@ -379,10 +422,9 @@ class AudioReactive : public Usermod {
|
||||
// variables used in effects
|
||||
uint8_t maxVol = 10; // Reasonable value for constant volume for 'peak detector', as it won't always trigger
|
||||
uint8_t binNum = 8; // Used to select the bin for FFT based beat detection.
|
||||
uint8_t myVals[32]; // Used to store a pile of samples because WLED frame rate and WLED sample rate are not synchronized. Frame rate is too low.
|
||||
bool samplePeak = 0; // Boolean flag for peak. Responding routine must reset this flag
|
||||
int16_t sample; // either sampleRaw or rawSampleAgc depending on soundAgc
|
||||
float sampleSmth; // either sampleAvg or sampleAgc depending on soundAgc; smoothed sample
|
||||
float volumeSmth; // either sampleAvg or sampleAgc depending on soundAgc; smoothed sample
|
||||
int16_t volumeRaw; // either sampleRaw or rawSampleAgc depending on soundAgc
|
||||
|
||||
#ifdef MIC_SAMPLING_LOG
|
||||
uint8_t targetAgc = 60; // This is our setPoint at 20% of max for the adjusted output (used only in logAudio())
|
||||
@ -390,13 +432,13 @@ class AudioReactive : public Usermod {
|
||||
bool udpSamplePeak = 0; // Boolean flag for peak. Set at the same time as samplePeak, but reset by transmitAudioData
int16_t micIn = 0; // Current sample starts with negative values and large values, which is why it's 16 bit signed
|
||||
int16_t sampleRaw; // Current sample. Must only be updated ONCE!!! (amplified mic value by sampleGain and inputLevel; smoothed over 16 samples)
|
||||
float sampleMax = 0.0f; // Max sample over a few seconds. Needed for AGC controller.
double sampleMax = 0.0; // Max sample over a few seconds. Needed for AGC controller.
float sampleReal = 0.0f; // "sampleRaw" as float, to provide bits that are lost otherwise (before amplification by sampleGain or inputLevel). Needed for AGC.
|
||||
float sampleAvg = 0.0f; // Smoothed Average sampleRaw
|
||||
float sampleAgc = 0.0f; // Our AGC sample
|
||||
int16_t rawSampleAgc = 0; // Our AGC sample - raw
|
||||
uint32_t timeOfPeak = 0;
|
||||
uint32_t lastTime = 0;
|
||||
unsigned long lastTime = 0; // last time of running UDP Microphone Sync
|
||||
float micLev = 0.0f; // Used to convert returned value to have '0' as minimum. A leveller
|
||||
float expAdjF = 0.0f; // Used for exponential filter.
|
||||
|
||||
@ -406,7 +448,7 @@ class AudioReactive : public Usermod {
|
||||
// used for AGC
|
||||
uint8_t lastMode = 0; // last known effect mode
|
||||
int last_soundAgc = -1;
|
||||
float control_integrated = 0.0f; // persistent across calls to agcAvg(); "integrator control" = accumulated error
|
||||
double control_integrated = 0.0; // persistent across calls to agcAvg(); "integrator control" = accumulated error
|
||||
unsigned long last_update_time = 0;
|
||||
unsigned long last_kick_time = 0;
|
||||
uint8_t last_user_inputLevel = 0;
|
||||
@ -419,6 +461,7 @@ class AudioReactive : public Usermod {
|
||||
static const char _digitalmic[];
|
||||
static const char UDP_SYNC_HEADER[];
|
||||
|
||||
float my_magnitude;
|
||||
|
||||
// private methods
|
||||
void logAudio()
|
||||
@ -526,7 +569,7 @@ class AudioReactive : public Usermod {
|
||||
float control_error; // "control error" input for PI control
|
||||
|
||||
if (last_soundAgc != soundAgc)
|
||||
control_integrated = 0.0f; // new preset - reset integrator
|
||||
control_integrated = 0.0; // new preset - reset integrator
|
||||
|
||||
// For PI controller, we need to have a constant "frequency"
|
||||
// so let's make sure that the control loop is not running at insane speed
|
||||
@ -540,8 +583,8 @@ class AudioReactive : public Usermod {
|
||||
//multAgcTemp = multAgc; // keep old control value (no change)
|
||||
tmpAgc = 0;
|
||||
// we need to "spin down" the integrated error buffer
if (fabs(control_integrated) < 0.01f) control_integrated = 0.0f;
|
||||
else control_integrated *= 0.91f;
|
||||
if (fabs(control_integrated) < 0.01) control_integrated = 0.0;
|
||||
else control_integrated *= 0.91;
|
||||
} else {
|
||||
// compute new setpoint
|
||||
if (tmpAgc <= agcTarget0Up[AGC_preset])
|
||||
@ -559,9 +602,9 @@ class AudioReactive : public Usermod {
|
||||
|
||||
if (((multAgcTemp > 0.085f) && (multAgcTemp < 6.5f)) //integrator anti-windup by clamping
|
||||
&& (multAgc*sampleMax < agcZoneStop[AGC_preset])) //integrator ceiling (>140% of max)
|
||||
control_integrated += control_error * 0.002f * 0.25f; // 2ms = integration time; 0.25 for damping
control_integrated += control_error * 0.002 * 0.25; // 2ms = integration time; 0.25 for damping
else
|
||||
control_integrated *= 0.9f; // spin down that beasty integrator
|
||||
control_integrated *= 0.9; // spin down that beasty integrator
|
||||
|
||||
// apply PI Control
|
||||
tmpAgc = sampleReal * lastMultAgc; // check "zone" of the signal using previous gain
|
||||
@ -599,6 +642,14 @@ class AudioReactive : public Usermod {
|
||||
//if (userVar0 > 255) userVar0 = 255;
|
||||
|
||||
last_soundAgc = soundAgc;
|
||||
|
||||
volumeSmth = (soundAgc) ? sampleAgc:sampleAvg;
|
||||
volumeRaw = (soundAgc) ? rawSampleAgc : sampleRaw;
|
||||
|
||||
my_magnitude = FFT_Magnitude; // / 16.0f, 8.0f, 4.0f done in effects
|
||||
if (soundAgc) my_magnitude *= multAgc;
|
||||
if (volumeSmth < 1 ) my_magnitude = 0.001f; // noise gate closed - mute
|
||||
|
||||
} // agcAvg()
|
||||
|
||||
|
||||
@ -698,12 +749,8 @@ class AudioReactive : public Usermod {

audioSyncPacket transmitData;
strncpy_P(transmitData.header, PSTR(UDP_SYNC_HEADER), 6);

for (int i = 0; i < 32; i++) {
transmitData.myVals[i] = myVals[i];
}

transmitData.sampleAgc = sampleAgc;
transmitData.sample = sampleRaw;
transmitData.sampleRaw = sampleRaw;
transmitData.sampleAvg = sampleAvg;
transmitData.samplePeak = udpSamplePeak;
udpSamplePeak = 0; // Reset udpSamplePeak after we've transmitted it
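The hunk ends before the actual send; with the usual Arduino WiFiUDP API the filled packet would typically go out along these lines (the socket object and port/address names are assumptions, not taken from this diff):

    // hypothetical continuation of transmitAudioData()
    fftUdp.beginPacket(targetIp, audioSyncPort);                                  // names assumed
    fftUdp.write(reinterpret_cast<uint8_t*>(&transmitData), sizeof(transmitData));
    fftUdp.endPacket();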
@ -742,11 +789,9 @@ class AudioReactive : public Usermod {

if (packetSize == sizeof(audioSyncPacket) && !(isValidUdpSyncVersion((const char *)fftBuff))) {
audioSyncPacket *receivedPacket = reinterpret_cast<audioSyncPacket*>(fftBuff);

for (int i = 0; i < 32; i++) myVals[i] = receivedPacket->myVals[i];

sampleAgc = receivedPacket->sampleAgc;
rawSampleAgc = receivedPacket->sampleAgc;
sampleRaw = receivedPacket->sample;
sampleRaw = receivedPacket->sampleRaw;
sampleAvg = receivedPacket->sampleAvg;

// Only change samplePeak IF it's currently false.
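UDP_SYNC_HEADER acts as a protocol/version tag: the sender writes it into header[6] with strncpy_P above, and isValidUdpSyncVersion() presumably compares the first six bytes of an incoming packet against it. A minimal sketch of such a check (hypothetical helper, not the one from this commit):

    // hypothetical header check; 6 == sizeof(audioSyncPacket::header)
    static bool packetHeaderMatches(const char *buf) {
      return strncmp_P(buf, PSTR(UDP_SYNC_HEADER), 6) == 0;
    }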
@ -774,49 +819,30 @@ class AudioReactive : public Usermod {
|
||||
*/
|
||||
void setup()
|
||||
{
|
||||
disableSoundProcessing = true; // just to be sure
|
||||
if (!initDone) {
|
||||
// usermod exchangeable data
|
||||
// we will assign all usermod exportable data here as pointers to original variables or arrays and allocate memory for pointers
|
||||
um_data = new um_data_t;
|
||||
um_data->u_size = 18;
|
||||
um_data->u_size = 8;
|
||||
um_data->u_type = new um_types_t[um_data->u_size];
|
||||
um_data->u_data = new void*[um_data->u_size];
|
||||
um_data->u_data[ 0] = &sampleAvg; //*used (2D Swirl, 2D Waverly, Gravcenter, Gravcentric, Gravimeter, Midnoise, Noisefire, Noisemeter, Plasmoid, Binmap, Freqmap, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall)
|
||||
um_data->u_data[0] = &volumeSmth; //*used (New)
|
||||
um_data->u_type[0] = UMT_FLOAT;
|
||||
um_data->u_data[ 1] = &soundAgc; //*used (2D Swirl, 2D Waverly, Gravcenter, Gravcentric, Gravimeter, Matripix, Midnoise, Noisefire, Noisemeter, Pixelwave, Plasmoid, Puddles, Binmap, Freqmap, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall)
|
||||
um_data->u_type[ 1] = UMT_BYTE;
|
||||
um_data->u_data[ 2] = &sampleAgc; //*used (can be calculated as: sampleReal * multAgc) (..., Juggles, ..., Pixels, Puddlepeak, Freqmatrix)
|
||||
um_data->u_type[ 2] = UMT_FLOAT;
|
||||
um_data->u_data[ 3] = &sampleRaw; //*used (Matripix, Noisemeter, Pixelwave, Puddles, 2D Swirl, for debugging Gravimeter)
|
||||
um_data->u_type[ 3] = UMT_INT16;
|
||||
um_data->u_data[ 4] = &rawSampleAgc; //*used (Matripix, Noisemeter, Pixelwave, Puddles, 2D Swirl)
|
||||
um_data->u_type[ 4] = UMT_INT16;
|
||||
um_data->u_data[ 5] = &samplePeak; //*used (Puddlepeak, Ripplepeak, Waterfall)
|
||||
um_data->u_type[ 5] = UMT_BYTE;
|
||||
um_data->u_data[ 6] = &FFT_MajorPeak; //*used (Ripplepeak, Freqmap, Freqmatrix, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall)
|
||||
um_data->u_type[ 6] = UMT_FLOAT;
|
||||
um_data->u_data[ 7] = &FFT_Magnitude; //*used (Binmap, Freqmap, Freqpixels, Rocktaves, Waterfall)
|
||||
um_data->u_type[ 7] = UMT_FLOAT;
|
||||
um_data->u_data[ 8] = fftResult; //*used (Blurz, DJ Light, Noisemove, GEQ_base, 2D Funky Plank, Akemi)
|
||||
um_data->u_type[ 8] = UMT_BYTE_ARR;
|
||||
um_data->u_data[ 9] = &maxVol; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
|
||||
um_data->u_type[ 9] = UMT_BYTE;
|
||||
um_data->u_data[10] = &binNum; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
|
||||
um_data->u_type[10] = UMT_BYTE;
|
||||
um_data->u_data[11] = &multAgc; //*used (for debugging) (Gravimeter, Binmap, Freqmap, Freqpixels, Rocktaves, Waterfall,)
|
||||
um_data->u_type[11] = UMT_FLOAT;
|
||||
um_data->u_data[12] = &sampleReal; //*used (for debugging) (Gravimeter)
|
||||
um_data->u_type[12] = UMT_FLOAT;
|
||||
um_data->u_data[13] = &sampleGain; //*used (for debugging) (Gravimeter, Binmap)
|
||||
um_data->u_type[13] = UMT_FLOAT;
|
||||
um_data->u_data[14] = myVals; //*used (only once, Pixels)
|
||||
um_data->u_type[14] = UMT_UINT16_ARR;
|
||||
um_data->u_data[15] = &soundSquelch; //*used (for debugging) (only once, Binmap)
|
||||
um_data->u_type[15] = UMT_BYTE;
|
||||
um_data->u_data[16] = fftBin; //*used (for debugging) (only once, Binmap)
|
||||
um_data->u_type[16] = UMT_FLOAT_ARR;
|
||||
um_data->u_data[17] = &inputLevel; // global UI element!!! (Gravimeter, Binmap)
|
||||
um_data->u_type[17] = UMT_BYTE;
|
||||
um_data->u_data[1] = &volumeRaw; // used (New)
|
||||
um_data->u_type[1] = UMT_UINT16;
|
||||
um_data->u_data[2] = fftResult; //*used (Blurz, DJ Light, Noisemove, GEQ_base, 2D Funky Plank, Akemi)
|
||||
um_data->u_type[2] = UMT_BYTE_ARR;
|
||||
um_data->u_data[3] = &samplePeak; //*used (Puddlepeak, Ripplepeak, Waterfall)
|
||||
um_data->u_type[3] = UMT_BYTE;
|
||||
um_data->u_data[4] = &FFT_MajorPeak; //*used (Ripplepeak, Freqmap, Freqmatrix, Freqpixels, Freqwave, Gravfreq, Rocktaves, Waterfall)
|
||||
um_data->u_type[4] = UMT_FLOAT;
|
||||
um_data->u_data[5] = &my_magnitude; // used (New)
|
||||
um_data->u_type[5] = UMT_FLOAT;
|
||||
um_data->u_data[6] = &maxVol; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
|
||||
um_data->u_type[6] = UMT_BYTE;
|
||||
um_data->u_data[7] = &binNum; // assigned in effect function from UI element!!! (Puddlepeak, Ripplepeak, Waterfall)
|
||||
um_data->u_type[7] = UMT_BYTE;
|
||||
}
|
||||
|
||||
// Reset I2S peripheral for good measure
|
||||
@ -869,6 +895,7 @@ class AudioReactive : public Usermod {
|
||||
|
||||
if (!audioSource) enabled = false; // audio failed to initialise
|
||||
if (enabled) onUpdateBegin(false); // create FFT task
|
||||
if (enabled) disableSoundProcessing = false;
|
||||
|
||||
initDone = true;
|
||||
}
|
||||
@ -902,18 +929,69 @@ class AudioReactive : public Usermod {
|
||||
*/
|
||||
void loop()
|
||||
{
|
||||
if (!enabled || strip.isUpdating()) return;
|
||||
static unsigned long lastUMRun = millis();
|
||||
|
||||
if (!(audioSyncEnabled & 0x02)) { // Only run the sampling code IF we're not in Receive mode
|
||||
if (!enabled) {
|
||||
disableSoundProcessing = true; // keep processing suspended (FFT task)
|
||||
lastUMRun = millis(); // update time keeping
|
||||
return;
|
||||
}
|
||||
// We cannot wait indefinitely before processing audio data
|
||||
//if (!enabled || strip.isUpdating()) return;
|
||||
if (strip.isUpdating() && (millis() - lastUMRun < 12)) return; // be nice, but not too nice
|
||||
|
||||
// suspend local sound processing when "real time mode" is active (E131, UDP, ADALIGHT, ARTNET)
if ( (realtimeOverride == REALTIME_OVERRIDE_NONE) // please add other overrides here if needed
&&( (realtimeMode == REALTIME_MODE_GENERIC)
||(realtimeMode == REALTIME_MODE_E131)
||(realtimeMode == REALTIME_MODE_UDP)
||(realtimeMode == REALTIME_MODE_ADALIGHT)
||(realtimeMode == REALTIME_MODE_ARTNET) ) ) // please add other modes here if needed
{
|
||||
#ifdef WLED_DEBUG
|
||||
if ((disableSoundProcessing == false) && (audioSyncEnabled == 0)) { // we just switched to "disabled"
|
||||
DEBUG_PRINTLN("[AR userLoop] realtime mode active - audio processing suspended.");
|
||||
DEBUG_PRINTF( " RealtimeMode = %d; RealtimeOverride = %d\n", int(realtimeMode), int(realtimeOverride));
|
||||
}
|
||||
#endif
|
||||
disableSoundProcessing = true;
|
||||
} else {
|
||||
#ifdef WLED_DEBUG
|
||||
if ((disableSoundProcessing == true) && (audioSyncEnabled == 0)) { // we just switched to "disabled"
|
||||
DEBUG_PRINTLN("[AR userLoop] realtime mode ended - audio processing resumed.");
|
||||
DEBUG_PRINTF( " RealtimeMode = %d; RealtimeOverride = %d\n", int(realtimeMode), int(realtimeOverride));
|
||||
}
|
||||
#endif
|
||||
if ((disableSoundProcessing == true) && (audioSyncEnabled == 0)) lastUMRun = millis(); // just left "realtime mode" - update timekeeping
|
||||
disableSoundProcessing = false;
|
||||
}
|
||||
|
||||
if (audioSyncEnabled & 0x02) disableSoundProcessing = true; // make sure everything is disabled IF in audio Receive mode
|
||||
if (audioSyncEnabled & 0x01) disableSoundProcessing = false; // keep running audio IF we're in audio Transmit mode
|
||||
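// Taken together, the realtime check and the two sync-mode lines above amount to this decision
// (condensed restatement for clarity, not code from this commit):
//   bool realtimeActive = (realtimeOverride == REALTIME_OVERRIDE_NONE) &&
//                         (realtimeMode == REALTIME_MODE_GENERIC || realtimeMode == REALTIME_MODE_E131 ||
//                          realtimeMode == REALTIME_MODE_UDP || realtimeMode == REALTIME_MODE_ADALIGHT ||
//                          realtimeMode == REALTIME_MODE_ARTNET);
//   disableSoundProcessing = realtimeActive;                      // realtime input suspends local sampling
//   if (audioSyncEnabled & 0x02) disableSoundProcessing = true;   // receive mode: always suspend
//   if (audioSyncEnabled & 0x01) disableSoundProcessing = false;  // transmit mode: keep sampling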
|
||||
if (!(audioSyncEnabled & 0x02) && !disableSoundProcessing) { // Only run the sampling code IF we're not in Receive mode or realtime mode
|
||||
bool agcEffect = false;
|
||||
|
||||
if (soundAgc > AGC_NUM_PRESETS) soundAgc = 0; // make sure that AGC preset is valid (to avoid array bounds violation)
|
||||
|
||||
int userloopDelay = int(millis() - lastUMRun); // how long since the last run? we might need to catch up to compensate for lost time
int samplesSkipped = 0;
if (userloopDelay > 12) samplesSkipped = (userloopDelay + 12) / 25; // every 25ms we get a new batch of samples
if (samplesSkipped > 100) samplesSkipped = 100; // don't be silly
#ifdef WLED_DEBUG
// complain when the audio userloop has been delayed for a long time. Currently we need userloop running between 500 and 1500 times per second.
if ((userloopDelay > 23) && !disableSoundProcessing && (audioSyncEnabled == 0)) {
// Expect lagging in soundreactive effects if you see the next messages !!!
DEBUG_PRINTF("[AR userLoop] hiccup detected -> was inactive for last %d millis!\n", userloopDelay);
if (samplesSkipped > 0) DEBUG_PRINTF("[AR userLoop] lost %d sample(s).\n", samplesSkipped);
}
#endif
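// For example: if the user loop was held up for userloopDelay = 60 ms, then
// samplesSkipped = (60 + 12) / 25 = 2 with integer division, i.e. roughly two 25 ms sample batches were missed.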
|
||||
lastUMRun = millis(); // update time keeping
|
||||
|
||||
getSample(); // Sample the microphone
|
||||
agcAvg(); // Calculate the PI-adjusted value as sampleAvg
|
||||
myVals[millis()%32] = sampleAgc; // filling values semi randomly (why?)
|
||||
|
||||
uint8_t knownMode = strip.getFirstSelectedSeg().mode; // 1st selected segment is more appropriate than main segment
|
||||
|
||||
if (lastMode != knownMode) { // only execute if mode changes
|
||||
@ -945,7 +1023,8 @@ class AudioReactive : public Usermod {
|
||||
// update user interfaces - restrict frequency to avoid flooding UI's with small changes
|
||||
if (( ((now_time - last_update_time > 3500) && (abs(new_user_inputLevel - inputLevel) > 2)) // small change - every 3.5 sec (max)
|
||||
||((now_time - last_update_time > 2200) && (abs(new_user_inputLevel - inputLevel) > 15)) // medium change
|
||||
|| ((now_time - last_update_time > 1200) && (abs(new_user_inputLevel - inputLevel) > 31))) ) // BIG change - every second
|
||||
||((now_time - last_update_time > 1200) && (abs(new_user_inputLevel - inputLevel) > 31))) // BIG change - every second
|
||||
&& !strip.isUpdating()) // don't interfere while strip is updating
|
||||
{
|
||||
inputLevel = new_user_inputLevel; // change of least 3 units -> update user variable
|
||||
updateInterfaces(CALL_MODE_WS_SEND); // is this the correct way to notify UIs ? Yes says blazoncek
|
||||
@ -987,6 +1066,7 @@ class AudioReactive : public Usermod {
|
||||
#ifdef WLED_DEBUG
|
||||
fftTime = sampleTime = 0;
|
||||
#endif
|
||||
disableSoundProcessing = true;
|
||||
if (init && FFT_Task) {
|
||||
vTaskSuspend(FFT_Task); // update is about to begin, disable task to prevent crash
|
||||
} else {
|
||||
@ -1004,6 +1084,7 @@ class AudioReactive : public Usermod {
|
||||
0 // Core where the task should run
|
||||
);
|
||||
}
|
||||
if (enabled) disableSoundProcessing = false;
|
||||
}
|
||||
|
||||
|
||||
|
@ -3,6 +3,16 @@

#include <Wire.h>
#include "wled.h"
#include <driver/i2s.h>
#include <driver/adc.h>
#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 4, 0)
#include <driver/adc_deprecated.h>
#include <driver/adc_types_deprecated.h>
#endif

//#include <driver/i2s_std.h>
//#include <driver/i2s_pdm.h>
//#include <driver/gpio.h>

/* ToDo: remove. ES7243 is controlled via compiler defines
|
||||
Until this configuration is moved to the webinterface
|
||||
@ -88,7 +98,11 @@ class I2SSource : public AudioSource {

.sample_rate = _sampleRate,
.bits_per_sample = I2S_SAMPLE_RESOLUTION,
.channel_format = I2S_CHANNEL_FMT_ONLY_LEFT,
#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)
.communication_format = i2s_comm_format_t(I2S_COMM_FORMAT_STAND_I2S),
#else
.communication_format = i2s_comm_format_t(I2S_COMM_FORMAT_I2S | I2S_COMM_FORMAT_I2S_MSB),
#endif
.intr_alloc_flags = ESP_INTR_FLAG_LEVEL1,
.dma_buf_count = 8,
.dma_buf_len = _blockSize
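The hunk ends inside the i2s_config_t initializer; bringing the interface up with such a config follows the standard legacy ESP-IDF I2S pattern sketched below (the config member and pin variable names are assumptions, this commit does not show that part):

    // hypothetical driver bring-up using the config above (legacy ESP-IDF I2S driver)
    if (i2s_driver_install(I2S_NUM_0, &_config, 0, nullptr) != ESP_OK) return;   // allocate DMA buffers
    i2s_pin_config_t pins = {};
    pins.bck_io_num   = i2sckPin;            // bit clock   (name assumed)
    pins.ws_io_num    = i2swsPin;            // word select (name assumed)
    pins.data_out_num = I2S_PIN_NO_CHANGE;   // microphone only, no TX
    pins.data_in_num  = i2ssdPin;            // serial data in (name assumed)
    i2s_set_pin(I2S_NUM_0, &pins);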
@ -307,7 +321,11 @@ class I2SAdcSource : public I2SSource {
|
||||
.sample_rate = _sampleRate,
|
||||
.bits_per_sample = I2S_SAMPLE_RESOLUTION,
|
||||
.channel_format = I2S_CHANNEL_FMT_ONLY_LEFT,
|
||||
#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 2, 0)
|
||||
.communication_format = i2s_comm_format_t(I2S_COMM_FORMAT_STAND_I2S),
|
||||
#else
|
||||
.communication_format = i2s_comm_format_t(I2S_COMM_FORMAT_I2S | I2S_COMM_FORMAT_I2S_MSB),
|
||||
#endif
|
||||
.intr_alloc_flags = ESP_INTR_FLAG_LEVEL2,
|
||||
.dma_buf_count = 8,
|
||||
.dma_buf_len = _blockSize
|

wled00/FX.cpp

@ -1976,7 +1976,7 @@ uint16_t mode_fire_2012()
|
||||
const uint16_t rows = strip.isMatrix ? SEGMENT.virtualHeight() : SEGMENT.virtualLength();
|
||||
|
||||
uint32_t it = strip.now >> 5; //div 32
|
||||
uint16_t q = cols>>2; // a quarter of flames
|
||||
//uint16_t q = cols>>2; // a quarter of flames
|
||||
|
||||
if (!SEGENV.allocateData(cols*rows)) return mode_static(); //allocation failed
|
||||
|
||||
@ -5917,39 +5917,25 @@ static const char _data_FX_MODE_2DDRIFTROSE[] PROGMEM = "Drift Rose@Fade,Blur;;;
|
||||
/* use the following code to pass AudioReactive usermod variables to effect
|
||||
|
||||
uint8_t *binNum = (uint8_t*)&SEGENV.aux1, *maxVol = (uint8_t*)(&SEGENV.aux1+1); // just in case assignment
|
||||
uint16_t sample = 0;
|
||||
uint8_t soundAgc = 0, soundSquelch = 10;
|
||||
bool samplePeak = false;
|
||||
float sampleAgc = 0.0f, sampleAgv = 0.0f, multAgc = 0.0f, sampleReal = 0.0f;
|
||||
float FFT_MajorPeak = 0.0, FFT_Magnitude = 0.0;
|
||||
float FFT_MajorPeak = 0.0;
|
||||
uint8_t *fftResult = nullptr;
|
||||
uint16_t *myVals = nullptr;
|
||||
float *fftBin = nullptr;
|
||||
um_data_t *um_data;
|
||||
if (usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
|
||||
sampleAvg = *(float*) um_data->u_data[ 0];
|
||||
soundAgc = *(uint8_t*) um_data->u_data[ 1];
|
||||
sampleAgc = *(float*) um_data->u_data[ 2];
|
||||
sample = *(uint16_t*)um_data->u_data[ 3];
|
||||
rawSampleAgc = *(uint16_t*)um_data->u_data[ 4];
|
||||
samplePeak = *(uint8_t*) um_data->u_data[ 5];
|
||||
FFT_MajorPeak = *(float*) um_data->u_data[ 6];
|
||||
FFT_Magnitude = *(float*) um_data->u_data[ 7];
|
||||
fftResult = (uint8_t*) um_data->u_data[ 8];
|
||||
maxVol = (uint8_t*) um_data->u_data[ 9]; // requires UI element (SEGMENT.customX?), changes source element
|
||||
binNum = (uint8_t*) um_data->u_data[10]; // requires UI element (SEGMENT.customX?), changes source element
|
||||
multAgc = *(float*) um_data->u_data[11];
|
||||
sampleReal = *(float*) um_data->u_data[12];
|
||||
sampleGain = *(float*) um_data->u_data[13];
|
||||
myVals = (uint16_t*)um_data->u_data[14];
|
||||
soundSquelch = *(uint8_t*) um_data->u_data[15];
|
||||
fftBin = (float*) um_data->u_data[16];
|
||||
inputLevel = (uint8_t*) um_data->u_data[17]; // requires UI element (SEGMENT.customX?), changes source element
|
||||
volumeSmth = *(float*) um_data->u_data[0];
|
||||
volumeRaw = *(float*) um_data->u_data[1];
|
||||
fftResult = (uint8_t*) um_data->u_data[2];
|
||||
samplePeak = *(uint8_t*) um_data->u_data[3];
|
||||
FFT_MajorPeak = *(float*) um_data->u_data[4];
|
||||
my_magnitude = *(float*) um_data->u_data[5];
|
||||
maxVol = (uint8_t*) um_data->u_data[6]; // requires UI element (SEGMENT.customX?), changes source element
|
||||
binNum = (uint8_t*) um_data->u_data[7]; // requires UI element (SEGMENT.customX?), changes source element
|
||||
fftBin = (float*) um_data->u_data[8];
|
||||
} else {
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
if (!myVals || !fftBin || ...) return mode_static();
|
||||
*/
|
||||
|
||||
|
||||
@ -5970,12 +5956,10 @@ uint16_t mode_ripplepeak(void) { // * Ripple peak. By Andrew Tuli
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
uint8_t samplePeak = *(uint8_t*)um_data->u_data[5];
|
||||
#ifdef ESP32
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[6];
|
||||
#endif
|
||||
uint8_t *maxVol = (uint8_t*)um_data->u_data[9];
|
||||
uint8_t *binNum = (uint8_t*)um_data->u_data[10];
|
||||
uint8_t samplePeak = *(uint8_t*)um_data->u_data[3];
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[4];
|
||||
uint8_t *maxVol = (uint8_t*)um_data->u_data[6];
|
||||
uint8_t *binNum = (uint8_t*)um_data->u_data[7];
|
||||
|
||||
// printUmData();
|
||||
|
||||
@ -6062,21 +6046,17 @@ uint16_t mode_2DSwirl(void) {
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
|
||||
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
|
||||
float volumeSmth = *(float*) um_data->u_data[0]; //ewowi: use instead of sampleAvg???
|
||||
int16_t volumeRaw = *(int16_t*) um_data->u_data[1];
|
||||
|
||||
// printUmData();
|
||||
|
||||
float tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw;
|
||||
|
||||
leds[XY( i, j)] += ColorFromPalette(SEGPALETTE, (ms / 11 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 11, 200, 255);
|
||||
leds[XY( j, i)] += ColorFromPalette(SEGPALETTE, (ms / 13 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 13, 200, 255);
|
||||
leds[XY(ni, nj)] += ColorFromPalette(SEGPALETTE, (ms / 17 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 17, 200, 255);
|
||||
leds[XY(nj, ni)] += ColorFromPalette(SEGPALETTE, (ms / 29 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 29, 200, 255);
|
||||
leds[XY( i, nj)] += ColorFromPalette(SEGPALETTE, (ms / 37 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 37, 200, 255);
|
||||
leds[XY(ni, j)] += ColorFromPalette(SEGPALETTE, (ms / 41 + sampleAvg*4), tmpSound * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 41, 200, 255);
|
||||
leds[XY( i, j)] += ColorFromPalette(SEGPALETTE, (ms / 11 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 11, 200, 255);
|
||||
leds[XY( j, i)] += ColorFromPalette(SEGPALETTE, (ms / 13 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 13, 200, 255);
|
||||
leds[XY(ni, nj)] += ColorFromPalette(SEGPALETTE, (ms / 17 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 17, 200, 255);
|
||||
leds[XY(nj, ni)] += ColorFromPalette(SEGPALETTE, (ms / 29 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 29, 200, 255);
|
||||
leds[XY( i, nj)] += ColorFromPalette(SEGPALETTE, (ms / 37 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 37, 200, 255);
|
||||
leds[XY(ni, j)] += ColorFromPalette(SEGPALETTE, (ms / 41 + volumeSmth*4), volumeRaw * SEGMENT.intensity / 64, LINEARBLEND); //CHSV( ms / 41, 200, 255);
|
||||
|
||||
SEGMENT.setPixels(leds);
|
||||
return FRAMETIME;
|
||||
@ -6107,9 +6087,7 @@ uint16_t mode_2DWaverly(void) {
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
SEGMENT.fadeToBlackBy(leds, SEGMENT.speed);
|
||||
|
||||
@ -6119,7 +6097,7 @@ uint16_t mode_2DWaverly(void) {
|
||||
// use audio if available
|
||||
if (um_data) {
|
||||
thisVal /= 32; // reduce intensity of inoise8()
|
||||
thisVal *= (soundAgc) ? sampleAgc : sampleAvg;
|
||||
thisVal *= volumeSmth;
|
||||
}
|
||||
uint16_t thisMax = map(thisVal, 0, 512, 0, rows);
|
||||
|
||||
@ -6162,15 +6140,11 @@ uint16_t mode_gravcenter(void) { // Gravcenter. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
|
||||
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
SEGMENT.fade_out(240);
|
||||
|
||||
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0f;
|
||||
float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0f;
|
||||
segmentSampleAvg *= 0.125; // divide by 8, to compensate for later "sensitivity" upscaling
|
||||
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0, 32, 0, (float)SEGLEN/2.0); // map to pixels available in current segment
|
||||
@ -6213,18 +6187,14 @@ uint16_t mode_gravcentric(void) { // Gravcentric. By Andrew
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
// printUmData();
|
||||
|
||||
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
|
||||
SEGMENT.fade_out(240);
|
||||
SEGMENT.fade_out(240); // twice? really?
|
||||
|
||||
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0;
|
||||
float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0;
|
||||
segmentSampleAvg *= 0.125f; // divide by 8, to compensate for later "sensitivity" upscaling
|
||||
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0.0f, 32.0f, 0.0f, (float)SEGLEN/2.0); // map to pixels available in current segment
@ -6267,15 +6237,11 @@ uint16_t mode_gravimeter(void) { // Gravmeter. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
|
||||
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
SEGMENT.fade_out(240);
|
||||
|
||||
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0;
|
||||
float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0;
|
||||
segmentSampleAvg *= 0.25; // divide by 4, to compensate for later "sensitivity" upscaling
|
||||
float mySampleAvg = mapf(segmentSampleAvg*2.0, 0, 64, 0, (SEGLEN-1)); // map to pixels available in current segment
@ -6311,10 +6277,10 @@ uint16_t mode_juggles(void) { // Juggles. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAgc = *(float*)um_data->u_data[2];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
SEGMENT.fade_out(224);
|
||||
uint16_t my_sampleAgc = fmax(fmin(sampleAgc, 255.0), 0);
|
||||
uint16_t my_sampleAgc = fmax(fmin(volumeSmth, 255.0), 0);
|
||||
|
||||
for (size_t i=0; i<SEGMENT.intensity/32+1U; i++) {
|
||||
SEGMENT.setPixelColor(beatsin16(SEGMENT.speed/4+i*2,0,SEGLEN-1), color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis()/4+i*2, false, PALETTE_SOLID_WRAP, 0), my_sampleAgc));
|
||||
@ -6339,9 +6305,7 @@ uint16_t mode_matripix(void) { // Matripix. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
|
||||
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
|
||||
int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
|
||||
|
||||
if (SEGENV.call == 0) SEGMENT.fill_solid(leds, CRGB::Black);
|
||||
|
||||
@ -6349,10 +6313,9 @@ uint16_t mode_matripix(void) { // Matripix. By Andrew Tuline.
|
||||
if(SEGENV.aux0 != secondHand) {
|
||||
SEGENV.aux0 = secondHand;
|
||||
|
||||
uint8_t tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw;
|
||||
int pixBri = tmpSound * SEGMENT.intensity / 64;
|
||||
leds[SEGLEN-1] = color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri);
|
||||
for (int i=0; i<SEGLEN-1; i++) leds[i] = leds[i+1];
|
||||
int pixBri = volumeRaw * SEGMENT.intensity / 64;
|
||||
for (uint16_t i=0; i<SEGLEN-1; i++) SEGMENT.setPixelColor(i, SEGMENT.getPixelColor(i+1)); // shift left
|
||||
SEGMENT.setPixelColor(SEGLEN-1, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri));
|
||||
}
|
||||
for (int i=0; i<SEGLEN; i++) SEGMENT.setPixelColor(i, leds[i]);
|
||||
|
||||
@ -6372,22 +6335,19 @@ uint16_t mode_midnoise(void) { // Midnoise. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
SEGMENT.fade_out(SEGMENT.speed);
|
||||
SEGMENT.fade_out(SEGMENT.speed);
|
||||
|
||||
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
float tmpSound2 = tmpSound * (float)SEGMENT.intensity / 256.0; // Too sensitive.
|
||||
float tmpSound2 = volumeSmth * (float)SEGMENT.intensity / 256.0; // Too sensitive.
|
||||
tmpSound2 *= (float)SEGMENT.intensity / 128.0; // Reduce sensitivity/length.
|
||||
int maxLen = mapf(tmpSound2, 0, 127, 0, SEGLEN/2);
|
||||
if (maxLen >SEGLEN/2) maxLen = SEGLEN/2;
|
||||
|
||||
for (int i=(SEGLEN/2-maxLen); i<(SEGLEN/2+maxLen); i++) {
|
||||
uint8_t index = inoise8(i*tmpSound+SEGENV.aux0, SEGENV.aux1+i*tmpSound); // Get a value from the noise function. I'm using both x and y axis.
|
||||
uint8_t index = inoise8(i*volumeSmth+SEGENV.aux0, SEGENV.aux1+i*volumeSmth); // Get a value from the noise function. I'm using both x and y axis.
|
||||
SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(index, false, PALETTE_SOLID_WRAP, 0));
|
||||
}
|
||||
|
||||
@ -6414,9 +6374,7 @@ uint16_t mode_noisefire(void) { // Noisefire. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
if (SEGENV.call == 0) SEGMENT.fill(BLACK);
|
||||
|
||||
@ -6424,9 +6382,8 @@ uint16_t mode_noisefire(void) { // Noisefire. By Andrew Tuline.
|
||||
uint16_t index = inoise8(i*SEGMENT.speed/64,millis()*SEGMENT.speed/64*SEGLEN/255); // X location is constant, but we move along the Y at the rate of millis(). By Andrew Tuline.
|
||||
index = (255 - i*256/SEGLEN) * index/(256-SEGMENT.intensity); // Now we need to scale index so that it gets blacker as we get close to one of the ends.
|
||||
// This is a simple y=mx+b equation that's been scaled. index/128 is another scaling.
|
||||
uint8_t tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
|
||||
CRGB color = ColorFromPalette(myPal, index, tmpSound*2, LINEARBLEND); // Use the my own palette.
|
||||
CRGB color = ColorFromPalette(myPal, index, volumeSmth*2, LINEARBLEND); // Use the my own palette.
|
||||
SEGMENT.setPixelColor(i, color);
|
||||
}
|
||||
|
||||
@ -6445,23 +6402,18 @@ uint16_t mode_noisemeter(void) { // Noisemeter. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
|
||||
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
|
||||
|
||||
uint8_t fadeRate = map(SEGMENT.speed,0,255,224,255);
|
||||
SEGMENT.fade_out(fadeRate);
|
||||
|
||||
float tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw;
|
||||
float tmpSound2 = tmpSound * 2.0 * (float)SEGMENT.intensity / 255.0;
|
||||
float tmpSound2 = volumeRaw * 2.0 * (float)SEGMENT.intensity / 255.0;
|
||||
int maxLen = mapf(tmpSound2, 0, 255, 0, SEGLEN); // map to pixels available in current segment // Still a bit too sensitive.
if (maxLen >SEGLEN) maxLen = SEGLEN;
|
||||
|
||||
tmpSound = soundAgc ? sampleAgc : sampleAvg; // now use smoothed value (sampleAvg or sampleAgc)
|
||||
for (int i=0; i<maxLen; i++) { // The louder the sound, the wider the soundbar. By Andrew Tuline.
|
||||
uint8_t index = inoise8(i*tmpSound+SEGENV.aux0, SEGENV.aux1+i*tmpSound); // Get a value from the noise function. I'm using both x and y axis.
|
||||
uint8_t index = inoise8(i*volumeSmth+SEGENV.aux0, SEGENV.aux1+i*volumeSmth); // Get a value from the noise function. I'm using both x and y axis.
|
||||
SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(index, false, PALETTE_SOLID_WRAP, 0));
|
||||
}
|
||||
|
||||
@ -6492,16 +6444,13 @@ uint16_t mode_pixelwave(void) { // Pixelwave. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
|
||||
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
|
||||
int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
|
||||
|
||||
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 16;
|
||||
if (SEGENV.aux0 != secondHand) {
|
||||
SEGENV.aux0 = secondHand;
|
||||
|
||||
uint8_t tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw;
|
||||
int pixBri = tmpSound * SEGMENT.intensity / 64;
|
||||
int pixBri = volumeRaw * SEGMENT.intensity / 64;
|
||||
|
||||
leds[SEGLEN/2] = color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(millis(), false, PALETTE_SOLID_WRAP, 0), pixBri);
|
||||
for (int i=SEGLEN-1; i>SEGLEN/2; i--) leds[i] = leds[i-1]; // Move to the right.
|
||||
@ -6534,9 +6483,7 @@ uint16_t mode_plasmoid(void) { // Plasmoid. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
SEGMENT.fadeToBlackBy(leds, 64);
|
||||
|
||||
@ -6549,8 +6496,7 @@ uint16_t mode_plasmoid(void) { // Plasmoid. By Andrew Tuline.
|
||||
thisbright += cos8(((i*(97 +(5*SEGMENT.speed/32)))+plasmoip->thatphase) & 0xFF)/2; // Let's munge the brightness a bit and animate it all with the phases.
|
||||
|
||||
uint8_t colorIndex=thisbright;
|
||||
int tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
if (tmpSound * SEGMENT.intensity / 64 < thisbright) {thisbright = 0;}
|
||||
if (volumeSmth * SEGMENT.intensity / 64 < thisbright) {thisbright = 0;}
|
||||
|
||||
leds[i] += color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(colorIndex, false, PALETTE_SOLID_WRAP, 0), thisbright);
|
||||
}
|
||||
@ -6576,10 +6522,10 @@ uint16_t mode_puddlepeak(void) { // Puddlepeak. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
uint8_t samplePeak = *(uint8_t*)um_data->u_data[5];
|
||||
uint8_t *maxVol = (uint8_t*)um_data->u_data[9];
|
||||
uint8_t *binNum = (uint8_t*)um_data->u_data[10];
|
||||
uint8_t samplePeak = *(uint8_t*)um_data->u_data[3];
|
||||
uint8_t *maxVol = (uint8_t*)um_data->u_data[6];
|
||||
uint8_t *binNum = (uint8_t*)um_data->u_data[7];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
if (SEGENV.call == 0) {
|
||||
SEGMENT.custom2 = *binNum;
|
||||
@ -6592,7 +6538,7 @@ uint16_t mode_puddlepeak(void) { // Puddlepeak. By Andrew Tuline.
|
||||
SEGMENT.fade_out(fadeVal);
|
||||
|
||||
if (samplePeak == 1) {
|
||||
size = sampleAgc * SEGMENT.intensity /256 /4 + 1; // Determine size of the flash based on the volume.
|
||||
size = volumeSmth * SEGMENT.intensity /256 /4 + 1; // Determine size of the flash based on the volume.
|
||||
if (pos+size>= SEGLEN) size = SEGLEN - pos;
|
||||
}
|
||||
|
||||
@ -6620,14 +6566,10 @@ uint16_t mode_puddles(void) { // Puddles. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
int16_t sampleRaw = *(int16_t*)um_data->u_data[3];
|
||||
int16_t rawSampleAgc = *(int16_t*)um_data->u_data[4];
|
||||
int16_t volumeRaw = *(int16_t*)um_data->u_data[1];
|
||||
|
||||
uint16_t tmpSound = (soundAgc) ? rawSampleAgc : sampleRaw;
|
||||
|
||||
if (tmpSound > 1) {
|
||||
size = tmpSound * SEGMENT.intensity /256 /8 + 1; // Determine size of the flash based on the volume.
|
||||
if (volumeRaw > 1) {
|
||||
size = volumeRaw * SEGMENT.intensity /256 /8 + 1; // Determine size of the flash based on the volume.
|
||||
if (pos+size >= SEGLEN) size = SEGLEN - pos;
|
||||
}
|
||||
|
||||
@ -6650,19 +6592,22 @@ static const char _data_FX_MODE_PUDDLES[] PROGMEM = "Puddles@Fade rate,Puddle si
|
||||
//////////////////////
|
||||
uint16_t mode_pixels(void) { // Pixels. By Andrew Tuline.
|
||||
|
||||
if (!SEGENV.allocateData(32*sizeof(uint8_t))) return mode_static(); //allocation failed
|
||||
uint8_t *myVals = reinterpret_cast<uint8_t*>(SEGENV.data); // Used to store a pile of samples because WLED frame rate and WLED sample rate are not synchronized. Frame rate is too low.
|
||||
|
||||
um_data_t *um_data;
|
||||
if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
uint16_t *myVals = (uint16_t*)um_data->u_data[14];
|
||||
if (!myVals) return mode_static();
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
myVals[millis()%32] = volumeSmth; // filling values semi randomly
|
||||
|
||||
SEGMENT.fade_out(64+(SEGMENT.speed>>1));
|
||||
|
||||
for (int i=0; i <SEGMENT.intensity/8; i++) {
|
||||
uint16_t segLoc = random16(SEGLEN); // 16 bit for larger strands of LED's.
|
||||
SEGMENT.setPixelColor(segLoc, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(myVals[i%32]+i*4, false, PALETTE_SOLID_WRAP, 0), sampleAgc));
|
||||
SEGMENT.setPixelColor(segLoc, color_blend(SEGCOLOR(1), SEGMENT.color_from_palette(myVals[i%32]+i*4, false, PALETTE_SOLID_WRAP, 0), volumeSmth));
|
||||
}
|
||||
|
||||
return FRAMETIME;
|
||||
@ -6689,7 +6634,7 @@ uint16_t mode_blurz(void) { // Blurz. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
uint8_t *fftResult = (uint8_t*)um_data->u_data[8];
|
||||
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
|
||||
if (!fftResult) return mode_static();
|
||||
|
||||
if (SEGENV.call == 0) {
|
||||
@ -6734,7 +6679,7 @@ uint16_t mode_DJLight(void) { // Written by ??? Adapted by Wil
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
uint8_t *fftResult = (uint8_t*)um_data->u_data[8];
|
||||
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
|
||||
if (!fftResult) return mode_static();
|
||||
|
||||
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 64;
|
||||
@ -6766,15 +6711,8 @@ uint16_t mode_freqmap(void) { // Map FFT_MajorPeak to SEGLEN.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[6];
|
||||
float FFT_Magnitude = *(float*) um_data->u_data[7];
|
||||
float multAgc = *(float*) um_data->u_data[11];
|
||||
|
||||
float my_magnitude = FFT_Magnitude / 4.0;
|
||||
if (soundAgc) my_magnitude *= multAgc;
|
||||
if (sampleAvg < 1 ) my_magnitude = 0.001; // noise gate closed - mute
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[4];
|
||||
float my_magnitude = *(float*) um_data->u_data[5] / 4.0f;
|
||||
|
||||
SEGMENT.fade_out(SEGMENT.speed);
|
||||
|
||||
@ -6805,15 +6743,15 @@ uint16_t mode_freqmatrix(void) { // Freqmatrix. By Andreas Plesch
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAgc = *(float*)um_data->u_data[2];
|
||||
float FFT_MajorPeak = *(float*)um_data->u_data[6];
|
||||
float FFT_MajorPeak = *(float*)um_data->u_data[4];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500 % 16;
|
||||
if(SEGENV.aux0 != secondHand) {
|
||||
SEGENV.aux0 = secondHand;
|
||||
|
||||
uint8_t sensitivity = map(SEGMENT.custom3, 0, 255, 1, 10);
|
||||
int pixVal = (sampleAgc * SEGMENT.intensity * sensitivity) / 256.0f;
|
||||
int pixVal = (volumeSmth * SEGMENT.intensity * sensitivity) / 256.0f;
|
||||
if (pixVal > 255) pixVal = 255;
|
||||
|
||||
float intensity = map(pixVal, 0, 255, 0, 100) / 100.0f; // make a brightness from the last avg
|
||||
@ -6860,15 +6798,8 @@ uint16_t mode_freqpixels(void) { // Freqpixel. By Andrew Tuline.
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[6];
|
||||
float FFT_Magnitude = *(float*) um_data->u_data[7];
|
||||
float multAgc = *(float*) um_data->u_data[11];
|
||||
|
||||
float my_magnitude = FFT_Magnitude / 16.0;
|
||||
if (soundAgc) my_magnitude *= multAgc;
|
||||
if (sampleAvg < 1 ) my_magnitude = 0.001; // noise gate closed - mute
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[4];
|
||||
float my_magnitude = *(float*) um_data->u_data[5] / 16.0f;
|
||||
|
||||
uint16_t fadeRate = 2*SEGMENT.speed - SEGMENT.speed*SEGMENT.speed/255; // Get to 255 as quick as you can.
|
||||
SEGMENT.fade_out(fadeRate);
|
||||
@ -6910,12 +6841,8 @@ uint16_t mode_freqwave(void) { // Freqwave. By Andreas Pleschun
|
||||
// add support for no audio data
|
||||
um_data = simulateSound(SEGMENT.soundSim);
|
||||
}
|
||||
float sampleAvg = *(float*) um_data->u_data[0];
|
||||
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
|
||||
float sampleAgc = *(float*) um_data->u_data[2];
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[6];
|
||||
|
||||
if (SEGENV.call == 0) SEGMENT.fill(BLACK);
|
||||
float FFT_MajorPeak = *(float*) um_data->u_data[4];
|
||||
float volumeSmth = *(float*) um_data->u_data[0];
|
||||
|
||||
uint8_t secondHand = micros()/(256-SEGMENT.speed)/500 % 16;
|
||||
if(SEGENV.aux0 != secondHand) {
|
||||
@ -6924,10 +6851,8 @@ uint16_t mode_freqwave(void) { // Freqwave. By Andreas Pleschun
|
||||
//uint8_t fade = SEGMENT.custom3;
|
||||
//uint8_t fadeval;
|
||||
|
||||
float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
|
||||
|
||||
float sensitivity = mapf(SEGMENT.custom3, 1, 255, 1, 10);
|
||||
float pixVal = tmpSound * (float)SEGMENT.intensity / 256.0f * sensitivity;
|
||||
float pixVal = volumeSmth * (float)SEGMENT.intensity / 256.0f * sensitivity;
|
||||
if (pixVal > 255) pixVal = 255;
|
||||
|
||||
float intensity = mapf(pixVal, 0, 255, 0, 100) / 100.0f; // make a brightness from the last avg
|
||||
@ -6977,15 +6902,12 @@ uint16_t mode_gravfreq(void) { // Gravfreq. By Andrew Tuline.
// add support for no audio data
um_data = simulateSound(SEGMENT.soundSim);
}
float sampleAvg = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float sampleAgc = *(float*) um_data->u_data[2];
float FFT_MajorPeak = *(float*) um_data->u_data[6];
float FFT_MajorPeak = *(float*) um_data->u_data[4];
float volumeSmth = *(float*) um_data->u_data[0];

SEGMENT.fade_out(240);

float tmpSound = (soundAgc) ? sampleAgc : sampleAvg;
float segmentSampleAvg = tmpSound * (float)SEGMENT.intensity / 255.0;
float segmentSampleAvg = volumeSmth * (float)SEGMENT.intensity / 255.0;
segmentSampleAvg *= 0.125; // divide by 8, to compensate for later "sensitivty" upscaling

float mySampleAvg = mapf(segmentSampleAvg*2.0, 0,32, 0, (float)SEGLEN/2.0); // map to pixels availeable in current segment
@ -7025,7 +6947,7 @@ uint16_t mode_noisemove(void) { // Noisemove. By: Andrew Tuli
// add support for no audio data
um_data = simulateSound(SEGMENT.soundSim);
}
uint8_t *fftResult = (uint8_t*)um_data->u_data[8];
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static();

SEGMENT.fade_out(224); // Just in case something doesn't get faded.
@ -7056,11 +6978,8 @@ uint16_t mode_rocktaves(void) { // Rocktaves. Same note from eac
// add support for no audio data
um_data = simulateSound(SEGMENT.soundSim);
}
float sampleAvg = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
float FFT_MajorPeak = *(float*) um_data->u_data[6];
float FFT_Magnitude = *(float*) um_data->u_data[7];
float multAgc = *(float*) um_data->u_data[11];
float FFT_MajorPeak = *(float*) um_data->u_data[4];
float my_magnitude = *(float*) um_data->u_data[5] / 16.0f;

SEGMENT.fadeToBlackBy(leds, 64); // Just in case something doesn't get faded.

@ -7068,10 +6987,6 @@ uint16_t mode_rocktaves(void) { // Rocktaves. Same note from eac
uint8_t octCount = 0; // Octave counter.
uint8_t volTemp = 0;

float my_magnitude = FFT_Magnitude / 16.0; // scale magnitude to be aligned with scaling of FFT bins
if (soundAgc) my_magnitude *= multAgc; // apply gain
if (sampleAvg < 1 ) my_magnitude = 0.001; // mute

if (my_magnitude > 32) volTemp = 255; // We need to squelch out the background noise.

while ( frTemp > 249 ) {
@ -7104,14 +7019,11 @@ uint16_t mode_waterfall(void) { // Waterfall. By: Andrew Tulin
// add support for no audio data
um_data = simulateSound(SEGMENT.soundSim);
}
float sampleAvg = *(float*) um_data->u_data[0];
uint8_t soundAgc = *(uint8_t*)um_data->u_data[1];
uint8_t samplePeak = *(uint8_t*)um_data->u_data[5];
float FFT_MajorPeak = *(float*) um_data->u_data[6];
float FFT_Magnitude = *(float*) um_data->u_data[7];
uint8_t *maxVol = (uint8_t*)um_data->u_data[9];
uint8_t *binNum = (uint8_t*)um_data->u_data[10];
float multAgc = *(float*) um_data->u_data[11];
uint8_t samplePeak = *(uint8_t*)um_data->u_data[3];
float FFT_MajorPeak = *(float*) um_data->u_data[4];
uint8_t *maxVol = (uint8_t*)um_data->u_data[6];
uint8_t *binNum = (uint8_t*)um_data->u_data[7];
float my_magnitude = *(float*) um_data->u_data[5] / 8.0f;

if (SEGENV.call == 0) {
SEGENV.aux0 = 255;
@ -7126,10 +7038,6 @@ uint16_t mode_waterfall(void) { // Waterfall. By: Andrew Tulin
if (SEGENV.aux0 != secondHand) { // Triggered millis timing.
SEGENV.aux0 = secondHand;

float my_magnitude = FFT_Magnitude / 8.0f;
if (soundAgc) my_magnitude *= multAgc;
if (sampleAvg < 1 ) my_magnitude = 0.001f; // noise gate closed - mute

uint8_t pixCol = (log10f((float)FFT_MajorPeak) - 2.26f) * 177; // log10 frequency range is from 2.26 to 3.7. Let's scale accordingly.

if (samplePeak) {
@ -7164,7 +7072,7 @@ uint16_t mode_2DGEQ(void) { // By Will Tatam. Code reduction by Ewoud Wijma.
// add support for no audio data
um_data = simulateSound(SEGMENT.soundSim);
}
uint8_t *fftResult = (uint8_t*)um_data->u_data[8];
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static();

if (SEGENV.call == 0) for (int i=0; i<cols; i++) previousBarHeight[i] = 0;
@ -7228,7 +7136,7 @@ uint16_t mode_2DFunkyPlank(void) { // Written by ??? Adapted by Wil
// add support for no audio data
um_data = simulateSound(SEGMENT.soundSim);
}
uint8_t *fftResult = (uint8_t*)um_data->u_data[8];
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
if (!fftResult) return mode_static();

uint8_t secondHand = micros()/(256-SEGMENT.speed)/500+1 % 64;
@ -7323,7 +7231,7 @@ uint16_t mode_2DAkemi(void) {
if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
um_data = simulateSound(SEGMENT.soundSim);
}
uint8_t *fftResult = (uint8_t*)um_data->u_data[8];
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
float base = fftResult[0]/255.0f;

//draw and color Akemi
wled00/html_other.h (1498 lines changed - diff suppressed because it is too large)
@ -8,7 +8,7 @@
// Autogenerated from wled00/data/style.css, do not edit!!
const uint16_t PAGE_settingsCss_length = 824;
const uint8_t PAGE_settingsCss[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xad, 0x55, 0x5d, 0x8b, 0x9c, 0x30,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xad, 0x55, 0x5d, 0x8b, 0x9c, 0x30,
0x14, 0xfd, 0x2b, 0x96, 0x61, 0x61, 0x0b, 0xa3, 0xa8, 0xa3, 0xb3, 0xd3, 0x48, 0xa1, 0xf4, 0xbd,
0x6f, 0xa5, 0x14, 0xca, 0x3e, 0x44, 0x73, 0x1d, 0xc3, 0xe4, 0x43, 0x92, 0xd8, 0x75, 0x2a, 0xfe,
0xf7, 0x26, 0x7e, 0xac, 0xce, 0xac, 0x6c, 0x5f, 0xca, 0xe0, 0xa0, 0xde, 0x98, 0x7b, 0xee, 0xb9,
@ -66,7 +66,7 @@ const uint8_t PAGE_settingsCss[] PROGMEM = {
// Autogenerated from wled00/data/settings.htm, do not edit!!
const uint16_t PAGE_settings_length = 985;
const uint8_t PAGE_settings[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xad, 0x56, 0x6d, 0x6f, 0xdb, 0x36,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xad, 0x56, 0x6d, 0x6f, 0xdb, 0x36,
0x10, 0xfe, 0xee, 0x5f, 0xc1, 0xb0, 0x58, 0x23, 0xa1, 0xb2, 0xec, 0x38, 0xc3, 0xb0, 0xc9, 0x96,
0x8b, 0x35, 0x2f, 0x9d, 0x87, 0x04, 0x0d, 0x90, 0xa4, 0xdd, 0x80, 0x7d, 0xa1, 0xc9, 0x93, 0xcc,
0x46, 0x22, 0x05, 0xf2, 0xe4, 0xc4, 0x73, 0xf3, 0xdf, 0x77, 0x94, 0x9d, 0xb7, 0x36, 0xd8, 0x8a,
@ -134,7 +134,7 @@ const uint8_t PAGE_settings[] PROGMEM = {
// Autogenerated from wled00/data/settings_wifi.htm, do not edit!!
const uint16_t PAGE_settings_wifi_length = 1557;
const uint8_t PAGE_settings_wifi[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xad, 0x57, 0xff, 0x4f, 0xdb, 0x38,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xad, 0x57, 0xff, 0x4f, 0xdb, 0x38,
0x14, 0xff, 0x3d, 0x7f, 0x85, 0xf1, 0x49, 0x53, 0xa3, 0x85, 0x94, 0xb6, 0xc7, 0x6e, 0x62, 0x49,
0x76, 0x5d, 0xdb, 0x0d, 0xee, 0x18, 0xeb, 0x29, 0x68, 0xe8, 0xa4, 0x93, 0x26, 0x37, 0x79, 0x6d,
0x3d, 0x9c, 0x38, 0x17, 0x3b, 0x2d, 0x88, 0xf1, 0xbf, 0xdf, 0xb3, 0x93, 0x96, 0x16, 0xe8, 0x36,
@ -238,7 +238,7 @@ const uint8_t PAGE_settings_wifi[] PROGMEM = {
// Autogenerated from wled00/data/settings_leds.htm, do not edit!!
const uint16_t PAGE_settings_leds_length = 7326;
const uint8_t PAGE_settings_leds[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xdd, 0x3c, 0xed, 0x76, 0xe2, 0xc6,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xdd, 0x3c, 0xed, 0x76, 0xe2, 0xc6,
0x92, 0xff, 0x79, 0x8a, 0x76, 0x27, 0x71, 0xa4, 0x8b, 0x0c, 0x12, 0x1f, 0x8e, 0x07, 0x10, 0xac,
0xf1, 0x78, 0x26, 0xbe, 0xd7, 0x8e, 0x7d, 0x8c, 0x93, 0xb9, 0x7b, 0x26, 0x73, 0x32, 0x42, 0x34,
0xa0, 0xb1, 0x90, 0x74, 0x25, 0x61, 0x9b, 0xb5, 0xd9, 0x67, 0xda, 0x67, 0xd8, 0x27, 0xdb, 0xaa,
@ -702,7 +702,7 @@ const uint8_t PAGE_settings_leds[] PROGMEM = {
// Autogenerated from wled00/data/settings_dmx.htm, do not edit!!
const uint16_t PAGE_settings_dmx_length = 1612;
const uint8_t PAGE_settings_dmx[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0x95, 0x57, 0xdb, 0x72, 0xdb, 0x36,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0x95, 0x57, 0xdb, 0x72, 0xdb, 0x36,
0x10, 0x7d, 0xd7, 0x57, 0x20, 0x78, 0x88, 0xc9, 0x31, 0x43, 0x4a, 0x4e, 0x95, 0x36, 0x32, 0x49,
0x37, 0x56, 0x5c, 0xdb, 0x1d, 0xdb, 0xf5, 0x44, 0x49, 0xd3, 0x4e, 0xd3, 0xe9, 0x40, 0xe4, 0x4a,
0x44, 0x4c, 0x02, 0x2c, 0x00, 0x4a, 0x76, 0x2e, 0xff, 0xde, 0x05, 0x48, 0x5d, 0xec, 0xd8, 0x69,
@ -809,7 +809,7 @@ const uint8_t PAGE_settings_dmx[] PROGMEM = {
// Autogenerated from wled00/data/settings_ui.htm, do not edit!!
const uint16_t PAGE_settings_ui_length = 3090;
const uint8_t PAGE_settings_ui[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xad, 0x59, 0x6b, 0x73, 0xda, 0x48,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xad, 0x59, 0x6b, 0x73, 0xda, 0x48,
0x16, 0xfd, 0xce, 0xaf, 0xe8, 0x74, 0x52, 0x1e, 0x54, 0x56, 0x04, 0x4e, 0x66, 0x6b, 0x13, 0x40,
0x78, 0x63, 0xc7, 0x93, 0x78, 0xca, 0xd9, 0x64, 0x83, 0xbd, 0x99, 0xad, 0xac, 0xcb, 0x23, 0xa4,
0x06, 0x3a, 0x16, 0x92, 0x46, 0xdd, 0x32, 0x66, 0x09, 0xff, 0x7d, 0xcf, 0xed, 0x96, 0x40, 0x60,
@ -1009,7 +1009,7 @@ const uint8_t PAGE_settings_ui[] PROGMEM = {
// Autogenerated from wled00/data/settings_sync.htm, do not edit!!
const uint16_t PAGE_settings_sync_length = 3153;
const uint8_t PAGE_settings_sync[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0x9d, 0x5a, 0x6d, 0x77, 0xda, 0xb8,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0x9d, 0x5a, 0x6d, 0x77, 0xda, 0xb8,
0x12, 0xfe, 0xee, 0x5f, 0xa1, 0xf8, 0xc3, 0x2e, 0x6c, 0x08, 0x18, 0x12, 0xd2, 0x94, 0x62, 0xf7,
0x86, 0x90, 0x26, 0xec, 0x36, 0x0d, 0x85, 0x64, 0x5f, 0xce, 0xb9, 0xe7, 0xec, 0x11, 0xb6, 0x00,
0x25, 0xb6, 0xe5, 0xb5, 0xe5, 0xbc, 0x9c, 0x6e, 0xff, 0xfb, 0x9d, 0x91, 0x6c, 0x03, 0x06, 0x02,
@ -1213,7 +1213,7 @@ const uint8_t PAGE_settings_sync[] PROGMEM = {
// Autogenerated from wled00/data/settings_time.htm, do not edit!!
const uint16_t PAGE_settings_time_length = 3302;
const uint8_t PAGE_settings_time[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xd5, 0x1a, 0x6b, 0x57, 0xdb, 0x3a,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xd5, 0x1a, 0x6b, 0x57, 0xdb, 0x3a,
0xf2, 0x7b, 0x7e, 0x85, 0x50, 0x7b, 0xb8, 0xf1, 0xc5, 0x79, 0x42, 0x5a, 0x48, 0x62, 0x77, 0x43,
0x48, 0x0b, 0x2d, 0x09, 0x9c, 0x26, 0xbd, 0xec, 0xf6, 0x71, 0x6e, 0x15, 0x5b, 0x49, 0x0c, 0x8e,
0xe4, 0xb5, 0x65, 0x02, 0x4b, 0xf9, 0xef, 0x3b, 0x92, 0x1c, 0xe7, 0x85, 0x81, 0xf6, 0xde, 0xfd,
@ -1426,7 +1426,7 @@ const uint8_t PAGE_settings_time[] PROGMEM = {
// Autogenerated from wled00/data/settings_sec.htm, do not edit!!
const uint16_t PAGE_settings_sec_length = 2406;
const uint8_t PAGE_settings_sec[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xa5, 0x58, 0x6d, 0x53, 0xdb, 0x48,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xa5, 0x58, 0x6d, 0x53, 0xdb, 0x48,
0x12, 0xfe, 0xee, 0x5f, 0x31, 0x4c, 0xaa, 0x58, 0xeb, 0x22, 0x2c, 0x43, 0x72, 0x5b, 0x09, 0x20,
0xe7, 0x20, 0x90, 0x0d, 0x57, 0x10, 0x28, 0x6c, 0x36, 0x77, 0x95, 0x4b, 0xa5, 0xc6, 0xd2, 0xd8,
0x9a, 0x58, 0xd6, 0x68, 0x67, 0x46, 0x38, 0xbe, 0xec, 0xfe, 0xf7, 0x7b, 0x7a, 0x24, 0xd9, 0x86,
@ -1583,7 +1583,7 @@ const uint8_t PAGE_settings_sec[] PROGMEM = {
// Autogenerated from wled00/data/settings_um.htm, do not edit!!
const uint16_t PAGE_settings_um_length = 2230;
const uint8_t PAGE_settings_um[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0xa5, 0x58, 0x6d, 0x53, 0xdb, 0x48,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0xa5, 0x58, 0x6d, 0x53, 0xdb, 0x48,
0x12, 0xfe, 0xce, 0xaf, 0x10, 0x13, 0x0a, 0xa4, 0xb2, 0x90, 0x4d, 0xb8, 0xdd, 0x4b, 0x6c, 0x8f,
0xd9, 0x90, 0x97, 0x0b, 0x57, 0x49, 0xa0, 0x8a, 0xec, 0x5e, 0x5d, 0x71, 0xd4, 0x22, 0x4b, 0x63,
0x7b, 0x82, 0x3c, 0xa3, 0x9a, 0x19, 0xf1, 0x72, 0xc6, 0xff, 0xfd, 0x9e, 0x1e, 0x49, 0xc6, 0x26,
@ -1729,7 +1729,7 @@ const uint8_t PAGE_settings_um[] PROGMEM = {
// Autogenerated from wled00/data/settings_2D.htm, do not edit!!
const uint16_t PAGE_settings_2D_length = 1751;
const uint8_t PAGE_settings_2D[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0x8d, 0x58, 0x6d, 0x73, 0xdb, 0x36,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0x8d, 0x58, 0x6d, 0x73, 0xdb, 0x36,
0x12, 0xfe, 0xce, 0x5f, 0x01, 0x63, 0x3a, 0x2d, 0xd9, 0x50, 0x94, 0xe4, 0xde, 0x75, 0x3a, 0x16,
0x49, 0x37, 0x6e, 0xdc, 0xda, 0x1d, 0x7b, 0xe2, 0x89, 0x72, 0xce, 0xdc, 0x5c, 0x3a, 0x29, 0x44,
0xae, 0x44, 0xc4, 0x24, 0xc0, 0x01, 0x40, 0xd9, 0xae, 0xe2, 0xff, 0x7e, 0x0b, 0x90, 0x7a, 0xb5,
@ -1845,7 +1845,7 @@ const uint8_t PAGE_settings_2D[] PROGMEM = {
// Autogenerated from wled00/data/settings_pin.htm, do not edit!!
const uint16_t PAGE_settings_pin_length = 471;
const uint8_t PAGE_settings_pin[] PROGMEM = {
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0a, 0x5d, 0x52, 0x4d, 0x6f, 0x13, 0x31,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x13, 0x5d, 0x52, 0x4d, 0x6f, 0x13, 0x31,
0x10, 0xbd, 0xef, 0xaf, 0x30, 0x73, 0x69, 0x82, 0x92, 0x6c, 0xa8, 0xa8, 0x04, 0xaa, 0xbd, 0x42,
0x81, 0x1e, 0xb8, 0x94, 0x48, 0xe5, 0x52, 0x55, 0x55, 0xe5, 0xd8, 0xb3, 0x89, 0x55, 0x7f, 0x2c,
0xb6, 0x37, 0x21, 0x54, 0xfc, 0x77, 0xc6, 0xbb, 0xa1, 0xa0, 0x5c, 0xd6, 0x7e, 0x33, 0xe3, 0x37,
@ -400,64 +400,42 @@ typedef enum UM_SoundSimulations {
// this is still work in progress
um_data_t* simulateSound(uint8_t simulationId)
{
static float sampleAvg;
static uint8_t soundAgc;
static float sampleAgc;
static int16_t sampleRaw;
static int16_t rawSampleAgc;
static uint8_t samplePeak;
static float FFT_MajorPeak;
static float FFT_Magnitude;
static uint8_t maxVol;
static uint8_t binNum;
static float multAgc;

float sampleGain;
uint8_t soundSquelch;
uint8_t inputLevel;
static float volumeSmth;
static uint16_t volumeRaw;
static float my_magnitude;

//arrays
uint8_t *fftResult;
uint8_t *myVals;
float *fftBin;

static um_data_t* um_data = nullptr;

if (!um_data) {
//claim storage for arrays
fftResult = (uint8_t *)malloc(sizeof(uint8_t) * 16);
myVals = (uint8_t *)malloc(sizeof(uint8_t) * 32);
fftBin = (float *)malloc(sizeof(float) * 256); // not used (for debugging purposes)

// initialize um_data pointer structure
// NOTE!!!
// This may change as AudioReactive usermod may change
um_data = new um_data_t;
um_data->u_size = 18;
um_data->u_size = 8;
um_data->u_type = new um_types_t[um_data->u_size];
um_data->u_data = new void*[um_data->u_size];
um_data->u_data[ 0] = &sampleAvg;
um_data->u_data[ 1] = &soundAgc;
um_data->u_data[ 2] = &sampleAgc;
um_data->u_data[ 3] = &sampleRaw;
um_data->u_data[ 4] = &rawSampleAgc;
um_data->u_data[ 5] = &samplePeak;
um_data->u_data[ 6] = &FFT_MajorPeak;
um_data->u_data[ 7] = &FFT_Magnitude;
um_data->u_data[ 8] = fftResult;
um_data->u_data[ 9] = &maxVol;
um_data->u_data[10] = &binNum;
um_data->u_data[11] = &multAgc;
um_data->u_data[14] = myVals; //*used (only once, Pixels)
um_data->u_data[13] = &sampleGain;
um_data->u_data[15] = &soundSquelch;
um_data->u_data[16] = fftBin; //only used in binmap
um_data->u_data[17] = &inputLevel;
um_data->u_data[0] = &volumeSmth;
um_data->u_data[1] = &volumeRaw;
um_data->u_data[2] = fftResult;
um_data->u_data[3] = &samplePeak;
um_data->u_data[4] = &FFT_MajorPeak;
um_data->u_data[5] = &my_magnitude;
um_data->u_data[6] = &maxVol;
um_data->u_data[7] = &binNum;
} else {
// get arrays from um_data
fftResult = (uint8_t*)um_data->u_data[8];
myVals = (uint8_t*)um_data->u_data[14];
fftBin = (float*)um_data->u_data[16];
fftResult = (uint8_t*)um_data->u_data[2];
}
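// resulting 8-slot u_data layout (matches the indices read by the effects earlier in this diff):
//   [0] &volumeSmth (float)        [4] &FFT_MajorPeak (float)
//   [1] &volumeRaw (uint16_t)      [5] &my_magnitude (float)
//   [2] fftResult (uint8_t[16])    [6] &maxVol (uint8_t)
//   [3] &samplePeak (uint8_t)      [7] &binNum (uint8_t)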

uint32_t ms = millis();
@ -468,36 +446,36 @@ um_data_t* simulateSound(uint8_t simulationId)
for (int i = 0; i<16; i++)
fftResult[i] = beatsin8(120 / (i+1), 0, 255);
// fftResult[i] = (beatsin8(120, 0, 255) + (256/16 * i)) % 256;
sampleAvg = fftResult[8];
volumeSmth = fftResult[8];
break;
case UMS_WeWillRockYou:
if (ms%2000 < 200) {
sampleAvg = random8(255);
volumeSmth = random8(255);
for (int i = 0; i<5; i++)
fftResult[i] = random8(255);
}
else if (ms%2000 < 400) {
sampleAvg = 0;
volumeSmth = 0;
for (int i = 0; i<16; i++)
fftResult[i] = 0;
}
else if (ms%2000 < 600) {
sampleAvg = random8(255);
volumeSmth = random8(255);
for (int i = 5; i<11; i++)
fftResult[i] = random8(255);
}
else if (ms%2000 < 800) {
sampleAvg = 0;
volumeSmth = 0;
for (int i = 0; i<16; i++)
fftResult[i] = 0;
}
else if (ms%2000 < 1000) {
sampleAvg = random8(255);
volumeSmth = random8(255);
for (int i = 11; i<16; i++)
fftResult[i] = random8(255);
}
else {
sampleAvg = 0;
volumeSmth = 0;
for (int i = 0; i<16; i++)
fftResult[i] = 0;
}
@ -505,33 +483,22 @@ um_data_t* simulateSound(uint8_t simulationId)
case UMS_10_3:
for (int i = 0; i<16; i++)
fftResult[i] = inoise8(beatsin8(90 / (i+1), 0, 200)*15 + (ms>>10), ms>>3);
sampleAvg = fftResult[8];
volumeSmth = fftResult[8];
break;
case UMS_14_3:
for (int i = 0; i<16; i++)
fftResult[i] = inoise8(beatsin8(120 / (i+1), 10, 30)*10 + (ms>>14), ms>>3);
sampleAvg = fftResult[8];
volumeSmth = fftResult[8];
break;
}

//derive other vars from sampleAvg

//sampleAvg = mapf(sampleAvg, 0, 255, 0, 255); // help me out here
soundAgc = 0; //only avg in simulations
sampleAgc = sampleAvg;
sampleRaw = sampleAvg;
sampleRaw = map(sampleRaw, 50, 190, 0, 224);
rawSampleAgc = sampleAvg;
samplePeak = random8() > 250;
FFT_MajorPeak = sampleAvg;
FFT_Magnitude = sampleAvg;
multAgc = sampleAvg;
myVals[millis()%32] = sampleAvg; // filling values semi randomly (why?)
sampleGain = 40;
soundSquelch = 10;
FFT_MajorPeak = volumeSmth;
maxVol = 10; // this gets feedback fro UI
binNum = 8; // this gets feedback fro UI
inputLevel = 128; // this gets feedback fro UI
volumeRaw = volumeSmth;
my_magnitude = 10000.0 / 8.0f; //no idea if 10000 is a good value for FFT_Magnitude ???
if (volumeSmth < 1 ) my_magnitude = 0.001f; // noise gate closed - mute

return um_data;
}
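For reference, a minimal sketch of how an effect consumes this data after the change, mirroring the read pattern used by the effects earlier in the diff; the effect name mode_audioExample and the final painting step are illustrative only and not part of this commit:

uint16_t mode_audioExample(void) {                        // hypothetical effect, for illustration
  um_data_t *um_data;
  if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
    // AudioReactive usermod not installed - fall back to simulated sound
    um_data = simulateSound(SEGMENT.soundSim);
  }
  float    volumeSmth    = *(float*)  um_data->u_data[0]; // smoothed volume
  uint8_t *fftResult     =  (uint8_t*)um_data->u_data[2]; // 16 FFT bins, 0..255
  float    FFT_MajorPeak = *(float*)  um_data->u_data[4]; // dominant frequency
  float    my_magnitude  = *(float*)  um_data->u_data[5]; // magnitude of that peak
  if (!fftResult) return mode_static();

  SEGMENT.fade_out(224);
  float pixVal = volumeSmth * (float)SEGMENT.intensity / 256.0f; // same scaling as the effects above
  if (pixVal > 255) pixVal = 255;
  // ... map pixVal / FFT_MajorPeak / my_magnitude onto the segment's pixels ...
  return FRAMETIME;
}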