Merge branch 'dev' of https://github.com/blazoncek/WLED into 0_14

- remove conditional audio compile
- Random Cycle bugfix
- AudioReactive updates
- global gamma flags
Blaž Kristan 2022-09-05 15:13:15 +02:00
commit 51d3268eed
19 changed files with 4170 additions and 4183 deletions


@ -24,7 +24,7 @@
// #define MIC_LOGGER // MIC sampling & sound input debugging (serial plotter)
// #define FFT_SAMPLING_LOG // FFT result debugging
// #define SR_DEBUG // generic SR DEBUG messages
// #define NO_MIC_LOGGER // exclude MIC_LOGGER from SR_DEBUG
#ifdef SR_DEBUG
#define DEBUGSR_PRINT(x) Serial.print(x)
@ -85,41 +85,51 @@ const float agcSampleSmooth[AGC_NUM_PRESETS] = { 1/12.f, 1/6.f, 1/16.f}; //
static AudioSource *audioSource = nullptr;
static volatile bool disableSoundProcessing = false; // if true, sound processing (FFT, filters, AGC) will be suspended. "volatile" as it's shared between tasks.
// audioreactive variables shared with FFT task
static float micDataReal = 0.0f; // MicIn data with full 24bit resolution - lowest 8bit after decimal point
static float sampleReal = 0.0f; // "sampleRaw" as float, to provide bits that are lost otherwise (before amplification by sampleGain or inputLevel). Needed for AGC.
static float multAgc = 1.0f; // sample * multAgc = sampleAgc. Our AGC multiplier
static float sampleAvg = 0.0f; // Smoothed Average sample - sampleAvg < 1 means "quiet" (simple noise gate)
// peak detection
static bool samplePeak = false; // Boolean flag for peak - used in effects. Responding routine may reset this flag. Auto-reset after strip.getMinShowDelay()
static uint8_t maxVol = 10; // Reasonable value for constant volume for 'peak detector', as it won't always trigger (deprecated)
static uint8_t binNum = 8; // Used to select the bin for FFT based beat detection (deprecated)
static bool udpSamplePeak = false; // Boolean flag for peak. Set at the same time as samplePeak, but reset by transmitAudioData
static unsigned long timeOfPeak = 0; // time of last sample peak detection.
static void detectSamplePeak(void); // peak detection function (needs scaled FFT results in vReal[])
static void autoResetPeak(void); // peak auto-reset function
static int16_t sampleRaw = 0; // Current sample. Must only be updated ONCE!!! (amplified mic value by sampleGain and inputLevel)
static int16_t rawSampleAgc = 0; // not smoothed AGC sample
static float sampleAvg = 0.0f; // Smoothed Average sampleRaw
static float sampleAgc = 0.0f; // Smoothed AGC sample
////////////////////
// Begin FFT Code //
////////////////////
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
// lib_deps += https://github.com/kosme/arduinoFFT#develop @ 1.9.2
#define FFT_SPEED_OVER_PRECISION // enables use of reciprocals (1/x etc), and a few other speedups
#define FFT_SQRT_APPROXIMATION // enables "quake3" style inverse sqrt
#define sqrt(x) sqrtf(x) // little hack that reduces FFT time by 50% on ESP32 (as alternative to FFT_SQRT_APPROXIMATION)
#else
// lib_deps += https://github.com/blazoncek/arduinoFFT.git
#endif
#include "arduinoFFT.h"
-// FFT Variables
+// FFT Output variables shared with animations
#define NUM_GEQ_CHANNELS 16 // number of frequency channels. Don't change !!
static float FFT_MajorPeak = 1.0f; // FFT: strongest (peak) frequency
static float FFT_Magnitude = 0.0f; // FFT: volume (magnitude) of peak frequency
static uint8_t fftResult[NUM_GEQ_CHANNELS]= {0};// Our calculated freq. channel result table to be used by effects
// FFT Constants
constexpr uint16_t samplesFFT = 512; // Samples in an FFT batch - This value MUST ALWAYS be a power of 2
constexpr uint16_t samplesFFT_2 = 256; // meaningful part of FFT results - only the "lower half" contains useful information.
static float FFT_MajorPeak = 1.0f;
static float FFT_Magnitude = 0.0f;
// These are the input and output vectors. Input vectors receive computed results from FFT.
-static float vReal[samplesFFT] = {0.0f};
+static float vReal[samplesFFT] = {0.0f}; // FFT sample inputs / freq output - these are our raw result bins
-static float vImag[samplesFFT] = {0.0f};
+static float vImag[samplesFFT] = {0.0f}; // imaginary parts
static float fftBin[samplesFFT_2] = {0.0f};
// the following are observed values, supported by a bit of "educated guessing"
//#define FFT_DOWNSCALE 0.65f // 20kHz - downscaling factor for FFT results - "Flat-Top" window @20Khz, old freq channels
#define FFT_DOWNSCALE 0.46f // downscaling factor for FFT results - for "Flat-Top" window @22Khz, new freq channels
#define LOG_256 5.54517744
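For reference, 5.54517744 is the natural logarithm of 256 (ln 256 = 8 ln 2), which is what the LOG_256 constant encodes.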
@ -128,13 +138,11 @@ static float windowWeighingFactors[samplesFFT] = {0.0f};
#endif
// Try and normalize fftBin values to a max of 4096, so that 4096/16 = 256.
-// Oh, and bins 0,1,2 are no good, so we'll zero them out.
+static float fftCalc[NUM_GEQ_CHANNELS] = {0.0f};
-static float fftCalc[16] = {0.0f};
+static float fftAvg[NUM_GEQ_CHANNELS] = {0.0f}; // Calculated frequency channel results, with smoothing (used if dynamics limiter is ON)
static uint8_t fftResult[16] = {0}; // Our calculated result table, which we feed to the animations.
#ifdef SR_DEBUG
-static float fftResultMax[16] = {0.0f}; // A table used for testing to determine how our post-processing is working.
+static float fftResultMax[NUM_GEQ_CHANNELS] = {0.0f}; // A table used for testing to determine how our post-processing is working.
#endif
static float fftAvg[16] = {0.0f};
#ifdef WLED_DEBUG
static unsigned long fftTime = 0;
@ -142,7 +150,7 @@ static unsigned long sampleTime = 0;
#endif
// Table of multiplication factors so that we can even out the frequency response.
-static float fftResultPink[16] = { 1.70f, 1.71f, 1.73f, 1.78f, 1.68f, 1.56f, 1.55f, 1.63f, 1.79f, 1.62f, 1.80f, 2.06f, 2.47f, 3.35f, 6.83f, 9.55f };
+static float fftResultPink[NUM_GEQ_CHANNELS] = { 1.70f, 1.71f, 1.73f, 1.78f, 1.68f, 1.56f, 1.55f, 1.63f, 1.79f, 1.62f, 1.80f, 2.06f, 2.47f, 3.35f, 6.83f, 9.55f };
// Create FFT object
#ifdef UM_AUDIOREACTIVE_USE_NEW_FFT
@ -161,12 +169,12 @@ static float mapf(float x, float in_min, float in_max, float out_min, float out_
static float fftAddAvg(int from, int to) {
  float result = 0.0f;
  for (int i = from; i <= to; i++) {
-    result += fftBin[i];
+    result += vReal[i];
  }
  return result / float(to - from + 1);
}
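For orientation (an illustrative stand-alone sketch, not part of the diff): fftAddAvg() simply averages a contiguous, inclusive range of raw FFT bins, which is how the 512-point result gets condensed into the 16 GEQ channels. The bin ranges below are invented for the example, not the actual channel mapping.

// Stand-alone illustration of averaging FFT bins into a frequency channel.
#include <cstdio>

static float bins[512];                       // stand-in for vReal[] after the FFT

static float binRangeAvg(int from, int to) {  // same idea as fftAddAvg(from, to)
  float result = 0.0f;
  for (int i = from; i <= to; i++) result += bins[i];
  return result / float(to - from + 1);
}

int main() {
  for (int i = 0; i < 512; i++) bins[i] = float(i % 32);  // dummy data
  float lowChannel  = binRangeAvg(3, 4);     // hypothetical "bass" channel
  float highChannel = binRangeAvg(101, 160); // hypothetical "treble" channel
  printf("low=%.2f high=%.2f\n", lowChannel, highChannel);
  return 0;
}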
-// FFT main code
+// FFT main task
void FFTcode(void * parameter)
{
  DEBUGSR_PRINT("FFT started on core: "); DEBUGSR_PRINTLN(xPortGetCoreID());
@ -237,9 +245,9 @@ void FFTcode(void * parameter)
#endif
FFT_MajorPeak = constrain(FFT_MajorPeak, 1.0f, 11025.0f); // restrict value to range expected by effects
-for (int i = 0; i < samplesFFT_2; i++) { // Values for bins 0 and 1 are WAY too large. Might as well start at 3.
+for (int i = 0; i < samplesFFT; i++) {
  float t = fabsf(vReal[i]); // just to be sure - values in fft bins should be positive any way
-  fftBin[i] = t / 16.0f; // Reduce magnitude. Want end result to be linear and ~4096 max.
+  vReal[i] = t / 16.0f; // Reduce magnitude. Want end result to be scaled linear and ~4096 max.
} // for()
// mapping of FFT result bins to frequency channels // mapping of FFT result bins to frequency channels
@ -292,14 +300,14 @@ void FFTcode(void * parameter)
// don't use the last bins from 216 to 255. They are usually contaminated by aliasing (aka noise)
#endif
} else { // noise gate closed - just decay old values
-  for (int i=0; i < 16; i++) {
+  for (int i=0; i < NUM_GEQ_CHANNELS; i++) {
    fftCalc[i] *= 0.85f; // decay to zero
    if (fftCalc[i] < 4.0f) fftCalc[i] = 0.0f;
  }
}
// post-processing of frequency channels (pink noise adjustment, AGC, smoothing, scaling)
-for (int i=0; i < 16; i++) {
+for (int i=0; i < NUM_GEQ_CHANNELS; i++) {
  if (sampleAvg > 1) { // noise gate open
    // Adjustment for frequency curves.
@ -378,11 +386,43 @@ void FFTcode(void * parameter)
fftTime = (fftTimeInMillis*3 + fftTime*7)/10; // smooth
}
#endif
// run peak detection
autoResetPeak();
detectSamplePeak();
-} // for(;;)
+} // for(;;)ever
-} // FFTcode()
+} // FFTcode() task end
////////////////////
// Peak detection //
////////////////////
// peak detection is called from FFT task when vReal[] contains valid FFT results
static void detectSamplePeak(void) {
// Poor man's beat detection by seeing if sample > Average + some value.
if ((sampleAvg > 1) && (maxVol > 0) && (binNum > 1) && (vReal[binNum] > maxVol) && ((millis() - timeOfPeak) > 100)) {
// This goes through ALL of the 255 bins - but ignores stupid settings
// Then we got a peak, else we don't. The peak has to time out on its own in order to support UDP sound sync.
samplePeak = true;
timeOfPeak = millis();
udpSamplePeak = true;
}
}
static void autoResetPeak(void) {
uint16_t MinShowDelay = MAX(50, strip.getMinShowDelay()); // Fixes private class variable compiler error. Unsure if this is the correct way of fixing the root problem. -THATDONFC
if (millis() - timeOfPeak > MinShowDelay) { // Auto-reset of samplePeak after a complete frame has passed.
samplePeak = false;
if (audioSyncEnabled == 0) udpSamplePeak = false; // this is normally reset by transmitAudioData
}
}
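Usage note (illustrative, not part of the commit): an effect typically only reads samplePeak and reacts to it; it does not have to clear the flag itself, since autoResetPeak() clears it once strip.getMinShowDelay() has elapsed, and udpSamplePeak is cleared separately by transmitAudioData(). A hypothetical consumer:

// Hypothetical effect-side consumer of the peak flag set by detectSamplePeak().
static void onNewFrame() {
  autoResetPeak();            // clear a stale peak once a full frame has passed
  if (samplePeak) {
    // ...react to the beat here, e.g. trigger a flash or a ripple...
    // the responding routine may also reset samplePeak itself, per the comment above
  }
}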
////////////////////
// usermod class //
////////////////////
//class name. Use something descriptive and leave the ": public Usermod" part :)
class AudioReactive : public Usermod {
@ -453,40 +493,36 @@ class AudioReactive : public Usermod {
double FFT_MajorPeak; // 08 Bytes
};
WiFiUDP fftUdp;
// set your config variables to their boot default value (this can also be done in readFromConfig() or a constructor if you prefer)
bool enabled = false;
bool initDone = false;
-const uint16_t delayMs = 10; // I don't want to sample too often and overload WLED
+// variables for UDP sound sync
WiFiUDP fftUdp; // UDP object for sound sync (from WiFi UDP, not Async UDP!)
bool udpSyncConnected = false;// UDP connection status -> true if connected to multicast group
unsigned long lastTime = 0; // last time of running UDP Microphone Sync
const uint16_t delayMs = 10; // I don't want to sample too often and overload WLED
uint16_t audioSyncPort= 11988;// default port for UDP sound sync
// used for AGC
int last_soundAgc = -1; // used to detect AGC mode change (for resetting AGC internal error buffers)
double control_integrated = 0.0; // persistent across calls to agcAvg(); "integrator control" = accumulated error
// variables used by getSample() and agcAvg()
int16_t micIn = 0; // Current sample starts with negative values and large values, which is why it's 16 bit signed
double sampleMax = 0.0; // Max sample over a few seconds. Needed for AGC controller.
float micLev = 0.0f; // Used to convert returned value to have '0' as minimum. A leveller
float expAdjF = 0.0f; // Used for exponential filter.
float sampleReal = 0.0f; // "sampleRaw" as float, to provide bits that are lost otherwise (before amplification by sampleGain or inputLevel). Needed for AGC.
int16_t sampleRaw = 0; // Current sample. Must only be updated ONCE!!! (amplified mic value by sampleGain and inputLevel)
int16_t rawSampleAgc = 0; // not smoothed AGC sample
float sampleAgc = 0.0f; // Smoothed AGC sample
// variables used in effects
uint8_t maxVol = 10; // Reasonable value for constant volume for 'peak detector', as it won't always trigger (deprecated)
uint8_t binNum = 8; // Used to select the bin for FFT based beat detection (deprecated)
bool samplePeak = 0; // Boolean flag for peak. Responding routine must reset this flag
float volumeSmth = 0.0f; // either sampleAvg or sampleAgc depending on soundAgc; smoothed sample
int16_t volumeRaw = 0; // either sampleRaw or rawSampleAgc depending on soundAgc
float my_magnitude =0.0f; // FFT_Magnitude, scaled by multAgc
bool udpSamplePeak = 0; // Boolean flag for peak. Set at the same tiem as samplePeak, but reset by transmitAudioData
int16_t micIn = 0; // Current sample starts with negative values and large values, which is why it's 16 bit signed
double sampleMax = 0.0; // Max sample over a few seconds. Needed for AGC controler.
uint32_t timeOfPeak = 0;
unsigned long lastTime = 0; // last time of running UDP Microphone Sync
float micLev = 0.0f; // Used to convert returned value to have '0' as minimum. A leveller
float expAdjF = 0.0f; // Used for exponential filter.
bool udpSyncConnected = false;
uint16_t audioSyncPort = 11988;
// used for AGC
uint8_t lastMode = 0; // last known effect mode
int last_soundAgc = -1;
double control_integrated = 0.0; // persistent across calls to agcAvg(); "integrator control" = accumulated error
unsigned long last_update_time = 0;
unsigned long last_kick_time = 0;
uint8_t last_user_inputLevel = 0;
// used to feed "Info" Page // used to feed "Info" Page
unsigned long last_UDPTime = 0; // time of last valid UDP sound sync datapacket unsigned long last_UDPTime = 0; // time of last valid UDP sound sync datapacket
float maxSample5sec = 0.0f; // max sample (after AGC) in last 5 seconds float maxSample5sec = 0.0f; // max sample (after AGC) in last 5 seconds
@ -503,6 +539,10 @@ class AudioReactive : public Usermod {
static const char UDP_SYNC_HEADER_v1[];
// private methods
////////////////////
// Debug support //
////////////////////
void logAudio()
{
#ifdef MIC_LOGGER
@ -525,7 +565,7 @@ class AudioReactive : public Usermod {
#ifdef FFT_SAMPLING_LOG
#if 0
-for(int i=0; i<16; i++) {
+for(int i=0; i<NUM_GEQ_CHANNELS; i++) {
  Serial.print(fftResult[i]);
  Serial.print("\t");
}
@ -551,11 +591,11 @@ class AudioReactive : public Usermod {
int maxVal = minimumMaxVal;
int minVal = 0;
-for(int i = 0; i < 16; i++) {
+for(int i = 0; i < NUM_GEQ_CHANNELS; i++) {
  if(fftResult[i] > maxVal) maxVal = fftResult[i];
  if(fftResult[i] < minVal) minVal = fftResult[i];
}
-for(int i = 0; i < 16; i++) {
+for(int i = 0; i < NUM_GEQ_CHANNELS; i++) {
  Serial.print(i); Serial.print(":");
  Serial.printf("%04ld ", map(fftResult[i], 0, (scaleValuesFromCurrentMaxVal ? maxVal : defaultScalingFromHighValue), (mapValuesToPlotterSpace*i*scalingToHighValue)+0, (mapValuesToPlotterSpace*i*scalingToHighValue)+scalingToHighValue-1));
}
@ -574,6 +614,10 @@ class AudioReactive : public Usermod {
} // logAudio()
//////////////////////
// Audio Processing //
//////////////////////
/*
* A "PI controller" multiplier to automatically adjust sound sensitivity.
*
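As background only (this is a generic illustration, not the usermod's actual AGC code): a PI controller adjusts a gain so that a measured level tracks a target, combining the current error (P term) with its accumulated history (I term, the "integrator control" mentioned in the variable comments). A minimal sketch with made-up constants:

// Generic PI gain controller sketch (illustrative; coefficients are invented).
struct PiGainController {
  float kP = 0.05f;          // proportional coefficient
  float kI = 0.002f;         // integral coefficient
  float integrated = 0.0f;   // accumulated error ("integrator control")

  // target: desired smoothed level; measured: current smoothed level; dt: time step
  float update(float currentGain, float target, float measured, float dt) {
    float error = target - measured;
    integrated += error * dt;                          // I term accumulates persistent offsets
    float gain = currentGain + kP * error + kI * integrated;
    if (gain < 0.0f) gain = 0.0f;                      // keep the multiplier sane
    return gain;
  }
};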
@ -668,7 +712,7 @@ class AudioReactive : public Usermod {
last_soundAgc = soundAgc;
} // agcAvg()
// post-processing and filtering of MIC sample (micDataReal) from FFTcode()
void getSample()
{
  float sampleAdj; // Gain adjusted sample value
@ -729,24 +773,6 @@ class AudioReactive : public Usermod {
if (sampleMax < 0.5f) sampleMax = 0.0f;
sampleAvg = ((sampleAvg * 15.0f) + sampleAdj) / 16.0f; // Smooth it out over the last 16 samples.
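The smoothing above is an exponential moving average with weight 1/16: sampleAvg_new = (15*sampleAvg_old + sampleAdj) / 16, so each new sample contributes roughly 6% and the influence of older samples decays geometrically.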
// Fixes private class variable compiler error. Unsure if this is the correct way of fixing the root problem. -THATDONFC
uint16_t MinShowDelay = strip.getMinShowDelay();
if (millis() - timeOfPeak > MinShowDelay) { // Auto-reset of samplePeak after a complete frame has passed.
samplePeak = false;
udpSamplePeak = false;
}
//if (userVar1 == 0) samplePeak = 0;
// Poor man's beat detection by seeing if sample > Average + some value.
if ((maxVol > 0) && (binNum > 1) && (fftBin[binNum] > maxVol) && (millis() > (timeOfPeak + 100))) {
// This goes through ALL of the 255 bins - but ignores stupid settings
// Then we got a peak, else we don't. The peak has to time out on its own in order to support UDP sound sync.
samplePeak = true;
timeOfPeak = millis();
udpSamplePeak = true;
}
} // getSample()
@ -781,6 +807,26 @@ class AudioReactive : public Usermod {
}
//////////////////////
// UDP Sound Sync //
//////////////////////
// try to establish UDP sound sync connection
void connectUDPSoundSync(void) {
// This function tries to establish a UDP sync connection if needed
// necessary as we also want to transmit in "AP Mode", but the standard "connected()" callback only reacts on STA connection
static unsigned long last_connection_attempt = 0;
if ((audioSyncPort <= 0) || ((audioSyncEnabled & 0x03) == 0)) return; // Sound Sync not enabled
if (udpSyncConnected) return; // already connected
if (!(apActive || interfacesInited)) return; // neither AP nor other connections available
if (millis() - last_connection_attempt < 15000) return; // only try once in 15 seconds
// if we arrive here, we need a UDP connection but don't have one
last_connection_attempt = millis();
connected(); // try to start UDP
}
void transmitAudioData()
{
  if (!udpSyncConnected) return;
@ -795,7 +841,7 @@ class AudioReactive : public Usermod {
udpSamplePeak = false; // Reset udpSamplePeak after we've transmitted it
transmitData.reserved1 = 0;
-for (int i = 0; i < 16; i++) {
+for (int i = 0; i < NUM_GEQ_CHANNELS; i++) {
  transmitData.fftResult[i] = (uint8_t)constrain(fftResult[i], 0, 254);
}
@ -808,12 +854,10 @@ class AudioReactive : public Usermod {
return;
} // transmitAudioData()
static bool isValidUdpSyncVersion(const char *header) {
  return strncmp_P(header, PSTR(UDP_SYNC_HEADER), 6) == 0;
}
bool receiveAudioData() // check & process new data. return TRUE in case that new audio data was received.
{
  if (!udpSyncConnected) return false;
@ -839,13 +883,7 @@ class AudioReactive : public Usermod {
sampleAgc = volumeSmth;
multAgc = 1.0f;
-// auto-reset sample peak. Need to do it here, because getSample() is not running
+autoResetPeak();
uint16_t MinShowDelay = strip.getMinShowDelay();
if (millis() - timeOfPeak > MinShowDelay) { // Auto-reset of samplePeak after a complete frame has passed.
  samplePeak = false;
  udpSamplePeak = false;
}
//if (userVar1 == 0) samplePeak = 0;
// Only change samplePeak IF it's currently false.
// If it's true already, then the animation still needs to respond.
if (!samplePeak) {
@ -855,7 +893,7 @@ class AudioReactive : public Usermod {
}
//These values are only available on the ESP32
-for (int i = 0; i < 16; i++) fftResult[i] = receivedPacket->fftResult[i];
+for (int i = 0; i < NUM_GEQ_CHANNELS; i++) fftResult[i] = receivedPacket->fftResult[i];
my_magnitude = fmaxf(receivedPacket->FFT_Magnitude, 0.0f);
FFT_Magnitude = my_magnitude;
@ -869,6 +907,10 @@ class AudioReactive : public Usermod {
}
//////////////////////
// usermod functions//
//////////////////////
public:
//Functions called by WLED or other usermods
@ -961,6 +1003,7 @@ class AudioReactive : public Usermod {
disableSoundProcessing = true;
}
if (enabled) connectUDPSoundSync();
initDone = true;
}
@ -971,6 +1014,11 @@ class AudioReactive : public Usermod {
*/
void connected()
{
if (udpSyncConnected) { // clean-up: if open, close old UDP sync connection
udpSyncConnected = false;
fftUdp.stop();
}
if (audioSyncPort > 0 && (audioSyncEnabled & 0x03)) {
#ifndef ESP8266
  udpSyncConnected = fftUdp.beginMulticast(IPAddress(239, 0, 0, 1), audioSyncPort);
@ -1004,7 +1052,7 @@ class AudioReactive : public Usermod {
if (strip.isUpdating() && (millis() - lastUMRun < 2)) return; // be nice, but not too nice
// suspend local sound processing when "real time mode" is active (E131, UDP, ADALIGHT, ARTNET)
-if ( (realtimeOverride == REALTIME_OVERRIDE_NONE) // please odd other orrides here if needed
+if ( (realtimeOverride == REALTIME_OVERRIDE_NONE) // please add other overrides here if needed
  &&( (realtimeMode == REALTIME_MODE_GENERIC)
    ||(realtimeMode == REALTIME_MODE_E131)
    ||(realtimeMode == REALTIME_MODE_UDP)
@ -1067,9 +1115,13 @@ class AudioReactive : public Usermod {
if (soundAgc) my_magnitude *= multAgc;
if (volumeSmth < 1 ) my_magnitude = 0.001f; // noise gate closed - mute
-limitSampleDynamics(); // optional - makes volumeSmth very smooth and fluent
+limitSampleDynamics();
-}
+} // if (!disableSoundProcessing)
autoResetPeak(); // auto-reset sample peak after strip minShowDelay
if (!udpSyncConnected) udpSamplePeak = false; // reset UDP samplePeak while UDP is unconnected
connectUDPSoundSync(); // ensure we have a connection - if needed
// UDP Microphone Sync - receive mode
if ((audioSyncEnabled & 0x02) && udpSyncConnected) {
@ -1092,7 +1144,7 @@ class AudioReactive : public Usermod {
}
#endif
-// peak sample from last 5 seconds
+// Info Page: keep max sample from last 5 seconds
if ((millis() - sampleMaxTimer) > CYCLE_SAMPLEMAX) {
  sampleMaxTimer = millis();
  maxSample5sec = (0.15 * maxSample5sec) + 0.85 *((soundAgc) ? sampleAgc : sampleAvg); // reset, and start with some smoothing
@ -1100,6 +1152,7 @@ class AudioReactive : public Usermod {
} else {
  if ((sampleAvg >= 1)) maxSample5sec = fmaxf(maxSample5sec, (soundAgc) ? rawSampleAgc : sampleRaw); // follow maximum volume
}
//UDP Microphone Sync - transmit mode
if ((audioSyncEnabled & 0x01) && (millis() - lastTime > 20)) {
  // Only run the transmit code IF we're in Transmit mode
@ -1137,8 +1190,9 @@ class AudioReactive : public Usermod {
memset(fftCalc, 0, sizeof(fftCalc));
memset(fftAvg, 0, sizeof(fftAvg));
memset(fftResult, 0, sizeof(fftResult));
-for(int i=(init?0:1); i<16; i+=2) fftResult[i] = 16; // make a tiny pattern
+for(int i=(init?0:1); i<NUM_GEQ_CHANNELS; i+=2) fftResult[i] = 16; // make a tiny pattern
inputLevel = 128; // reset level slider to default
autoResetPeak();
if (init && FFT_Task) {
  vTaskSuspend(FFT_Task); // update is about to begin, disable task to prevent crash
@ -1186,6 +1240,10 @@ class AudioReactive : public Usermod {
}
////////////////////////////
// Settings and Info Page //
////////////////////////////
/*
* addToJsonInfo() can be used to add custom entries to the /json/info part of the JSON API.
* Creating an "u" object allows you to add custom key/value pairs to the Info section of the WLED web UI.


@ -30,6 +30,7 @@
#define IBN 5100
#define PALETTE_SOLID_WRAP (strip.paletteBlend == 1 || strip.paletteBlend == 3)
#define indexToVStrip(index, stripNr) ((index) | (int((stripNr)+1)<<16))
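For orientation (illustrative sketch, not part of the diff): the new indexToVStrip() macro packs the 1-based virtual-strip number into the upper bits of a pixel index, so 1D effects can address a single virtual strip through the ordinary setPixelColor(index) path. The decoding shown below is an assumption about how a receiving side would unpack it.

#include <cstdint>
#include <cstdio>

// Same encoding as the macro above: low 16 bits = pixel index, bits 16+ = stripNr+1.
static int encodeVStrip(uint16_t index, uint16_t stripNr) {
  return index | (int(stripNr + 1) << 16);
}

int main() {
  int packed = encodeVStrip(42, 3);             // pixel 42 on virtual strip 3
  uint16_t pixel   = packed & 0xFFFF;           // -> 42
  uint16_t stripNr = (packed >> 16) - 1;        // -> 3 (upper bits == 0 would mean "no strip encoded")
  printf("pixel=%d strip=%d\n", (int)pixel, (int)stripNr);
  return 0;
}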
// effect utility functions
uint8_t sin_gap(uint16_t in) {
@ -1926,10 +1927,7 @@ uint16_t mode_palette()
for (int i = 0; i < SEGLEN; i++)
{
  uint8_t colorIndex = (i * 255 / SEGLEN) - counter;
  SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(colorIndex, false, noWrap, 255));
  if (noWrap) colorIndex = map(colorIndex, 0, 255, 0, 240); //cut off blend at palette "end"
  SEGMENT.setPixelColor(i, SEGMENT.color_from_palette(colorIndex, false, true, 255));
}
return FRAMETIME;
}
@ -1966,59 +1964,59 @@ static const char _data_FX_MODE_PALETTE[] PROGMEM = "Palette@Cycle speed,;1,2,3;
// in step 3 above) (Effect Intensity = Sparking).
uint16_t mode_fire_2012()
{
const uint16_t cols = strip.isMatrix ? SEGMENT.virtualWidth() : 1; uint16_t strips = SEGMENT.nrOfVStrips();
const uint16_t rows = strip.isMatrix ? SEGMENT.virtualHeight() : SEGMENT.virtualLength(); if (!SEGENV.allocateData(strips * SEGLEN)) return mode_static(); //allocation failed
uint32_t it = strip.now >> 5; //div 32
//uint16_t q = cols>>2; // a quarter of flames
if (!SEGENV.allocateData(cols*rows)) return mode_static(); //allocation failed
byte* heat = SEGENV.data; byte* heat = SEGENV.data;
if (it != SEGENV.step) { uint32_t it = strip.now >> 5; //div 32
SEGENV.step = it;
uint8_t ignition = max(3,rows/10); // ignition area: 10% of segment length or minimum 3 pixels
for (int f = 0; f < cols; f++) { struct virtualStrip {
// Step 1. Cool down every cell a little static void runStrip(uint16_t stripNr, byte* heat, uint32_t it) {
for (int i = 0; i < rows; i++) {
uint8_t cool = (((20 + SEGMENT.speed/3) * 16) / rows); if (it != SEGENV.step)
/* {
// 2D enhancement: cool sides of the flame a bit more uint8_t ignition = max(3,SEGLEN/10); // ignition area: 10% of segment length or minimum 3 pixels
if (cols>5) {
if (f < q) cool = qadd8(cool, 2*(uint16_t)((cool * (q-f))/cols)); // cool segment sides a bit more // Step 1. Cool down every cell a little
if (f > 3*q) cool = qadd8(cool, 2*(uint16_t)((cool * (cols-f))/cols)); // cool segment sides a bit more for (int i = 0; i < SEGLEN; i++) {
uint8_t cool = random8((((20 + SEGMENT.speed/3) * 16) / SEGLEN)+2);
uint8_t minTemp = 0;
if (i<ignition) {
cool /= (ignition-i); // ignition area cools slower
minTemp = 4*(ignition-i) + 8; // and should not become black
}
uint8_t temp = qsub8(heat[i], cool);
heat[i] = i<ignition && temp<minTemp ? minTemp : temp; // prevent ignition area from becoming black
}
// Step 2. Heat from each cell drifts 'up' and diffuses a little
for (int k = SEGLEN -1; k > 1; k--) {
heat[k] = (heat[k - 1] + (heat[k - 2]<<1) ) / 3; // heat[k-2] multiplied by 2
}
// Step 3. Randomly ignite new 'sparks' of heat near the bottom
if (random8() <= SEGMENT.intensity) {
uint8_t y = random8(ignition);
heat[y] = qadd8(heat[y], random8(160,255));
} }
*/
uint8_t temp = qsub8(heat[i+rows*f], random8(0, cool + 2));
heat[i+rows*f] = (temp==0 && i<ignition) ? random8(1,16) : temp; // prevent ignition area from becoming black
} }
// Step 2. Heat from each cell drifts 'up' and diffuses a little // Step 4. Map from heat cells to LED colors
for (int k = rows -1; k > 1; k--) { for (int j = 0; j < SEGLEN; j++) {
heat[k+rows*f] = (heat[k+rows*f - 1] + (heat[k+rows*f - 2]<<1) ) / 3; // heat[k-2] multiplied by 2 SEGMENT.setPixelColor(indexToVStrip(j, stripNr), ColorFromPalette(SEGPALETTE, MIN(heat[j],240), 255, NOBLEND));
}
// Step 3. Randomly ignite new 'sparks' of heat near the bottom
if (random8() <= SEGMENT.intensity) {
uint8_t y = random8(ignition);
heat[y+rows*f] = qadd8(heat[y+rows*f], random8(160,255));
} }
} }
} };
for (int stripNr=0; stripNr<strips; stripNr++)
virtualStrip::runStrip(stripNr, &heat[stripNr * SEGLEN], it);
if (it != SEGENV.step)
SEGENV.step = it;
for (int f = 0; f < cols; f++) {
// Step 4. Map from heat cells to LED colors
for (int j = 0; j < rows; j++) {
CRGB color = ColorFromPalette(SEGPALETTE, /*MIN(*/heat[j+rows*f]/*,240)*/, 255, LINEARBLEND);
if (strip.isMatrix) SEGMENT.setPixelColorXY(f, rows -j -1, color);
else SEGMENT.setPixelColor(j, color);
}
}
return FRAMETIME; return FRAMETIME;
} }
-static const char _data_FX_MODE_FIRE_2012[] PROGMEM = "Fire 2012@Cooling,Spark rate;1,2,3;!;sx=120,ix=64,1d,2d";
+static const char _data_FX_MODE_FIRE_2012[] PROGMEM = "Fire 2012@Cooling,Spark rate;1,2,3;!;sx=120,ix=64,mp12=1,1d"; //bars
// ColorWavesWithPalettes by Mark Kriegsman: https://gist.github.com/kriegsman/8281905786e8b2632aeb
@ -2849,7 +2847,7 @@ uint16_t mode_bouncing_balls(void) {
}
int pos = roundf(balls[i].height * (SEGLEN - 1));
-if (SEGLEN<32) SEGMENT.setPixelColor(pos | int((stripNr+1)<<16), color); // encode virtual strip into index
+if (SEGLEN<32) SEGMENT.setPixelColor(indexToVStrip(pos, stripNr), color); // encode virtual strip into index
else SEGMENT.setPixelColor(balls[i].height + (stripNr+1)*10.0f, color);
}
}
@ -2942,68 +2940,68 @@ typedef struct Spark {
uint8_t colIndex;
} spark;
#define maxNumPopcorn 21 // max 21 on 16 segment ESP8266
/*
* POPCORN
* modified from https://github.com/kitesurfer1404/WS2812FX/blob/master/src/custom/Popcorn.h
*/
uint16_t mode_popcorn(void) {
const uint16_t cols = strip.isMatrix ? SEGMENT.virtualWidth() : 1;
const uint16_t rows = strip.isMatrix ? SEGMENT.virtualHeight() : SEGMENT.virtualLength();
//allocate segment data //allocate segment data
uint16_t maxNumPopcorn = 21; // max 21 on 16 segment ESP8266 uint16_t strips = SEGMENT.nrOfVStrips();
uint16_t dataSize = sizeof(spark) * maxNumPopcorn; uint16_t dataSize = sizeof(spark) * maxNumPopcorn;
if (!SEGENV.allocateData(dataSize)) return mode_static(); //allocation failed if (!SEGENV.allocateData(dataSize * strips)) return mode_static(); //allocation failed
Spark* popcorn = reinterpret_cast<Spark*>(SEGENV.data); Spark* popcorn = reinterpret_cast<Spark*>(SEGENV.data);
float gravity = -0.0001 - (SEGMENT.speed/200000.0); // m/s/s
gravity *= rows; //SEGLEN
bool hasCol2 = SEGCOLOR(2); bool hasCol2 = SEGCOLOR(2);
SEGMENT.fill(hasCol2 ? BLACK : SEGCOLOR(1)); SEGMENT.fill(hasCol2 ? BLACK : SEGCOLOR(1));
uint8_t numPopcorn = SEGMENT.intensity*maxNumPopcorn/255; struct virtualStrip {
if (numPopcorn == 0) numPopcorn = 1; static void runStrip(uint16_t stripNr, Spark* popcorn) {
float gravity = -0.0001 - (SEGMENT.speed/200000.0); // m/s/s
gravity *= SEGLEN;
for (int i = 0; i < numPopcorn; i++) { uint8_t numPopcorn = SEGMENT.intensity*maxNumPopcorn/255;
if (popcorn[i].pos >= 0.0f) { // if kernel is active, update its position if (numPopcorn == 0) numPopcorn = 1;
popcorn[i].pos += popcorn[i].vel;
popcorn[i].vel += gravity;
} else { // if kernel is inactive, randomly pop it
if (random8() < 2) { // POP!!!
popcorn[i].pos = 0.01f;
popcorn[i].posX = random16(cols);
uint16_t peakHeight = 128 + random8(128); //0-255 for(int i = 0; i < numPopcorn; i++) {
peakHeight = (peakHeight * (rows -1)) >> 8; if (popcorn[i].pos >= 0.0f) { // if kernel is active, update its position
popcorn[i].vel = sqrt(-2.0 * gravity * peakHeight); popcorn[i].pos += popcorn[i].vel;
popcorn[i].velX = 0; popcorn[i].vel += gravity;
} else { // if kernel is inactive, randomly pop it
if (random8() < 2) { // POP!!!
popcorn[i].pos = 0.01f;
if (SEGMENT.palette) { uint16_t peakHeight = 128 + random8(128); //0-255
popcorn[i].colIndex = random8(); peakHeight = (peakHeight * (SEGLEN -1)) >> 8;
} else { popcorn[i].vel = sqrt(-2.0 * gravity * peakHeight);
byte col = random8(0, NUM_COLORS);
if (!hasCol2 || !SEGCOLOR(col)) col = 0; if (SEGMENT.palette)
popcorn[i].colIndex = col; {
popcorn[i].colIndex = random8();
} else {
byte col = random8(0, NUM_COLORS);
if (!SEGCOLOR(2) || !SEGCOLOR(col)) col = 0;
popcorn[i].colIndex = col;
}
}
}
if (popcorn[i].pos >= 0.0f) { // draw now active popcorn (either active before or just popped)
uint32_t col = SEGMENT.color_wheel(popcorn[i].colIndex);
if (!SEGMENT.palette && popcorn[i].colIndex < NUM_COLORS) col = SEGCOLOR(popcorn[i].colIndex);
uint16_t ledIndex = popcorn[i].pos;
if (ledIndex < SEGLEN) SEGMENT.setPixelColor(indexToVStrip(ledIndex, stripNr), col);
} }
} }
} }
if (popcorn[i].pos >= 0.0f) { // draw now active popcorn (either active before or just popped) };
uint32_t col = SEGMENT.color_wheel(popcorn[i].colIndex);
if (!SEGMENT.palette && popcorn[i].colIndex < NUM_COLORS) col = SEGCOLOR(popcorn[i].colIndex);
uint16_t ledIndex = popcorn[i].pos; for (int stripNr=0; stripNr<strips; stripNr++)
if (ledIndex < rows) { virtualStrip::runStrip(stripNr, &popcorn[stripNr * maxNumPopcorn]);
if (strip.isMatrix) SEGMENT.setPixelColorXY(uint16_t(popcorn[i].posX), rows - 1 - ledIndex, col);
else SEGMENT.setPixelColor(ledIndex, col);
}
}
}
return FRAMETIME;
}
-static const char _data_FX_MODE_POPCORN[] PROGMEM = "Popcorn@!,!;!,!,!;!;1d,2d";
+static const char _data_FX_MODE_POPCORN[] PROGMEM = "Popcorn@!,!;!,!,!;!;mp12=1,1d"; //bar
//values close to 100 produce 5Hz flicker, which looks very candle-y
@ -3375,91 +3373,84 @@ static const char _data_FX_MODE_EXPLODING_FIREWORKS[] PROGMEM = "Fireworks 1D@Gr
*/
uint16_t mode_drip(void)
{
const uint16_t cols = strip.isMatrix ? SEGMENT.virtualWidth() : 1;
const uint16_t rows = strip.isMatrix ? SEGMENT.virtualHeight() : SEGMENT.virtualLength();
//allocate segment data //allocate segment data
uint8_t numDrops = 4; uint16_t strips = SEGMENT.nrOfVStrips();
uint16_t dataSize = sizeof(spark) * numDrops; const int maxNumDrops = 4;
if (!SEGENV.allocateData(dataSize * cols)) return mode_static(); //allocation failed uint16_t dataSize = sizeof(spark) * maxNumDrops;
if (!SEGENV.allocateData(dataSize * strips)) return mode_static(); //allocation failed
Spark* drops = reinterpret_cast<Spark*>(SEGENV.data);
SEGMENT.fill(SEGCOLOR(1)); SEGMENT.fill(SEGCOLOR(1));
Spark* drops = reinterpret_cast<Spark*>(SEGENV.data); struct virtualStrip {
static void runStrip(uint16_t stripNr, Spark* drops) {
numDrops = 1 + (SEGMENT.intensity >> 6); // 255>>6 = 3 uint8_t numDrops = 1 + (SEGMENT.intensity >> 6); // 255>>6 = 3
float gravity = -0.0005 - (SEGMENT.speed/50000.0); float gravity = -0.0005 - (SEGMENT.speed/50000.0);
gravity *= rows-1; gravity *= SEGLEN-1;
int sourcedrop = 12; int sourcedrop = 12;
for (int k=0; k < cols; k++) { for (int j=0;j<numDrops;j++) {
for (size_t j=0; j < numDrops; j++) { if (drops[j].colIndex == 0) { //init
uint16_t idx = k*numDrops + j; drops[j].pos = SEGLEN-1; // start at end
drops[j].vel = 0; // speed
if (drops[idx].colIndex == 0) { //init drops[j].col = sourcedrop; // brightness
drops[idx].pos = rows-1; // start at end drops[j].colIndex = 1; // drop state (0 init, 1 forming, 2 falling, 5 bouncing)
drops[idx].vel = 0; // speed
drops[idx].col = sourcedrop; // brightness
drops[idx].colIndex = 1; // drop state (0 init, 1 forming, 2 falling, 5 bouncing)
}
uint32_t col = color_blend(BLACK, SEGCOLOR(0), sourcedrop);
if (strip.isMatrix) SEGMENT.setPixelColorXY(k, 0, col);
else SEGMENT.setPixelColor(rows-1, col);// water source
if (drops[idx].colIndex == 1) {
if (drops[idx].col > 255) drops[idx].col = 255;
col = color_blend(BLACK,SEGCOLOR(0),drops[idx].col);
if (strip.isMatrix) SEGMENT.setPixelColorXY(k, rows - 1 - uint16_t(drops[idx].pos), col);
else SEGMENT.setPixelColor(uint16_t(drops[idx].pos), col);
drops[idx].col += map(SEGMENT.speed, 0, 255, 1, 6); // swelling
if (random8() < drops[idx].col/10) { // random drop
drops[idx].colIndex = 2; //fall
drops[idx].col = 255;
} }
}
if (drops[idx].colIndex > 1) { // falling
if (drops[idx].pos > 0) { // fall until end of segment
drops[idx].pos += drops[idx].vel;
if (drops[idx].pos < 0) drops[idx].pos = 0;
drops[idx].vel += gravity; // gravity is negative
for (int i = 1; i < 7 - drops[idx].colIndex; i++) { // some minor math so we don't expand bouncing droplets SEGMENT.setPixelColor(indexToVStrip(SEGLEN-1, stripNr), color_blend(BLACK,SEGCOLOR(0), sourcedrop));// water source
uint16_t pos = constrain(uint16_t(drops[idx].pos) +i, 0, rows-1); //this is BAD, returns a pos >= SEGLEN occasionally if (drops[j].colIndex==1) {
col = color_blend(BLACK, SEGCOLOR(0), drops[idx].col/i); if (drops[j].col>255) drops[j].col=255;
if (strip.isMatrix) SEGMENT.setPixelColorXY(k, rows - 1 - pos, col); SEGMENT.setPixelColor(indexToVStrip(uint16_t(drops[j].pos), stripNr), color_blend(BLACK,SEGCOLOR(0),drops[j].col));
else SEGMENT.setPixelColor(pos, col); //spread pixel with fade while falling
drops[j].col += map(SEGMENT.speed, 0, 255, 1, 6); // swelling
if (random8() < drops[j].col/10) { // random drop
drops[j].colIndex=2; //fall
drops[j].col=255;
} }
}
if (drops[j].colIndex > 1) { // falling
if (drops[j].pos > 0) { // fall until end of segment
drops[j].pos += drops[j].vel;
if (drops[j].pos < 0) drops[j].pos = 0;
drops[j].vel += gravity; // gravity is negative
if (drops[idx].colIndex > 2) { // during bounce, some water is on the floor for (int i=1;i<7-drops[j].colIndex;i++) { // some minor math so we don't expand bouncing droplets
col = color_blend(SEGCOLOR(0), BLACK, drops[idx].col); uint16_t pos = constrain(uint16_t(drops[j].pos) +i, 0, SEGLEN-1); //this is BAD, returns a pos >= SEGLEN occasionally
if (strip.isMatrix) SEGMENT.setPixelColorXY(k, rows - 1, col); SEGMENT.setPixelColor(indexToVStrip(pos, stripNr), color_blend(BLACK,SEGCOLOR(0),drops[j].col/i)); //spread pixel with fade while falling
else SEGMENT.setPixelColor(0, col); }
}
} else { // we hit bottom if (drops[j].colIndex > 2) { // during bounce, some water is on the floor
if (drops[idx].colIndex > 2) { // already hit once, so back to forming SEGMENT.setPixelColor(indexToVStrip(0, stripNr), color_blend(SEGCOLOR(0),BLACK,drops[j].col));
drops[idx].colIndex = 0; }
drops[idx].col = sourcedrop; } else { // we hit bottom
if (drops[j].colIndex > 2) { // already hit once, so back to forming
} else { drops[j].colIndex = 0;
drops[j].col = sourcedrop;
if (drops[idx].colIndex == 2) { // init bounce
drops[idx].vel = -drops[idx].vel/4;// reverse velocity with damping } else {
drops[idx].pos += drops[idx].vel;
if (drops[j].colIndex==2) { // init bounce
drops[j].vel = -drops[j].vel/4;// reverse velocity with damping
drops[j].pos += drops[j].vel;
}
drops[j].col = sourcedrop*2;
drops[j].colIndex = 5; // bouncing
} }
drops[idx].col = sourcedrop*2;
drops[idx].colIndex = 5; // bouncing
} }
} }
} }
} }
} };
for (int stripNr=0; stripNr<strips; stripNr++)
virtualStrip::runStrip(stripNr, &drops[stripNr*maxNumDrops]);
return FRAMETIME;
}
-static const char _data_FX_MODE_DRIP[] PROGMEM = "Drip@Gravity,# of drips;!,!;!;1d,2d";
+static const char _data_FX_MODE_DRIP[] PROGMEM = "Drip@Gravity,# of drips;!,!;!;mp12=1,1d"; //bar
/*
@ -3493,7 +3484,7 @@ uint16_t mode_tetrix(void) {
if (SEGENV.call == 0) {
  drop->stack = 0; // reset brick stack size
  drop->step = 0;
-  //for (int i=0; i<SEGLEN; i++) SEGMENT.setPixelColor(i | int((stripNr+1)<<16), SEGCOLOR(1)); // will fill virtual strip only
+  //for (int i=0; i<SEGLEN; i++) SEGMENT.setPixelColor(indexToVStrip(i, stripNr), SEGCOLOR(1)); // will fill virtual strip only
}
if (drop->step == 0) { // init brick
@ -3518,11 +3509,8 @@ uint16_t mode_tetrix(void) {
if (drop->step == 2) { // falling
  if (drop->pos > drop->stack) { // fall until top of stack
    drop->pos -= drop->speed; // may add gravity as: speed += gravity
-    if (uint16_t(drop->pos) < drop->stack) drop->pos = drop->stack;
+    if (uint16_t(drop->pos) < drop->aux1) drop->pos = drop->aux1;
-    for (int i=int(drop->pos); i<SEGLEN; i++) {
+    for (int i=int(drop->pos); i<SEGLEN; i++) SEGMENT.setPixelColor(i | int((stripNr+1)<<16), i<int(drop->pos)+drop->aux0 ? drop->col : SEGCOLOR(1));
    uint32_t col = i<int(drop->pos)+drop->brick ? SEGMENT.color_from_palette(drop->col, false, false, 0) : SEGCOLOR(1);
    SEGMENT.setPixelColor(i | int((stripNr+1)<<16), col);
    }
  } else { // we hit bottom
    drop->step = 0; // proceed with next brick, go back to init
    drop->stack += drop->brick; // increase the stack size
@ -3534,7 +3522,7 @@ uint16_t mode_tetrix(void) {
drop->brick = 0; // reset brick size (no more growing)
if (drop->step > millis()) {
  // allow fading of virtual strip
-  for (int i=0; i<SEGLEN; i++) SEGMENT.blendPixelColor(i | int((stripNr+1)<<16), SEGCOLOR(1), 25); // 10% blend with Bg color
+  for (int i=0; i<SEGLEN; i++) SEGMENT.blendPixelColor(i | int((stripNr+1)<<16), SEGCOLOR(1), 25); // 10% blend
} else {
  drop->stack = 0; // reset brick stack size
  drop->step = 0; // proceed with next brick
@ -5931,7 +5919,6 @@ static const char _data_FX_MODE_2DDRIFTROSE[] PROGMEM = "Drift Rose@Fade,Blur;;;
#endif // WLED_DISABLE_2D
#ifndef WLED_DISABLE_AUDIO
///////////////////////////////////////////////////////////////////////////////
/******************** audio enhanced routines ************************/
///////////////////////////////////////////////////////////////////////////////
@ -6258,7 +6245,7 @@ uint16_t mode_gravcentric(void) { // Gravcentric. By Andrew
return FRAMETIME;
} // mode_gravcentric()
-static const char _data_FX_MODE_GRAVCENTRIC[] PROGMEM = "Gravcentric@Rate of fall,Sensitivity;!;!;ix=128,mp12=2,ssim=0,1d,vo"; // Circle, Beatsin
+static const char _data_FX_MODE_GRAVCENTRIC[] PROGMEM = "Gravcentric@Rate of fall,Sensitivity;!;!;ix=128,mp12=3,ssim=0,1d,vo"; // Corner, Beatsin
///////////////////////
@ -6394,7 +6381,7 @@ uint16_t mode_midnoise(void) { // Midnoise. By Andrew Tuline.
return FRAMETIME;
} // mode_midnoise()
-static const char _data_FX_MODE_MIDNOISE[] PROGMEM = "Midnoise@Fade rate,Maximum length;,!;!;ix=128,mp12=2,ssim=0,1d,vo"; // Circle, Beatsin
+static const char _data_FX_MODE_MIDNOISE[] PROGMEM = "Midnoise@Fade rate,Maximum length;,!;!;ix=128,mp12=1,ssim=0,1d,vo"; // Bar, Beatsin
//////////////////////
@ -6519,7 +6506,7 @@ uint16_t mode_plasmoid(void) { // Plasmoid. By Andrew Tuline.
}
float volumeSmth = *(float*) um_data->u_data[0];
-SEGMENT.fadeToBlackBy(64);
+SEGMENT.fadeToBlackBy(32);
plasmoip->thisphase += beatsin8(6,-4,4); // You can change direction and speed individually.
plasmoip->thatphase += beatsin8(7,-4,4); // Two phase values to make a complex pattern. By Andrew Tuline.
@ -6615,11 +6602,6 @@ uint16_t mode_puddles(void) { // Puddles. By Andrew Tuline.
static const char _data_FX_MODE_PUDDLES[] PROGMEM = "Puddles@Fade rate,Puddle size;!,!;!;mp12=0,ssim=0,1d,vo"; // Pixels, Beatsin
///////////////////////////////////////////////////////////////////////////////
/******************** audio only routines ************************/
///////////////////////////////////////////////////////////////////////////////
#ifdef USERMOD_AUDIOREACTIVE
//////////////////////
// * PIXELS //
//////////////////////
@ -6671,7 +6653,8 @@ uint16_t mode_blurz(void) { // Blurz. By Andrew Tuline.
SEGENV.aux0 = 0;
}
-SEGMENT.fade_out(SEGMENT.speed);
+int fadeoutDelay = (256 - SEGMENT.speed) / 32;
if ((fadeoutDelay <= 1 ) || ((SEGENV.call % fadeoutDelay) == 0)) SEGMENT.fade_out(SEGMENT.speed);
SEGENV.step += FRAMETIME;
if (SEGENV.step > SPEED_FORMULA_L) {
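The fade pattern introduced above (and repeated in several effects in this commit) throttles how often the fade runs: at low speed settings the fade is applied only every Nth frame instead of every frame. A minimal stand-alone sketch of the idea, with invented names and assuming a per-frame call counter like SEGENV.call:

// Illustrative only: apply a fade every "fadeoutDelay" frames instead of every frame.
// 'speed' is the 0-255 effect speed slider, 'frameCounter' a per-frame call counter.
static void throttledFade(uint8_t speed, uint32_t frameCounter) {
  int fadeoutDelay = (256 - speed) / 32;                 // slower speed -> larger delay
  bool fadeNow = (fadeoutDelay <= 1) ||                  // fast: fade every frame
                 ((frameCounter % fadeoutDelay) == 0);   // slow: fade only every Nth frame
  if (fadeNow) {
    // SEGMENT.fade_out(speed);  // assumption: the usual WLED fade call would go here
  }
}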
@ -6739,7 +6722,9 @@ uint16_t mode_freqmap(void) { // Map FFT_MajorPeak to SEGLEN.
float my_magnitude = *(float*) um_data->u_data[5] / 4.0f;
if (FFT_MajorPeak < 1) FFT_MajorPeak = 1; // log10(0) is "forbidden" (throws exception)
-SEGMENT.fade_out(SEGMENT.speed);
+if (SEGENV.call == 0) SEGMENT.fill(BLACK);
int fadeoutDelay = (256 - SEGMENT.speed) / 32;
if ((fadeoutDelay <= 1 ) || ((SEGENV.call % fadeoutDelay) == 0)) SEGMENT.fade_out(SEGMENT.speed);
int locn = (log10f((float)FFT_MajorPeak) - 1.78f) * (float)SEGLEN/(MAX_FREQ_LOG10 - 1.78f); // log10 frequency range is from 1.78 to 3.71. Let's scale to SEGLEN.
if (locn < 1) locn = 0; // avoid underflow
@ -6754,7 +6739,7 @@ uint16_t mode_freqmap(void) { // Map FFT_MajorPeak to SEGLEN.
return FRAMETIME;
} // mode_freqmap()
-static const char _data_FX_MODE_FREQMAP[] PROGMEM = "Freqmap@Fade rate,Starting color;,!;!;mp12=2,ssim=0,1d,fr"; // Circle, Beatsin
+static const char _data_FX_MODE_FREQMAP[] PROGMEM = "Freqmap@Fade rate,Starting color;,!;!;mp12=0,ssim=0,1d,fr"; // Pixels, Beatsin
///////////////////////
@ -6809,7 +6794,7 @@ uint16_t mode_freqmatrix(void) { // Freqmatrix. By Andreas Plesch
return FRAMETIME;
} // mode_freqmatrix()
-static const char _data_FX_MODE_FREQMATRIX[] PROGMEM = "Freqmatrix@Time delay,Sound effect,Low bin,High bin,Sensivity;;;mp12=0,ssim=0,1d,fr"; // Pixels, Beatsin
+static const char _data_FX_MODE_FREQMATRIX[] PROGMEM = "Freqmatrix@Time delay,Sound effect,Low bin,High bin,Sensivity;;;mp12=3,ssim=0,1d,fr"; // Corner, Beatsin
//////////////////////
@ -6830,7 +6815,10 @@ uint16_t mode_freqpixels(void) { // Freqpixel. By Andrew Tuline.
if (FFT_MajorPeak < 1) FFT_MajorPeak = 1; // log10(0) is "forbidden" (throws exception)
uint16_t fadeRate = 2*SEGMENT.speed - SEGMENT.speed*SEGMENT.speed/255; // Get to 255 as quick as you can.
SEGMENT.fade_out(fadeRate);
if (SEGENV.call == 0) SEGMENT.fill(BLACK);
int fadeoutDelay = (256 - SEGMENT.speed) / 64;
if ((fadeoutDelay <= 1 ) || ((SEGENV.call % fadeoutDelay) == 0)) SEGMENT.fade_out(fadeRate);
for (int i=0; i < SEGMENT.intensity/32+1; i++) {
  uint16_t locn = random16(0,SEGLEN);
@ -6962,7 +6950,7 @@ uint16_t mode_gravfreq(void) { // Gravfreq. By Andrew Tuline.
return FRAMETIME;
} // mode_gravfreq()
-static const char _data_FX_MODE_GRAVFREQ[] PROGMEM = "Gravfreq@Rate of fall,Sensivity;,!;!;ix=128,mp12=2,ssim=0,1d,fr"; // Circle, Beatsin
+static const char _data_FX_MODE_GRAVFREQ[] PROGMEM = "Gravfreq@Rate of fall,Sensivity;,!;!;ix=128,mp12=0,ssim=0,1d,fr"; // Pixels, Beatsin
//////////////////////
@ -6976,7 +6964,10 @@ uint16_t mode_noisemove(void) { // Noisemove. By: Andrew Tuli
}
uint8_t *fftResult = (uint8_t*)um_data->u_data[2];
-SEGMENT.fade_out(224); // Just in case something doesn't get faded.
+if (SEGENV.call == 0) SEGMENT.fill(BLACK);
//SEGMENT.fade_out(224); // Just in case something doesn't get faded.
int fadeoutDelay = (256 - SEGMENT.speed) / 96;
if ((fadeoutDelay <= 1 ) || ((SEGENV.call % fadeoutDelay) == 0)) SEGMENT.fadeToBlackBy(4+ SEGMENT.speed/4);
uint8_t numBins = map(SEGMENT.intensity,0,255,0,16); // Map slider to fftResult bins.
for (int i=0; i<numBins; i++) { // How many active bins are we using.
@ -7002,13 +6993,16 @@ uint16_t mode_rocktaves(void) { // Rocktaves. Same note from eac
float FFT_MajorPeak = *(float*) um_data->u_data[4];
float my_magnitude = *(float*) um_data->u_data[5] / 16.0f;
-SEGMENT.fadeToBlackBy(64); // Just in case something doesn't get faded.
+if (SEGENV.call == 0) SEGMENT.fill(BLACK);
SEGMENT.fadeToBlackBy(16); // Just in case something doesn't get faded.
float frTemp = FFT_MajorPeak;
uint8_t octCount = 0; // Octave counter.
uint8_t volTemp = 0;
-if (my_magnitude > 32) volTemp = 255; // We need to squelch out the background noise.
+volTemp = 32.0f + my_magnitude * 1.5f; // brightness = volume (overflows are handled in next lines)
if (my_magnitude < 48) volTemp = 0; // We need to squelch out the background noise.
if (my_magnitude > 144) volTemp = 255; // everything above this is full brightness
while ( frTemp > 249 ) {
  octCount++; // This should go up to 5.
@ -7024,7 +7018,7 @@ uint16_t mode_rocktaves(void) { // Rocktaves. Same note from eac
return FRAMETIME;
} // mode_rocktaves()
-static const char _data_FX_MODE_ROCKTAVES[] PROGMEM = "Rocktaves@;,!;!;mp12=0,ssim=0,1d,fr"; // Pixels, Beatsin
+static const char _data_FX_MODE_ROCKTAVES[] PROGMEM = "Rocktaves@;,!;!;mp12=1,ssim=0,1d,fr"; // Bar, Beatsin
///////////////////////
@ -7108,10 +7102,13 @@ uint16_t mode_2DGEQ(void) { // By Will Tatam. Code reduction by Ewoud Wijma.
rippleTime = true;
}
-SEGMENT.fadeToBlackBy(SEGMENT.speed);
+if (SEGENV.call == 0) SEGMENT.fill(BLACK);
int fadeoutDelay = (256 - SEGMENT.speed) / 64;
if ((fadeoutDelay <= 1 ) || ((SEGENV.call % fadeoutDelay) == 0)) SEGMENT.fadeToBlackBy(SEGMENT.speed);
for (int x=0; x < cols; x++) {
  uint8_t band = map(x, 0, cols-1, 0, NUM_BANDS - 1);
  if (NUM_BANDS < 16) band = map(band, 0, NUM_BANDS - 1, 0, 15); // always use full range. comment out this line to get the previous behaviour.
  band = constrain(band, 0, 15);
  uint16_t colorIndex = band * 17;
  uint16_t barHeight = map(fftResult[band], 0, 255, 0, rows); // do not subtract -1 from rows here
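A quick worked example of the new band mapping (values chosen purely for illustration): with NUM_BANDS = 8 and cols = 32, column x = 10 gives band = map(10, 0, 31, 0, 7) = 2 by integer arithmetic (10*7/31), and the added remap line spreads it back over the full channel range as map(2, 0, 7, 0, 15) = 4 (2*15/7), so a reduced band count still sweeps the whole fftResult[0..15] table.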
@ -7193,14 +7190,7 @@ uint16_t mode_2DFunkyPlank(void) { // Written by ??? Adapted by Wil
} // mode_2DFunkyPlank
static const char _data_FX_MODE_2DFUNKYPLANK[] PROGMEM = "Funky Plank@Scroll speed,,# of bands;;;ssim=0,2d,fr"; // Beatsin
#endif // WLED_DISABLE_2D
//end audio only routines
#endif
#ifndef WLED_DISABLE_2D
/////////////////////////
// 2D Akemi //
/////////////////////////
@ -7305,7 +7295,6 @@ uint16_t mode_2DAkemi(void) {
static const char _data_FX_MODE_2DAKEMI[] PROGMEM = "Akemi@Color speed,Dance;Head palette,Arms & Legs,Eyes & Mouth;Face palette;ssim=0,2d,fr"; //beatsin static const char _data_FX_MODE_2DAKEMI[] PROGMEM = "Akemi@Color speed,Dance;Head palette,Arms & Legs,Eyes & Mouth;Face palette;ssim=0,2d,fr"; //beatsin
#endif // WLED_DISABLE_2D #endif // WLED_DISABLE_2D
#endif // WLED_DISABLE_AUDIO
////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////
// mode data // mode data
@ -7487,16 +7476,14 @@ void WS2812FX::setupEffectData() {
addEffect(FX_MODE_2DMETABALLS, &mode_2Dmetaballs, _data_FX_MODE_2DMETABALLS); addEffect(FX_MODE_2DMETABALLS, &mode_2Dmetaballs, _data_FX_MODE_2DMETABALLS);
addEffect(FX_MODE_2DPULSER, &mode_2DPulser, _data_FX_MODE_2DPULSER); addEffect(FX_MODE_2DPULSER, &mode_2DPulser, _data_FX_MODE_2DPULSER);
addEffect(FX_MODE_2DDRIFT, &mode_2DDrift, _data_FX_MODE_2DDRIFT); addEffect(FX_MODE_2DDRIFT, &mode_2DDrift, _data_FX_MODE_2DDRIFT);
// --- 2D audio effects --- // --- 2D audio effects ---
#ifndef WLED_DISABLE_AUDIO
addEffect(FX_MODE_2DWAVERLY, &mode_2DWaverly, _data_FX_MODE_2DWAVERLY); addEffect(FX_MODE_2DWAVERLY, &mode_2DWaverly, _data_FX_MODE_2DWAVERLY);
addEffect(FX_MODE_2DSWIRL, &mode_2DSwirl, _data_FX_MODE_2DSWIRL); addEffect(FX_MODE_2DSWIRL, &mode_2DSwirl, _data_FX_MODE_2DSWIRL);
addEffect(FX_MODE_2DAKEMI, &mode_2DAkemi, _data_FX_MODE_2DAKEMI); addEffect(FX_MODE_2DAKEMI, &mode_2DAkemi, _data_FX_MODE_2DAKEMI);
#endif addEffect(FX_MODE_2DGEQ, &mode_2DGEQ, _data_FX_MODE_2DGEQ);
addEffect(FX_MODE_2DFUNKYPLANK, &mode_2DFunkyPlank, _data_FX_MODE_2DFUNKYPLANK);
#endif // WLED_DISABLE_2D #endif // WLED_DISABLE_2D
#ifndef WLED_DISABLE_AUDIO
// --- 1D audio effects --- // --- 1D audio effects ---
addEffect(FX_MODE_PIXELWAVE, &mode_pixelwave, _data_FX_MODE_PIXELWAVE); addEffect(FX_MODE_PIXELWAVE, &mode_pixelwave, _data_FX_MODE_PIXELWAVE);
addEffect(FX_MODE_JUGGLES, &mode_juggles, _data_FX_MODE_JUGGLES); addEffect(FX_MODE_JUGGLES, &mode_juggles, _data_FX_MODE_JUGGLES);
@ -7511,29 +7498,15 @@ void WS2812FX::setupEffectData() {
addEffect(FX_MODE_RIPPLEPEAK, &mode_ripplepeak, _data_FX_MODE_RIPPLEPEAK); addEffect(FX_MODE_RIPPLEPEAK, &mode_ripplepeak, _data_FX_MODE_RIPPLEPEAK);
addEffect(FX_MODE_GRAVCENTER, &mode_gravcenter, _data_FX_MODE_GRAVCENTER); addEffect(FX_MODE_GRAVCENTER, &mode_gravcenter, _data_FX_MODE_GRAVCENTER);
addEffect(FX_MODE_GRAVCENTRIC, &mode_gravcentric, _data_FX_MODE_GRAVCENTRIC); addEffect(FX_MODE_GRAVCENTRIC, &mode_gravcentric, _data_FX_MODE_GRAVCENTRIC);
#endif // WLED_DISABLE_AUDIO
#ifdef USERMOD_AUDIOREACTIVE
// --- WLED-SR audio reactive usermod only effects ---
#ifdef WLED_DISABLE_AUDIO
#error Incompatible options: WLED_DISABLE_AUDIO and USERMOD_AUDIOREACTIVE
#endif
#ifdef WLED_DISABLE_2D
#error AUDIOREACTIVE usermod requires 2D support.
#endif
addEffect(FX_MODE_PIXELS, &mode_pixels, _data_FX_MODE_PIXELS); addEffect(FX_MODE_PIXELS, &mode_pixels, _data_FX_MODE_PIXELS);
addEffect(FX_MODE_FREQWAVE, &mode_freqwave, _data_FX_MODE_FREQWAVE); addEffect(FX_MODE_FREQWAVE, &mode_freqwave, _data_FX_MODE_FREQWAVE);
addEffect(FX_MODE_FREQMATRIX, &mode_freqmatrix, _data_FX_MODE_FREQMATRIX); addEffect(FX_MODE_FREQMATRIX, &mode_freqmatrix, _data_FX_MODE_FREQMATRIX);
addEffect(FX_MODE_2DGEQ, &mode_2DGEQ, _data_FX_MODE_2DGEQ);
addEffect(FX_MODE_WATERFALL, &mode_waterfall, _data_FX_MODE_WATERFALL); addEffect(FX_MODE_WATERFALL, &mode_waterfall, _data_FX_MODE_WATERFALL);
addEffect(FX_MODE_FREQPIXELS, &mode_freqpixels, _data_FX_MODE_FREQPIXELS); addEffect(FX_MODE_FREQPIXELS, &mode_freqpixels, _data_FX_MODE_FREQPIXELS);
addEffect(FX_MODE_NOISEMOVE, &mode_noisemove, _data_FX_MODE_NOISEMOVE); addEffect(FX_MODE_NOISEMOVE, &mode_noisemove, _data_FX_MODE_NOISEMOVE);
addEffect(FX_MODE_FREQMAP, &mode_freqmap, _data_FX_MODE_FREQMAP); addEffect(FX_MODE_FREQMAP, &mode_freqmap, _data_FX_MODE_FREQMAP);
addEffect(FX_MODE_GRAVFREQ, &mode_gravfreq, _data_FX_MODE_GRAVFREQ); addEffect(FX_MODE_GRAVFREQ, &mode_gravfreq, _data_FX_MODE_GRAVFREQ);
addEffect(FX_MODE_DJLIGHT, &mode_DJLight, _data_FX_MODE_DJLIGHT); addEffect(FX_MODE_DJLIGHT, &mode_DJLight, _data_FX_MODE_DJLIGHT);
addEffect(FX_MODE_2DFUNKYPLANK, &mode_2DFunkyPlank, _data_FX_MODE_2DFUNKYPLANK);
addEffect(FX_MODE_BLURZ, &mode_blurz, _data_FX_MODE_BLURZ); addEffect(FX_MODE_BLURZ, &mode_blurz, _data_FX_MODE_BLURZ);
addEffect(FX_MODE_ROCKTAVES, &mode_rocktaves, _data_FX_MODE_ROCKTAVES); addEffect(FX_MODE_ROCKTAVES, &mode_rocktaves, _data_FX_MODE_ROCKTAVES);
//addEffect(FX_MODE_CUSTOMEFFECT, &mode_customEffect, _data_FX_MODE_CUSTOMEFFECT); //WLEDSR Custom Effects
#endif // USERMOD_AUDIOREACTIVE
} }
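With the audio effects now registered unconditionally, adding any effect keeps following the same three-part pattern: a mode id, the mode function, and its PROGMEM metadata string. A hedged sketch with a hypothetical effect (id, names, and metadata string are illustrative; a real addition would also bump MODE_COUNT):

    uint16_t mode_myPulse(void) {                  // hypothetical effect function
      SEGMENT.fadeToBlackBy(16);
      return FRAMETIME;
    }
    static const char _data_FX_MODE_MYPULSE[] PROGMEM = "My Pulse@Speed,Intensity;!;!;1d";
    // in WS2812FX::setupEffectData():
    //   addEffect(FX_MODE_MYPULSE, &mode_myPulse, _data_FX_MODE_MYPULSE);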


@ -275,67 +275,39 @@
#define FX_MODE_2DMETABALLS 142 // non audio #define FX_MODE_2DMETABALLS 142 // non audio
#define FX_MODE_2DPULSER 143 // non audio #define FX_MODE_2DPULSER 143 // non audio
#define FX_MODE_2DDRIFT 144 // non audio #define FX_MODE_2DDRIFT 144 // non audio
#endif #define FX_MODE_2DWAVERLY 145 // audio enhanced
#ifndef WLED_DISABLE_AUDIO #define FX_MODE_2DSWIRL 146 // audio enhanced
#ifndef WLED_DISABLE_2D #define FX_MODE_2DAKEMI 147 // audio enhanced
#define FX_MODE_2DWAVERLY 145 // audio enhanced #define FX_MODE_2DGEQ 148 // audio enhanced
#define FX_MODE_2DSWIRL 146 // audio enhanced #define FX_MODE_2DFUNKYPLANK 149 // audio enhanced
#define FX_MODE_2DAKEMI 147 // audio enhanced #endif //WLED_DISABLE_2D
// 148 & 149 reserved #define FX_MODE_PIXELWAVE 150 // audio enhanced
#endif #define FX_MODE_JUGGLES 151 // audio enhanced
#define FX_MODE_PIXELWAVE 150 // audio enhanced #define FX_MODE_MATRIPIX 152 // audio enhanced
#define FX_MODE_JUGGLES 151 // audio enhanced #define FX_MODE_GRAVIMETER 153 // audio enhanced
#define FX_MODE_MATRIPIX 152 // audio enhanced #define FX_MODE_PLASMOID 154 // audio enhanced
#define FX_MODE_GRAVIMETER 153 // audio enhanced #define FX_MODE_PUDDLES 155 // audio enhanced
#define FX_MODE_PLASMOID 154 // audio enhanced #define FX_MODE_MIDNOISE 156 // audio enhanced
#define FX_MODE_PUDDLES 155 // audio enhanced #define FX_MODE_NOISEMETER 157 // audio enhanced
#define FX_MODE_MIDNOISE 156 // audio enhanced #define FX_MODE_NOISEFIRE 158 // audio enhanced
#define FX_MODE_NOISEMETER 157 // audio enhanced #define FX_MODE_PUDDLEPEAK 159 // audio enhanced
#define FX_MODE_NOISEFIRE 158 // audio enhanced #define FX_MODE_RIPPLEPEAK 160 // audio enhanced
#define FX_MODE_PUDDLEPEAK 159 // audio enhanced #define FX_MODE_GRAVCENTER 161 // audio enhanced
#define FX_MODE_RIPPLEPEAK 160 // audio enhanced #define FX_MODE_GRAVCENTRIC 162 // audio enhanced
#define FX_MODE_GRAVCENTER 161 // audio enhanced #define FX_MODE_PIXELS 163 // audio enhanced
#define FX_MODE_GRAVCENTRIC 162 // audio enhanced #define FX_MODE_FREQWAVE 164 // audio enhanced
#endif #define FX_MODE_FREQMATRIX 165 // audio enhanced
#define FX_MODE_WATERFALL 166 // audio enhanced
#define FX_MODE_FREQPIXELS 167 // audio enhanced
#define FX_MODE_BINMAP 168 // audio enhanced
#define FX_MODE_NOISEMOVE 169 // audio enhanced
#define FX_MODE_FREQMAP 170 // audio enhanced
#define FX_MODE_GRAVFREQ 171 // audio enhanced
#define FX_MODE_DJLIGHT 172 // audio enhanced
#define FX_MODE_BLURZ 173 // audio enhanced
#define FX_MODE_ROCKTAVES 174 // audio enhanced
#ifndef USERMOD_AUDIOREACTIVE #define MODE_COUNT 175
#ifndef WLED_DISABLE_AUDIO
#define MODE_COUNT 163
#else
#ifndef WLED_DISABLE_2D
#define MODE_COUNT 145
#else
#define MODE_COUNT 118
#endif
#endif
#else
#ifdef WLED_DISABLE_AUDIO
#error Incompatible options: WLED_DISABLE_AUDIO and USERMOD_AUDIOREACTIVE
#endif
#ifdef WLED_DISABLE_2D
#error AUDIOREACTIVE usermod requires 2D support.
#endif
#define FX_MODE_2DGEQ 148
#define FX_MODE_2DFUNKYPLANK 149
#define FX_MODE_PIXELS 163
#define FX_MODE_FREQWAVE 164
#define FX_MODE_FREQMATRIX 165
#define FX_MODE_WATERFALL 166
#define FX_MODE_FREQPIXELS 167
#define FX_MODE_BINMAP 168
#define FX_MODE_NOISEMOVE 169
#define FX_MODE_FREQMAP 170
#define FX_MODE_GRAVFREQ 171
#define FX_MODE_DJLIGHT 172
#define FX_MODE_BLURZ 173
#define FX_MODE_ROCKTAVES 174
//#define FX_MODE_CUSTOMEFFECT 175 //WLEDSR Custom Effects
#define MODE_COUNT 175
#endif
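Since the conditional MODE_COUNT ladder is gone, every FX_MODE_* id simply has to stay below the single MODE_COUNT of 175. A compile-time guard one could add (not present in the source), as a sketch:

    static_assert(FX_MODE_ROCKTAVES < MODE_COUNT, "MODE_COUNT must stay above the highest effect id");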
typedef enum mapping1D2D { typedef enum mapping1D2D {
M12_Pixels = 0, M12_Pixels = 0,
@ -647,8 +619,6 @@ class WS2812FX { // 96 bytes
public: public:
WS2812FX() : WS2812FX() :
gammaCorrectBri(false),
gammaCorrectCol(true),
paletteFade(0), paletteFade(0),
paletteBlend(0), paletteBlend(0),
milliampsPerLed(55), milliampsPerLed(55),
@ -747,8 +717,6 @@ class WS2812FX { // 96 bytes
inline void appendSegment(const Segment &seg = Segment()) { _segments.push_back(seg); } inline void appendSegment(const Segment &seg = Segment()) { _segments.push_back(seg); }
bool bool
gammaCorrectBri,
gammaCorrectCol,
checkSegmentAlignment(void), checkSegmentAlignment(void),
hasRGBWBus(void), hasRGBWBus(void),
hasCCTBus(void), hasCCTBus(void),


@ -208,6 +208,7 @@ void Segment::setUpLeds() {
CRGBPalette16 &Segment::loadPalette(CRGBPalette16 &targetPalette, uint8_t pal) { CRGBPalette16 &Segment::loadPalette(CRGBPalette16 &targetPalette, uint8_t pal) {
static unsigned long _lastPaletteChange = 0; // perhaps it should be per segment static unsigned long _lastPaletteChange = 0; // perhaps it should be per segment
static CRGBPalette16 randomPalette = CRGBPalette16(DEFAULT_COLOR);
byte tcp[72]; byte tcp[72];
if (pal < 245 && pal > GRADIENT_PALETTE_COUNT+13) pal = 0; if (pal < 245 && pal > GRADIENT_PALETTE_COUNT+13) pal = 0;
if (pal > 245 && (strip.customPalettes.size() == 0 || 255U-pal > strip.customPalettes.size()-1)) pal = 0; if (pal > 245 && (strip.customPalettes.size() == 0 || 255U-pal > strip.customPalettes.size()-1)) pal = 0;
@ -229,30 +230,31 @@ CRGBPalette16 &Segment::loadPalette(CRGBPalette16 &targetPalette, uint8_t pal) {
targetPalette = PartyColors_p; break; targetPalette = PartyColors_p; break;
case 1: //periodically replace palette with a random one. Doesn't work with multiple FastLED segments case 1: //periodically replace palette with a random one. Doesn't work with multiple FastLED segments
if (millis() - _lastPaletteChange > 5000 /*+ ((uint32_t)(255-intensity))*100*/) { if (millis() - _lastPaletteChange > 5000 /*+ ((uint32_t)(255-intensity))*100*/) {
targetPalette = CRGBPalette16( randomPalette = CRGBPalette16(
CHSV(random8(), 255, random8(128, 255)), CHSV(random8(), random8(160, 255), random8(128, 255)),
CHSV(random8(), 255, random8(128, 255)), CHSV(random8(), random8(160, 255), random8(128, 255)),
CHSV(random8(), 192, random8(128, 255)), CHSV(random8(), random8(160, 255), random8(128, 255)),
CHSV(random8(), 255, random8(128, 255))); CHSV(random8(), random8(160, 255), random8(128, 255)));
_lastPaletteChange = millis(); _lastPaletteChange = millis();
} break; }
targetPalette = randomPalette; break;
case 2: {//primary color only case 2: {//primary color only
CRGB prim = strip.gammaCorrectCol ? gamma32(colors[0]) : colors[0]; CRGB prim = gamma32(colors[0]);
targetPalette = CRGBPalette16(prim); break;} targetPalette = CRGBPalette16(prim); break;}
case 3: {//primary + secondary case 3: {//primary + secondary
CRGB prim = strip.gammaCorrectCol ? gamma32(colors[0]) : colors[0]; CRGB prim = gamma32(colors[0]);
CRGB sec = strip.gammaCorrectCol ? gamma32(colors[1]) : colors[1]; CRGB sec = gamma32(colors[1]);
targetPalette = CRGBPalette16(prim,prim,sec,sec); break;} targetPalette = CRGBPalette16(prim,prim,sec,sec); break;}
case 4: {//primary + secondary + tertiary case 4: {//primary + secondary + tertiary
CRGB prim = strip.gammaCorrectCol ? gamma32(colors[0]) : colors[0]; CRGB prim = gamma32(colors[0]);
CRGB sec = strip.gammaCorrectCol ? gamma32(colors[1]) : colors[1]; CRGB sec = gamma32(colors[1]);
CRGB ter = strip.gammaCorrectCol ? gamma32(colors[2]) : colors[2]; CRGB ter = gamma32(colors[2]);
targetPalette = CRGBPalette16(ter,sec,prim); break;} targetPalette = CRGBPalette16(ter,sec,prim); break;}
case 5: {//primary + secondary (+tert if not off), more distinct case 5: {//primary + secondary (+tert if not off), more distinct
CRGB prim = strip.gammaCorrectCol ? gamma32(colors[0]) : colors[0]; CRGB prim = gamma32(colors[0]);
CRGB sec = strip.gammaCorrectCol ? gamma32(colors[1]) : colors[1]; CRGB sec = gamma32(colors[1]);
if (colors[2]) { if (colors[2]) {
CRGB ter = strip.gammaCorrectCol ? gamma32(colors[2]) : colors[2]; CRGB ter = gamma32(colors[2]);
targetPalette = CRGBPalette16(prim,prim,prim,prim,prim,sec,sec,sec,sec,sec,ter,ter,ter,ter,ter,prim); targetPalette = CRGBPalette16(prim,prim,prim,prim,prim,sec,sec,sec,sec,sec,ter,ter,ter,ter,ter,prim);
} else { } else {
targetPalette = CRGBPalette16(prim,prim,prim,prim,prim,prim,prim,prim,sec,sec,sec,sec,sec,sec,sec,sec); targetPalette = CRGBPalette16(prim,prim,prim,prim,prim,prim,prim,prim,sec,sec,sec,sec,sec,sec,sec,sec);
@ -823,7 +825,7 @@ uint32_t Segment::color_from_palette(uint16_t i, bool mapping, bool wrap, uint8_
// default palette or no RGB support on segment // default palette or no RGB support on segment
if ((palette == 0 && mcol < NUM_COLORS) || !(_capabilities & 0x01)) { if ((palette == 0 && mcol < NUM_COLORS) || !(_capabilities & 0x01)) {
uint32_t color = (transitional && _t) ? _t->_colorT[mcol] : colors[mcol]; uint32_t color = (transitional && _t) ? _t->_colorT[mcol] : colors[mcol];
color = strip.gammaCorrectCol ? gamma32(color) : color; color = gamma32(color);
if (pbri == 255) return color; if (pbri == 255) return color;
return RGBW32(scale8_video(R(color),pbri), scale8_video(G(color),pbri), scale8_video(B(color),pbri), scale8_video(W(color),pbri)); return RGBW32(scale8_video(R(color),pbri), scale8_video(G(color),pbri), scale8_video(B(color),pbri), scale8_video(W(color),pbri));
} }
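The Random Cycle bugfix in loadPalette() above keeps the generated palette in a static variable so every call re-applies the same colors and only regenerates them after the 5-second interval. A standalone sketch of the pattern (function name is illustrative), using FastLED types:

    CRGBPalette16 &randomCyclePalette() {
      static unsigned long lastChange = 0;
      static CRGBPalette16 pal = CRGBPalette16(CRGB::Black);
      if (millis() - lastChange > 5000) {
        pal = CRGBPalette16(CHSV(random8(), random8(160, 255), random8(128, 255)),
                            CHSV(random8(), random8(160, 255), random8(128, 255)),
                            CHSV(random8(), random8(160, 255), random8(128, 255)),
                            CHSV(random8(), random8(160, 255), random8(128, 255)));
        lastChange = millis();
      }
      return pal;      // same palette is handed back until the next regeneration
    }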


@ -302,10 +302,10 @@ bool deserializeConfig(JsonObject doc, bool fromFS) {
float light_gc_bri = light["gc"]["bri"]; float light_gc_bri = light["gc"]["bri"];
float light_gc_col = light["gc"]["col"]; // 2.8 float light_gc_col = light["gc"]["col"]; // 2.8
if (light_gc_bri > 1.5) strip.gammaCorrectBri = true; if (light_gc_bri > 1.5) gammaCorrectBri = true;
else if (light_gc_bri > 0.5) strip.gammaCorrectBri = false; else if (light_gc_bri > 0.5) gammaCorrectBri = false;
if (light_gc_col > 1.5) strip.gammaCorrectCol = true; if (light_gc_col > 1.5) gammaCorrectCol = true;
else if (light_gc_col > 0.5) strip.gammaCorrectCol = false; else if (light_gc_col > 0.5) gammaCorrectCol = false;
JsonObject light_tr = light["tr"]; JsonObject light_tr = light["tr"];
CJSON(fadeTransition, light_tr["mode"]); CJSON(fadeTransition, light_tr["mode"]);
@ -759,8 +759,8 @@ void serializeConfig() {
light[F("aseg")] = autoSegments; light[F("aseg")] = autoSegments;
JsonObject light_gc = light.createNestedObject("gc"); JsonObject light_gc = light.createNestedObject("gc");
light_gc["bri"] = (strip.gammaCorrectBri) ? 2.8 : 1.0; light_gc["bri"] = (gammaCorrectBri) ? 2.8 : 1.0;
light_gc["col"] = (strip.gammaCorrectCol) ? 2.8 : 1.0; light_gc["col"] = (gammaCorrectCol) ? 2.8 : 1.0;
JsonObject light_tr = light.createNestedObject("tr"); JsonObject light_tr = light.createNestedObject("tr");
light_tr["mode"] = fadeTransition; light_tr["mode"] = fadeTransition;


@ -358,7 +358,7 @@ uint8_t gamma8(uint8_t b)
uint32_t gamma32(uint32_t color) uint32_t gamma32(uint32_t color)
{ {
//if (!strip.gammaCorrectCol) return color; if (!gammaCorrectCol) return color;
uint8_t w = W(color); uint8_t w = W(color);
uint8_t r = R(color); uint8_t r = R(color);
uint8_t g = G(color); uint8_t g = G(color);
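With gammaCorrectCol now a global, gamma32() can gate itself instead of relying on callers to check strip.gammaCorrectCol. A sketch of how the rest of the function can look, assuming WLED's gamma8() lookup helper and the R/G/B/W/RGBW32 packing macros:

    uint32_t gamma32_sketch(uint32_t color) {
      if (!gammaCorrectCol) return color;     // global flag, formerly strip.gammaCorrectCol
      uint8_t w = gamma8(W(color));
      uint8_t r = gamma8(R(color));
      uint8_t g = gamma8(G(color));
      uint8_t b = gamma8(B(color));
      return RGBW32(r, g, b, w);
    }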


@ -611,13 +611,13 @@ function parseInfo(i) {
//gId("filter2D").classList.add("hide"); //gId("filter2D").classList.add("hide");
hideModes("2D"); hideModes("2D");
} }
if (i.noaudio) { // if (i.noaudio) {
gId("filterVol").classList.add("hide"); // gId("filterVol").classList.add("hide");
gId("filterFreq").classList.add("hide"); // gId("filterFreq").classList.add("hide");
} // }
// if (!i.u || !i.u.AudioReactive) { // if (!i.u || !i.u.AudioReactive) {
//gId("filterVol").classList.add("hide"); hideModes(" ♪"); // hide volume reactive effects // gId("filterVol").classList.add("hide"); hideModes(" ♪"); // hide volume reactive effects
//gId("filterFreq").classList.add("hide"); hideModes(" ♫"); // hide frequency reactive effects // gId("filterFreq").classList.add("hide"); hideModes(" ♫"); // hide frequency reactive effects
// } // }
} }
@ -654,11 +654,11 @@ function populateInfo(i)
} }
var vcn = "Kuuhaku"; var vcn = "Kuuhaku";
if (i.ver.startsWith("0.14.")) vcn = "Hoshi"; if (i.ver.startsWith("0.14.")) vcn = "Hoshi";
if (i.ver.includes("-bl")) vcn = "Ryujin";
if (i.cn) vcn = i.cn; if (i.cn) vcn = i.cn;
cn += `v${i.ver} "${vcn}"<br><br><table> cn += `v${i.ver} "${vcn}"<br><br><table>
${urows} ${urows}
${urows===""?'':'<tr><td colspan=2><hr style="height:1px;border-width:0;color:gray;background-color:gray"></td></tr>'}
${inforow("Build",i.vid)} ${inforow("Build",i.vid)}
${inforow("Signal strength",i.wifi.signal +"% ("+ i.wifi.rssi, " dBm)")} ${inforow("Signal strength",i.wifi.signal +"% ("+ i.wifi.rssi, " dBm)")}
${inforow("Uptime",getRuntimeStr(i.uptime))} ${inforow("Uptime",getRuntimeStr(i.uptime))}
@ -714,7 +714,7 @@ function populateSegments(s)
} }
let map2D = `<div id="seg${i}map2D" data-map="map2D" class="lbl-s hide">Expand 1D FX<br> let map2D = `<div id="seg${i}map2D" data-map="map2D" class="lbl-s hide">Expand 1D FX<br>
<div class="sel-p"><select class="sel-p" id="seg${i}mp12" onchange="setMp12(${i})"> <div class="sel-p"><select class="sel-p" id="seg${i}mp12" onchange="setMp12(${i})">
<option value="0" ${inst.mp12==0?' selected':''}>Strip</option> <option value="0" ${inst.mp12==0?' selected':''}>Pixels</option>
<option value="1" ${inst.mp12==1?' selected':''}>Bar</option> <option value="1" ${inst.mp12==1?' selected':''}>Bar</option>
<option value="2" ${inst.mp12==2?' selected':''}>Arc</option> <option value="2" ${inst.mp12==2?' selected':''}>Arc</option>
<option value="3" ${inst.mp12==3?' selected':''}>Corner</option> <option value="3" ${inst.mp12==3?' selected':''}>Corner</option>


@ -506,7 +506,6 @@ function populateInfo(i)
} }
var vcn = "Kuuhaku"; var vcn = "Kuuhaku";
if (i.ver.startsWith("0.14.")) vcn = "Hoshi"; if (i.ver.startsWith("0.14.")) vcn = "Hoshi";
if (i.ver.includes("-bl")) vcn = "Ryujin";
if (i.cn) vcn = i.cn; if (i.cn) vcn = i.cn;
cn += `v${i.ver} "${vcn}"<br><br><table> cn += `v${i.ver} "${vcn}"<br><br><table>

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -84,8 +84,8 @@ void deserializeSegment(JsonObject elem, byte it, byte presetId)
if ((spc>0 && spc!=seg.spacing) || seg.map1D2D!=map1D2D) seg.fill(BLACK); // clear spacing gaps if ((spc>0 && spc!=seg.spacing) || seg.map1D2D!=map1D2D) seg.fill(BLACK); // clear spacing gaps
seg.map1D2D = map1D2D & 0x07; seg.map1D2D = constrain(map1D2D, 0, 7);
seg.soundSim = soundSim & 0x03; seg.soundSim = constrain(soundSim, 0, 7);
uint16_t len = 1; uint16_t len = 1;
if (stop > start) len = stop - start; if (stop > start) len = stop - start;
@ -162,12 +162,12 @@ void deserializeSegment(JsonObject elem, byte it, byte presetId)
} }
#endif #endif
seg.selected = elem["sel"] | seg.selected; seg.selected = elem["sel"] | seg.selected;
seg.reverse = elem["rev"] | seg.reverse; seg.reverse = elem["rev"] | seg.reverse;
seg.mirror = elem["mi"] | seg.mirror; seg.mirror = elem["mi"] | seg.mirror;
#ifndef WLED_DISABLE_2D #ifndef WLED_DISABLE_2D
seg.reverse_y = elem["rY"] | seg.reverse_y; seg.reverse_y = elem["rY"] | seg.reverse_y;
seg.mirror_y = elem["mY"] | seg.mirror_y; seg.mirror_y = elem["mY"] | seg.mirror_y;
seg.transpose = elem[F("tp")] | seg.transpose; seg.transpose = elem[F("tp")] | seg.transpose;
#endif #endif
@ -190,8 +190,8 @@ void deserializeSegment(JsonObject elem, byte it, byte presetId)
sOpt = extractModeDefaults(fx, "c1"); if (sOpt >= 0) seg.custom1 = sOpt; sOpt = extractModeDefaults(fx, "c1"); if (sOpt >= 0) seg.custom1 = sOpt;
sOpt = extractModeDefaults(fx, "c2"); if (sOpt >= 0) seg.custom2 = sOpt; sOpt = extractModeDefaults(fx, "c2"); if (sOpt >= 0) seg.custom2 = sOpt;
sOpt = extractModeDefaults(fx, "c3"); if (sOpt >= 0) seg.custom3 = sOpt; sOpt = extractModeDefaults(fx, "c3"); if (sOpt >= 0) seg.custom3 = sOpt;
sOpt = extractModeDefaults(fx, "mp12"); if (sOpt >= 0) seg.map1D2D = sOpt & 0x07; sOpt = extractModeDefaults(fx, "mp12"); if (sOpt >= 0) seg.map1D2D = constrain(sOpt, 0, 7);
sOpt = extractModeDefaults(fx, "ssim"); if (sOpt >= 0) seg.soundSim = sOpt & 0x03; sOpt = extractModeDefaults(fx, "ssim"); if (sOpt >= 0) seg.soundSim = constrain(sOpt, 0, 7);
sOpt = extractModeDefaults(fx, "rev"); if (sOpt >= 0) seg.reverse = (bool)sOpt; sOpt = extractModeDefaults(fx, "rev"); if (sOpt >= 0) seg.reverse = (bool)sOpt;
sOpt = extractModeDefaults(fx, "mi"); if (sOpt >= 0) seg.mirror = (bool)sOpt; // NOTE: setting this option is a risky business sOpt = extractModeDefaults(fx, "mi"); if (sOpt >= 0) seg.mirror = (bool)sOpt; // NOTE: setting this option is a risky business
sOpt = extractModeDefaults(fx, "rY"); if (sOpt >= 0) seg.reverse_y = (bool)sOpt; sOpt = extractModeDefaults(fx, "rY"); if (sOpt >= 0) seg.reverse_y = (bool)sOpt;
@ -219,7 +219,7 @@ void deserializeSegment(JsonObject elem, byte it, byte presetId)
getVal(elem["c2"], &seg.custom2); getVal(elem["c2"], &seg.custom2);
uint8_t cust3 = seg.custom3; uint8_t cust3 = seg.custom3;
getVal(elem["c3"], &cust3); // we can't pass reference to bitfield getVal(elem["c3"], &cust3); // we can't pass reference to bitfield
seg.custom3 = cust3; seg.custom3 = constrain(cust3, 0, 31);
seg.check1 = elem["o1"] | seg.check1; seg.check1 = elem["o1"] | seg.check1;
seg.check2 = elem["o2"] | seg.check2; seg.check2 = elem["o2"] | seg.check2;
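The switch from masking (& 0x07, & 0x03) to constrain() matters because these are narrow bitfields: an out-of-range JSON value should clamp to the nearest valid option rather than wrap around. A sketch, assuming 3-bit and 5-bit fields as the 0..7 and 0..31 clamps above suggest (not the real Segment layout):

    struct SegmentOptsSketch {
      uint8_t map1D2D  : 3;    // 0..7
      uint8_t soundSim : 3;    // 0..7
      uint8_t custom3  : 5;    // 0..31
    };
    void applySketch(SegmentOptsSketch &seg, int mp12, int ssim, int c3) {
      seg.map1D2D  = constrain(mp12, 0, 7);    // e.g. 9 becomes 7 instead of wrapping to 1
      seg.soundSim = constrain(ssim, 0, 7);
      seg.custom3  = constrain(c3, 0, 31);
    }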
@ -265,12 +265,9 @@ void deserializeSegment(JsonObject elem, byte it, byte presetId)
} }
if (set < 2) stop = start + 1; if (set < 2) stop = start + 1;
uint32_t c = gamma32(RGBW32(rgbw[0], rgbw[1], rgbw[2], rgbw[3]));
for (int i = start; i < stop; i++) { for (int i = start; i < stop; i++) {
if (strip.gammaCorrectCol) { seg.setPixelColor(i, c);
seg.setPixelColor(i, gamma8(rgbw[0]), gamma8(rgbw[1]), gamma8(rgbw[2]), gamma8(rgbw[3]));
} else {
seg.setPixelColor(i, rgbw[0], rgbw[1], rgbw[2], rgbw[3]);
}
} }
if (!set) start++; if (!set) start++;
set = 0; set = 0;
@ -496,26 +493,26 @@ void serializeSegment(JsonObject& root, Segment& seg, byte id, bool forPreset, b
strcat(colstr, "]"); strcat(colstr, "]");
root["col"] = serialized(colstr); root["col"] = serialized(colstr);
root["fx"] = seg.mode; root["fx"] = seg.mode;
root["sx"] = seg.speed; root["sx"] = seg.speed;
root["ix"] = seg.intensity; root["ix"] = seg.intensity;
root["pal"] = seg.palette; root["pal"] = seg.palette;
root["c1"] = seg.custom1; root["c1"] = seg.custom1;
root["c2"] = seg.custom2; root["c2"] = seg.custom2;
root["c3"] = seg.custom3; root["c3"] = seg.custom3;
root["sel"] = seg.isSelected(); root["sel"] = seg.isSelected();
root["rev"] = seg.reverse; root["rev"] = seg.reverse;
root["mi"] = seg.mirror; root["mi"] = seg.mirror;
if (strip.isMatrix) { if (strip.isMatrix) {
root["rY"] = seg.reverse_y; root["rY"] = seg.reverse_y;
root["mY"] = seg.mirror_y; root["mY"] = seg.mirror_y;
root[F("tp")] = seg.transpose; root[F("tp")] = seg.transpose;
} }
root["o1"] = seg.check1; root["o1"] = seg.check1;
root["o2"] = seg.check2; root["o2"] = seg.check2;
root["o3"] = seg.check3; root["o3"] = seg.check3;
root["ssim"] = seg.soundSim; root["ssim"] = seg.soundSim;
root["mp12"] = seg.map1D2D; root["mp12"] = seg.map1D2D;
} }
void serializeState(JsonObject root, bool forPreset, bool includeBri, bool segmentBounds) void serializeState(JsonObject root, bool forPreset, bool includeBri, bool segmentBounds)
@ -615,10 +612,6 @@ void serializeInfo(JsonObject root)
leds[F("wv")] = totalLC & 0x02; // deprecated, true if white slider should be displayed for any segment leds[F("wv")] = totalLC & 0x02; // deprecated, true if white slider should be displayed for any segment
leds["cct"] = totalLC & 0x04; // deprecated, use info.leds.lc leds["cct"] = totalLC & 0x04; // deprecated, use info.leds.lc
#ifdef WLED_DISABLE_AUDIO
root[F("noaudio")] = true;
#endif
#ifdef WLED_DEBUG #ifdef WLED_DEBUG
JsonArray i2c = root.createNestedArray(F("i2c")); JsonArray i2c = root.createNestedArray(F("i2c"));
i2c.add(i2c_sda); i2c.add(i2c_sda);


@ -184,8 +184,8 @@ void handleSettingsSet(AsyncWebServerRequest *request, byte subPage)
turnOnAtBoot = request->hasArg(F("BO")); turnOnAtBoot = request->hasArg(F("BO"));
t = request->arg(F("BP")).toInt(); t = request->arg(F("BP")).toInt();
if (t <= 250) bootPreset = t; if (t <= 250) bootPreset = t;
strip.gammaCorrectBri = request->hasArg(F("GB")); gammaCorrectBri = request->hasArg(F("GB"));
strip.gammaCorrectCol = request->hasArg(F("GC")); gammaCorrectCol = request->hasArg(F("GC"));
fadeTransition = request->hasArg(F("TF")); fadeTransition = request->hasArg(F("TF"));
t = request->arg(F("TD")).toInt(); t = request->arg(F("TD")).toInt();


@ -564,7 +564,7 @@ void setRealtimePixel(uint16_t i, byte r, byte g, byte b, byte w)
{ {
uint16_t pix = i + arlsOffset; uint16_t pix = i + arlsOffset;
if (pix < strip.getLengthTotal()) { if (pix < strip.getLengthTotal()) {
if (!arlsDisableGammaCorrection && strip.gammaCorrectCol) { if (!arlsDisableGammaCorrection && gammaCorrectCol) {
r = gamma8(r); r = gamma8(r);
g = gamma8(g); g = gamma8(g);
b = gamma8(b); b = gamma8(b);


@ -133,9 +133,6 @@
#endif #endif
#ifdef USERMOD_AUDIOREACTIVE #ifdef USERMOD_AUDIOREACTIVE
#ifdef WLED_DISABLE_AUDIO
#error Incompatible options: WLED_DISABLE_AUDIO and USERMOD_AUDIOREACTIVE
#endif
#include "../usermods/audioreactive/audio_reactive.h" #include "../usermods/audioreactive/audio_reactive.h"
#endif #endif
@ -260,9 +257,6 @@ void registerUsermods()
#endif #endif
#ifdef USERMOD_AUDIOREACTIVE #ifdef USERMOD_AUDIOREACTIVE
#ifdef WLED_DISABLE_AUDIO
#error Incompatible options: WLED_DISABLE_AUDIO and USERMOD_AUDIOREACTIVE
#endif
usermods.add(new AudioReactive()); usermods.add(new AudioReactive());
#endif #endif
} }


@ -390,7 +390,6 @@ uint16_t crc16(const unsigned char* data_p, size_t length) {
} }
#ifndef WLED_DISABLE_AUDIO
/////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////
// Begin simulateSound (to enable audio enhanced effects to display something) // Begin simulateSound (to enable audio enhanced effects to display something)
/////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////
@ -402,7 +401,6 @@ typedef enum UM_SoundSimulations {
UMS_14_3 UMS_14_3
} um_soundSimulations_t; } um_soundSimulations_t;
// this is still work in progress
um_data_t* simulateSound(uint8_t simulationId) um_data_t* simulateSound(uint8_t simulationId)
{ {
static uint8_t samplePeak; static uint8_t samplePeak;
@ -507,7 +505,6 @@ um_data_t* simulateSound(uint8_t simulationId)
return um_data; return um_data;
} }
#endif
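simulateSound() is what lets the audio-enhanced effects animate even without the audioreactive usermod, which is why its WLED_DISABLE_AUDIO guard can be dropped. A sketch of how an effect typically obtains the data, mirroring the um_data slots used in the effects above (assuming the getUMData() helper used elsewhere in FX.cpp):

    um_data_t *um_data;
    if (!usermods.getUMData(&um_data, USERMOD_ID_AUDIOREACTIVE)) {
      um_data = simulateSound(SEGMENT.soundSim);      // synthesized volume/FFT data as a fallback
    }
    float FFT_MajorPeak = *(float*) um_data->u_data[4];
    float my_magnitude  = *(float*) um_data->u_data[5];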
void enumerateLedmaps() { void enumerateLedmaps() {


@ -8,7 +8,7 @@
*/ */
// version code in format yymmddb (b = daily build) // version code in format yymmddb (b = daily build)
#define VERSION 2209020 #define VERSION 2209050
//uncomment this if you have a "my_config.h" file you'd like to use //uncomment this if you have a "my_config.h" file you'd like to use
//#define WLED_USE_MY_CONFIG //#define WLED_USE_MY_CONFIG
@ -287,9 +287,11 @@ WLED_GLOBAL byte bootPreset _INIT(0); // save preset to load
//if true, a segment per bus will be created on boot and LED settings save //if true, a segment per bus will be created on boot and LED settings save
//if false, only one segment spanning the total LEDs is created, //if false, only one segment spanning the total LEDs is created,
//but not on LED settings save if there is more than one segment currently //but not on LED settings save if there is more than one segment currently
WLED_GLOBAL bool autoSegments _INIT(false); WLED_GLOBAL bool autoSegments _INIT(false);
WLED_GLOBAL bool correctWB _INIT(false); //CCT color correction of RGB color WLED_GLOBAL bool correctWB _INIT(false); // CCT color correction of RGB color
WLED_GLOBAL bool cctFromRgb _INIT(false); //CCT is calculated from RGB instead of using seg.cct WLED_GLOBAL bool cctFromRgb _INIT(false); // CCT is calculated from RGB instead of using seg.cct
WLED_GLOBAL bool gammaCorrectCol _INIT(false); // use gamma correction on colors
WLED_GLOBAL bool gammaCorrectBri _INIT(false); // use gamma correction on brightness
WLED_GLOBAL byte col[] _INIT_N(({ 255, 160, 0, 0 })); // current RGB(W) primary color. col[] should be updated if you want to change the color. WLED_GLOBAL byte col[] _INIT_N(({ 255, 160, 0, 0 })); // current RGB(W) primary color. col[] should be updated if you want to change the color.
WLED_GLOBAL byte colSec[] _INIT_N(({ 0, 0, 0, 0 })); // current RGB(W) secondary color WLED_GLOBAL byte colSec[] _INIT_N(({ 0, 0, 0, 0 })); // current RGB(W) secondary color
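gammaCorrectCol and gammaCorrectBri move from WS2812FX members to these globals; the WLED_GLOBAL/_INIT macros roughly expand as in the sketch below, so the same header yields one definition and extern declarations everywhere else:

    #ifdef WLED_DEFINE_GLOBAL_VARS
      #define WLED_GLOBAL
      #define _INIT(x) = x
    #else
      #define WLED_GLOBAL extern
      #define _INIT(x)
    #endif
    WLED_GLOBAL bool gammaCorrectCol _INIT(false);   // use gamma correction on colors
    WLED_GLOBAL bool gammaCorrectBri _INIT(false);   // use gamma correction on brightness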


@ -140,8 +140,8 @@ void loadSettingsFromEEPROM()
ntpEnabled = EEPROM.read(327); ntpEnabled = EEPROM.read(327);
currentTimezone = EEPROM.read(328); currentTimezone = EEPROM.read(328);
useAMPM = EEPROM.read(329); useAMPM = EEPROM.read(329);
strip.gammaCorrectBri = EEPROM.read(330); gammaCorrectBri = EEPROM.read(330);
strip.gammaCorrectCol = EEPROM.read(331); gammaCorrectCol = EEPROM.read(331);
overlayCurrent = EEPROM.read(332); overlayCurrent = EEPROM.read(332);
alexaEnabled = EEPROM.read(333); alexaEnabled = EEPROM.read(333);
@ -414,10 +414,10 @@ void deEEP() {
for (byte j = 0; j < numChannels; j++) colX.add(EEPROM.read(memloc + j)); for (byte j = 0; j < numChannels; j++) colX.add(EEPROM.read(memloc + j));
} }
segObj["fx"] = EEPROM.read(i+10); segObj["fx"] = EEPROM.read(i+10);
segObj["sx"] = EEPROM.read(i+11); segObj["sx"] = EEPROM.read(i+11);
segObj["ix"] = EEPROM.read(i+16); segObj["ix"] = EEPROM.read(i+16);
segObj["pal"] = EEPROM.read(i+17); segObj["pal"] = EEPROM.read(i+17);
} else { } else {
Segment* seg = strip.getSegments(); Segment* seg = strip.getSegments();
memcpy(seg, EEPROM.getDataPtr() +i+2, 240); memcpy(seg, EEPROM.getDataPtr() +i+2, 240);


@ -394,8 +394,8 @@ void getSettingsJS(byte subPage, char* dest)
sappend('c',SET_F("BO"),turnOnAtBoot); sappend('c',SET_F("BO"),turnOnAtBoot);
sappend('v',SET_F("BP"),bootPreset); sappend('v',SET_F("BP"),bootPreset);
sappend('c',SET_F("GB"),strip.gammaCorrectBri); sappend('c',SET_F("GB"),gammaCorrectBri);
sappend('c',SET_F("GC"),strip.gammaCorrectCol); sappend('c',SET_F("GC"),gammaCorrectCol);
sappend('c',SET_F("TF"),fadeTransition); sappend('c',SET_F("TF"),fadeTransition);
sappend('v',SET_F("TD"),transitionDelayDefault); sappend('v',SET_F("TD"),transitionDelayDefault);
sappend('c',SET_F("PF"),strip.paletteFade); sappend('c',SET_F("PF"),strip.paletteFade);