This commit is contained in:
kbecke23 2018-06-11 15:52:49 +02:00
commit 59cf6e19a5
4 changed files with 29 additions and 19 deletions

View File

@ -60,7 +60,8 @@ Logs are created by default:
+ each nlms function. Predicted value, input value and error value as well as square mean and deviation
+ updated weights for each nlms calculation
`CPP_NLMS -i <inputfile> -g true` prints a graphical overview to `graphResults.html`. The output is an __SVG__ embedded in html.
`CPP_NLMS -i <inputfile> -g true` prints a graphical overview to `graphResults.html` using the `graphResults_template.html` in the same directory as the binary.
`CPP_NLMS -i <inputfile> -g <pathToTemplate>` does the same, only the `graphResults_template.html` is handed from a different path. The output is an __SVG__ embedded in html for both.
![alt text](https://github.com/FBRDNLMS/NLMSvariants/blob/master/img/lms_graphing.png "ANSI_C/C++ bin, generate graphical output ")

View File

@ -194,7 +194,7 @@ Variant (1/3), substract local mean.
*/
void localMean ( mldata_t *mlData, point_t points[] ) {
double *localWeights = (double *) malloc ( sizeof(double) * mlData->windowSize + 1);
localWeights = mlData->weights;
memcpy(localWeights, mlData->weights, sizeof(double) * sizeof(mlData->windowSize) );
char fileName[50];
const unsigned xErrorLength = mlData->samplesCount;
@ -227,7 +227,7 @@ void localMean ( mldata_t *mlData, point_t points[] ) {
xError[xCount] = xActual - xPredicted; // Get error value
xSquared = 0.0;
for (i = 1; i < _arrayLength; i++) { // Get xSquared
xSquared += pow(xSamples[xCount - i] - xMean, 2);
xSquared += (xSamples[xCount - i] - xMean) * (xSamples[xCount - i] - xMean);
}
if ( xSquared == 0.0 ) { // Otherwise returns Pred: -1.#IND00 in some occassions
xSquared = 1.0;
@ -235,9 +235,9 @@ void localMean ( mldata_t *mlData, point_t points[] ) {
for ( i = 1; i < _arrayLength; i++ ) { // Update weights
localWeights[i] = localWeights[i - 1] + mlData->learnrate * xError[xCount] // Substract localMean
* ( (xSamples[xCount - i] - xMean) / xSquared );
fprintf( fp9, "%lf\n", localWeights[i] );
fprintf( fp9, "%lf;", localWeights[i] );
}
fprintf(fp9, "\n");
fprintf(fp4, "%d\t%f\t%f\t%f\n", xCount, xPredicted, xActual, xError[xCount]); // Write to logfile
points[xCount].xVal[1] = xCount; // Save points so graph can be build later on
@ -253,12 +253,14 @@ void localMean ( mldata_t *mlData, point_t points[] ) {
double deviation = 0.0;
for (i = 1; i < xErrorLength; i++) { // Mean square
deviation += pow(xError[i] - mean, 2);
deviation += (xError[i] - mean) * (xError[i] - mean);
}
deviation /= xErrorLength; // Deviation
printf("mean square err: %lf, variance: %lf\t\tlocal Mean\n", mean, deviation);
fprintf(fp4, "\nQuadratische Varianz(x_error): %f\nMittelwert:(x_error): %f\n\n", deviation, mean); // Write to logfile
fclose(fp4);
free(localWeights);
}
/*
@ -272,12 +274,12 @@ substract direct predecessor
======================================================================================================
*/
void directPredecessor( mldata_t *mlData, point_t points[]) {
double *localWeights = ( double * ) malloc ( sizeof(double) * mlData->windowSize + 1 );
localWeights = mlData->weights;
double *localWeights = (double * ) malloc ( sizeof(double) * mlData->windowSize + 1 );
memcpy(localWeights, mlData->weights, sizeof(double) * sizeof(mlData->windowSize));
char fileName[512];
const unsigned xErrorLength = mlData->samplesCount;
double xError[xErrorLength];
const unsigned xErrorLength = mlData->samplesCount;
double xError[xErrorLength];
unsigned xCount = 0, i;
double xActual = 0.0;
double xPredicted = 0.0;
@ -303,7 +305,8 @@ void directPredecessor( mldata_t *mlData, point_t points[]) {
double xSquared = 0.0;
for (i = 1; i < _arrayLength; i++) {
xSquared += pow(xSamples[xCount - 1] - xSamples[xCount - i - 1], 2); // substract direct predecessor
xSquared += (xSamples[xCount - 1] - xSamples[xCount - i - 1])
* (xSamples[xCount - 1] - xSamples[xCount - i - 1]); // substract direct predecessor
}
if ( xSquared == 0.0 ) { // Otherwise returns Pred: -1.#IND00 in some occassions
xSquared = 1.0;
@ -327,12 +330,14 @@ void directPredecessor( mldata_t *mlData, point_t points[]) {
for (i = 1; i < xErrorLength; i++) {
deviation += pow(xError[i] - mean, 2); // Mean square
deviation += (xError[i] - mean) * (xError[i] - mean); // Mean square
}
deviation /= xErrorLength; // Deviation
printf("mean square err: %lf, variance: %lf\t\t\tdirect Predecessor\n", mean, deviation);
fprintf(fp3, "\nQuadratische Varianz(x_error): %f\nMittelwert:(x_error): %f\n\n", deviation, mean);
fclose(fp3);
free(localWeights);
}
/*
@ -347,10 +352,11 @@ differential predecessor.
*/
void differentialPredecessor ( mldata_t *mlData, point_t points[] ) {
double *localWeights = (double *) malloc ( sizeof(double) * mlData->windowSize + 1 );
localWeights = mlData->weights;
const unsigned xErrorLength = mlData->samplesCount;
memcpy(localWeights, mlData->weights, sizeof(double) * sizeof(mlData->windowSize));
const unsigned xErrorLength = mlData->samplesCount;
char fileName[512];
double xError[xErrorLength];
double xError[xErrorLength];
unsigned xCount = 0, i;
double xPredicted = 0.0;
@ -377,7 +383,8 @@ void differentialPredecessor ( mldata_t *mlData, point_t points[] ) {
double xSquared = 0.0;
for (i = 1; i < _arrayLength; i++) {
xSquared += pow(xSamples[xCount - i] - xSamples[xCount - i - 1], 2); // Substract direct predecessor
xSquared += (xSamples[xCount - i] - xSamples[xCount - i - 1])
* (xSamples[xCount - i] - xSamples[xCount - i -1]); // Substract direct predecessor
}
if ( xSquared == 0.0 ) { // Otherwise returns Pred: -1.#IND00 in some occassions
xSquared = 1.0;
@ -404,12 +411,14 @@ void differentialPredecessor ( mldata_t *mlData, point_t points[] ) {
for (i = 1; i < xErrorLength; i++) { // Mean square
deviation += pow(xError[i] - mean, 2);
deviation += (xError[i] - mean) * (xError[i] - mean);;
}
deviation /= xErrorLength;
printf("mean square err: %lf, variance: %lf\t\t\tdifferential Predecessor\n", mean, deviation);
fprintf(fp6, "\nQuadratische Varianz(x_error): %f\nMittelwert:(x_error): %f\n\n", deviation, mean);
fclose(fp6);
free(localWeights);
}
/*

View File

@ -29,7 +29,7 @@ There are a bunch of options you can predefine but do not have to. The only para
| -w | Size of M (window). | 5 |
| -c | Choose RGB color channel, green has least noise. | green |
| -l | Learnrate of machine learning.| 0.4 |
| -g | include graph building. Choose for amount of input data lower than 1200. If the template is located in another folder use its path otherwise use true.| none|
| -g | include graph building. Choose for amount of input data lower than 1200. If the template is located in another folder use its path otherwise use true. Do not use whitespace in path to folder.| none|
| -s | Seed randomizing weights. Choose for reproducibility. | time(NULL)|
This code is ANSI aka C89 compatible. No POSIX, C99, C11 or GNU libs, because it had to be Windows compatible. There are way easier methods like getline() for file parsing or getopt() as an args parser, but for compatibility reasons things have been kept simple.

View File

@ -28,6 +28,6 @@ There are a bunch of options you can predefine but do not have to. The only para
| -w | Size of M (window). | 5 |
| -c | Choose RGB color channel, green has least noise. | green |
| -l | Learnrate of machine learning. | 0.4 |
| -g | include graph building. Choose for amount of input data lower than 1200. Choose path if template is located in another folder, else use true.| none|
| -g | include graph building. Choose for amount of input data lower than 1200. Choose path if template is located in another folder, else use true. Do not use whitespace in path.| none|
| -s | Seed randomizing weights. Choose for reproducibility. | time(NULL)|