Portapack Application Development Tutorial (12): SSTV Receiver

The code in the previous post was getting too long, so I started a new post.

I have finally merged the demodulation and decoding steps into a single program.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <time.h>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>


#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <libhackrf/hackrf.h>
#include <iostream>

using namespace cv;
struct pcm *pcm;

Mat frame;

int SAMPLERATE = 8000;
double oneBitSampleCount;

#define MAXIMUM_BUF_LENGTH		(16 * 16384)

using namespace std;
static volatile bool do_exit = false;

hackrf_device *device;
uint32_t freq;
uint32_t hardware_sample_rate;
uint16_t buf16[MAXIMUM_BUF_LENGTH];

int16_t lowpassed[MAXIMUM_BUF_LENGTH];
int lp_len;
int rate_in;
int rate_out;
int rate_out2;
int16_t result_demod[MAXIMUM_BUF_LENGTH];
int luminance[914321];    // enough samples for one full image: lineLengthInS * 256 * SAMPLERATE ≈ 914321
int luminance_count;

int result_demod_len;
int now_r, now_j;
int pre_r, pre_j;
int prev_index;
int now_lpr;
int prev_lpr_index;

FILE *file;
int downsample;



double round(double r)
{
    return (r > 0.0) ? floor(r + 0.5) : ceil(r - 0.5);
}

double xvBPV[5];
double yvBPV[5];
double bandPassSSTVSignal(double sampleIn) 
{
    xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2]; 
    xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4]; 
    xvBPV[4] = sampleIn / 7.627348301;
    yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2]; 
    yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4]; 
    yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);
    return yvBPV[4];
}



double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
		    +0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
		    -0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
		    +0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
		    -0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
		    -0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
		    +0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
		    +0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
		    -0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
		    +0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
		    +0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
		    -0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
		    +0.0063570207, +0.0034041195, +0.0001082461,
		  };

double xvFIRLP1[51];
double FIRLowPass1(double sampleIn) 
{
    for (int i = 0; i < 50; i++)
        xvFIRLP1[i] = xvFIRLP1[i+1];
    xvFIRLP1[50] = sampleIn / 5.013665674;
    double sum = 0;
    for (int i = 0; i <= 50; i++)
        sum += (FIRLPCoeffs[i] * xvFIRLP1[i]);
    return sum;
}
	
double xvFIRLP2[51];
double FIRLowPass2(double sampleIn)
{
    for (int i = 0; i < 50; i++)
        xvFIRLP2[i] = xvFIRLP2[i+1];
    xvFIRLP2[50] = sampleIn / 5.013665674;
    double sum = 0;
    for (int i = 0; i <= 50; i++)
        sum += (FIRLPCoeffs[i] * xvFIRLP2[i]);
    return sum;
}

// moving average
double xvMA1[9];
double yvMA1prev = 0;
double noiseReductionFilter1(double sampleIn) 
{
    for (int i = 0; i < 8; i++)
        xvMA1[i] = xvMA1[i+1];
    xvMA1[8] = sampleIn;
    yvMA1prev = yvMA1prev+xvMA1[8]-xvMA1[0];
    return yvMA1prev;
}

double xvMA2[9];
double yvMA2prev = 0;
double noiseReductionFilter2(double sampleIn)
{
    for (int i = 0; i < 8; i++)
        xvMA2[i] = xvMA2[i+1];
    xvMA2[8] = sampleIn;
    yvMA2prev = yvMA2prev+xvMA2[8]-xvMA2[0];
    return yvMA2prev;
}

double oscPhase = 0;
double realPartPrev = 0;
double imaginaryPartPrev = 0;
int fmDemodulateLuminance(double sample)
{
    oscPhase += (2 * M_PI * 2000) / SAMPLERATE;
		
    double realPart = cos(oscPhase) * sample;
    double imaginaryPart = sin(oscPhase) * sample;
			
    if (oscPhase >= 2 * M_PI)
        oscPhase -= 2 * M_PI;

    realPart = FIRLowPass1(realPart);
    imaginaryPart = FIRLowPass2(imaginaryPart);

    realPart = noiseReductionFilter1(realPart);
    imaginaryPart = noiseReductionFilter2(imaginaryPart);

    sample = (imaginaryPart*realPartPrev-realPart*imaginaryPartPrev)/(realPart*realPart+imaginaryPart*imaginaryPart);

    realPartPrev = realPart;
    imaginaryPartPrev = imaginaryPart;

    sample += 0.2335; // bring the value above 0
		
    int luminance = (int)round((sample/0.617)*255);
    luminance = 255-luminance;

    if (luminance > 255)
        luminance = 255;
    if (luminance < 0)
        luminance = 0;

    return luminance;
}

double pixelLengthInS = 0.0004576;
double syncLengthInS = 0.004862;
double porchLengthInS = 0.000572;
double separatorLengthInS = 0.000572;
double channelLengthInS = pixelLengthInS*320;
double channelGStartInS = syncLengthInS + porchLengthInS;
double channelBStartInS = channelGStartInS + channelLengthInS + separatorLengthInS;
double channelRStartInS = channelBStartInS + channelLengthInS + separatorLengthInS;
double lineLengthInS = syncLengthInS + porchLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS;
double imageLengthInSamples = (lineLengthInS*256)*SAMPLERATE;



void receiveImage()
{

    double t, linet;
    double nextSyncTime = 0;

    for (int s = 0; s < imageLengthInSamples; s++)
    {
        t = ((double)s)/((double)SAMPLERATE);
        int temp_lineLengthInS = (int)(lineLengthInS * 10000000);
        int temp_t = (int)(t * 10000000);
        linet = ((double)(temp_t % temp_lineLengthInS))/10000000;

        int lum = luminance[s];

        if (t >= nextSyncTime)
        {
            nextSyncTime += lineLengthInS;
            imshow("frame", frame);
            if (waitKey(5) == 'q')
            {
                break;
            }
        }

        if ((linet >= channelGStartInS && linet < channelGStartInS + channelLengthInS) ||
            (linet >= channelBStartInS && linet < channelBStartInS + channelLengthInS) ||
            (linet >= channelRStartInS && linet < channelRStartInS + channelLengthInS) )
        {
            int y = (int)floor(t/lineLengthInS);
            if (linet >= channelGStartInS && linet < channelGStartInS + channelLengthInS) 
            {
                int x = (int)floor(((linet-channelGStartInS)/channelLengthInS)*320);
                frame.at<Vec3b>(y, x)[1] = lum;

            }
            if (linet >= channelBStartInS && linet < channelBStartInS + channelLengthInS) 
            {
                int x = (int)floor(((linet-channelBStartInS)/channelLengthInS)*320);
                frame.at<Vec3b>(y, x)[0] = lum;
            }
            if (linet >= channelRStartInS && linet < channelRStartInS + channelLengthInS)
            {
                int x = (int)floor(((linet-channelRStartInS)/channelLengthInS)*320);
                frame.at<Vec3b>(y, x)[2] = lum;
            }
        }
    }
    fprintf(stderr, "Finished rx.\n");
}



void sigint_callback_handler(int signum) 
{
    cout << "Caught signal" << endl;
    do_exit = true;
}

void multiply(int ar, int aj, int br, int bj, int *cr, int *cj)
{
	*cr = ar*br - aj*bj;
	*cj = aj*br + ar*bj;
}

int polar_discriminant(int ar, int aj, int br, int bj)
{
    int cr, cj;
    double angle;
    multiply(ar, aj, br, -bj, &cr, &cj);
    angle = atan2((double)cj, (double)cr);
    return (int)(angle / 3.14159 * (1<<14));
}

int rx_callback(hackrf_transfer* transfer)
{
    for (int i = 0; i < transfer->valid_length; i++) 
    {
        double sample =  (int8_t)(transfer->buffer[i]) + 1;
        buf16[i] = (int16_t)sample;    // from rtl_fm: s->buf16[i] = (int16_t)buf[i] - 127; values span about -127..128, so why a uint16_t buffer?
    }

    memcpy(lowpassed, buf16, 2*transfer->valid_length);
    lp_len = transfer->valid_length;

    //low pass //rate = hardware_sample_rate = 6 * rate_in = 1.2M
    int i=0, i2=0;
    while (i < lp_len) 
    {
        now_r += lowpassed[i];
        now_j += lowpassed[i+1];
        i += 2;
        prev_index++;
        if (prev_index < downsample)
        {
            continue;
        }
        lowpassed[i2]   = now_r; 
        lowpassed[i2+1] = now_j; 
        prev_index = 0;
        now_r = 0;
        now_j = 0;
        i2 += 2;
    }
    lp_len = i2;

    //fm demod //rate = rate_in = 200k
    int i3, pcm;
    pcm = polar_discriminant(lowpassed[0], lowpassed[1], pre_r, pre_j);
    result_demod[0] = (int16_t)pcm;
    for (i3 = 2; i3 < (lp_len-1); i3 += 2)
    {
        pcm = polar_discriminant(lowpassed[i3], lowpassed[i3+1], lowpassed[i3-2], lowpassed[i3-1]);
        result_demod[i3/2] = (int16_t)pcm;
    }
    pre_r = lowpassed[lp_len - 2];
    pre_j = lowpassed[lp_len - 1];
    result_demod_len = lp_len/2;

    // low pass real //rate = rate_out = 200k
    int i4=0, i5=0;
    int fast = rate_out;
    int slow = rate_out2;
    while (i4 < result_demod_len)
    {
        now_lpr += result_demod[i4];
        i4++;
        prev_lpr_index += slow;
        if (prev_lpr_index < fast)
        {
            continue;
        }
        result_demod[i5] = (int16_t)(now_lpr / (fast/slow));
        prev_lpr_index -= fast;
        now_lpr = 0;
        i5 += 1;
    }
    result_demod_len = i5;

    //rate = rate_out2 = 8k

    fprintf(stderr, "result_demod_len: %d\n", result_demod_len);
    for (int s = 0; s < result_demod_len; s++)
    {
        double sampleDouble, sample_filtered;
        sampleDouble = ((double)result_demod[s]) / 32768.0;
        sample_filtered = bandPassSSTVSignal(sampleDouble);
        luminance[luminance_count] = fmDemodulateLuminance(sample_filtered);
        luminance_count++;


        if (luminance_count >= int(imageLengthInSamples))
        {
            luminance_count = 0;
            receiveImage();
        }
    }
       

    


    return 0;
}



int main(int argc, char **argv)
{	
    char *pcm_name;

    file = stdin;
    //pcm_name = "default";
    frame = Mat::zeros(256, 320, CV_8UC3);


    signal(SIGINT, &sigint_callback_handler);
    int res;

    freq = 120e6;
    rate_in = 200000;
    rate_out = 200000;
    rate_out2 = 8000;
    file = stdout;

    downsample = 6;
    hardware_sample_rate = (uint32_t)(downsample * rate_in);

    res = hackrf_init();

    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_init() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_open(&device);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_open() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_lna_gain(device, 40);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_lna_gain() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_vga_gain(device, 26);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_vga_gain() failed" << endl;
        return EXIT_FAILURE;
    }
		
    /* Set the frequency */
    res = hackrf_set_freq(device, freq);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_freq() failed" << endl;
        return EXIT_FAILURE;
    }

    fprintf(stderr, "Oversampling input by: %ix.\n", downsample);

    /* Set the sample rate */
    res = hackrf_set_sample_rate(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_sample_rate() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_baseband_filter_bandwidth(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_baseband_filter_bandwidth_set() failed" << endl;
        return EXIT_FAILURE;
    }
        

    fprintf(stderr, "Output at %u Hz.\n", rate_in);
    usleep(100000);
	
    res = hackrf_start_rx(device, rx_callback, NULL); 

    while ((hackrf_is_streaming(device) == HACKRF_TRUE) && (do_exit == false))
    {
        usleep(100000);
    }

    if (do_exit)
    {
        fprintf(stderr, "\nUser cancel, exiting...\n");
    }
    else 
    {
        fprintf(stderr, "\nLibrary error, exiting...\n");
    }

    res = hackrf_close(device);
    if(res != HACKRF_SUCCESS)
    {
        cout << "hackrf_close() failed" << endl;
    } 
    else 
    {
        cout << "hackrf_close() done" << endl;
    }

    hackrf_exit();
    cout << "hackrf_exit() done" << endl;

    return 0;
}

 

g++ decode.cpp -o decode -lhackrf -pthread -lm  `pkg-config --cflags --libs opencv`

The program above only displays the image after a whole frame has been received, and the image quality is good. The next one receives a line and displays a line, but the lines come out misaligned; I suspect the thread locking is not handled correctly.
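As an aside, one way to reduce the risk of the callback thread and the display thread stepping on each other would be to snapshot each finished line under the lock and hand the copy to the display thread through a condition variable. The following is only a sketch of that idea (the buffer size and the helper names push_luminance / wait_and_get_line are made up for illustration), not the code actually used in this post:

#include <pthread.h>
#include <string.h>

#define LINE_SAMPLES 3572   // roughly lineLengthInS * SAMPLERATE for Martin M1

static int lineBuffer[LINE_SAMPLES];    // filled by the HackRF rx callback
static int lineSnapshot[LINE_SAMPLES];  // copy handed to the display thread
static int lineFill = 0;
static bool lineReady = false;
static pthread_mutex_t lineMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  lineCond  = PTHREAD_COND_INITIALIZER;

// Producer side: called from rx_callback for every demodulated luminance value.
void push_luminance(int lum)
{
    pthread_mutex_lock(&lineMutex);
    lineBuffer[lineFill++] = lum;
    if (lineFill >= LINE_SAMPLES)
    {
        memcpy(lineSnapshot, lineBuffer, sizeof(lineBuffer)); // freeze the finished line
        lineFill = 0;
        lineReady = true;
        pthread_cond_signal(&lineCond);
    }
    pthread_mutex_unlock(&lineMutex);
}

// Consumer side: the display thread blocks until a full line is available,
// copies it out while holding the lock, and draws it afterwards.
void wait_and_get_line(int *out)
{
    pthread_mutex_lock(&lineMutex);
    while (!lineReady)
        pthread_cond_wait(&lineCond, &lineMutex);
    memcpy(out, lineSnapshot, sizeof(lineSnapshot));
    lineReady = false;
    pthread_mutex_unlock(&lineMutex);
}

The full line-by-line program as I actually wrote it is below.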

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <time.h>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>


#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <libhackrf/hackrf.h>
#include <iostream>

using namespace cv;
struct pcm *pcm;

Mat frame;

int SAMPLERATE = 8000;
double oneBitSampleCount;

#define MAXIMUM_BUF_LENGTH		(16 * 16384)

using namespace std;
static volatile bool do_exit = false;

hackrf_device *device;
uint32_t freq;
uint32_t hardware_sample_rate;
uint16_t buf16[MAXIMUM_BUF_LENGTH];

int16_t lowpassed[MAXIMUM_BUF_LENGTH];
int lp_len;
int rate_in;
int rate_out;
int rate_out2;
int16_t result_demod[MAXIMUM_BUF_LENGTH];
int luminance[914321];    // enough samples for one full image: lineLengthInS * 256 * SAMPLERATE ≈ 914321
int luminance_count;

int result_demod_len;
int now_r, now_j;
int pre_r, pre_j;
int prev_index;
int now_lpr;
int prev_lpr_index;

int downsample;

bool bufferIsFull = false;
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

double round(double r)
{
    return (r > 0.0) ? floor(r + 0.5) : ceil(r - 0.5);
}

double xvBPV[5];
double yvBPV[5];
double bandPassSSTVSignal(double sampleIn) 
{
    xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2]; 
    xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4]; 
    xvBPV[4] = sampleIn / 7.627348301;
    yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2]; 
    yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4]; 
    yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);
    return yvBPV[4];
}



double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
		    +0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
		    -0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
		    +0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
		    -0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
		    -0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
		    +0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
		    +0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
		    -0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
		    +0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
		    +0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
		    -0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
		    +0.0063570207, +0.0034041195, +0.0001082461,
		  };

double xvFIRLP1[51];
double FIRLowPass1(double sampleIn) 
{
    for (int i = 0; i < 50; i++)
        xvFIRLP1[i] = xvFIRLP1[i+1];
    xvFIRLP1[50] = sampleIn / 5.013665674;
    double sum = 0;
    for (int i = 0; i <= 50; i++)
        sum += (FIRLPCoeffs[i] * xvFIRLP1[i]);
    return sum;
}
	
double xvFIRLP2[51];
double FIRLowPass2(double sampleIn)
{
    for (int i = 0; i < 50; i++)
        xvFIRLP2[i] = xvFIRLP2[i+1];
    xvFIRLP2[50] = sampleIn / 5.013665674;
    double sum = 0;
    for (int i = 0; i <= 50; i++)
        sum += (FIRLPCoeffs[i] * xvFIRLP2[i]);
    return sum;
}

// moving average
double xvMA1[9];
double yvMA1prev = 0;
double noiseReductionFilter1(double sampleIn) 
{
    for (int i = 0; i < 8; i++)
        xvMA1[i] = xvMA1[i+1];
    xvMA1[8] = sampleIn;
    yvMA1prev = yvMA1prev+xvMA1[8]-xvMA1[0];
    return yvMA1prev;
}

double xvMA2[9];
double yvMA2prev = 0;
double noiseReductionFilter2(double sampleIn)
{
    for (int i = 0; i < 8; i++)
        xvMA2[i] = xvMA2[i+1];
    xvMA2[8] = sampleIn;
    yvMA2prev = yvMA2prev+xvMA2[8]-xvMA2[0];
    return yvMA2prev;
}

double oscPhase = 0;
double realPartPrev = 0;
double imaginaryPartPrev = 0;
int fmDemodulateLuminance(double sample)
{
    oscPhase += (2 * M_PI * 2000) / SAMPLERATE;
		
    double realPart = cos(oscPhase) * sample;
    double imaginaryPart = sin(oscPhase) * sample;
			
    if (oscPhase >= 2 * M_PI)
        oscPhase -= 2 * M_PI;

    realPart = FIRLowPass1(realPart);
    imaginaryPart = FIRLowPass2(imaginaryPart);

    realPart = noiseReductionFilter1(realPart);
    imaginaryPart = noiseReductionFilter2(imaginaryPart);

    sample = (imaginaryPart*realPartPrev-realPart*imaginaryPartPrev)/(realPart*realPart+imaginaryPart*imaginaryPart);

    realPartPrev = realPart;
    imaginaryPartPrev = imaginaryPart;

    sample += 0.2335; // bring the value above 0
		
    int luminance = (int)round((sample/0.617)*255);
    luminance = 255-luminance;

    if (luminance > 255)
        luminance = 255;
    if (luminance < 0)
        luminance = 0;

    return luminance;
}

double pixelLengthInS = 0.0004576;
double syncLengthInS = 0.004862;
double porchLengthInS = 0.000572;
double separatorLengthInS = 0.000572;
double channelLengthInS = pixelLengthInS*320;
double channelGStartInS = syncLengthInS + porchLengthInS;
double channelBStartInS = channelGStartInS + channelLengthInS + separatorLengthInS;
double channelRStartInS = channelBStartInS + channelLengthInS + separatorLengthInS;
double lineLengthInS = syncLengthInS + porchLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS;
double imageLengthInSamples = (lineLengthInS*256)*SAMPLERATE;


void receiveImage()
{

    fprintf(stderr, "Finished rx.\n");
}



void sigint_callback_handler(int signum) 
{
    cout << "Caught signal" << endl;
    do_exit = true;
}

void multiply(int ar, int aj, int br, int bj, int *cr, int *cj)
{
	*cr = ar*br - aj*bj;
	*cj = aj*br + ar*bj;
}

int polar_discriminant(int ar, int aj, int br, int bj)
{
    int cr, cj;
    double angle;
    multiply(ar, aj, br, -bj, &cr, &cj);
    angle = atan2((double)cj, (double)cr);
    return (int)(angle / 3.14159 * (1<<14));
}

int rx_callback(hackrf_transfer* transfer)
{
    for (int i = 0; i < transfer->valid_length; i++) 
    {
        double sample =  (int8_t)(transfer->buffer[i]) + 1;
        buf16[i] = (int16_t)sample;    // from rtl_fm: s->buf16[i] = (int16_t)buf[i] - 127; values span about -127..128, so why a uint16_t buffer?
    }

    memcpy(lowpassed, buf16, 2*transfer->valid_length);
    lp_len = transfer->valid_length;

    //low pass //rate = hardware_sample_rate = 6 * rate_in = 1.2M
    int i=0, i2=0;
    while (i < lp_len) 
    {
        now_r += lowpassed[i];
        now_j += lowpassed[i+1];
        i += 2;
        prev_index++;
        if (prev_index < downsample)
        {
            continue;
        }
        lowpassed[i2]   = now_r; 
        lowpassed[i2+1] = now_j; 
        prev_index = 0;
        now_r = 0;
        now_j = 0;
        i2 += 2;
    }
    lp_len = i2;

    //fm demod //rate = rate_in = 200k
    int i3, pcm;
    pcm = polar_discriminant(lowpassed[0], lowpassed[1], pre_r, pre_j);
    result_demod[0] = (int16_t)pcm;
    for (i3 = 2; i3 < (lp_len-1); i3 += 2)
    {
        pcm = polar_discriminant(lowpassed[i3], lowpassed[i3+1], lowpassed[i3-2], lowpassed[i3-1]);
        result_demod[i3/2] = (int16_t)pcm;
    }
    pre_r = lowpassed[lp_len - 2];
    pre_j = lowpassed[lp_len - 1];
    result_demod_len = lp_len/2;

    // low pass real //rate = rate_out = 200k
    int i4=0, i5=0;
    int fast = rate_out;
    int slow = rate_out2;
    while (i4 < result_demod_len)
    {
        now_lpr += result_demod[i4];
        i4++;
        prev_lpr_index += slow;
        if (prev_lpr_index < fast)
        {
            continue;
        }
        result_demod[i5] = (int16_t)(now_lpr / (fast/slow));
        prev_lpr_index -= fast;
        now_lpr = 0;
        i5 += 1;
    }
    result_demod_len = i5;

    //rate = rate_out2 = 8k
    for (int s = 0; s < result_demod_len; s++)
    {
        double sampleDouble, sample_filtered;
        sampleDouble = ((double)result_demod[s]) / 32768.0;
        sample_filtered = bandPassSSTVSignal(sampleDouble);

        pthread_mutex_lock(&mutex);
        luminance[luminance_count] = fmDemodulateLuminance(sample_filtered);
        luminance_count++;

        if (luminance_count >= int( lineLengthInS*SAMPLERATE))
        {
            bufferIsFull = true;
            luminance_count = 0;
        }
        pthread_mutex_unlock(&mutex);
    }

    return 0;
}



int main(int argc, char **argv)
{	
    frame = Mat::zeros(256, 320, CV_8UC3);

    //cout << "one line samples:" << lineLengthInS*SAMPLERATE << "one line takes seconds: " << lineLengthInS<< endl;
    signal(SIGINT, &sigint_callback_handler);
    int res;

    freq = 120e6;
    rate_in = 200000;
    rate_out = 200000;
    rate_out2 = 8000;

    downsample = 6;
    hardware_sample_rate = (uint32_t)(downsample * rate_in);

    res = hackrf_init();

    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_init() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_open(&device);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_open() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_lna_gain(device, 40);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_lna_gain() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_vga_gain(device, 26);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_vga_gain() failed" << endl;
        return EXIT_FAILURE;
    }
		
    /* Set the frequency */
    res = hackrf_set_freq(device, freq);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_freq() failed" << endl;
        return EXIT_FAILURE;
    }

    fprintf(stderr, "Oversampling input by: %ix.\n", downsample);

    /* Set the sample rate */
    res = hackrf_set_sample_rate(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_sample_rate() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_baseband_filter_bandwidth(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_baseband_filter_bandwidth_set() failed" << endl;
        return EXIT_FAILURE;
    }
        

    fprintf(stderr, "Output at %u Hz.\n", rate_in);
    usleep(100000);
	
    res = hackrf_start_rx(device, rx_callback, NULL); 

    int y = 0;
    while ((hackrf_is_streaming(device) == HACKRF_TRUE) && (do_exit == false))
    {


        if (bufferIsFull)
        {
            pthread_mutex_lock(&mutex);
            for (int s = 0; s < int(lineLengthInS*SAMPLERATE); s++)
            {
                double t = ((double)s)/((double)SAMPLERATE);

                int lum = luminance[s];

                if ((t >= channelGStartInS && t < channelGStartInS + channelLengthInS) ||
                    (t >= channelBStartInS && t < channelBStartInS + channelLengthInS) ||
                    (t >= channelRStartInS && t < channelRStartInS + channelLengthInS) )
                {
                    if (t >= channelGStartInS && t < channelGStartInS + channelLengthInS) 
                    {
                        int x = (int)floor(((t-channelGStartInS)/channelLengthInS)*320);
                        frame.at<Vec3b>(y, x)[1] = lum;
                    }
                    if (t >= channelBStartInS && t < channelBStartInS + channelLengthInS) 
                    {
                        int x = (int)floor(((t-channelBStartInS)/channelLengthInS)*320);
                        frame.at<Vec3b>(y, x)[0] = lum;
                    }
                    if (t >= channelRStartInS && t < channelRStartInS + channelLengthInS)
                    {
                        int x = (int)floor(((t-channelRStartInS)/channelLengthInS)*320);
                        frame.at<Vec3b>(y, x)[2] = lum;
                    }
                }
            }
            y = y + 1;
            if (y >=256) y = 0;
            bufferIsFull = false;
            pthread_mutex_unlock(&mutex);
            

            imshow("frame", frame);

            if (waitKey(5) == 'q')
            {
               do_exit = true;
                break;
            }
        }
    }

    if (do_exit)
    {
        fprintf(stderr, "\nUser cancel, exiting...\n");
    }
    else 
    {
        fprintf(stderr, "\nLibrary error, exiting...\n");
    }

    res = hackrf_close(device);
    if(res != HACKRF_SUCCESS)
    {
        cout << "hackrf_close() failed" << endl;
    } 
    else 
    {
        cout << "hackrf_close() done" << endl;
    }

    hackrf_exit();
    cout << "hackrf_exit() done" << endl;

    return 0;
}

The next task is to use the Bluetooth and nRF demodulation code as a reference and merge the separate functions into a single demodulation routine, so it can be moved onto the Portapack.

The combined code is as follows:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <time.h>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>


#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <libhackrf/hackrf.h>
#include <iostream>

using namespace cv;
struct pcm *pcm;

Mat frame;

int SAMPLERATE = 8000;
double oneBitSampleCount;

#define MAXIMUM_BUF_LENGTH		(16 * 16384)

using namespace std;
static volatile bool do_exit = false;

hackrf_device *device;
uint32_t freq;
uint32_t hardware_sample_rate;
uint16_t buf16[MAXIMUM_BUF_LENGTH];

int16_t lowpassed[MAXIMUM_BUF_LENGTH];
int lp_len;
int rate_in;
int rate_out;
int rate_out2;
int16_t result_demod[MAXIMUM_BUF_LENGTH];
int luminance[914321];    // enough samples for one full image: lineLengthInS * 256 * SAMPLERATE ≈ 914321
int luminance_count;

int result_demod_len;
int now_r, now_j;
int pre_r, pre_j;
int prev_index;
int now_lpr;
int prev_lpr_index;

int downsample;

bool bufferIsFull = false;
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

double round(double r)
{
    return (r > 0.0) ? floor(r + 0.5) : ceil(r - 0.5);
}

double xvBPV[5];
double yvBPV[5];

double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
		    +0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
		    -0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
		    +0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
		    -0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
		    -0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
		    +0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
		    +0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
		    -0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
		    +0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
		    +0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
		    -0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
		    +0.0063570207, +0.0034041195, +0.0001082461,
		  };

double xvFIRLP1[51];
double xvFIRLP2[51];
double xvMA1[9];
double yvMA1prev = 0;
double xvMA2[9];
double yvMA2prev = 0;
double oscPhase = 0;
double realPartPrev = 0;
double imaginaryPartPrev = 0;
double pixelLengthInS = 0.0004576;
double syncLengthInS = 0.004862;
double porchLengthInS = 0.000572;
double separatorLengthInS = 0.000572;
double channelLengthInS = pixelLengthInS*320;
double channelGStartInS = syncLengthInS + porchLengthInS;
double channelBStartInS = channelGStartInS + channelLengthInS + separatorLengthInS;
double channelRStartInS = channelBStartInS + channelLengthInS + separatorLengthInS;
double lineLengthInS = syncLengthInS + porchLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS + channelLengthInS + separatorLengthInS;
double imageLengthInSamples = (lineLengthInS*256)*SAMPLERATE;

void sigint_callback_handler(int signum) 
{
    cout << "Caught signal" << endl;
    do_exit = true;
}

void multiply(int ar, int aj, int br, int bj, int *cr, int *cj)
{
	*cr = ar*br - aj*bj;
	*cj = aj*br + ar*bj;
}

int polar_discriminant(int ar, int aj, int br, int bj)
{
    int cr, cj;
    double angle;
    multiply(ar, aj, br, -bj, &cr, &cj);
    angle = atan2((double)cj, (double)cr);
    return (int)(angle / 3.14159 * (1<<14));
}

int rx_callback(hackrf_transfer* transfer)
{
    for (int i = 0; i < transfer->valid_length; i++) 
    {
        double sample =  (int8_t)(transfer->buffer[i]) + 1;
        buf16[i] = (int16_t)sample;    // from rtl_fm: s->buf16[i] = (int16_t)buf[i] - 127; values span about -127..128, so why a uint16_t buffer?
    }

    memcpy(lowpassed, buf16, 2*transfer->valid_length);
    lp_len = transfer->valid_length;

    //low pass //rate = hardware_sample_rate = 6 * rate_in = 1.2M
    int i=0, i2=0;
    while (i < lp_len) 
    {
        now_r += lowpassed[i];
        now_j += lowpassed[i+1];
        i += 2;
        prev_index++;
        if (prev_index < downsample)
        {
            continue;
        }
        lowpassed[i2]   = now_r; 
        lowpassed[i2+1] = now_j; 
        prev_index = 0;
        now_r = 0;
        now_j = 0;
        i2 += 2;
    }
    lp_len = i2;

    //fm demod //rate = rate_in = 200k
    int i3, pcm;
    pcm = polar_discriminant(lowpassed[0], lowpassed[1], pre_r, pre_j);
    result_demod[0] = (int16_t)pcm;
    for (i3 = 2; i3 < (lp_len-1); i3 += 2)
    {
        pcm = polar_discriminant(lowpassed[i3], lowpassed[i3+1], lowpassed[i3-2], lowpassed[i3-1]);
        result_demod[i3/2] = (int16_t)pcm;
    }
    pre_r = lowpassed[lp_len - 2];
    pre_j = lowpassed[lp_len - 1];
    result_demod_len = lp_len/2;

    // low pass real //rate = rate_out = 200k
    int i4=0, i5=0;
    int fast = rate_out;
    int slow = rate_out2;
    while (i4 < result_demod_len)
    {
        now_lpr += result_demod[i4];
        i4++;
        prev_lpr_index += slow;
        if (prev_lpr_index < fast)
        {
            continue;
        }
        result_demod[i5] = (int16_t)(now_lpr / (fast/slow));
        prev_lpr_index -= fast;
        now_lpr = 0;
        i5 += 1;
    }
    result_demod_len = i5;

    //rate = rate_out2 = 8k
    for (int s = 0; s < result_demod_len; s++)
    {
        double sampleDouble, sample_filtered;
        sampleDouble = ((double)result_demod[s]) / 32768.0;

        xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2]; 
        xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4]; 
        xvBPV[4] = sampleDouble / 7.627348301;
        yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2]; 
        yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4]; 
        yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);

        sample_filtered =  yvBPV[4];


        pthread_mutex_lock(&mutex);


        double sample_result;
        oscPhase += (2 * M_PI * 2000) / SAMPLERATE;

        double realPart = cos(oscPhase) * sample_filtered;
        double imaginaryPart = sin(oscPhase) * sample_filtered;
			
        if (oscPhase >= 2 * M_PI)
            oscPhase -= 2 * M_PI;

        for (int i = 0; i < 50; i++)
            xvFIRLP1[i] = xvFIRLP1[i+1];
        xvFIRLP1[50] = realPart / 5.013665674;
        double realPart_lp = 0;
        for (int i = 0; i <= 50; i++)
            realPart_lp += (FIRLPCoeffs[i] * xvFIRLP1[i]);

        for (int i = 0; i < 50; i++)
            xvFIRLP2[i] = xvFIRLP2[i+1];
        xvFIRLP2[50] = imaginaryPart / 5.013665674;
        double imaginaryPart_lp = 0;
        for (int i = 0; i <= 50; i++)
            imaginaryPart_lp += (FIRLPCoeffs[i] * xvFIRLP2[i]);

        for (int i = 0; i < 8; i++)
            xvMA1[i] = xvMA1[i+1];
        xvMA1[8] = realPart_lp;
        yvMA1prev = yvMA1prev+xvMA1[8]-xvMA1[0];
        double realPart_nr = yvMA1prev;

        for (int i = 0; i < 8; i++)
            xvMA2[i] = xvMA2[i+1];
        xvMA2[8] = imaginaryPart_lp;
        yvMA2prev = yvMA2prev+xvMA2[8]-xvMA2[0];
        double imaginaryPart_nr = yvMA2prev;

        sample_result = (imaginaryPart_nr*realPartPrev-realPart_nr*imaginaryPartPrev)/(realPart_nr*realPart_nr+imaginaryPart_nr*imaginaryPart_nr);

        realPartPrev = realPart_nr;
        imaginaryPartPrev = imaginaryPart_nr;

        sample_result += 0.2335; 
		
        int luminance_result = (int)round((sample_result/0.617)*255);
        luminance_result = 255-luminance_result;

        if (luminance_result > 255)
            luminance_result = 255;
        if (luminance_result < 0)
            luminance_result = 0;

        luminance[luminance_count] = luminance_result;

        luminance_count++;

        if (luminance_count >= int( lineLengthInS*SAMPLERATE))
        {
            bufferIsFull = true;
            luminance_count = 0;
        }
        pthread_mutex_unlock(&mutex);
    }

    return 0;
}



int main(int argc, char **argv)
{	
    frame = Mat::zeros(256, 320, CV_8UC3);

    //cout << "one line samples:" << lineLengthInS*SAMPLERATE << "one line takes seconds: " << lineLengthInS<< endl;
    signal(SIGINT, &sigint_callback_handler);
    int res;

    freq = 120e6;
    rate_in = 200000;
    rate_out = 200000;
    rate_out2 = 8000;

    downsample = 6;
    hardware_sample_rate = (uint32_t)(downsample * rate_in);

    res = hackrf_init();

    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_init() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_open(&device);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_open() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_lna_gain(device, 40);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_lna_gain() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_vga_gain(device, 26);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_vga_gain() failed" << endl;
        return EXIT_FAILURE;
    }
		
    /* Set the frequency */
    res = hackrf_set_freq(device, freq);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_freq() failed" << endl;
        return EXIT_FAILURE;
    }

    fprintf(stderr, "Oversampling input by: %ix.\n", downsample);

    /* Set the sample rate */
    res = hackrf_set_sample_rate(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS ) 
    {
        cout << "hackrf_set_sample_rate() failed" << endl;
        return EXIT_FAILURE;
    }

    res = hackrf_set_baseband_filter_bandwidth(device, hardware_sample_rate);
    if( res != HACKRF_SUCCESS )
    {
        cout << "hackrf_baseband_filter_bandwidth_set() failed" << endl;
        return EXIT_FAILURE;
    }
        

    fprintf(stderr, "Output at %u Hz.\n", rate_in);
    usleep(100000);
	
    res = hackrf_start_rx(device, rx_callback, NULL); 

    int y = 0;
    while ((hackrf_is_streaming(device) == HACKRF_TRUE) && (do_exit == false))
    {


        if (bufferIsFull)
        {
            pthread_mutex_lock(&mutex);
            for (int s = 0; s < int(lineLengthInS*SAMPLERATE); s++)
            {
                double t = ((double)s)/((double)SAMPLERATE);

                int lum = luminance[s];

                if ((t >= channelGStartInS && t < channelGStartInS + channelLengthInS) ||
                    (t >= channelBStartInS && t < channelBStartInS + channelLengthInS) ||
                    (t >= channelRStartInS && t < channelRStartInS + channelLengthInS) )
                {
                    if (t >= channelGStartInS && t < channelGStartInS + channelLengthInS) 
                    {
                        int x = (int)floor(((t-channelGStartInS)/channelLengthInS)*320);
                        frame.at<Vec3b>(y, x)[1] = lum;
                    }
                    if (t >= channelBStartInS && t < channelBStartInS + channelLengthInS) 
                    {
                        int x = (int)floor(((t-channelBStartInS)/channelLengthInS)*320);
                        frame.at<Vec3b>(y, x)[0] = lum;
                    }
                    if (t >= channelRStartInS && t < channelRStartInS + channelLengthInS)
                    {
                        int x = (int)floor(((t-channelRStartInS)/channelLengthInS)*320);
                        frame.at<Vec3b>(y, x)[2] = lum;
                    }
                }
            }
            y = y + 1;
            if (y >=256) y = 0;
            bufferIsFull = false;
            pthread_mutex_unlock(&mutex);
            

            imshow("frame", frame);

            if (waitKey(5) == 'q')
            {
               do_exit = true;
                break;
            }
        }
    }

    if (do_exit)
    {
        fprintf(stderr, "\nUser cancel, exiting...\n");
    }
    else 
    {
        fprintf(stderr, "\nLibrary error, exiting...\n");
    }

    res = hackrf_close(device);
    if(res != HACKRF_SUCCESS)
    {
        cout << "hackrf_close() failed" << endl;
    } 
    else 
    {
        cout << "hackrf_close() done" << endl;
    }

    hackrf_exit();
    cout << "hackrf_exit() done" << endl;

    return 0;
}

The display side on the Portapack can borrow from the analog TV demodulation and the SSTV transmit code.

I spent a few days trying to port this C++ code to the Portapack. It proved difficult and the resulting image was completely wrong, so I plan to give up on this approach for now and switch to FFT-based demodulation. I noticed that the audio FFT shown at the top of the screen during WFM audio demodulation is basically what I want: I compute it after FM demodulation as well, and the peak of the SSTV signal clearly moves left and right. I plan to combine this with the earlier Python FFT demodulation example. The code ported so far is placed below.

#include "sstv_collector.hpp"

#include "dsp_fft.hpp"

#include "utility.hpp"
#include "event_m4.hpp"
#include "portapack_shared_memory.hpp"

#include "event_m4.hpp"

#include <algorithm>

void SSTvCollector::on_message(const Message* const message) {
	switch(message->id) {
	case Message::ID::UpdateSpectrum:
		update();
		break;

	case Message::ID::SpectrumStreamingConfig:
		set_state(*reinterpret_cast<const SpectrumStreamingConfigMessage*>(message));
		break;

	default:
		break;
	}
}

void SSTvCollector::set_state(const SpectrumStreamingConfigMessage& message) {
	if( message.mode == SpectrumStreamingConfigMessage::Mode::Running ) {
		start();
	} else {
		stop();
	}
}

void SSTvCollector::start() {
	streaming = true;
	ChannelSpectrumConfigMessage message { &fifo };
	shared_memory.application_queue.push(message);
}

void SSTvCollector::stop() {
	streaming = false;
	fifo.reset_in();
}

void SSTvCollector::set_decimation_factor(
	const size_t decimation_factor
) {
	channel_spectrum_decimator.set_factor(decimation_factor);
}

/* TODO: Refactor to register task with idle thread?
 * It's sad that the idle thread has to call all the way back here just to
 * perform the deferred task on the buffer of data we prepared.
 */

void SSTvCollector::feed(
	const buffer_c16_t& channel,
	const uint32_t filter_pass_frequency,
	const uint32_t filter_stop_frequency
) {
	// Called from baseband processing thread.
	channel_filter_pass_frequency = filter_pass_frequency;
	channel_filter_stop_frequency = filter_stop_frequency;

	channel_spectrum_decimator.feed(
		channel,
		[this](const buffer_c16_t& data) {
			this->post_message(data);
		}
	);
}

/*
template<typename T>
static typename T::value_type spectrum_window_hamming_3(const T& s, const size_t i) {
	static_assert(power_of_two(s.size()), "Array size must be power of 2");
	constexpr size_t mask = s.size() - 1;
	// Three point Hamming window.
	return s[i] * 0.54f + (s[(i-1) & mask] + s[(i+1) & mask]) * -0.23f;
};

const auto corrected_sample = spectrum_window_hamming_3(channel_spectrum, i);
*/

void SSTvCollector::post_message(const buffer_c16_t& data) {
	// Called from baseband processing thread.
	double xvBPV[5];
	double yvBPV[5];
 
	double FIRLPCoeffs[51] = { +0.0001082461, +0.0034041195, +0.0063570207, +0.0078081648,
		    +0.0060550614, -0.0002142384, -0.0104500335, -0.0211855480,
		    -0.0264527776, -0.0201269304, +0.0004419626, +0.0312014771,
		    +0.0606261038, +0.0727491887, +0.0537028370, -0.0004362161,
		    -0.0779387981, -0.1511168919, -0.1829049634, -0.1390189257,
		    -0.0017097774, +0.2201896764, +0.4894395006, +0.7485289338,
		    +0.9357596142, +1.0040320616, +0.9357596142, +0.7485289338,
		    +0.4894395006, +0.2201896764, -0.0017097774, -0.1390189257,
		    -0.1829049634, -0.1511168919, -0.0779387981, -0.0004362161,
		    +0.0537028370, +0.0727491887, +0.0606261038, +0.0312014771,
		    +0.0004419626, -0.0201269304, -0.0264527776, -0.0211855480,
		    -0.0104500335, -0.0002142384, +0.0060550614, +0.0078081648,
		    +0.0063570207, +0.0034041195, +0.0001082461,
		  };
 
	double xvFIRLP1[51];
	double xvFIRLP2[51];
	double xvMA1[9];
	double yvMA1prev = 0;
	double xvMA2[9];
	double yvMA2prev = 0;
	double oscPhase = 0;
	double realPartPrev = 0;
	double imaginaryPartPrev = 0;
	if( streaming && !channel_spectrum_request_update ) {
		//fft_swap(data, channel_spectrum);
                for(size_t s=0; s<256; s=s+2) 
		{
                        double sampleDouble, sample_filtered, sample_result;
			sampleDouble = ((double)data.p[s].real())/128;

			xvBPV[0] = xvBPV[1]; xvBPV[1] = xvBPV[2]; 
			xvBPV[2] = xvBPV[3]; xvBPV[3] = xvBPV[4]; 
			xvBPV[4] = sampleDouble / 7.627348301;
			yvBPV[0] = yvBPV[1]; yvBPV[1] = yvBPV[2]; 
			yvBPV[2] = yvBPV[3]; yvBPV[3] = yvBPV[4]; 
			yvBPV[4] = (xvBPV[0]+xvBPV[4])-2*xvBPV[2]+(-0.2722149379*yvBPV[0])+(0.3385637917*yvBPV[1])+(-0.8864523063*yvBPV[2])+(0.7199258671*yvBPV[3]);

			sample_filtered =  yvBPV[4];

			oscPhase += (2 * 3.14159265358 * 2000) / 8000;
 
			double realPart = cos(oscPhase) * sample_filtered;
			double imaginaryPart = sin(oscPhase) * sample_filtered;
			
			if (oscPhase >= 2 * 3.14159265358)
				oscPhase -= 2 * 3.14159265358;

			for (int i = 0; i < 50; i++)
				xvFIRLP1[i] = xvFIRLP1[i+1];
			xvFIRLP1[50] = realPart / 5.013665674;
			double realPart_lp = 0;
			for (int i = 0; i <= 50; i++)
				realPart_lp += (FIRLPCoeffs[i] * xvFIRLP1[i]);
 
			for (int i = 0; i < 50; i++)
				xvFIRLP2[i] = xvFIRLP2[i+1];
			xvFIRLP2[50] = imaginaryPart / 5.013665674;
			double imaginaryPart_lp = 0;
			for (int i = 0; i <= 50; i++)
				imaginaryPart_lp += (FIRLPCoeffs[i] * xvFIRLP2[i]);

			for (int i = 0; i < 8; i++)
				xvMA1[i] = xvMA1[i+1];
			xvMA1[8] = realPart_lp;
			yvMA1prev = yvMA1prev+xvMA1[8]-xvMA1[0];
			double realPart_nr = yvMA1prev;
 
			for (int i = 0; i < 8; i++)
				xvMA2[i] = xvMA2[i+1];
			xvMA2[8] = imaginaryPart_lp;
			yvMA2prev = yvMA2prev+xvMA2[8]-xvMA2[0];
			double imaginaryPart_nr = yvMA2prev;

			sample_result = (imaginaryPart_nr*realPartPrev-realPart_nr*imaginaryPartPrev)/(realPart_nr*realPart_nr+imaginaryPart_nr*imaginaryPart_nr);
 
			realPartPrev = realPart_nr;
			imaginaryPartPrev = imaginaryPart_nr;
 
			sample_result += 0.2335;

			int luminance_result = (int)round((sample_result/0.617)*255);
			luminance_result = 255-luminance_result;
 
			if (luminance_result > 255)
				luminance_result = 255;
			if (luminance_result < 0)
				luminance_result = 0;
			//channel_spectrum[s/2] = {luminance_result, luminance_result};
			channel_spectrum[s/2] = {luminance_result, luminance_result};
		}
		channel_spectrum_sampling_rate = data.sampling_rate;
		channel_spectrum_request_update = true;
		EventDispatcher::events_flag(EVT_MASK_SPECTRUM);
	}
}

void SSTvCollector::update() {
	// Called from idle thread (after EVT_MASK_SPECTRUM is flagged)
	if( streaming && channel_spectrum_request_update ) {
		/* Decimated buffer is full. Compute spectrum. */
		//fft_c_preswapped(channel_spectrum, 0, 8);

		ChannelSpectrum spectrum;
		spectrum.sampling_rate = channel_spectrum_sampling_rate;
		spectrum.channel_filter_pass_frequency = channel_filter_pass_frequency;
		spectrum.channel_filter_stop_frequency = channel_filter_stop_frequency;
		for(size_t i=0; i<128; i++) {
			//const auto corrected_sample = channel_spectrum[i];
                        //const auto corrected_sample = channel_spectrum[i].real();
			//const unsigned int v = corrected_sample + 127.0f;
			const unsigned int v = channel_spectrum[i].real();
			spectrum.db[i] = std::max(0U, std::min(255U, v));
		}
		fifo.in(spectrum);
	}

	channel_spectrum_request_update = false;
}

Later I found that the FFT method is not that good either, after carefully watching how a Martin M1 image is demodulated with FFT in Python. At a sample rate of 8000 the FFT is usually computed over about 9 sample points, but not 9 strictly consecutive ones: each pixel's window is placed according to the pixel position as the samples are stepped through one by one, so sometimes a little is skipped. If every pixel is computed rigidly from exactly 9 consecutive samples, the resulting image is barely legible and only one section comes out in color.
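For reference, the 9-sample window comes straight from the Martin M1 timing constants in the script below: PIXEL_TIME = SCAN_TIME / 320 = 0.146432 / 320 ≈ 0.4576 ms, and pixel_window = round(PIXEL_TIME * 2.34 * sample_rate) = round(0.0004576 * 2.34 * 8000) ≈ 9 samples. A raw FFT of only 9 samples at 8000 Hz has bins about 8000 / 9 ≈ 889 Hz apart, far coarser than the 800 Hz luminance range the decoder cares about (calc_lum maps 1500 Hz to 0 and 1500 + 255 * 3.1372549 ≈ 2300 Hz to 255), which is why the script interpolates between bins with barycentric_peak_interp instead of simply taking the strongest bin. The Python decoder follows.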

import signal
import argparse
from sys import argv, exit

import numpy as np
import soundfile
from PIL import Image
from scipy.signal.windows import hann


def handle_sigint(signal, frame):
    print()
    print("Received interrupt signal, exiting.")
    exit(0)


SCAN_TIME = 0.146432
SYNC_PULSE = 0.004862
SYNC_PORCH = 0.000572
SEP_PULSE = 0.000572

CHAN_TIME = SEP_PULSE + SCAN_TIME

CHAN_OFFSETS = [SYNC_PULSE + SYNC_PORCH]
CHAN_OFFSETS.append(CHAN_OFFSETS[0] + CHAN_TIME)
CHAN_OFFSETS.append(CHAN_OFFSETS[1] + CHAN_TIME)

LINE_TIME = SYNC_PULSE + SYNC_PORCH + 3 * CHAN_TIME
PIXEL_TIME = SCAN_TIME / 320

def calc_lum(freq):
    """Converts SSTV pixel frequency range into 0-255 luminance byte"""

    lum = int(round((freq - 1500) / 3.1372549))
    return min(max(lum, 0), 255)


def barycentric_peak_interp(bins, x):
    """Interpolate between frequency bins to find x value of peak"""

    # Takes x as the index of the largest bin and interpolates the
    # x value of the peak using neighbours in the bins array

    # Make sure data is in bounds
    y1 = bins[x] if x <= 0 else bins[x-1]
    y3 = bins[x] if x + 1 >= len(bins) else bins[x+1]

    denom = y3 + bins[x] + y1
    if denom == 0:
        return 0  # erroneous

    return (y3 - y1) / denom + x

def peak_fft_freq(data):
    """Finds the peak frequency from a section of audio data"""
    windowed_data = data * hann(len(data))
    fft = np.abs(np.fft.rfft(windowed_data))

    # Get index of bin with highest magnitude
    x = np.argmax(fft)
    # Interpolated peak frequency
    peak = barycentric_peak_interp(fft, x)
    #peak = x

    # Return frequency in hz
    return peak * sample_rate / len(windowed_data)



if __name__ == "__main__":
    signal.signal(signal.SIGINT, handle_sigint)
    samples, sample_rate = soundfile.read("mmsstv.wav")
    if  samples.ndim > 1:  # convert to mono if stereo
        samples = samples.mean(axis=1)

    centre_window_time = (PIXEL_TIME *  2.34) / 2
    pixel_window = round(centre_window_time * 2 * sample_rate) 

    #pixel window is the time of the length of a pixel
    #centre_window_time is half of that time

    height = 256
    channels = 3
    width = 320
    # Use list comprehension to init list so we can return data early
    image_data = [[[0 for i in range(width)]
                   for j in range(channels)] for k in range(height)]



    
    seq_start = 0
    chan = 0
    px = 0
    line = 0
    px_end = 0
    px_pos = 0
    while (px_pos <= len(samples)/2):
        if chan == 0 and px == 0:
            seq_start += round(LINE_TIME * sample_rate)
        chan_offset = CHAN_OFFSETS[chan]
        px_pos = round(seq_start + (chan_offset + px *
                               PIXEL_TIME - centre_window_time) *
                               sample_rate)

        freq = peak_fft_freq(samples[px_pos:px_pos + pixel_window])

        image_data[line][chan][px] = calc_lum(freq)

        px = px + 1
        if (px >= width):
            px = 0
            chan = chan + 1
            if (chan >= channels):
                line = line + 1
                chan = 0
                if (line >= height):
                    break
    '''
    i = 0
    chan = 0
    px = 0
    line = 0
    while (i < len(samples)):
        freq = peak_fft_freq(samples[i:i+9])
        #print (i,freq)
        image_data[line][chan][px] = calc_lum(freq)
        #print (line,chan,px)
        i = i + 4

        px = px + 1
        if (px >= width):
            px = 0
            chan = chan + 1
            if (chan >= channels):
                line = line + 1
                chan = 0
                if (line >= height):
                    break
    
    '''

    image = Image.new("RGB", (width, height))
    pixel_data = image.load()

    print("Drawing image data...")

    for y in range(height):
        for x in range(width):
            pixel = (image_data[y][2][x],image_data[y][0][x],image_data[y][1][x])
            pixel_data[x, y] = pixel

    print("...Done!")
    image.save("result.png")

So there is a problem. I can only walk through the sample buffer by pixel position, computing the buffer location that corresponds to each pixel and reading the value there, instead of reading the buffer sequentially and treating each value as a pixel. On the Portapack I can only compute an FFT of a fixed length (such as 32 or 256 points), not over whatever section I want, and the computed FFT spectrum would still need further processing to extract the peak frequency. In short, the Portapack can be used to view the audio frequency spectrum after FM demodulation, but that is limited to visual observation and cannot be used to decode SSTV images.
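To make the mismatch concrete, here is a small C++ sketch (the constants are copied from the Python script above; the function name pixel_sample_index is made up for illustration) of the index arithmetic the decoder needs: the FFT window for a pixel starts at a sample offset computed from (line, channel, pixel), so it does not advance in fixed 32- or 256-sample steps.

#include <math.h>

static const double SCAN_TIME  = 0.146432;   // Martin M1 scan time per colour channel (s)
static const double SYNC_PULSE = 0.004862;
static const double SYNC_PORCH = 0.000572;
static const double SEP_PULSE  = 0.000572;
static const double CHAN_TIME  = SEP_PULSE + SCAN_TIME;
static const double PIXEL_TIME = SCAN_TIME / 320.0;
static const double LINE_TIME  = SYNC_PULSE + SYNC_PORCH + 3.0 * CHAN_TIME;

// Channel start offsets within a line (G, B, R order for Martin M1)
static const double CHAN_OFFSETS[3] = {
    SYNC_PULSE + SYNC_PORCH,
    SYNC_PULSE + SYNC_PORCH + CHAN_TIME,
    SYNC_PULSE + SYNC_PORCH + 2.0 * CHAN_TIME,
};

// Sample index where the FFT window for pixel (line, chan, px) should start.
// The Python script also advances by one extra LINE_TIME before the first line;
// that initial offset is left out here.
long pixel_sample_index(int line, int chan, int px, double sample_rate)
{
    double centre_window_time = (PIXEL_TIME * 2.34) / 2.0;
    double t = line * LINE_TIME          // start of this line
             + CHAN_OFFSETS[chan]        // start of this colour channel
             + px * PIXEL_TIME           // start of this pixel
             - centre_window_time;       // centre the window on the pixel
    return lround(t * sample_rate);
}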

So I still have to go back to the earlier method.
I plotted the variables after each processing step. With nothing applied, the time-domain plots look correct. After the band-pass filter the plot still changes with my signal, although not in the way the theory predicts. After the subsequent frequency conversion and low-pass filtering I found that the plotted waveform no longer changes when the signal changes, and after the final multiplication the plot is completely blank.
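For anyone who wants to reproduce this kind of step-by-step inspection, a minimal way to do it (this helper is not part of the code above; the file name and function are invented for illustration) is to dump every intermediate value to a CSV file from the processing loop and plot the columns with matplotlib or gnuplot:

#include <stdio.h>

static FILE *debug_csv = NULL;

// Append one row per sample: raw demodulated audio, band-passed signal,
// mixer outputs, low-passed I/Q and the final FM-detector output.
void debug_dump(double raw, double bandpassed,
                double mixed_re, double mixed_im,
                double lp_re, double lp_im, double demod)
{
    if (debug_csv == NULL)
    {
        debug_csv = fopen("sstv_debug.csv", "w");
        fprintf(debug_csv, "raw,bandpassed,mixed_re,mixed_im,lp_re,lp_im,demod\n");
    }
    fprintf(debug_csv, "%f,%f,%f,%f,%f,%f,%f\n",
            raw, bandpassed, mixed_re, mixed_im, lp_re, lp_im, demod);
}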


Source: blog.csdn.net/shukebeta008/article/details/107103839