Some data were collected using a DJI drone equipped with a multispectral camera.
The following program computes the RVI, NDVI, SAVI, MSAVI, and NDWI indices from those bands and applies Otsu threshold segmentation to each.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
// Vegetation index computation
// Ratio Vegetation Index: RVI = NIR / R, evaluated per pixel.
// The result is written as a 32-bit float matrix; cv::divide yields 0
// wherever the divisor (red band) is 0.
void RVI(const Mat &nir, const Mat &r, Mat &rvi)
{
    divide(nir, r, rvi, 1.0, CV_32F);
}
void NDVI(const Mat &nir, const Mat &r, Mat &ndvi)
{
divide(nir - r, nir + r, ndvi, 1, CV_32F);
}
void SAVI(const Mat &nir, const Mat &r, double L,Mat &savi)
{
divide((nir - r)*(1 + L), nir + r + L, savi, 1, CV_32F);
}
void MSAVI(const Mat &nir,const Mat &r,Mat &msavi)
{
Mat tmp1, tmp2,tmp3;
pow(2 * nir + 1, 2, tmp1);
tmp2 = tmp1 - 8 * (nir - r);
tmp2.convertTo(tmp2, CV_32F);
pow(tmp2, 0.5, tmp3);
subtract(2 * nir + 1, tmp3, msavi, noArray(), CV_32F);
msavi *= 0.5;
return;
}
// Water index computation
void NDWI(const Mat &nir, const Mat &g, Mat & ndwi)
{
divide(g - nir, g + nir, ndwi, 1, CV_32F);
}
// Otsu threshold segmentation
void OTSUSegmentation(const Mat & input, Mat & output, double &value)
{
double minv, maxv;
minMaxLoc(input, &minv, &maxv);
Mat tmp;
divide(input - minv, maxv - minv, tmp, 1, CV_32F);
tmp = tmp * 255;
tmp.convertTo(tmp, CV_8U);
value = threshold(tmp, output, 128, 255, ThresholdTypes::THRESH_BINARY | ThresholdTypes::THRESH_OTSU);
value /= 255;
value = value * (maxv - minv) + minv;
}
int main()
{
// , ImreadModes::IMREAD_GRAYSCALE
Mat nir = imread("image2/DJI_1625.TIF" , ImreadModes::IMREAD_GRAYSCALE);
Mat r = imread("image2/DJI_1623.TIF" , ImreadModes::IMREAD_GRAYSCALE);
Mat b= imread("image2/DJI_1621.TIF" , ImreadModes::IMREAD_GRAYSCALE);
Mat g=imread("image2/DJI_1622.TIF" , ImreadModes::IMREAD_GRAYSCALE);
Mat rvi;
RVI(nir, r, rvi);
imwrite("image2/rvi.tif", rvi);
Mat rviArea;
double rviV;
OTSUSegmentation(rvi, rviArea, rviV);
imwrite("image2/rviArea.tif", rviArea);
cout << "rvi阈值:" << rviV << endl;
Mat ndvi;
NDVI(nir, r, ndvi);
imwrite("image2/ndvi.tif", ndvi);
Mat ndviArea;
double ndviV;
OTSUSegmentation(ndvi, ndviArea, ndviV);
imwrite("image2/ndviArea.tif", ndviArea);
cout << "ndvi阈值:" << ndviV << endl;
Mat savi;
SAVI(nir, r, 0.5,savi);
imwrite("image2/savi.tif", savi);
Mat saviArea;
double saviV;
OTSUSegmentation(savi,saviArea,saviV);
imwrite("image2/saviArea.tif", saviArea);
cout << "savi阈值:" << saviV << endl;
Mat msavi;
MSAVI(nir, r,msavi);
imwrite("image2/msavi.tif", msavi);
Mat msaviArea;
double msaviV;
OTSUSegmentation(msavi, msaviArea, msaviV);
imwrite("image2/msaviArea.tif", msaviArea);
cout << "msavi阈值:" << msaviV << endl;
Mat ndwi;
NDWI(nir, g,ndwi);
imwrite("image2/ndwi.tif", ndwi);
Mat ndwiArea;
double ndwiV;
OTSUSegmentation(ndwi, ndwiArea, ndwiV);
imwrite("image2/ndwiArea.tif",ndwiArea);
cout << "ndwi阈值:" << ndwiV << endl;
return 0;
}
Next, convert the index images to a colormap. The images produced above (whether saved to disk or held in memory) are all single-channel; the idea is to map them to three-channel color images for easier visual interpretation.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
// Linearly stretches an 8-bit grayscale image so that its brightest pixel
// maps to 255. An empty input yields an empty output, and an all-black
// image is left black (the max is floored at 1 to avoid dividing by zero).
Mat scaleGray(const Mat& inputGray)
{
    Mat outputGray(inputGray.size(), CV_8U);
    if (inputGray.empty())
        return outputGray; // nothing to scan or scale
    // Use OpenCV's optimized reduction instead of a hand-rolled max scan.
    double minV = 0.0, maxV = 0.0;
    minMaxLoc(inputGray, &minV, &maxV);
    // Compute the ratio in double then narrow, matching the original
    // `float scale = 255.0 / maxValue;` exactly.
    float scale = static_cast<float>(255.0 / max(maxV, 1.0));
    // Keep the explicit round-half-up conversion (* scale + 0.5) so pixel
    // values are bit-identical to the original implementation.
    for (int y = 0; y < inputGray.rows; y++)
        for (int x = 0; x < inputGray.cols; x++)
        {
            outputGray.at<uchar>(y, x) = static_cast<unsigned char>(inputGray.at<uchar>(y, x) * scale + 0.5);
        }
    return outputGray;
}
// Maps a scaled gray image to a 3-channel pseudocolor image: each channel
// is the absolute distance of the gray value from a fixed anchor color
// (255, 127, 0). NOTE(review): the original comment labels this anchor
// "orange 255 127 0" in RGB terms, but OpenCV stores channels as BGR —
// verify the intended color against the rendered output.
Mat gray2pseudocolor(const Mat& scaledGray)
{
    Mat outputPseudocolor(scaledGray.size(), CV_8UC3);
    for (int row = 0; row < scaledGray.rows; ++row)
    {
        for (int col = 0; col < scaledGray.cols; ++col)
        {
            const int gray = scaledGray.at<uchar>(row, col);
            Vec3b& pixel = outputPseudocolor.at<Vec3b>(row, col);
            pixel[0] = static_cast<uchar>(abs(255 - gray));
            pixel[1] = static_cast<uchar>(abs(127 - gray));
            pixel[2] = static_cast<uchar>(abs(0 - gray));
        }
    }
    return outputPseudocolor;
}
// Maps a scaled gray image to a rainbow color image by splitting the
// 0..255 range into five bands of 51 values and interpolating one channel
// linearly within each band while the other two are pinned to 0 or 255.
Mat gray2rainbow(const Mat& scaledGray)
{
    Mat outputRainbow(scaledGray.size(), CV_8UC3);
    for (int row = 0; row < scaledGray.rows; ++row)
    {
        for (int col = 0; col < scaledGray.cols; ++col)
        {
            const int v = scaledGray.at<uchar>(row, col);
            Vec3b& px = outputRainbow.at<Vec3b>(row, col);
            if (v <= 51)
            {
                // Band 1: channel 1 ramps up.
                px = Vec3b(255, static_cast<uchar>(v * 5), 0);
            }
            else if (v <= 102)
            {
                // Band 2: channel 0 ramps down.
                px = Vec3b(static_cast<uchar>(255 - (v - 51) * 5), 255, 0);
            }
            else if (v <= 153)
            {
                // Band 3: channel 2 ramps up.
                px = Vec3b(0, 255, static_cast<uchar>((v - 102) * 5));
            }
            else if (v <= 204)
            {
                // Band 4: channel 1 ramps down toward 127 (rounded).
                px = Vec3b(0, 255 - static_cast<unsigned char>((v - 153) * 128.0 / 51 + 0.5), 255);
            }
            else
            {
                // Band 5 (v <= 255 always holds for uchar): channel 1
                // continues down from 127 to 0 (rounded).
                px = Vec3b(0, 127 - static_cast<unsigned char>((v - 204) * 127.0 / 51 + 0.5), 255);
            }
        }
    }
    return outputRainbow;
}
int main(int argc, char* argv[])
{
Mat inputGray = imread("", 0);
Mat scaledGray = scaleGray(inputGray);
Mat pseudocolor = gray2pseudocolor(scaledGray);
Mat rainbow = gray2rainbow(scaledGray);
imwrite("scaledGray.jpg", scaledGray);
imwrite("pseudocolor.jpg", pseudocolor);
imwrite("rainbow.jpg", rainbow);
return 0;
}
You can then run the colormap program on each saved index image. Note that the two code sections above each define their own main(), so they cannot be compiled into a single translation unit as-is: keep them as two separate programs, or rename one entry point and call it from the other's main().