Background subtraction - modifying the parameters of the Gaussian mixture model BackgroundSubtractorMOG2
2022-02-28 07:48:04
This takes OpenCV's open-source code and then modifies its parameters to suit your own needs. For easy use, you can simply copy the following files directly into a new project.
MOG_BGS3.hpp file
#include "opencv2/core/core.hpp"
#include <list>
#include "cv.h"
using namespace cv;
namespace OurMogBgs{
class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
{
public:
virtual ~BackgroundSubtractor();
CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask,
double learningRate=0);
virtual void getBackgroundImage(OutputArray backgroundImage) const;
};
class CV_EXPORTS_W BackgroundSubtractorMOG3 : public BackgroundSubtractor
{
public:
CV_WRAP BackgroundSubtractorMOG3();
CV_WRAP BackgroundSubtractorMOG3(int history, float varThreshold, bool bShadowDetection=true);
virtual ~BackgroundSubtractorMOG3();
virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1);
virtual void getBackgroundImage(OutputArray backgroundImage) const;
virtual void initialize(Size frameSize, int frameType);
protected:
Size frameSize;
int frameType;
Mat bgmodel;
Mat bgmodelUsedModes;
int nframes;
int history;
int nmixtures;
double varThreshold;
float backgroundRatio;
float varThresholdGen;
float fVarInit;
float fVarMin;
float fVarMax;
float fCT;
bool bShadowDetection;
unsigned char nShadowDetection; // value used to mark shadow pixels in the foreground mask
float fTau;
};
}
MOG_BGS3.cpp file
#include "stdafx.h"
#include "MOG_BGS3.hpp"
#include <list>
using namespace cv;
namespace OurMogBgs{
/*
Interface of Gaussian mixture algorithm from:
"Improved adaptive Gausian mixture model for background subtraction"
Z.Zivkovic
International Conference Pattern Recognition, UK, August, 2004
http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
Advantages:
-fast - number of Gausssian components is constantly adapted per pixel.
-performs also shadow detection (see bgfg_segm_test.cpp example)
*/
BackgroundSubtractor::~BackgroundSubtractor() {}
void BackgroundSubtractor::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
{
}
void BackgroundSubtractor::getBackgroundImage(OutputArray backgroundImage) const
{
}
// default parameters of gaussian background detection algorithm
static const int defaultHistory3 = 500; // learning rate; alpha = 1/defaultHistory3
static const float defaultVarThreshold3 = 4.0f*4.0f; // threshold on the squared Mahalanobis distance used to decide whether a pixel is well described by the background model
static const int defaultNMixtures3 = 3; // maximal number of Gaussians in mixture
static const float defaultBackgroundRatio3 = 0.9f; // threshold sum of weights for background test
static const float defaultVarThresholdGen3 = 2.5f*2.5f; // threshold on the squared Mahalanobis distance used to decide whether a sample matches an existing component (Tg)
static const float defaultVarInit3 = 30.0f; // initial variance for newly created components
static const float defaultVarMax3 = 5*defaultVarInit3;
static const float defaultVarMin3 = 4.0f;
// additional parameters
static const float defaultfCT3 = 0.05f; // complexity reduction prior; 0 means no reduction of the number of components
static const unsigned char defaultnShadowDetection3 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
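// Note: the constants above are the main tuning knobs of this copied implementation.
// They are not exposed through the constructor, so change them here and rebuild.
// Illustrative alternatives (example values of my own, not from the original post):
//   a scene with fast illumination changes might use a shorter history and more
//   components, e.g. defaultHistory3 = 200 and defaultNMixtures3 = 5; raising
//   defaultfTau makes the shadow test stricter (fewer pixels classified as shadow).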
struct GaussBGStatModel3Params
{
//image info
int nWidth;
int nHeight;
int nND;//number of data dimensions (image channels)
bool bPostFiltering;//default 1 - do postfiltering
// (further members omitted in this excerpt)
};
// The lines below are an excerpt from the per-pixel GMM update routine
// (see OpenCV's bgfg_gaussmix2.cpp for the full function):
bool fitsPDF = false;//if it remains false a new GMM mode will be added
int nmodes = modesUsed[x], nNewModes = nmodes;//current number of modes in GMM
float totalWeight = 0.f;
float* mean_m = mean;
//
//go through all modes
for( int mode = 0; mode < nmodes; mode++, mean_m += nchannels )
{
float weight = alpha1*gmm[mode].weight + prune;//need only weight if fit is found
int swap_count = 0;
//fit not found yet
if( !fitsPDF )
{
//check if it belongs to some of the remaining modes
float var = gmm[mode].variance; // variance of this Gaussian component
//calculate difference and distance
float dist2;
if( nchannels == 3 )
{
dData[0] = mean_m[0] - data[0];
dData[1] = mean_m[1] - data[1];
dData[2] = mean_m[2] - data[2];
dist2 = dData[0]*dData[0] + dData[1]*dData[1] + dData[2]*dData[2];
}
else
{
dist2 = 0.f;
for( int c = 0; c < nchannels; c++ )
{
dData[c] = mean_m[c] - data[c];
dist2 += dData[c]*dData[c];
}
}
//background? - Tb - usually larger than Tg
if( totalWeight < TB && dist2 < Tb*var )
background = true;
//check fit
if( dist2 < Tg*var )
{
//belongs to the mode
fitsPDF = true;
//update distribution
//update weight
weight += alphaT;
float k = alphaT/weight;
//update mean
for( int c = 0; c < nchannels; c++ )
mean_m[c] -= k*dData[c];
//update variance
float varnew = var + k*(dist2 - var);
//limit the variance
varnew = MAX(varnew, varMin);
varnew = MIN(varnew, varMax);
gmm[mode].variance = varnew;
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for( int i = mode; i > 0; i-- )
{
//check one up
if( weight < gmm[i-1].weight )
break;
swap_count++;
//swap one up
std::swap(gmm[i], gmm[i-1]);
for( int c = 0; c < nchannels; c++ )
std::swap(mean[i*nchannels + c], mean[(i-1)*nchannels + c]);
// ... (the listing breaks off here in the original post; the remainder of the
// implementation follows OpenCV's bgfg_gaussmix2.cpp)
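The matched-mode branch above is the standard online update of one Gaussian component: the weight is increased by alphaT, and the mean and variance move toward the new sample with step size k = alphaT/weight. A minimal, self-contained sketch of that update for a single-channel component (the names Gauss and updateMatched are mine, for illustration only):
#include <algorithm>

struct Gauss { float weight, mean, variance; };

// One online update step for a component that matched the current sample,
// mirroring the weight/mean/variance updates in the loop above.
inline void updateMatched(Gauss& g, float sample, float alphaT,
                          float varMin, float varMax)
{
    g.weight += alphaT;                  // w <- w + alpha
    float k = alphaT / g.weight;         // adaptive step size
    float d = sample - g.mean;
    g.mean += k * d;                     // mean moves toward the sample
    float varnew = g.variance + k * (d*d - g.variance);
    g.variance = std::min(std::max(varnew, varMin), varMax); // clamp the variance
}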
Main function (name the file yourself)
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include "opencv2/core/core.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "MOG_BGS3.hpp"
using namespace cv;
using namespace std;
using namespace OurMogBgs;
int main()
{
VideoCapture capture("c:\\... \\1.avi");
if( !capture.isOpened() )
{
cout<<"Reading video failed"<<<endl;
return -1;
}
//Get the entire frame number
long totalframenumber = (long)capture.get(CV_CAP_PROP_FRAME_COUNT);
cout<<"The video has "<<totalframenumber<<" frames in total"<<endl;
//set the start frame
long frametostart = 1;
capture.set( CV_CAP_PROP_POS_FRAMES, frametostart );
cout<<"Reading from frame "<<frametostart<<endl;
//set the end frame
int frametostop = 100;
if(frametostop < frametostart)
{
cout<<"The end frame is smaller than the start frame, program error, about to exit! "<<endl;
return -1;
}
else
{
cout<<"end frame for: the "<<frametostop<<"frame"<<endl;
}
double rate = capture.get(CV_CAP_PROP_FPS);
int delay = 1000/rate; // delay between frames in milliseconds
Mat frame;
// foreground image
Mat foreground;
//background image
Mat background;
BackgroundSubtractorMOG3 mog(20,16,true);
bool stop(false);
long currentframe = frametostart;
while( !stop )
{
if( !capture.read(frame) )
{
cout<<"Failed to read image from video or read entire video"<<<endl;
return -2;
}
imshow("input video",frame);
// parameters are: input image, output image, learning rate
mog(frame,foreground,0.005);
mog.getBackgroundImage(background); // return the current background image
imshow("foreground",foreground);
imshow("background",background);
// press Esc to exit; any other key pauses at the current frame
int c = waitKey(delay);
if ( (char)c == 27 || currentframe >= frametostop)
{
stop = true;
}
if ( c >= 0)
{
waitKey(0);
}
currentframe++;
if (currentframe == frametostop)
{
imwrite("c:\\... \\\... ", background);
}
else continue;
}
waitKey(0);
}
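Because bShadowDetection is enabled, the copied implementation marks shadow pixels in the foreground mask with the gray value 127 (defaultnShadowDetection3), while real foreground pixels are 255. A minimal sketch of stripping the shadows inside the display loop, reusing the foreground Mat from above (fgNoShadow is my own name; cv::threshold lives in imgproc, so add its header next to the other includes if cv.h does not already pull it in):
// Keep only definite foreground (255); shadow pixels (127) fall below the threshold.
Mat fgNoShadow;
threshold(foreground, fgNoShadow, 200, 255, THRESH_BINARY);
imshow("foreground without shadows", fgNoShadow);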