Learn how easy it is to sync an existing GitHub or Google Code repo to a SourceForge project! See Demo

Close

[73298b]: contrib / gel / vgel / vgel_kl.cxx Maximize Restore History

Download this file

vgel_kl.cxx    244 lines (197 with data), 7.2 kB

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
#include <vgel/vgel_kl.h>
#include <vil/vil_byte.h>
#include <vil/vil_pixel.h>
#include <vil/vil_memory_image_of.h>
#include <vil/vil_image_as.h>
#include <vgel/vgel_multi_view_data.h>
#include <vtol/vtol_vertex_2d.h>
#include <vidl/vidl_movie.h>
#include <vcl_iostream.h>
//: Construct a KL tracker wrapper, keeping a copy of the parameter set.
vgel_kl::vgel_kl(const vgel_kl_params & params)
  : _params(params)
{
}
//: Destructor - no owned resources beyond the parameter copy.
vgel_kl::~vgel_kl()
{
}
//: Track point features through an image sequence with the KL tracker.
// Features selected in the first frame are tracked through all later
// frames; each tracked chain of points is stored in \p matches as
// vtol_vertex_2d entries indexed by (view number, match number).
void vgel_kl::match_sequence(vcl_vector<vil_image> &image_list,vgel_multi_view_data_vertex_sptr matches)
{
  // Uses the KL tracker to track points through an image
  int nFeatures = _params.numpoints;
  int nFrames = image_list.size();

  // If there are no frames in this movie, then skip
  if (nFrames < 1) return;

  // Set up the context
  KLT_TrackingContext tc = KLTCreateTrackingContext();
  KLT_FeatureList fl = KLTCreateFeatureList(nFeatures);
  KLT_FeatureTable ft = KLTCreateFeatureTable(nFrames, nFeatures);

  // Apply the defaults.  Sequential mode makes the tracker keep the
  // previous frame's pyramid internally, so only the newest image is
  // required on each KLTTrackFeatures() call.
  set_tracking_context (tc);
  tc->sequentialMode = TRUE;

  int width=image_list[0].width();
  int height=image_list[0].height();

  // Now, get the imagery into a linear buffer
  KLT_PixelType* img1=convert_to_gs_image(image_list[0]);

  // Get some features from the first image
  KLTSelectGoodFeatures(tc, img1, width, height, fl);
  KLTStoreFeatureList(fl, ft, 0);

  for (int i=1; i<nFrames; i++)
  {
    KLT_PixelType* img2=convert_to_gs_image(image_list[i]);
    // Track the points
    KLTTrackFeatures(tc, img1, img2, width, height, fl);
    // Restore lost features
    if (_params.replaceLostPoints)
      KLTReplaceLostFeatures(tc, img2, width, height, fl);
    // Store the values
    KLTStoreFeatureList(fl, ft, i);
    // convert_to_gs_image() allocates with new[]; release the frame
    // buffer now that the tracker is done with it (was leaked before).
    delete [] img2;
  }
  delete [] img1;

  // Go through the feature table and store the matches
  int matchnum = -1;
  int pointnum, viewnum;
  //matches.set_params(ft->nFrames,ft->nFeatures);
  for (pointnum=0; pointnum<ft->nFeatures; pointnum++)
    for (viewnum=0; viewnum<ft->nFrames; viewnum++)
    {
      // Get the current feature
      KLT_Feature feat = ft->feature[pointnum][viewnum];
      // Get the components of this feature
      float x = feat->x;
      float y = feat->y;
      // val == 0: the feature was tracked in from the previous frame,
      // i.e. this is the continuation of the current match chain.
      if (feat->val == 0)
      {
        vtol_vertex_2d_sptr vertex=new vtol_vertex_2d(x,y);
        matches->set(viewnum, matchnum, vertex);
      }
      // val > 0: the feature was (re)selected here - potentially the
      // start of a new chain.
      if (feat->val > 0)
      {
        // Only count it if the feature survives into the next frame;
        // otherwise it is a 1-frame sequence with nothing to match.
        if (viewnum < ft->nFrames-1 &&
            ft->feature[pointnum][viewnum+1]->val == 0)
        {
          // This is a new match
          matchnum++;
          // Store it
          vtol_vertex_2d_sptr vertex=new vtol_vertex_2d(x,y);
          matches->set (viewnum, matchnum, vertex);
        }
      }
    }

  // Release the KLT structures (previously leaked)
  KLTFreeFeatureTable(ft);
  KLTFreeFeatureList(fl);
  KLTFreeTrackingContext(tc);

  // Finally, renumber the matches
  // matches.renumber();
}
//: Track features through every frame of a movie.
// Gathers the movie frames into an image vector and delegates to the
// image-list overload.
void vgel_kl::match_sequence(vidl_movie_sptr movie,vgel_multi_view_data_vertex_sptr matches)
{
  vcl_vector<vil_image> frames;
  vidl_movie::frame_iterator pframe = movie->first();
  for (; pframe <= movie->last(); ++pframe)
    frames.push_back(vil_image(pframe->get_image()));
  match_sequence(frames,matches);
}
//: Extract good features to track from a single image.
// Runs KLT feature selection on a grey-scale copy of \p image and
// returns the feature positions as a heap-allocated vector of
// vtol_vertex_2d; the caller owns (and must delete) the vector.
vcl_vector<vtol_vertex_2d_sptr>* vgel_kl::extract_points(vil_image & image)
{
  int width=image.width();
  int height=image.height();

  vcl_cerr << "Beginning points extraction" << vcl_endl;
  KLT_PixelType* img1=convert_to_gs_image(image);

  // Now, run the extractor
  int nFeatures = _params.numpoints;

  vcl_cerr << "Setting up the context..." << vcl_endl;
  // Set up the context with the default values
  KLT_TrackingContext tc = KLTCreateTrackingContext();
  set_tracking_context (tc);
  // KLTPrintTrackingContext(tc);

  // Set up structure to hold the features.
  vcl_cerr << "Setting up structure to hold the features..." << vcl_endl;
  KLT_FeatureList fl = KLTCreateFeatureList(nFeatures);

  // Extract the features
  vcl_cerr << "Extracting the features..." << vcl_endl;
  KLTSelectGoodFeatures(tc, img1, width, height, fl);

  // Copy the extracted features into a vector of vertices
  vcl_vector<vtol_vertex_2d_sptr> *grp = new vcl_vector<vtol_vertex_2d_sptr>();
  for (int i=0 ; i< fl->nFeatures ; i++)
  {
    float x = fl->feature[i]->x;
    float y = fl->feature[i]->y;
    vtol_vertex_2d_sptr point=new vtol_vertex_2d(x,y);
    // Put the point in the backup list
    grp->push_back(point);
  }

  // Release the temporaries: the grey-scale buffer (new[]), the
  // feature list and the tracking context were all leaked before.
  delete [] img1;
  KLTFreeFeatureList(fl);
  KLTFreeTrackingContext(tc);

  return grp;
}
//: Convert a vil_image to a linear array of grey-scale pixels for KLT.
// Supported formats are VIL_RGB_BYTE (converted to grey via
// vil_image_as_byte) and VIL_BYTE; any other format returns NULL.
// The caller owns the returned buffer and must release it with
// delete[].
KLT_PixelType* vgel_kl::convert_to_gs_image(vil_image &image)
{
  bool is_rgb  = vil_pixel_format(image)==VIL_RGB_BYTE;
  bool is_byte = vil_pixel_format(image)==VIL_BYTE;
  if (!is_rgb && !is_byte)
    return NULL; // unsupported pixel format

  if (is_rgb)
    vcl_cerr << "Converting image to grey scale..." << vcl_endl;

  int w=image.width();
  int h=image.height();
  vcl_cerr << "width: " <<w<< " height: "<<h<< vcl_endl;

  // vil_image_as_byte() does the actual grey-scale conversion; fetch
  // the whole section into a byte image, then widen each pixel to
  // KLT's pixel type.
  vil_memory_image_of<vil_byte> ima_mono;
  ima_mono.resize(w,h);
  vil_image_as_byte(image).get_section(ima_mono.get_buffer(), 0, 0, w, h);
  vil_byte* p=ima_mono.get_buffer();

  KLT_PixelType* tab_mono=new KLT_PixelType[w*h];
  // The original copied with tab_mono[i*h+j]=p[i*h+j], which is just
  // an element-wise copy of all w*h pixels - do it with one loop.
  for (int k=0; k<w*h; k++)
    tab_mono[k]=(KLT_PixelType)p[k];
  return tab_mono;
}
//: Copy the wrapper's parameter block into a KLT tracking context.
// Each field of _params maps one-to-one onto the corresponding KLT
// context field; the two KLT helper calls at the end then derive the
// dependent context values from what was just set.
void vgel_kl::set_tracking_context( KLT_TrackingContext tc)
{
  /* Set values to values derived from the parameters */
  tc->mindist               = _params.mindist;
  tc->window_width          = _params.window_width;
  tc->window_height         = _params.window_height;
  tc->sequentialMode        = _params.sequentialMode;
  tc->smoothBeforeSelecting = _params.smoothBeforeSelecting;
  tc->writeInternalImages   = _params.writeInternalImages;
  tc->min_eigenvalue        = _params.min_eigenvalue;
  tc->min_determinant       = _params.min_determinant;
  tc->max_iterations        = _params.max_iterations;
  tc->min_displacement      = _params.min_displacement;
  tc->max_residue           = _params.max_residue;
  tc->grad_sigma            = _params.grad_sigma;
  tc->smooth_sigma_fact     = _params.smooth_sigma_fact;
  tc->pyramid_sigma_fact    = _params.pyramid_sigma_fact;
  tc->nSkippedPixels        = _params.nSkippedPixels;
  // klt functions to complete the setup
  KLTChangeTCPyramid (tc, _params.search_range); //set nPyramidLevels and subsampling
  KLTUpdateTCBorder (tc); //set borderx and bordery
}