SHOGUN  6.1.3
GMNPLib.cpp
Go to the documentation of this file.
1 /*-----------------------------------------------------------------------
2  *
3  * This program is free software; you can redistribute it and/or modify
4  * it under the terms of the GNU General Public License as published by
5  * the Free Software Foundation; either version 3 of the License, or
6  * (at your option) any later version.
7  *
8  * Library of solvers for Generalized Minimal Norm Problem (GMNP).
9  *
10  * Written (W) 1999-2008 Vojtech Franc, xfrancv@cmp.felk.cvut.cz
11  * Copyright (C) 1999-2008 Center for Machine Perception, CTU FEL Prague
12  *
13  *
14 gmnplib.c: Library of solvers for Generalized Minimal Norm Problem (GMNP).
15 
16  Generalized Minimal Norm Problem to solve is
17 
18  min 0.5*alpha'*H*alpha + c'*alpha
19 
20  subject to sum(alpha) = 1, alpha(i) >= 0
21 
22  H [dim x dim] is symmetric positive definite matrix.
23  c [dim x 1] is an arbitrary vector.
24 
25  The precision of the found solution is given by
26  the parameters tmax, tolabs and tolrel which
27  define the stopping conditions:
28 
29  UB-LB <= tolabs -> exit_flag = 1 Abs. tolerance.
30  UB-LB <= UB*tolrel -> exit_flag = 2 Relative tolerance.
31  LB > th -> exit_flag = 3 Threshold on lower bound.
32  t >= tmax -> exit_flag = 0 Number of iterations.
33 
34  UB ... Upper bound on the optimal solution.
35  LB ... Lower bound on the optimal solution.
36  t ... Number of iterations.
37  History ... Value of LB and UB wrt. number of iterations.
38 
39 
40  The following algorithms are implemented:
41  ..............................................
42 
43  - GMNP solver based on improved MDM algorithm 1 (u fixed v optimized)
44  exitflag = gmnp_imdm( &get_col, diag_H, vector_c, dim,
45  tmax, tolabs, tolrel, th, &alpha, &t, &History, verb );
46 
47  For more info refer to V.Franc: Optimization Algorithms for Kernel
48  Methods. Research report. CTU-CMP-2005-22. CTU FEL Prague. 2005.
49  ftp://cmp.felk.cvut.cz/pub/cmp/articles/franc/Franc-PhD.pdf .
50 
51  Modifications:
52  09-sep-2005, VF
53  24-jan-2005, VF
54  26-nov-2004, VF
55  25-nov-2004, VF
56  21-nov-2004, VF
57  20-nov-2004, VF
58  31-may-2004, VF
59  23-Jan-2004, VF
60 
61 -------------------------------------------------------------------- */
62 
63 #include <shogun/base/progress.h>
66 
67 #include <string.h>
68 #include <limits.h>
69 
70 using namespace shogun;
71 
72 #define HISTORY_BUF 1000000
73 
74 #define MINUS_INF INT_MIN
75 #define PLUS_INF INT_MAX
76 
77 #define INDEX(ROW,COL,DIM) ((COL*DIM)+ROW)
78 #define KDELTA(A,B) (A==B)
79 #define KDELTA4(A1,A2,A3,A4) ((A1==A2)||(A1==A3)||(A1==A4)||(A2==A3)||(A2==A4)||(A3==A4))
80 
82 {
83  SG_UNSTABLE("CGMNPLib::CGMNPLib()", "\n")
84 
85  diag_H = NULL;
86  kernel_columns = NULL;
87  cache_index = NULL;
88  first_kernel_inx = 0;
89  Cache_Size = 0;
90  m_num_data = 0;
91  m_reg_const = 0;
92  m_vector_y = 0;
93  m_kernel = NULL;
94 
95  first_virt_inx = 0;
96  memset(&virt_columns, 0, sizeof (virt_columns));
97  m_num_virt_data = 0;
98  m_num_classes = 0;
99 }
100 
102  float64_t* vector_y, CKernel* kernel, int32_t num_data,
103  int32_t num_virt_data, int32_t num_classes, float64_t reg_const)
104 : CSGObject()
105 {
106  m_num_classes=num_classes;
107  m_num_virt_data=num_virt_data;
108  m_reg_const = reg_const;
109  m_num_data = num_data;
110  m_vector_y = vector_y;
111  m_kernel = kernel;
112 
113  Cache_Size = ((int64_t) kernel->get_cache_size())*1024*1024/(sizeof(float64_t)*num_data);
114  Cache_Size = CMath::min(Cache_Size, (int64_t) num_data);
115 
116  SG_INFO("using %d kernel cache lines\n", Cache_Size)
117  ASSERT(Cache_Size>=2)
118 
119  /* allocates memory for kernel cache */
120  kernel_columns = SG_MALLOC(float64_t*, Cache_Size);
121  cache_index = SG_MALLOC(float64_t, Cache_Size);
122 
123  for(int32_t i = 0; i < Cache_Size; i++ )
124  {
125  kernel_columns[i] = SG_MALLOC(float64_t, num_data);
126  cache_index[i] = -2;
127  }
128  first_kernel_inx = 0;
129 
130 
131 
132  for(int32_t i = 0; i < 3; i++ )
133  {
134  virt_columns[i] = SG_MALLOC(float64_t, num_virt_data);
135  }
136  first_virt_inx = 0;
137 
138  diag_H = SG_MALLOC(float64_t, num_virt_data);
139 
140  for(int32_t i = 0; i < num_virt_data; i++ )
141  diag_H[i] = kernel_fce(i,i);
142 }
143 
145 {
146  for(int32_t i = 0; i < Cache_Size; i++ )
147  SG_FREE(kernel_columns[i]);
148 
149  for(int32_t i = 0; i < 3; i++ )
150  SG_FREE(virt_columns[i]);
151 
152  SG_FREE(cache_index);
153  SG_FREE(kernel_columns);
154 
155  SG_FREE(diag_H);
156 }
157 
158 /* ------------------------------------------------------------
159  Returns pointer at a-th column of the kernel matrix.
160  This function maintains FIFO cache of kernel columns.
161 ------------------------------------------------------------ */
163 {
164  float64_t *col_ptr;
165  int32_t i;
166  int32_t inx;
167 
168  inx = -1;
169  for( i=0; i < Cache_Size; i++ ) {
170  if( cache_index[i] == a ) { inx = i; break; }
171  }
172 
173  if( inx != -1 ) {
174  col_ptr = kernel_columns[inx];
175  return( col_ptr );
176  }
177 
178  col_ptr = kernel_columns[first_kernel_inx];
180 
182  if( first_kernel_inx >= Cache_Size ) first_kernel_inx = 0;
183 
184  for( i=0; i < m_num_data; i++ ) {
185  col_ptr[i] = m_kernel->kernel(i,a);
186  }
187 
188  return( col_ptr );
189 }
190 
191 /* ------------------------------------------------------------
192  Computes index of input example and its class label from
193  index of virtual "single-class" example.
194 ------------------------------------------------------------ */
195 void CGMNPLib::get_indices2( int32_t *index, int32_t *c, int32_t i )
196 {
197  *index = i / (m_num_classes-1);
198 
199  *c= (i % (m_num_classes-1))+1;
200  if( *c>= m_vector_y[ *index ]) (*c)++;
201 
202  return;
203 }
204 
205 
206 /* ------------------------------------------------------------
207  Returns pointer at the a-th column of the virtual K matrix.
208 
209  (note: the b-th column must be preserved in the cache during
210  updating but b is from (a(t-2), a(t-1)) where a=a(t) and
211  thus FIFO with three columns does not have to take care of b.)
212 ------------------------------------------------------------ */
213 float64_t* CGMNPLib::get_col( int32_t a, int32_t b )
214 {
215  int32_t i;
216  float64_t *col_ptr;
217  float64_t *ker_ptr;
218  float64_t value;
219  int32_t i1,c1,i2,c2;
220 
221  col_ptr = virt_columns[first_virt_inx++];
222  if( first_virt_inx >= 3 ) first_virt_inx = 0;
223 
224  get_indices2( &i1, &c1, a );
225  ker_ptr = (float64_t*) get_kernel_col( i1 );
226 
227  for( i=0; i < m_num_virt_data; i++ ) {
228  get_indices2( &i2, &c2, i );
229 
230  if( KDELTA4(m_vector_y[i1],m_vector_y[i2],c1,c2) ) {
231  value = (+KDELTA(m_vector_y[i1],m_vector_y[i2])
232  -KDELTA(m_vector_y[i1],c2)
233  -KDELTA(m_vector_y[i2],c1)
234  +KDELTA(c1,c2)
235  )*(ker_ptr[i2]+1);
236  }
237  else
238  {
239  value = 0;
240  }
241 
242  if(a==i) value += m_reg_const;
243 
244  col_ptr[i] = value;
245  }
246 
247  return( col_ptr );
248 }
249 
250 
251 /* --------------------------------------------------------------
252  GMNP solver based on improved MDM algorithm 1.
253 
254  Search strategy: u determined by common rule and v is
255  optimized.
256 
257  Usage: exitflag = gmnp_imdm( &get_col, diag_H, vector_c, dim,
258  tmax, tolabs, tolrel, th, &alpha, &t, &History );
259 -------------------------------------------------------------- */
260 
262  int32_t dim,
263  int32_t tmax,
264  float64_t tolabs,
265  float64_t tolrel,
266  float64_t th,
267  float64_t *alpha,
268  int32_t *ptr_t,
269  float64_t **ptr_History,
270  int32_t verb)
271 {
272  float64_t LB;
273  float64_t UB;
274  float64_t aHa, ac;
275  float64_t tmp, tmp1;
276  float64_t Huu, Huv, Hvv;
277  float64_t min_beta, beta;
278  float64_t max_improv, improv;
279  float64_t lambda;
280  float64_t *History;
281  float64_t *Ha;
282  float64_t *tmp_ptr;
283  float64_t *col_u, *col_v;
284  int32_t u=0;
285  int32_t v=0;
286  int32_t new_u=0;
287  int32_t i;
288  int32_t t;
289  int32_t History_size;
290  int8_t exitflag;
291 
292  /* ------------------------------------------------------------ */
293  /* Initialization */
294  /* ------------------------------------------------------------ */
295 
296  Ha = SG_MALLOC(float64_t, dim);
297  if( Ha == NULL ) SG_ERROR("Not enough memory.")
298 
299  History_size = (tmax < HISTORY_BUF ) ? tmax+1 : HISTORY_BUF;
300  History = SG_MALLOC(float64_t, History_size*2);
301  if( History == NULL ) SG_ERROR("Not enough memory.")
302 
303  /* inx = argmin(0.5*diag_H + vector_c ); */
304  for( tmp1 = PLUS_INF, i = 0; i < dim; i++ ) {
305  tmp = 0.5*diag_H[i] + vector_c[i];
306  if( tmp1 > tmp) {
307  tmp1 = tmp;
308  v = i;
309  }
310  }
311 
312  col_v = (float64_t*)get_col(v,-1);
313 
314  for( min_beta = PLUS_INF, i = 0; i < dim; i++ )
315  {
316  alpha[i] = 0;
317  Ha[i] = col_v[i];
318 
319  beta = Ha[i] + vector_c[i];
320  if( beta < min_beta ) {
321  min_beta = beta;
322  u = i;
323  }
324  }
325 
326  alpha[v] = 1;
327  aHa = diag_H[v];
328  ac = vector_c[v];
329 
330  UB = 0.5*aHa + ac;
331  LB = min_beta - 0.5*aHa;
332 
333  t = 0;
334  History[INDEX(0,0,2)] = LB;
335  History[INDEX(1,0,2)] = UB;
336 
337  if( verb ) {
338  SG_PRINT("Init: UB=%f, LB=%f, UB-LB=%f, (UB-LB)/|UB|=%f \n",
339  UB, LB, UB-LB,(UB-LB)/UB);
340  }
341 
342  /* Stopping conditions */
343  if( UB-LB <= tolabs ) exitflag = 1;
344  else if(UB-LB <= CMath::abs(UB)*tolrel ) exitflag = 2;
345  else if(LB > th ) exitflag = 3;
346  else exitflag = -1;
347 
348  /* ------------------------------------------------------------ */
349  /* Main optimization loop */
350  /* ------------------------------------------------------------ */
351 
352  auto pb = progress(range(10), *this->io);
353  col_u = (float64_t*)get_col(u,-1);
354  while( exitflag == -1 )
355  {
356  t++;
357 
358  col_v = (float64_t*)get_col(v,u);
359 
360  /* Adaptation rule and update */
361  Huu = diag_H[u];
362  Hvv = diag_H[v];
363  Huv = col_u[v];
364 
365  lambda = (Ha[v]-Ha[u]+vector_c[v]-vector_c[u])/(alpha[v]*(Huu-2*Huv+Hvv));
366  if( lambda < 0 ) lambda = 0; else if (lambda > 1) lambda = 1;
367 
368  aHa = aHa + 2*alpha[v]*lambda*(Ha[u]-Ha[v])+
369  lambda*lambda*alpha[v]*alpha[v]*(Huu-2*Huv+Hvv);
370 
371  ac = ac + lambda*alpha[v]*(vector_c[u]-vector_c[v]);
372 
373  tmp = alpha[v];
374  alpha[u]=alpha[u]+lambda*alpha[v];
375  alpha[v]=alpha[v]-lambda*alpha[v];
376 
377  UB = 0.5*aHa + ac;
378 
379 /* max_beta = MINUS_INF;*/
380  for( min_beta = PLUS_INF, i = 0; i < dim; i++ )
381  {
382  Ha[i] = Ha[i] + lambda*tmp*(col_u[i] - col_v[i]);
383 
384  beta = Ha[i]+ vector_c[i];
385 
386  if( beta < min_beta )
387  {
388  new_u = i;
389  min_beta = beta;
390  }
391  }
392 
393  LB = min_beta - 0.5*aHa;
394  u = new_u;
395  col_u = (float64_t*)get_col(u,-1);
396 
397  /* search for optimal v while u is fixed */
398  for( max_improv = MINUS_INF, i = 0; i < dim; i++ ) {
399 
400  if( alpha[i] != 0 ) {
401  beta = Ha[i] + vector_c[i];
402 
403  if( beta >= min_beta ) {
404 
405  tmp = diag_H[u] - 2*col_u[i] + diag_H[i];
406  if( tmp != 0 ) {
407  improv = (0.5*(beta-min_beta)*(beta-min_beta))/tmp;
408 
409  if( improv > max_improv ) {
410  max_improv = improv;
411  v = i;
412  }
413  }
414  }
415  }
416  }
417 
418  /* Stopping conditions */
419  if( UB-LB <= tolabs ) exitflag = 1;
420  else if( UB-LB <= CMath::abs(UB)*tolrel) exitflag = 2;
421  else if(LB > th ) exitflag = 3;
422  else if(t >= tmax) exitflag = 0;
423 
424  /* print info */
425  pb.print_absolute(
426  CMath::abs((UB - LB) / UB), -CMath::log10(CMath::abs(UB - LB)),
427  -CMath::log10(1.0), -CMath::log10(tolrel));
428 
429  if (verb && (t % verb) == 0)
430  {
431  SG_PRINT(
432  "%d: UB=%f, LB=%f, UB-LB=%f, (UB-LB)/|UB|=%f \n", t, UB, LB,
433  UB - LB, (UB - LB) / UB);
434  }
435 
436  /* Store selected values */
437  if (t < History_size)
438  {
439  History[INDEX(0, t, 2)] = LB;
440  History[INDEX(1, t, 2)] = UB;
441  }
442  else
443  {
444  tmp_ptr = SG_MALLOC(float64_t, (History_size + HISTORY_BUF) * 2);
445  if (tmp_ptr == NULL)
446  SG_ERROR("Not enough memory.")
447  for (i = 0; i < History_size; i++)
448  {
449  tmp_ptr[INDEX(0, i, 2)] = History[INDEX(0, i, 2)];
450  tmp_ptr[INDEX(1, i, 2)] = History[INDEX(1, i, 2)];
451  }
452  tmp_ptr[INDEX(0, t, 2)] = LB;
453  tmp_ptr[INDEX(1, t, 2)] = UB;
454 
455  History_size += HISTORY_BUF;
456  SG_FREE(History);
457  History = tmp_ptr;
458  }
459  }
460 
461  /* print info about last iteration*/
462  pb.complete_absolute();
463  if(verb && (t % verb) ) {
464  SG_PRINT("exit: UB=%f, LB=%f, UB-LB=%f, (UB-LB)/|UB|=%f \n",
465  UB, LB, UB-LB,(UB-LB)/UB);
466  }
467 
468 
469  /*------------------------------------------------------- */
470  /* Set outputs */
471  /*------------------------------------------------------- */
472  (*ptr_t) = t;
473  (*ptr_History) = History;
474 
475  /* Free memory */
476  SG_FREE(Ha);
477 
478  return( exitflag );
479 }
480 
481 /* ------------------------------------------------------------
482  Returns (a,b)-th element of the virtual kernel matrix
483  of size [num_virt_data x num_virt_data].
484 ------------------------------------------------------------ */
485 float64_t CGMNPLib::kernel_fce( int32_t a, int32_t b )
486 {
487  float64_t value;
488  int32_t i1,c1,i2,c2;
489 
490  get_indices2( &i1, &c1, a );
491  get_indices2( &i2, &c2, b );
492 
493  if( KDELTA4(m_vector_y[i1],m_vector_y[i2],c1,c2) ) {
494  value = (+KDELTA(m_vector_y[i1],m_vector_y[i2])
495  -KDELTA(m_vector_y[i1],c2)
496  -KDELTA(m_vector_y[i2],c1)
497  +KDELTA(c1,c2)
498  )*(m_kernel->kernel( i1, i2 )+1);
499  }
500  else
501  {
502  value = 0;
503  }
504 
505  if(a==b) value += m_reg_const;
506 
507  return( value );
508 }
#define SG_INFO(...)
Definition: SGIO.h:117
int32_t first_kernel_inx
Definition: GMNPLib.h:148
float64_t * virt_columns[3]
Definition: GMNPLib.h:163
int32_t first_virt_inx
Definition: GMNPLib.h:161
float64_t * m_vector_y
Definition: GMNPLib.h:156
#define PLUS_INF
Definition: GMNPLib.cpp:75
float64_t m_reg_const
Definition: GMNPLib.h:154
PRange< T > progress(Range< T > range, const SGIO &io, std::string prefix="PROGRESS: ", SG_PRG_MODE mode=UTF8, std::function< bool()> condition=[](){return true;})
Definition: progress.h:712
static float64_t log10(float64_t v)
Definition: Math.h:693
CKernel * m_kernel
Definition: GMNPLib.h:158
#define SG_ERROR(...)
Definition: SGIO.h:128
float64_t ** kernel_columns
Definition: GMNPLib.h:144
float64_t kernel(int32_t idx_a, int32_t idx_b)
float64_t * get_col(int32_t a, int32_t b)
Definition: GMNPLib.cpp:213
Range< T > range(T rend)
Definition: range.h:136
int32_t m_num_classes
Definition: GMNPLib.h:167
float64_t * cache_index
Definition: GMNPLib.h:146
float64_t * diag_H
Definition: GMNPLib.h:142
virtual ~CGMNPLib()
Definition: GMNPLib.cpp:144
void get_indices2(int32_t *index, int32_t *c, int32_t i)
Definition: GMNPLib.cpp:195
#define HISTORY_BUF
Definition: GMNPLib.cpp:72
#define SG_PRINT(...)
Definition: SGIO.h:136
int32_t m_num_data
Definition: GMNPLib.h:152
#define ASSERT(x)
Definition: SGIO.h:176
float64_t kernel_fce(int32_t a, int32_t b)
Definition: GMNPLib.cpp:485
Class SGObject is the base class of all shogun objects.
Definition: SGObject.h:124
double float64_t
Definition: common.h:60
int8_t gmnp_imdm(float64_t *vector_c, int32_t dim, int32_t tmax, float64_t tolabs, float64_t tolrel, float64_t th, float64_t *alpha, int32_t *ptr_t, float64_t **ptr_History, int32_t verb)
Definition: GMNPLib.cpp:261
#define INDEX(ROW, COL, DIM)
Definition: GMNPLib.cpp:77
#define MINUS_INF
Definition: GMNPLib.cpp:74
int32_t m_num_virt_data
Definition: GMNPLib.h:165
#define KDELTA(A, B)
Definition: GMNPLib.cpp:78
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
The Kernel base class.
int32_t get_cache_size()
float64_t * get_kernel_col(int32_t a)
Definition: GMNPLib.cpp:162
int64_t Cache_Size
Definition: GMNPLib.h:150
#define SG_UNSTABLE(func,...)
Definition: SGIO.h:131
static T min(T a, T b)
Definition: Math.h:138
#define KDELTA4(A1, A2, A3, A4)
Definition: GMNPLib.cpp:79
static T abs(T a)
Definition: Math.h:161

SHOGUN Machine Learning Toolbox - Documentation