SHOGUN  6.1.3
ExponentialARDKernel.cpp
Go to the documentation of this file.
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License as published by
4  * the Free Software Foundation; either version 3 of the License, or
5  * (at your option) any later version.
6  *
7  * Written (W) 2015 Wu Lin
8  * Written (W) 2012 Jacob Walker
9  *
10  * Adapted from WeightedDegreeRBFKernel.cpp
11  */
12 
17 
18 using namespace shogun;
19 
21 {
22  init();
23 }
24 
26 {
28 }
29 
// Register serialisable parameters and set the default weight bookkeeping
// (a 1x1 scalar weight). NOTE(review): embedded lines 32, 34-35, 42 and 48
// are missing from this extraction — presumably the default initialisation of
// m_log_weights / m_ARD_type and the trailing gradient-availability argument
// of the "log_weights" SG_ADD (its call ends with a dangling comma below);
// verify against the original source.
30 void CExponentialARDKernel::init()
31 {
33 
36 
37  m_weights_rows=1.0;
38  m_weights_cols=1.0;
39 
40 
// Weights are stored (and optimised) in log domain; this SG_ADD call is
// truncated here — its closing argument is on a missing line.
41  SG_ADD(&m_log_weights, "log_weights", "Feature weights in log domain", MS_AVAILABLE,
43 
44  SG_ADD(&m_weights_rows, "weights_rows", "Row of feature weights", MS_NOT_AVAILABLE);
45  SG_ADD(&m_weights_cols, "weights_cols", "Column of feature weights", MS_NOT_AVAILABLE);
46  SG_ADD((int *)(&m_ARD_type), "type", "ARD kernel type", MS_NOT_AVAILABLE);
47 
// m_weights_raw caches the weights in the standard (non-log) domain.
49  SG_ADD(&m_weights_raw, "weights_raw", "Features weights in standard domain", MS_NOT_AVAILABLE);
50 
51 }
52 
54 {
55  REQUIRE(hs, "Features not set!\n");
56  CDenseFeatures<float64_t> * dense_hs=dynamic_cast<CDenseFeatures<float64_t> *>(hs);
57  if (dense_hs)
58  return dense_hs->get_feature_vector(idx);
59 
60  CDotFeatures * dot_hs=dynamic_cast<CDotFeatures *>(hs);
61  REQUIRE(dot_hs, "Kernel only supports DotFeatures\n");
62  return dot_hs->get_computed_dot_feature_vector(idx);
63 
64 }
65 
66 
68 {
69  REQUIRE(weights.num_rows>0 && weights.num_cols>0, "Weights matrix is non-empty\n");
70  if (weights.num_rows==1)
71  {
72  if(weights.num_cols>1)
73  {
74  SGVector<float64_t> vec(weights.matrix,weights.num_cols,false);
75  set_vector_weights(vec);
76  }
77  else
78  set_scalar_weights(weights[0]);
79  }
80  else
81  set_matrix_weights(weights);
82 }
83 
85 {
87  {
89  {
92  }
93  else if (m_ARD_type==KT_FULL)
94  {
97  index_t offset=0;
98  for (int i=0;i<m_weights_raw.num_cols && i<m_weights_raw.num_rows;i++)
99  {
101  std::copy(m_log_weights.vector+offset,m_log_weights.vector+offset+m_weights_raw.num_rows-i,begin+i);
102  begin[i]=CMath::exp(begin[i]);
103  offset+=m_weights_raw.num_rows-i;
104  }
105  }
106  else
107  {
108  SG_ERROR("Unsupported ARD type\n");
109  }
111  }
112 }
113 
115 {
118 }
119 
121 {
122  REQUIRE(weight>0, "Scalar (%f) weight should be positive\n",weight);
126 
127  m_weights_rows=1.0;
128  m_weights_cols=1.0;
129 }
130 
132 {
133  REQUIRE(rhs==NULL && lhs==NULL,
134  "Setting vector weights must be before initialize features\n");
135  REQUIRE(weights.vlen>0, "Vector weight should be non-empty\n");
137  for(index_t i=0; i<weights.vlen; i++)
138  {
139  REQUIRE(weights[i]>0, "Each entry of vector weight (v[%d]=%f) should be positive\n",
140  i,weights[i]);
141  m_log_weights[i]=CMath::log(weights[i]);
142  }
144 
145  m_weights_rows=1.0;
146  m_weights_cols=weights.vlen;
147 }
148 
150 {
151  REQUIRE(rhs==NULL && lhs==NULL,
152  "Setting matrix weights must be before initialize features\n");
153  REQUIRE(weights.num_cols>0, "Matrix weight should be non-empty");
154  REQUIRE(weights.num_rows>=weights.num_cols,
155  "Number of row (%d) must be not less than number of column (%d)",
156  weights.num_rows, weights.num_cols);
157 
158  m_weights_rows=weights.num_rows;
159  m_weights_cols=weights.num_cols;
163 
164  index_t offset=0;
165  for (int i=0; i<weights.num_cols && i<weights.num_rows; i++)
166  {
167  float64_t* begin=weights.get_column_vector(i);
168  REQUIRE(begin[i]>0, "The diagonal entry of matrix weight (w(%d,%d)=%f) should be positive\n",
169  i,i,begin[i]);
170  std::copy(begin+i,begin+weights.num_rows,m_log_weights.vector+offset);
171  m_log_weights[offset]=CMath::log(m_log_weights[offset]);
172  offset+=weights.num_rows-i;
173  }
174 }
175 
177 {
178  init();
179 }
180 
// Constructor taking left/right dot features and a cache size. NOTE(review):
// the first line of the signature (embedded line 181) is missing — presumably
// CExponentialARDKernel::CExponentialARDKernel(CDotFeatures* l, ...).
// Registers parameters via init(), then initialises the kernel on (l, r).
182  CDotFeatures* r, int32_t size) : CDotKernel(size)
183 {
184  init();
185  init(l,r);
186 }
187 
188 bool CExponentialARDKernel::init(CFeatures* l, CFeatures* r)
189 {
190  cleanup();
191  CDotKernel::init(l, r);
192  int32_t dim=((CDotFeatures*) l)->get_dim_feature_space();
193  if (m_ARD_type==KT_FULL)
194  {
195  REQUIRE(m_weights_rows==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
196  dim, m_weights_rows);
197  }
198  else if (m_ARD_type==KT_DIAG)
199  {
200  REQUIRE(m_log_weights.vlen==dim, "Dimension mismatch between features (%d) and weights (%d)\n",
201  dim, m_log_weights.vlen);
202  }
203  return init_normalizer();
204 }
205 
206 
208 {
209  REQUIRE(m_ARD_type==KT_FULL || m_ARD_type==KT_DIAG, "This method only supports vector weights or matrix weights\n");
211  if (m_ARD_type==KT_FULL)
212  {
214  index_t offset=0;
215  // TODO: investigate a better way to make this
216  // block thread-safe
217  SGVector<float64_t> log_weights = m_log_weights.clone();
218  //can be done it in parallel
219  for (index_t i=0;i<m_weights_rows && i<m_weights_cols;i++)
220  {
221  SGMatrix<float64_t> weights(log_weights.vector+offset,1,m_weights_rows-i,false);
222  weights[0]=CMath::exp(weights[0]);
223  SGMatrix<float64_t> rtmp(vec.vector+i,vec.vlen-i,1,false);
225  weights[0]=CMath::log(weights[0]);
226  res[i]=s[0];
227  offset+=m_weights_rows-i;
228  }
229  }
230  else
231  {
232  SGMatrix<float64_t> rtmp(vec.vector,vec.vlen,1,false);
234  res = linalg::element_prod(weights, rtmp);
235  }
236  return res;
237 }
238 
// Build the weighted right-hand-side vector for kernel computation.
// NOTE(review): the first line of the signature (embedded line 239) is
// missing — per the index it is
// compute_right_product(SGVector<float64_t> vec, float64_t& scalar_weight).
// For KT_SCALAR the vector is returned as a no-copy view and the scalar
// weight is folded into scalar_weight; otherwise the weights are applied
// element/matrix-wise via get_weighted_vector().
240  float64_t & scalar_weight)
241 {
242  SGMatrix<float64_t> right;
243 
244  if (m_ARD_type==KT_SCALAR)
245  {
// No-copy view of vec as a column matrix (last arg false = no ref-count).
246  right=SGMatrix<float64_t>(vec.vector,vec.vlen,1,false);
// Weight is stored in log domain; accumulate it multiplicatively.
247  scalar_weight*=CMath::exp(m_log_weights[0]);
248  }
249  else if (m_ARD_type==KT_DIAG || m_ARD_type==KT_FULL)
250  right=get_weighted_vector(vec);
251  else
252  {
253  SG_ERROR("Unsupported ARD type\n");
254  }
255  return right;
256 }
257 
259 {
260  REQUIRE(lhs, "Left features not set!\n");
261  REQUIRE(rhs, "Right features not set!\n");
262 
263  if (m_ARD_type!=KT_SCALAR)
264  {
265  REQUIRE(index>=0, "Index (%d) must be non-negative\n",index);
266  REQUIRE(index<m_log_weights.vlen, "Index (%d) must be within #dimension of weights (%d)\n",
267  index, m_log_weights.vlen);
268  }
269 }
virtual void cleanup()
Definition: Kernel.cpp:172
SGVector< float64_t > m_log_weights
virtual void update_parameter_hash()
Definition: SGObject.cpp:282
int32_t index_t
Definition: common.h:72
virtual SGMatrix< float64_t > compute_right_product(SGVector< float64_t >vec, float64_t &scalar_weight)
virtual void set_weights(SGMatrix< float64_t > weights)
#define SG_ERROR(...)
Definition: SGIO.h:128
#define REQUIRE(x,...)
Definition: SGIO.h:181
virtual void set_scalar_weights(float64_t weight)
Features that support dot products among other operations.
Definition: DotFeatures.h:44
SGMatrix< float64_t > get_weighted_vector(SGVector< float64_t > vec)
ST * get_feature_vector(int32_t num, int32_t &len, bool &dofree)
SGMatrix< float64_t > m_weights_raw
Template class DotKernel is the base class for kernels working on DotFeatures.
Definition: DotKernel.h:31
virtual SGMatrix< float64_t > get_weights()
double float64_t
Definition: common.h:60
virtual SGVector< float64_t > get_feature_vector(int32_t idx, CFeatures *hs)
index_t num_rows
Definition: SGMatrix.h:495
SGVector< T > clone() const
Definition: SGVector.cpp:262
index_t num_cols
Definition: SGMatrix.h:497
void set_const(T const_elem)
Definition: SGVector.cpp:199
virtual bool init_normalizer()
Definition: Kernel.cpp:167
void matrix_prod(SGMatrix< T > &A, SGVector< T > &b, SGVector< T > &result, bool transpose=false)
virtual void check_weight_gradient_index(index_t index)
CFeatures * rhs
feature vectors to occur on right hand side
virtual void set_vector_weights(SGVector< float64_t > weights)
all of classes and functions are contained in the shogun namespace
Definition: class_list.h:18
CFeatures * lhs
feature vectors to occur on left hand side
The class Features is the base class of all feature objects.
Definition: Features.h:69
static float64_t exp(float64_t x)
Definition: Math.h:551
static float64_t log(float64_t v)
Definition: Math.h:714
SGVector< float64_t > get_computed_dot_feature_vector(int32_t num)
Container< T > exponent(const Container< T > &a)
void set_const(T const_elem)
Definition: SGMatrix.cpp:209
virtual void set_matrix_weights(SGMatrix< float64_t > weights)
#define SG_ADD(...)
Definition: SGObject.h:93
void element_prod(Block< SGMatrix< T >> &a, Block< SGMatrix< T >> &b, SGMatrix< T > &result)
virtual bool parameter_hash_changed()
Definition: SGObject.cpp:296
T * get_column_vector(index_t col) const
Definition: SGMatrix.h:144
index_t vlen
Definition: SGVector.h:571

SHOGUN Machine Learning Toolbox - Documentation