SHOGUN 6.1.3
LinalgNamespace.h
/*
 * Copyright (c) 2016, Shogun-Toolbox e.V. <shogun-team@shogun-toolbox.org>
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: 2016 Pan Deng, Soumyajit De, Heiko Strathmann, Viktor Gal
 */

#ifndef LINALG_NAMESPACE_H_
#define LINALG_NAMESPACE_H_

namespace shogun
{
    namespace linalg
    {
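
        /** Infer the appropriate backend for linalg operations from the
         * given container (SGVector or SGMatrix): the GPU backend if the
         * data lives on GPU, otherwise the CPU backend.
         *
         * @param a SGVector or SGMatrix
         * @return pointer to the inferred LinalgBackendBase backend
         */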
        template <typename T, template <typename> class Container>
        LinalgBackendBase* infer_backend(const Container<T>& a)
        {
            if (a.on_gpu())
            {
                if (sg_linalg->get_gpu_backend())
                    return sg_linalg->get_gpu_backend();
                else
                {
                    SG_SERROR(
                        "Vector or matrix is on GPU but no GPU backend registered. \
                        This can happen if the GPU backend was de-activated \
                        after memory has been transferred to GPU.\n");
                    return NULL;
                }
            }
            else
                return sg_linalg->get_cpu_backend();
        }

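        /** Infer the backend for an operation on two containers. Both
         * operands must live in the same memory space; mixing a GPU-resident
         * operand with a CPU-resident one raises an error.
         *
         * @param a first SGVector or SGMatrix
         * @param b second SGVector or SGMatrix
         * @return pointer to the inferred LinalgBackendBase backend
         */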
        template <typename T, template <typename> class Container>
        LinalgBackendBase*
        infer_backend(const Container<T>& a, const Container<T>& b)
        {
            if (a.on_gpu() && b.on_gpu())
            {
                if (sg_linalg->get_gpu_backend())
                    return sg_linalg->get_gpu_backend();
                else
                {
                    SG_SERROR(
                        "Vector or matrix is on GPU but no GPU backend registered. \
                        This can happen if the GPU backend was de-activated \
                        after memory has been transferred to GPU.\n");
                    return NULL;
                }
            }
            else if (a.on_gpu() || b.on_gpu())
            {
                SG_SERROR(
                    "Cannot operate with first vector/matrix on_gpu flag (%d) \
                    and second vector/matrix on_gpu flag (%d).\n",
                    a.on_gpu(), b.on_gpu());
                return NULL;
            }
            else
                return sg_linalg->get_cpu_backend();
        }

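        /** Transfer a vector to GPU memory. The result is written to a
         * separate vector; if no GPU backend is registered the data stays on
         * the CPU and, with linalg warnings enabled, a warning is issued.
         *
         * @param a source vector on CPU
         * @param b destination vector holding the GPU-transferred data
         */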
        template <typename T>
        void to_gpu(SGVector<T>& a, SGVector<T>& b)
        {
            sg_linalg->m_gpu_transfer.lock();

            if (a.on_gpu())
            {
                if (sg_linalg->get_linalg_warnings())
                    SG_SWARNING("The vector is already on GPU.\n");
            }
            else
            {
                LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();

                if (gpu_backend)
                    b = SGVector<T>(gpu_backend->to_gpu(a), a.vlen);
                else
                {
                    if (sg_linalg->get_linalg_warnings())
                        SG_SWARNING("Trying to access GPU memory \
                        without GPU backend registered.\n");
                    b = a;
                }
            }

            sg_linalg->m_gpu_transfer.unlock();
        }

        template <typename T>
        void to_gpu(SGMatrix<T>& a, SGMatrix<T>& b)
        {
            sg_linalg->m_gpu_transfer.lock();

            if (a.on_gpu())
            {
                if (sg_linalg->get_linalg_warnings())
                    SG_SWARNING("The matrix is already on GPU.\n");
            }
            else
            {
                LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();

                if (gpu_backend)
                    b = SGMatrix<T>(
                        gpu_backend->to_gpu(a), a.num_rows, a.num_cols);
                else
                {
                    if (sg_linalg->get_linalg_warnings())
                        SG_SWARNING("Trying to access GPU memory \
                        without GPU backend registered.\n");
                    b = a;
                }
            }

            sg_linalg->m_gpu_transfer.unlock();
        }

        template <typename T, template <typename> class Container>
        void to_gpu(Container<T>& a)
        {
            to_gpu(a, a);
        }

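        /** Transfer a vector from GPU memory back to the CPU. The result is
         * written to a separate vector; if the data is already on the CPU it
         * is simply assigned through.
         *
         * A minimal round-trip sketch (assuming a GPU backend has been
         * registered with the global sg_linalg object):
         * @code
         * SGVector<float64_t> vec(10);
         * linalg::range_fill(vec, 1.0); // 1, 2, ..., 10
         * linalg::to_gpu(vec);          // in-place transfer to GPU memory
         * linalg::from_gpu(vec);        // and back to CPU memory
         * @endcode
         *
         * @param a source vector on GPU
         * @param b destination vector holding the CPU copy
         */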
        template <typename T>
        void from_gpu(SGVector<T>& a, SGVector<T>& b)
        {
            sg_linalg->m_gpu_transfer.lock();
            if (a.on_gpu())
            {
                LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();
                if (gpu_backend)
                {
                    typedef typename std::aligned_storage<
                        sizeof(T), alignof(T)>::type aligned_t;
                    T* data;
                    data = reinterpret_cast<T*>(SG_MALLOC(aligned_t, a.size()));
                    gpu_backend->from_gpu(a, data);
                    b = SGVector<T>(data, a.size());
                }
                else
                    SG_SERROR(
                        "Data memory on GPU but no GPU backend registered. \
                        This can happen if the GPU backend was de-activated \
                        after memory has been transferred to GPU.\n");
            }
            else
            {
                if (sg_linalg->get_linalg_warnings())
                    SG_SWARNING("The data is already on CPU.\n");
                b = a;
            }

            sg_linalg->m_gpu_transfer.unlock();
        }

        template <typename T>
        void from_gpu(SGMatrix<T>& a, SGMatrix<T>& b)
        {
            sg_linalg->m_gpu_transfer.lock();
            if (a.on_gpu())
            {
                LinalgBackendBase* gpu_backend = sg_linalg->get_gpu_backend();
                if (gpu_backend)
                {
                    typedef typename std::aligned_storage<
                        sizeof(T), alignof(T)>::type aligned_t;
                    T* data;
                    data = reinterpret_cast<T*>(
                        SG_MALLOC(aligned_t, a.num_rows * a.num_cols));
                    gpu_backend->from_gpu(a, data);
                    b = SGMatrix<T>(data, a.num_rows, a.num_cols);
                }
                else
                    SG_SERROR(
                        "Data memory on GPU but no GPU backend registered. \
                        This can happen if the GPU backend was de-activated \
                        after memory has been transferred to GPU.\n");
            }
            else
            {
                if (sg_linalg->get_linalg_warnings())
                    SG_SWARNING("The data is already on CPU.\n");
                b = a;
            }

            sg_linalg->m_gpu_transfer.unlock();
        }

        template <typename T, template <typename> class Container>
        void from_gpu(Container<T>& a)
        {
            from_gpu(a, a);
        }

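        /** Element-wise addition of two vectors into a preallocated result,
         * result = alpha * a + beta * b. All three vectors must have the
         * same length and live in the same memory space (CPU or GPU).
         *
         * A minimal usage sketch:
         * @code
         * SGVector<float64_t> a(3), b(3), result(3);
         * linalg::set_const(a, 1.0);
         * linalg::set_const(b, 2.0);
         * linalg::add(a, b, result, 0.5, 1.0); // each entry becomes 2.5
         * @endcode
         *
         * @param a first vector
         * @param b second vector
         * @param result vector receiving alpha * a + beta * b
         * @param alpha scaling factor for a (default 1)
         * @param beta scaling factor for b (default 1)
         */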
        template <typename T>
        void
        add(SGVector<T>& a, SGVector<T>& b, SGVector<T>& result, T alpha = 1,
            T beta = 1)
        {
            REQUIRE(
                a.vlen == b.vlen,
                "Length of vector a (%d) doesn't match vector b (%d).\n",
                a.vlen, b.vlen);
            REQUIRE(
                result.vlen == b.vlen,
                "Length of vector result (%d) doesn't match vector a (%d).\n",
                result.vlen, a.vlen);

            REQUIRE(
                !(result.on_gpu() ^ a.on_gpu()), "Cannot operate with vector "
                                                 "result on_gpu (%d) and "
                                                 "vector a on_gpu (%d).\n",
                result.on_gpu(), a.on_gpu());
            REQUIRE(
                !(result.on_gpu() ^ b.on_gpu()), "Cannot operate with vector "
                                                 "result on_gpu (%d) and "
                                                 "vector b on_gpu (%d).\n",
                result.on_gpu(), b.on_gpu());

            infer_backend(a, b)->add(a, b, alpha, beta, result);
        }

        template <typename T>
        void
        add(SGMatrix<T>& a, SGMatrix<T>& b, SGMatrix<T>& result, T alpha = 1,
            T beta = 1)
        {
            REQUIRE(
                (a.num_rows == b.num_rows),
                "Number of rows of matrix a (%d) must match matrix b (%d).\n",
                a.num_rows, b.num_rows);
            REQUIRE(
                (a.num_cols == b.num_cols), "Number of columns of matrix a "
                                            "(%d) must match matrix b (%d).\n",
                a.num_cols, b.num_cols);

            REQUIRE(
                !(result.on_gpu() ^ a.on_gpu()), "Cannot operate with matrix "
                                                 "result on_gpu (%d) and "
                                                 "matrix a on_gpu (%d).\n",
                result.on_gpu(), a.on_gpu());
            REQUIRE(
                !(result.on_gpu() ^ b.on_gpu()), "Cannot operate with matrix "
                                                 "result on_gpu (%d) and "
                                                 "matrix b on_gpu (%d).\n",
                result.on_gpu(), b.on_gpu());

            infer_backend(a, b)->add(a, b, alpha, beta, result);
        }

        template <typename T, template <typename> class Container>
        Container<T>
        add(Container<T>& a, Container<T>& b, T alpha = 1, T beta = 1)
        {
            auto result = a.clone();
            add(a, b, result, alpha, beta);
            return result;
        }

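        /** Add the i-th column of matrix A and vector b, scaled by alpha and
         * beta respectively. The sum is written into the i-th column of the
         * result matrix (matrix overload) or into the result vector (vector
         * overload).
         *
         * @param A matrix providing the column
         * @param i column index
         * @param b vector to be added
         * @param result container receiving alpha * A.col(i) + beta * b
         * @param alpha scaling factor for the matrix column (default 1)
         * @param beta scaling factor for b (default 1)
         */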
        template <typename T>
        void add_col_vec(
            const SGMatrix<T>& A, index_t i, const SGVector<T>& b,
            SGMatrix<T>& result, T alpha = 1, T beta = 1)
        {
            REQUIRE(
                A.num_rows == b.vlen, "Number of rows of matrix A (%d) doesn't "
                                      "match length of vector b (%d).\n",
                A.num_rows, b.vlen);
            REQUIRE(
                result.num_rows == A.num_rows,
                "Number of rows of result (%d) doesn't match matrix A (%d).\n",
                result.num_rows, A.num_rows);
            REQUIRE(
                i >= 0 && i < A.num_cols, "Index i (%d) is out of range (0-%d).\n",
                i, A.num_cols - 1);

            infer_backend(A, SGMatrix<T>(b))
                ->add_col_vec(A, i, b, result, alpha, beta);
        }

        template <typename T>
        void add_col_vec(
            const SGMatrix<T>& A, index_t i, const SGVector<T>& b,
            SGVector<T>& result, T alpha = 1, T beta = 1)
        {
            REQUIRE(
                A.num_rows == b.vlen, "Number of rows of matrix A (%d) doesn't "
                                      "match length of vector b (%d).\n",
                A.num_rows, b.vlen);
            REQUIRE(
                result.vlen == b.vlen,
                "Length of result (%d) doesn't match vector b (%d).\n",
                result.vlen, b.vlen);
            REQUIRE(
                i >= 0 && i < A.num_cols, "Index i (%d) is out of range (0-%d).\n",
                i, A.num_cols - 1);

            infer_backend(A, SGMatrix<T>(b))
                ->add_col_vec(A, i, b, result, alpha, beta);
        }

        template <typename T>
        void add_vector(
            const SGMatrix<T>& A, const SGVector<T>& b, SGMatrix<T>& result,
            T alpha = 1, T beta = 1)
        {
            REQUIRE(
                A.num_rows == b.vlen, "Number of rows of matrix A (%d) doesn't "
                                      "match length of vector b (%d).\n",
                A.num_rows, b.vlen);
            REQUIRE(
                result.num_rows == A.num_rows && result.num_cols == A.num_cols,
                "Dimension mismatch! A (%d x %d) vs result (%d x %d).\n",
                A.num_rows, A.num_cols, result.num_rows, result.num_cols);

            infer_backend(A, SGMatrix<T>(b))
                ->add_vector(A, b, result, alpha, beta);
        }

        template <typename T, template <typename> class Container>
        void add_scalar(Container<T>& a, T b)
        {
            infer_backend(a)->add_scalar(a, b);
        }

        template <typename T>
        void center_matrix(SGMatrix<T>& A)
        {
            REQUIRE(
                A.num_rows == A.num_cols, "Matrix A (%d x %d) is not square!\n",
                A.num_rows, A.num_cols);
            infer_backend(A)->center_matrix(A);
        }

        template <typename T>
        void
        dger(T alpha, const SGVector<T> x, const SGVector<T> y, SGMatrix<T>& A)
        {
            auto x_matrix = SGVector<T>::convert_to_matrix(x, x.vlen, 1, false);
            auto y_matrix = SGVector<T>::convert_to_matrix(y, y.vlen, 1, false);

            auto temp_matrix = SGMatrix<T>::matrix_multiply(
                x_matrix, y_matrix, false, true, alpha);
            add(A, temp_matrix, A);
        }

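        /** Compute the Cholesky factorization of a symmetric, positive
         * definite matrix A, i.e. a triangular matrix L (lower by default)
         * with A = L * L^T.
         *
         * A minimal usage sketch, solving A * x = b via the factorization:
         * @code
         * SGMatrix<float64_t> A(2, 2);
         * A(0, 0) = 4.0; A(0, 1) = 1.0;
         * A(1, 0) = 1.0; A(1, 1) = 3.0;
         * SGVector<float64_t> b(2);
         * linalg::set_const(b, 1.0);
         * auto L = linalg::cholesky_factor(A);
         * auto x = linalg::cholesky_solver(L, b);
         * @endcode
         *
         * @param A symmetric positive definite matrix
         * @param lower whether to return the lower (true) or upper (false)
         * triangular factor
         * @return the Cholesky factor
         */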
        template <typename T>
        SGMatrix<T>
        cholesky_factor(const SGMatrix<T>& A, const bool lower = true)
        {
            return infer_backend(A)->cholesky_factor(A, lower);
        }

        template <typename T>
        SGVector<T> cholesky_solver(
            const SGMatrix<T>& L, const SGVector<T>& b, const bool lower = true)
        {
            return infer_backend(L, SGMatrix<T>(b))
                ->cholesky_solver(L, b, lower);
        }

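        /** Dot (inner) product of two vectors of equal length,
         * dot(a, b) = sum_i a[i] * b[i].
         *
         * A minimal usage sketch:
         * @code
         * SGVector<float64_t> a(3), b(3);
         * linalg::range_fill(a, 1.0); // 1, 2, 3
         * linalg::set_const(b, 2.0);
         * float64_t d = linalg::dot(a, b); // 1*2 + 2*2 + 3*2 = 12
         * @endcode
         *
         * @param a first vector
         * @param b second vector
         * @return the dot product
         */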
        template <typename T>
        T dot(const SGVector<T>& a, const SGVector<T>& b)
        {
            REQUIRE(
                a.vlen == b.vlen,
                "Length of vector a (%d) doesn't match vector b (%d).\n",
                a.vlen, b.vlen);
            return infer_backend(a, b)->dot(a, b);
        }

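        /** Compute eigenvalues and eigenvectors of a square matrix A. The
         * eigenvalues are written into the preallocated vector and the
         * corresponding eigenvectors into the columns of the preallocated
         * matrix.
         *
         * @param A square input matrix
         * @param eigenvalues vector of length A.num_cols receiving the eigenvalues
         * @param eigenvectors matrix of the same shape as A receiving the eigenvectors
         */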
        template <typename T>
        void eigen_solver(
            const SGMatrix<T>& A, SGVector<T>& eigenvalues,
            SGMatrix<T>& eigenvectors)
        {
            REQUIRE(
                A.num_rows == A.num_cols, "Matrix A (%d x %d) is not square!\n",
                A.num_rows, A.num_cols);
            REQUIRE(
                A.num_rows == eigenvectors.num_rows,
                "Number of rows of A (%d) doesn't match eigenvectors' matrix "
                "(%d).\n",
                A.num_rows, eigenvectors.num_rows);
            REQUIRE(
                A.num_cols == eigenvectors.num_cols,
                "Number of columns of A (%d) doesn't match eigenvectors' "
                "matrix (%d).\n",
                A.num_cols, eigenvectors.num_cols);
            REQUIRE(
                A.num_cols == eigenvalues.vlen,
                "Length of eigenvalues' vector doesn't match matrix A.\n");

            infer_backend(A)->eigen_solver(A, eigenvalues, eigenvectors);
        }

        template <typename T>
        void eigen_solver_symmetric(
            const SGMatrix<T>& A, SGVector<T>& eigenvalues,
            SGMatrix<T>& eigenvectors, index_t k = 0)
        {
            REQUIRE(
                A.num_rows == A.num_cols, "Matrix A (%d x %d) is not square!\n",
                A.num_rows, A.num_cols);

            if (k == 0)
                k = A.num_rows;
            REQUIRE(
                k > 0 && k <= A.num_rows,
                "Invalid value of k (%d), it must be in the range 1-%d.\n", k,
                A.num_rows);

            REQUIRE(
                A.num_rows == eigenvectors.num_rows,
                "Number of rows of A (%d) doesn't match eigenvectors' matrix "
                "(%d).\n",
                A.num_rows, eigenvectors.num_rows);
            REQUIRE(
                k == eigenvectors.num_cols, "Number of requested eigenvectors "
                                            "(%d) doesn't match the number "
                                            "of result matrix columns (%d).\n",
                k, eigenvectors.num_cols);
            REQUIRE(
                k == eigenvalues.vlen, "Length of result vector doesn't "
                                       "match the number of requested "
                                       "eigenvalues.\n");

            infer_backend(A)->eigen_solver_symmetric(
                A, eigenvalues, eigenvectors, k);
        }

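        /** Element-wise (Hadamard) product of two matrix blocks of equal
         * dimensions, written into a preallocated result matrix. Blocks are
         * CPU-only, so the result must not live on GPU.
         *
         * @param a first matrix block
         * @param b second matrix block
         * @param result matrix receiving the element-wise product
         */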
        template <typename T>
        void element_prod(
            Block<SGMatrix<T>>& a, Block<SGMatrix<T>>& b, SGMatrix<T>& result)
        {
            REQUIRE(
                a.m_row_size == b.m_row_size && a.m_col_size == b.m_col_size,
                "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n", a.m_row_size,
                a.m_col_size, b.m_row_size, b.m_col_size);
            REQUIRE(
                a.m_row_size == result.num_rows &&
                    a.m_col_size == result.num_cols,
                "Dimension mismatch! A(%d x %d) vs result(%d x %d)\n",
                a.m_row_size, a.m_col_size, result.num_rows, result.num_cols);

            REQUIRE(
                !result.on_gpu(),
                "Cannot operate with matrix result on_gpu (%d) \
                as matrix blocks are on CPU.\n",
                result.on_gpu());

            sg_linalg->get_cpu_backend()->element_prod(a, b, result);
        }

        template <typename T>
        SGMatrix<T> element_prod(Block<SGMatrix<T>>& a, Block<SGMatrix<T>>& b)
        {
            REQUIRE(
                a.m_row_size == b.m_row_size && a.m_col_size == b.m_col_size,
                "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n", a.m_row_size,
                a.m_col_size, b.m_row_size, b.m_col_size);

            SGMatrix<T> result(a.m_row_size, a.m_col_size);
            result.zero();

            element_prod(a, b, result);

            return result;
        }

        template <typename T>
        void element_prod(SGMatrix<T>& a, SGMatrix<T>& b, SGMatrix<T>& result)
        {
            REQUIRE(
                a.num_rows == b.num_rows && a.num_cols == b.num_cols,
                "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n", a.num_rows,
                a.num_cols, b.num_rows, b.num_cols);
            REQUIRE(
                a.num_rows == result.num_rows && a.num_cols == result.num_cols,
                "Dimension mismatch! A(%d x %d) vs result(%d x %d)\n",
                a.num_rows, a.num_cols, result.num_rows, result.num_cols);

            REQUIRE(
                !(result.on_gpu() ^ a.on_gpu()),
                "Cannot operate with matrix result on_gpu (%d) and \
                matrix A on_gpu (%d).\n",
                result.on_gpu(), a.on_gpu());
            REQUIRE(
                !(result.on_gpu() ^ b.on_gpu()),
                "Cannot operate with matrix result on_gpu (%d) and \
                matrix B on_gpu (%d).\n",
                result.on_gpu(), b.on_gpu());

            infer_backend(a, b)->element_prod(a, b, result);
        }

        template <typename T>
        SGMatrix<T> element_prod(SGMatrix<T>& a, SGMatrix<T>& b)
        {
            REQUIRE(
                a.num_rows == b.num_rows && a.num_cols == b.num_cols,
                "Dimension mismatch! A(%d x %d) vs B(%d x %d)\n", a.num_rows,
                a.num_cols, b.num_rows, b.num_cols);

            SGMatrix<T> result;
            result = a.clone();

            element_prod(a, b, result);

            return result;
        }

        template <typename T, template <typename> class Container>
        Container<T> exponent(const Container<T>& a)
        {
            Container<T> result;
            result = a.clone();

            infer_backend(a)->exponent(a, result);

            return result;
        }

        template <typename T>
        void identity(SGMatrix<T>& identity_matrix)
        {
            REQUIRE(
                identity_matrix.num_rows == identity_matrix.num_cols,
                "Matrix is not square!\n");
            infer_backend(identity_matrix)->identity(identity_matrix);
        }

        template <typename T>
        void matrix_prod(
            SGMatrix<T>& A, SGVector<T>& b, SGVector<T>& result,
            bool transpose = false)
        {
            if (transpose)
            {
                REQUIRE(
                    A.num_rows == b.vlen,
                    "Row number of Matrix A (%d) doesn't match \
                    length of vector b (%d).\n",
                    A.num_rows, b.vlen);
                REQUIRE(
                    result.vlen == A.num_cols,
                    "Length of vector result (%d) doesn't match \
                    column number of Matrix A (%d).\n",
                    result.vlen, A.num_cols);
            }
            else
            {
                REQUIRE(
                    A.num_cols == b.vlen,
                    "Column number of Matrix A (%d) doesn't match \
                    length of vector b (%d).\n",
                    A.num_cols, b.vlen);
                REQUIRE(
                    result.vlen == A.num_rows,
                    "Length of vector result (%d) doesn't match \
                    row number of Matrix A (%d).\n",
                    result.vlen, A.num_rows);
            }

            REQUIRE(
                !(result.on_gpu() ^ A.on_gpu()), "Cannot operate with vector "
                                                 "result on_gpu (%d) and "
                                                 "matrix A on_gpu (%d).\n",
                result.on_gpu(), A.on_gpu());
            REQUIRE(
                !(result.on_gpu() ^ b.on_gpu()), "Cannot operate with vector "
                                                 "result on_gpu (%d) and "
                                                 "vector b on_gpu (%d).\n",
                result.on_gpu(), b.on_gpu());

            infer_backend(A, SGMatrix<T>(b))
                ->matrix_prod(A, b, result, transpose, false);
        }

        template <typename T>
        SGVector<T>
        matrix_prod(SGMatrix<T>& A, SGVector<T>& b, bool transpose = false)
        {
            SGVector<T> result;
            if (transpose)
            {
                REQUIRE(
                    A.num_rows == b.vlen,
                    "Row number of Matrix A (%d) doesn't match \
                    length of vector b (%d).\n",
                    A.num_rows, b.vlen);
                result = SGVector<T>(A.num_cols);
            }
            else
            {
                REQUIRE(
                    A.num_cols == b.vlen,
                    "Column number of Matrix A (%d) doesn't match \
                    length of vector b (%d).\n",
                    A.num_cols, b.vlen);
                result = SGVector<T>(A.num_rows);
            }

            if (A.on_gpu())
                to_gpu(result);

            matrix_prod(A, b, result, transpose);
            return result;
        }

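        /** General matrix-matrix product into a preallocated result,
         * result = op(A) * op(B), where op(X) is X or X^T depending on the
         * transpose flags.
         *
         * A minimal usage sketch using the allocating overload below:
         * @code
         * SGMatrix<float64_t> A(2, 3), B(3, 2);
         * linalg::set_const(A, 1.0);
         * linalg::set_const(B, 2.0);
         * auto C = linalg::matrix_prod(A, B); // 2 x 2, every entry equals 6
         * @endcode
         *
         * @param A left operand
         * @param B right operand
         * @param result matrix receiving op(A) * op(B)
         * @param transpose_A whether to use A^T instead of A
         * @param transpose_B whether to use B^T instead of B
         */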
        template <typename T>
        void matrix_prod(
            SGMatrix<T>& A, SGMatrix<T>& B, SGMatrix<T>& result,
            bool transpose_A = false, bool transpose_B = false)
        {
            REQUIRE(
                !(result.on_gpu() ^ A.on_gpu()),
                "Cannot operate with matrix result on_gpu (%d) and \
                matrix A on_gpu (%d).\n",
                result.on_gpu(), A.on_gpu());
            REQUIRE(
                !(result.on_gpu() ^ B.on_gpu()),
                "Cannot operate with matrix result on_gpu (%d) and \
                matrix B on_gpu (%d).\n",
                result.on_gpu(), B.on_gpu());

            if (transpose_A)
            {
                REQUIRE(
                    A.num_cols == result.num_rows,
                    "Number of columns for A (%d) and \
                    number of rows for result (%d) should be equal!\n",
                    A.num_cols, result.num_rows);
                if (transpose_B)
                {
                    REQUIRE(
                        A.num_rows == B.num_cols,
                        "Number of rows for A (%d) and \
                        number of columns for B (%d) should be equal!\n",
                        A.num_rows, B.num_cols);
                    REQUIRE(
                        B.num_rows == result.num_cols,
                        "Number of rows for B (%d) and \
                        number of columns for result (%d) should be equal!\n",
                        B.num_rows, result.num_cols);
                }
                else
                {
                    REQUIRE(
                        A.num_rows == B.num_rows,
                        "Number of rows for A (%d) and \
                        number of rows for B (%d) should be equal!\n",
                        A.num_rows, B.num_rows);
                    REQUIRE(
                        B.num_cols == result.num_cols,
                        "Number of columns for B (%d) and \
                        number of columns for result (%d) should be equal!\n",
                        B.num_cols, result.num_cols);
                }
            }
            else
            {
                REQUIRE(
                    A.num_rows == result.num_rows,
                    "Number of rows for A (%d) and \
                    number of rows for result (%d) should be equal!\n",
                    A.num_rows, result.num_rows);
                if (transpose_B)
                {
                    REQUIRE(
                        A.num_cols == B.num_cols,
                        "Number of columns for A (%d) and \
                        number of columns for B (%d) should be equal!\n",
                        A.num_cols, B.num_cols);
                    REQUIRE(
                        B.num_rows == result.num_cols,
                        "Number of rows for B (%d) and \
                        number of columns for result (%d) should be equal!\n",
                        B.num_rows, result.num_cols);
                }
                else
                {
                    REQUIRE(
                        A.num_cols == B.num_rows,
                        "Number of columns for A (%d) and \
                        number of rows for B (%d) should be equal!\n",
                        A.num_cols, B.num_rows);
                    REQUIRE(
                        B.num_cols == result.num_cols,
                        "Number of columns for B (%d) and \
                        number of columns for result (%d) should be equal!\n",
                        B.num_cols, result.num_cols);
                }
            }

            infer_backend(A, B)->matrix_prod(
                A, B, result, transpose_A, transpose_B);
        }

        template <typename T>
        SGMatrix<T> matrix_prod(
            SGMatrix<T>& A, SGMatrix<T>& B, bool transpose_A = false,
            bool transpose_B = false)
        {
            SGMatrix<T> result;

            if (transpose_A && transpose_B)
            {
                REQUIRE(
                    A.num_rows == B.num_cols, "Number of rows for A (%d) and \
                    number of columns for B (%d) should be equal!\n",
                    A.num_rows, B.num_cols);
                result = SGMatrix<T>(A.num_cols, B.num_rows);
            }
            else if (transpose_A)
            {
                REQUIRE(
                    A.num_rows == B.num_rows, "Number of rows for A (%d) and \
                    number of rows for B (%d) should be equal!\n",
                    A.num_rows, B.num_rows);
                result = SGMatrix<T>(A.num_cols, B.num_cols);
            }
            else if (transpose_B)
            {
                REQUIRE(
                    A.num_cols == B.num_cols,
                    "Number of columns for A (%d) and \
                    number of columns for B (%d) should be equal!\n",
                    A.num_cols, B.num_cols);
                result = SGMatrix<T>(A.num_rows, B.num_rows);
            }
            else
            {
                REQUIRE(
                    A.num_cols == B.num_rows,
                    "Number of columns for A (%d) and \
                    number of rows for B (%d) should be equal!\n",
                    A.num_cols, B.num_rows);
                result = SGMatrix<T>(A.num_rows, B.num_cols);
            }

            if (A.on_gpu())
                to_gpu(result);

            matrix_prod(A, B, result, transpose_A, transpose_B);

            return result;
        }

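        /** BLAS-style dgemv: y = alpha * op(a) * x + beta * y, where op(a)
         * is a or a^T depending on the transpose flag. Implemented here via
         * matrix_prod() followed by add().
         *
         * @param alpha scaling factor for the matrix-vector product
         * @param a matrix operand
         * @param transpose whether to use a^T instead of a
         * @param x vector operand
         * @param beta scaling factor for the existing content of y
         * @param y vector that is updated in place
         */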
        template <typename T>
        void dgemv(
            T alpha, SGMatrix<T> a, bool transpose, SGVector<T> x, T beta,
            SGVector<T>& y)
        {
            auto temp_vector = matrix_prod(a, x, transpose);
            add(temp_vector, y, y, alpha, beta);
        }

        template <typename T>
        void dgemm(
            T alpha, SGMatrix<T> a, SGMatrix<T> b, bool transpose_a,
            bool transpose_b, T beta, SGMatrix<T>& c)
        {
            auto temp_matrix = matrix_prod(a, b, transpose_a, transpose_b);
            add(temp_matrix, c, c, alpha, beta);
        }

        template <typename T, template <typename> class Container>
        T max(const Container<T>& a)
        {
            return infer_backend(a)->max(a);
        }

        template <typename T, template <typename> class Container>
        typename std::enable_if<!std::is_same<T, complex128_t>::value,
                                float64_t>::type
        mean(const Container<T>& a)
        {
            REQUIRE(a.size() > 0, "Vector/Matrix cannot be empty!\n");
            return infer_backend(a)->mean(a);
        }

        template <template <typename> class Container>
        complex128_t mean(const Container<complex128_t>& a)
        {
            REQUIRE(a.size() > 0, "Vector/Matrix cannot be empty!\n");
            return infer_backend(a)->mean(a);
        }

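        /** Euclidean (L2) norm of a vector, computed as
         * norm(a) = sqrt(dot(a, a)).
         *
         * @param a non-empty vector
         * @return the L2 norm of a
         */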
        template <typename T>
        T norm(const SGVector<T>& a)
        {
            REQUIRE(a.size() > 0, "Vector cannot be empty!\n");
            return CMath::sqrt(dot(a, a));
        }

        template <typename T, template <typename> class Container>
        Container<T> qr_solver(const SGMatrix<T>& A, const Container<T>& b)
        {
            REQUIRE(
                A.num_rows == A.num_cols, "Matrix A (%d x %d) is not square!\n",
                A.num_rows, A.num_cols);

            return infer_backend(A, SGMatrix<T>(b))->qr_solver(A, b);
        }

        template <typename T, template <typename> class Container>
        void range_fill(Container<T>& a, const T start = 0)
        {
            infer_backend(a)->range_fill(a, start);
        }

        template <typename T>
        void scale(SGVector<T>& a, SGVector<T>& result, T alpha = 1)
        {
            REQUIRE(
                result.vlen == a.vlen,
                "Length of vector result (%d) doesn't match vector a (%d).\n",
                result.vlen, a.vlen);
            infer_backend(a, result)->scale(a, alpha, result);
        }

        template <typename T>
        void scale(SGMatrix<T>& A, SGMatrix<T>& result, T alpha = 1)
        {
            REQUIRE(
                (A.num_rows == result.num_rows), "Number of rows of matrix A "
                                                 "(%d) must match matrix "
                                                 "result (%d).\n",
                A.num_rows, result.num_rows);
            REQUIRE(
                (A.num_cols == result.num_cols), "Number of columns of matrix "
                                                 "A (%d) must match matrix "
                                                 "result (%d).\n",
                A.num_cols, result.num_cols);
            infer_backend(A, result)->scale(A, alpha, result);
        }

        template <typename T, template <typename> class Container>
        Container<T> scale(Container<T>& a, T alpha = 1)
        {
            auto result = a.clone();
            scale(a, result, alpha);
            return result;
        }

        template <typename T, template <typename> class Container>
        void set_const(Container<T>& a, T value)
        {
            infer_backend(a)->set_const(a, value);
        }

        template <typename T, template <typename> class Container>
        T sum(const Container<T>& a, bool no_diag = false)
        {
            return infer_backend(a)->sum(a, no_diag);
        }

        template <typename T>
        T sum(const Block<SGMatrix<T>>& a, bool no_diag = false)
        {
            return sg_linalg->get_cpu_backend()->sum(a, no_diag);
        }

        template <typename T>
        T sum_symmetric(const SGMatrix<T>& a, bool no_diag = false)
        {
            REQUIRE(a.num_rows == a.num_cols, "Matrix is not square!\n");
            return infer_backend(a)->sum_symmetric(a, no_diag);
        }

        template <typename T>
        T sum_symmetric(const Block<SGMatrix<T>>& a, bool no_diag = false)
        {
            REQUIRE(a.m_row_size == a.m_col_size, "Matrix is not square!\n");
            return sg_linalg->get_cpu_backend()->sum_symmetric(a, no_diag);
        }

        template <typename T>
        SGVector<T> colwise_sum(const SGMatrix<T>& mat, bool no_diag = false)
        {
            return infer_backend(mat)->colwise_sum(mat, no_diag);
        }

        template <typename T>
        SGVector<T>
        colwise_sum(const Block<SGMatrix<T>>& a, bool no_diag = false)
        {
            return sg_linalg->get_cpu_backend()->colwise_sum(a, no_diag);
        }

        template <typename T>
        SGVector<T> rowwise_sum(const SGMatrix<T>& mat, bool no_diag = false)
        {
            return infer_backend(mat)->rowwise_sum(mat, no_diag);
        }

        template <typename T>
        SGVector<T>
        rowwise_sum(const Block<SGMatrix<T>>& a, bool no_diag = false)
        {
            return sg_linalg->get_cpu_backend()->rowwise_sum(a, no_diag);
        }

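        /** Singular value decomposition A = U * diag(s) * V^T. The singular
         * values are written to s and the left singular vectors to the
         * columns of U; with thin_U == true only the first min(num_rows,
         * num_cols) columns of U are computed.
         *
         * A minimal usage sketch (the output buffers must be preallocated
         * with matching sizes):
         * @code
         * SGMatrix<float64_t> A(4, 3);
         * linalg::set_const(A, 1.0);
         * SGVector<float64_t> s(3);
         * SGMatrix<float64_t> U(4, 3);
         * linalg::svd(A, s, U); // thin U, divide-and-conquer algorithm
         * @endcode
         *
         * @param A input matrix
         * @param s vector receiving the singular values
         * @param U matrix receiving the left singular vectors
         * @param thin_U whether to compute the thin version of U
         * @param alg which SVD algorithm to use
         */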
        template <typename T>
        void
        svd(const SGMatrix<T>& A, SGVector<T>& s, SGMatrix<T>& U,
            bool thin_U = true,
            SVDAlgorithm alg = SVDAlgorithm::BidiagonalDivideConquer)
        {
            auto r = CMath::min(A.num_cols, A.num_rows);
            REQUIRE(
                (A.num_rows == U.num_rows),
                "Number of rows of matrix A (%d) must match matrix U (%d).\n",
                A.num_rows, U.num_rows);
            if (thin_U)
            {
                REQUIRE(
                    (U.num_cols == r), "Number of columns of matrix U (%d) "
                                       "must match A's smaller dimension "
                                       "(%d).\n",
                    U.num_cols, r);
            }
            else
            {
                REQUIRE(
                    (A.num_rows == U.num_cols), "Number of rows of matrix A "
                                                "(%d) must match number of "
                                                "columns of U (%d).\n",
                    A.num_rows, U.num_cols);
            }
            REQUIRE(
                (s.vlen == r), "Length of vector s (%d) doesn't match A's "
                               "smaller dimension (%d).\n",
                s.vlen, r);

            infer_backend(A)->svd(A, s, U, thin_U, alg);
        }

        template <typename T>
        T trace(const SGMatrix<T>& A)
        {
            REQUIRE(A.num_rows == A.num_cols, "Matrix is not square!\n");
            return infer_backend(A)->trace(A);
        }

        template <typename T>
        SGMatrix<T> transpose_matrix(const SGMatrix<T>& A)
        {
            return infer_backend(A)->transpose_matrix(A);
        }

        template <typename T, template <typename> class Container>
        Container<T> triangular_solver(
            const SGMatrix<T>& L, const Container<T>& b,
            const bool lower = true)
        {
            return infer_backend(L, SGMatrix<T>(b))
                ->triangular_solver(L, b, lower);
        }

        template <typename T, template <typename> class Container>
        void zero(Container<T>& a)
        {
            infer_backend(a)->zero(a);
        }
    }
}

#endif // LINALG_NAMESPACE_H_