SHOGUN 3.2.1
Kernel.cpp
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 1999-2009 Soeren Sonnenburg
 * Written (W) 1999-2008 Gunnar Raetsch
 * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include <shogun/lib/config.h>
#include <shogun/lib/common.h>
#include <shogun/io/SGIO.h>
#include <shogun/io/File.h>
#include <shogun/lib/Time.h>
#include <shogun/lib/Signal.h>

#include <shogun/base/Parallel.h>

#include <shogun/kernel/Kernel.h>
#include <shogun/kernel/normalizer/IdentityKernelNormalizer.h>
#include <shogun/features/Features.h>
#include <shogun/base/Parameter.h>

#include <shogun/classifier/svm/SVM.h>

#include <string.h>
#include <unistd.h>
#include <math.h>

#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif

using namespace shogun;

CKernel::CKernel() : CSGObject()
{
    init();
    register_params();
}

CKernel::CKernel(int32_t size) : CSGObject()
{
    init();

    if (size<10)
        size=10;

    cache_size=size;
    register_params();
}

CKernel::CKernel(CFeatures* p_lhs, CFeatures* p_rhs, int32_t size) : CSGObject()
{
    init();

    if (size<10)
        size=10;

    cache_size=size;

    set_normalizer(new CIdentityKernelNormalizer());
    init(p_lhs, p_rhs);
    register_params();
}

CKernel::~CKernel()
{
    if (get_is_initialized())
        SG_ERROR("Kernel still initialized on destruction.\n")

    remove_lhs_and_rhs();
    SG_UNREF(normalizer);

    SG_INFO("Kernel deleted (%p).\n", this)
}

bool CKernel::init(CFeatures* l, CFeatures* r)
{
    /* make sure that features are not deleted if same ones are used */
    SG_REF(l);
    SG_REF(r);

    //make sure features were indeed supplied
    REQUIRE(l, "CKernel::init(%p, %p): Left hand side features required!\n", l, r)
    REQUIRE(r, "CKernel::init(%p, %p): Right hand side features required!\n", l, r)

    //make sure features are compatible
    ASSERT(l->get_feature_class()==r->get_feature_class())
    ASSERT(l->get_feature_type()==r->get_feature_type())

    //remove references to previous features
    remove_lhs_and_rhs();

    //increase reference counts
    SG_REF(l);
    if (l==r)
        lhs_equals_rhs=true;
    else // l!=r
        SG_REF(r);

    lhs=l;
    rhs=r;

    num_lhs=l->get_num_vectors();
    num_rhs=r->get_num_vectors();

    /* unref "safety" refs from beginning */
    SG_UNREF(r);
    SG_UNREF(l);

    SG_DEBUG("leaving CKernel::init(%p, %p)\n", l, r)
    return true;
}

bool CKernel::set_normalizer(CKernelNormalizer* n)
{
    SG_REF(n);
    if (lhs && rhs)
        n->init(this);

    SG_UNREF(normalizer);
    normalizer=n;

    return (normalizer!=NULL);
}

CKernelNormalizer* CKernel::get_normalizer()
{
    SG_REF(normalizer)
    return normalizer;
}

bool CKernel::init_normalizer()
{
    return normalizer->init(this);
}

void CKernel::cleanup()
{
    remove_lhs_and_rhs();
}

void CKernel::load(CFile* loader)
{
    SG_SET_LOCALE_C;
    SG_RESET_LOCALE;
}

void CKernel::save(CFile* writer)
{
    SGMatrix<float64_t> k_matrix=get_kernel_matrix<float64_t>();
    SG_SET_LOCALE_C;
    writer->set_matrix(k_matrix.matrix, k_matrix.num_rows, k_matrix.num_cols);
    SG_RESET_LOCALE;
}

void CKernel::remove_lhs_and_rhs()
{
    SG_DEBUG("entering CKernel::remove_lhs_and_rhs\n")
    if (rhs!=lhs)
        SG_UNREF(rhs);
    rhs = NULL;
    num_rhs=0;

    SG_UNREF(lhs);
    lhs = NULL;
    num_lhs=0;
    lhs_equals_rhs=false;

    SG_DEBUG("leaving CKernel::remove_lhs_and_rhs\n")
}

void CKernel::remove_lhs()
{
    if (rhs==lhs)
        rhs=NULL;
    SG_UNREF(lhs);
    lhs = NULL;
    num_lhs=0;
    lhs_equals_rhs=false;
}

/// takes all necessary steps if the rhs is removed from kernel
void CKernel::remove_rhs()
{
    if (rhs!=lhs)
        SG_UNREF(rhs);
    rhs = NULL;
    num_rhs=0;
    lhs_equals_rhs=false;
}

#define ENUM_CASE(n) case n: SG_INFO(#n " ") break;

void CKernel::list_kernel()
{
    SG_INFO("%p - \"%s\" weight=%1.2f OPT:%s", this, get_name(),
            get_combined_kernel_weight(),
            get_optimization_type()==FASTBUTMEMHUNGRY ? "FASTBUTMEMHUNGRY" :
            "SLOWBUTMEMEFFICIENT");

    switch (get_kernel_type())
    {
        /* one ENUM_CASE(n) entry per EKernelType value */
    }

    switch (get_feature_class())
    {
        /* one ENUM_CASE(n) entry per EFeatureClass value, e.g. */
        ENUM_CASE(C_WD)
    }

    switch (get_feature_type())
    {
        /* one ENUM_CASE(n) entry per EFeatureType value */
    }
    SG_INFO("\n")
}
#undef ENUM_CASE

bool CKernel::init_optimization(
    int32_t count, int32_t *IDX, float64_t * weights)
{
    SG_ERROR("kernel does not support linadd optimization\n")
    return false;
}

bool CKernel::delete_optimization()
{
    SG_ERROR("kernel does not support linadd optimization\n")
    return false;
}

float64_t CKernel::compute_optimized(int32_t vector_idx)
{
    SG_ERROR("kernel does not support linadd optimization\n")
    return 0;
}

void CKernel::compute_batch(
    int32_t num_vec, int32_t* vec_idx, float64_t* target, int32_t num_suppvec,
    int32_t* IDX, float64_t* weights, float64_t factor)
{
    SG_ERROR("kernel does not support batch computation\n")
}

void CKernel::add_to_normal(int32_t vector_idx, float64_t weight)
{
    SG_ERROR("kernel does not support linadd optimization, add_to_normal not implemented\n")
}

void CKernel::clear_normal()
{
    SG_ERROR("kernel does not support linadd optimization, clear_normal not implemented\n")
}

int32_t CKernel::get_num_subkernels()
{
    return 1;
}

void CKernel::compute_by_subkernel(
    int32_t vector_idx, float64_t * subkernel_contrib)
{
    SG_ERROR("kernel compute_by_subkernel not implemented\n")
}

const float64_t* CKernel::get_subkernel_weights(int32_t &num_weights)
{
    num_weights=1;
    return &combined_kernel_weight;
}

SGVector<float64_t> CKernel::get_subkernel_weights()
{
    int num_weights = 1;
    const float64_t* weight = get_subkernel_weights(num_weights);
    return SGVector<float64_t>(const_cast<float64_t*>(weight), 1, false);
}

void CKernel::set_subkernel_weights(SGVector<float64_t> weights)
{
    ASSERT(weights.vector)
    if (weights.vlen!=1)
        SG_ERROR("number of subkernel weights should be one ...\n")

    combined_kernel_weight = weights.vector[0];
}

CKernel* CKernel::obtain_from_generic(CSGObject* kernel)
{
    if (kernel)
    {
        CKernel* casted=dynamic_cast<CKernel*>(kernel);
        REQUIRE(casted, "CKernel::obtain_from_generic(): Error, provided object"
                " of class \"%s\" is not a subclass of CKernel!\n",
                kernel->get_name());
        return casted;
    }
    else
        return NULL;
}

bool CKernel::init_optimization_svm(CSVM* svm)
{
    int32_t num_suppvec=svm->get_num_support_vectors();
    int32_t* sv_idx=SG_MALLOC(int32_t, num_suppvec);
    float64_t* sv_weight=SG_MALLOC(float64_t, num_suppvec);

    for (int32_t i=0; i<num_suppvec; i++)
    {
        sv_idx[i]    = svm->get_support_vector(i);
        sv_weight[i] = svm->get_alpha(i);
    }
    bool ret = init_optimization(num_suppvec, sv_idx, sv_weight);

    SG_FREE(sv_idx);
    SG_FREE(sv_weight);
    return ret;
}

void CKernel::load_serializable_post() throw (ShogunException)
{
    CSGObject::load_serializable_post();
    if (lhs_equals_rhs)
        rhs=lhs;
}

void CKernel::save_serializable_pre() throw (ShogunException)
{
    CSGObject::save_serializable_pre();

    if (lhs_equals_rhs)
        rhs=NULL;
}

void CKernel::save_serializable_post() throw (ShogunException)
{
    CSGObject::save_serializable_post();

    if (lhs_equals_rhs)
        rhs=lhs;
}

void CKernel::register_params()
{
    SG_ADD(&cache_size, "cache_size",
            "Cache size in MB.", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**) &lhs, "lhs",
            "Feature vectors to occur on left hand side.", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**) &rhs, "rhs",
            "Feature vectors to occur on right hand side.", MS_NOT_AVAILABLE);
    SG_ADD(&lhs_equals_rhs, "lhs_equals_rhs",
            "If features on lhs are the same as on rhs.", MS_NOT_AVAILABLE);
    SG_ADD(&num_lhs, "num_lhs", "Number of feature vectors on left hand side.",
            MS_NOT_AVAILABLE);
    SG_ADD(&num_rhs, "num_rhs", "Number of feature vectors on right hand side.",
            MS_NOT_AVAILABLE);
    SG_ADD(&combined_kernel_weight, "combined_kernel_weight",
            "Combined kernel weight.", MS_AVAILABLE);
    SG_ADD(&optimization_initialized, "optimization_initialized",
            "Optimization is initialized.", MS_NOT_AVAILABLE);
    SG_ADD((machine_int_t*) &opt_type, "opt_type",
            "Optimization type.", MS_NOT_AVAILABLE);
    SG_ADD(&properties, "properties", "Kernel properties.", MS_NOT_AVAILABLE);
    SG_ADD((CSGObject**) &normalizer, "normalizer", "Normalize the kernel.",
            MS_AVAILABLE);
}

void CKernel::init()
{
    cache_size=10;
    kernel_matrix=NULL;
    lhs=NULL;
    rhs=NULL;
    num_lhs=0;
    num_rhs=0;
    lhs_equals_rhs=false;
    combined_kernel_weight=1;
    optimization_initialized=false;
    properties=KP_NONE;
    opt_type=FASTBUTMEMHUNGRY;
    normalizer=NULL;

    set_normalizer(new CIdentityKernelNormalizer());
}

namespace shogun
{
/** kernel thread parameters */
template <class T> struct K_THREAD_PARAM
{
    /** kernel object */
    CKernel* kernel;
    /** start row */
    int32_t start;
    /** end row */
    int32_t end;
    /** start (in total number of elements) */
    int64_t total_start;
    /** end (in total number of elements) */
    int64_t total_end;
    /** number of rows */
    int32_t m;
    /** number of columns */
    int32_t n;
    /** result matrix memory */
    T* result;
    /** whether the matrix is symmetric */
    bool symmetric;
    /** whether to output progress */
    bool verbose;
};
}

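/* The block-sum helpers below accumulate kernel values over a sub-block of
 * the kernel matrix. The *_symmetric_block variants require lhs_equals_rhs
 * and only evaluate the upper triangle, mirroring (or doubling) the
 * contributions, with the main diagonal added separately unless no_diag is
 * set. The OpenMP pragmas parallelize the outer loop over rows; the
 * atomic/critical sections guard the shared accumulators. */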
float64_t CKernel::sum_symmetric_block(index_t block_begin, index_t block_size,
        bool no_diag)
{
    SG_DEBUG("Entering\n");

    REQUIRE(has_features(), "No features assigned to kernel\n")
    REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
    REQUIRE(block_begin>=0 && block_begin<num_rhs,
            "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=num_rhs,
            "Invalid block size (%d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    float64_t sum=0.0;

    // since the block is symmetric with the main diagonal inside, we can save
    // half the computation by using only the upper triangular part.
    // this can be done in parallel
#pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
    {
        // compute the kernel values on the upper triangular part of the kernel
        // matrix and compute the sum on the fly
        for (index_t j=i+1; j<block_size; ++j)
        {
            float64_t k=kernel(i+block_begin, j+block_begin);
#pragma omp atomic
            sum+=k;
        }
    }

    // the actual sum is twice what we computed so far
    sum*=2;

    // add the diagonal elements if required - keeping this check
    // outside of the loop to save cycles
    if (!no_diag)
    {
#pragma omp parallel for
        for (index_t i=0; i<block_size; ++i)
        {
            float64_t diag=kernel(i+block_begin, i+block_begin);
#pragma omp atomic
            sum+=diag;
        }
    }

    SG_DEBUG("Leaving\n");

    return sum;
}

float64_t CKernel::sum_block(index_t block_begin_row, index_t block_begin_col,
        index_t block_size_row, index_t block_size_col, bool no_diag)
{
    SG_DEBUG("Entering\n");

    REQUIRE(has_features(), "No features assigned to kernel\n")
    REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
            block_begin_col>=0 && block_begin_col<num_rhs,
            "Invalid block begin index (%d, %d)!\n",
            block_begin_row, block_begin_col)
    REQUIRE(block_begin_row+block_size_row<=num_lhs &&
            block_begin_col+block_size_col<=num_rhs,
            "Invalid block size (%d, %d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size_row, block_size_col,
            block_begin_row, block_begin_col)
    REQUIRE(block_size_row>=1 && block_size_col>=1,
            "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

    // check if removal of diagonal is required/valid
    if (no_diag && block_size_row!=block_size_col)
    {
        SG_WARNING("Not removing the main diagonal since block is not square!\n");
        no_diag=false;
    }

    float64_t sum=0.0;

    // this can be done in parallel for the rows/cols
#pragma omp parallel for
    for (index_t i=0; i<block_size_row; ++i)
    {
        // compute the kernel values and compute sum on the fly
        for (index_t j=0; j<block_size_col; ++j)
        {
            float64_t k=no_diag && i==j ? 0 :
                    kernel(i+block_begin_row, j+block_begin_col);
#pragma omp atomic
            sum+=k;
        }
    }

    SG_DEBUG("Leaving\n");

    return sum;
}

SGVector<float64_t> CKernel::row_wise_sum_symmetric_block(index_t block_begin,
        index_t block_size, bool no_diag)
{
    SG_DEBUG("Entering\n");

    REQUIRE(has_features(), "No features assigned to kernel\n")
    REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
    REQUIRE(block_begin>=0 && block_begin<num_rhs,
            "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=num_rhs,
            "Invalid block size (%d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    // initialize the vector that accumulates the row/col-wise sum on the go
    SGVector<float64_t> row_sum(block_size);
    row_sum.set_const(0.0);

    // since the block is symmetric with the main diagonal inside, we can save
    // half the computation by using only the upper triangular part.
    // this can be done in parallel for the rows/cols
#pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
    {
        // compute the kernel values on the upper triangular part of the kernel
        // matrix and compute the row-wise sum on the fly
        for (index_t j=i+1; j<block_size; ++j)
        {
            float64_t k=kernel(i+block_begin, j+block_begin);
#pragma omp critical
            {
                row_sum[i]+=k;
                row_sum[j]+=k;
            }
        }
    }

    // add the diagonal elements if required - keeping this check
    // outside of the loop to save cycles
    if (!no_diag)
    {
#pragma omp parallel for
        for (index_t i=0; i<block_size; ++i)
        {
            float64_t diag=kernel(i+block_begin, i+block_begin);
            row_sum[i]+=diag;
        }
    }

    SG_DEBUG("Leaving\n");

    return row_sum;
}

SGMatrix<float64_t> CKernel::row_wise_sum_squared_sum_symmetric_block(index_t
        block_begin, index_t block_size, bool no_diag)
{
    SG_DEBUG("Entering\n");

    REQUIRE(has_features(), "No features assigned to kernel\n")
    REQUIRE(lhs_equals_rhs, "The kernel matrix is not symmetric!\n")
    REQUIRE(block_begin>=0 && block_begin<num_rhs,
            "Invalid block begin index (%d, %d)!\n", block_begin, block_begin)
    REQUIRE(block_begin+block_size<=num_rhs,
            "Invalid block size (%d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size, block_begin, block_begin)
    REQUIRE(block_size>=1, "Invalid block size (%d)!\n", block_size)

    // initialize the matrix that accumulates the row/col-wise sum on the go
    // the first column stores the sum of kernel values
    // the second column stores the sum of squared kernel values
    SGMatrix<float64_t> row_sum(block_size, 2);
    row_sum.set_const(0.0);

    // since the block is symmetric with the main diagonal inside, we can save
    // half the computation by using only the upper triangular part.
    // this can be done in parallel for the rows/cols
#pragma omp parallel for
    for (index_t i=0; i<block_size; ++i)
    {
        // compute the kernel values on the upper triangular part of the kernel
        // matrix and compute the row-wise sum and squared sum on the fly
        for (index_t j=i+1; j<block_size; ++j)
        {
            float64_t k=kernel(i+block_begin, j+block_begin);
#pragma omp critical
            {
                row_sum(i, 0)+=k;
                row_sum(j, 0)+=k;
                row_sum(i, 1)+=k*k;
                row_sum(j, 1)+=k*k;
            }
        }
    }

    // add the diagonal elements if required - keeping this check
    // outside of the loop to save cycles
    if (!no_diag)
    {
#pragma omp parallel for
        for (index_t i=0; i<block_size; ++i)
        {
            float64_t diag=kernel(i+block_begin, i+block_begin);
            row_sum(i, 0)+=diag;
            row_sum(i, 1)+=diag*diag;
        }
    }

    SG_DEBUG("Leaving\n");

    return row_sum;
}

SGVector<float64_t> CKernel::row_col_wise_sum_block(index_t block_begin_row,
        index_t block_begin_col, index_t block_size_row,
        index_t block_size_col, bool no_diag)
{
    SG_DEBUG("Entering\n");

    REQUIRE(has_features(), "No features assigned to kernel\n")
    REQUIRE(block_begin_row>=0 && block_begin_row<num_lhs &&
            block_begin_col>=0 && block_begin_col<num_rhs,
            "Invalid block begin index (%d, %d)!\n",
            block_begin_row, block_begin_col)
    REQUIRE(block_begin_row+block_size_row<=num_lhs &&
            block_begin_col+block_size_col<=num_rhs,
            "Invalid block size (%d, %d) at starting index (%d, %d)! "
            "Please use smaller blocks!", block_size_row, block_size_col,
            block_begin_row, block_begin_col)
    REQUIRE(block_size_row>=1 && block_size_col>=1,
            "Invalid block size (%d, %d)!\n", block_size_row, block_size_col)

    // check if removal of diagonal is required/valid
    if (no_diag && block_size_row!=block_size_col)
    {
        SG_WARNING("Not removing the main diagonal since block is not square!\n");
        no_diag=false;
    }

    // initialize the vector that accumulates the row/col-wise sum on the go
    // the first block_size_row entries store the row-wise sum of kernel values
    // the next block_size_col entries store the col-wise sum of kernel values
    SGVector<float64_t> sum(block_size_row+block_size_col);
    sum.set_const(0.0);

    // this can be done in parallel for the rows/cols
#pragma omp parallel for
    for (index_t i=0; i<block_size_row; ++i)
    {
        // compute the kernel values and compute sum on the fly
        for (index_t j=0; j<block_size_col; ++j)
        {
            float64_t k=no_diag && i==j ? 0 :
                    kernel(i+block_begin_row, j+block_begin_col);
#pragma omp critical
            {
                sum[i]+=k;
                sum[j+block_size_row]+=k;
            }
        }
    }

    SG_DEBUG("Leaving\n");

    return sum;
}

template <class T> void* CKernel::get_kernel_matrix_helper(void* p)
{
    K_THREAD_PARAM<T>* params= (K_THREAD_PARAM<T>*) p;
    int32_t i_start=params->start;
    int32_t i_end=params->end;
    CKernel* k=params->kernel;
    T* result=params->result;
    bool symmetric=params->symmetric;
    int32_t n=params->n;
    int32_t m=params->m;
    bool verbose=params->verbose;
    int64_t total_start=params->total_start;
    int64_t total_end=params->total_end;
    int64_t total=total_start;

    for (int32_t i=i_start; i<i_end; i++)
    {
        int32_t j_start=0;

        if (symmetric)
            j_start=i;

        for (int32_t j=j_start; j<n; j++)
        {
            float64_t v=k->kernel(i,j);
            result[i+j*m]=v;

            if (symmetric && i!=j)
                result[j+i*m]=v;

            if (verbose)
            {
                total++;

                if (symmetric && i!=j)
                    total++;

                if (total%100 == 0)
                    SG_OBJ_PROGRESS(k, total, total_start, total_end)

                if (CSignal::cancel_computations())
                    break;
            }
        }
    }

    return NULL;
}

template <class T>
SGMatrix<T> CKernel::get_kernel_matrix()
{
    T* result = NULL;

    REQUIRE(has_features(), "no features assigned to kernel\n")

    int32_t m=get_num_vec_lhs();
    int32_t n=get_num_vec_rhs();

    int64_t total_num = int64_t(m)*n;

    // if lhs == rhs and sizes match assume k(i,j)=k(j,i)
    bool symmetric= (lhs && lhs==rhs && m==n);

    SG_DEBUG("returning kernel matrix of size %dx%d\n", m, n)

    result=SG_MALLOC(T, total_num);

    int32_t num_threads=parallel->get_num_threads();
    if (num_threads < 2)
    {
        K_THREAD_PARAM<T> params;
        params.kernel=this;
        params.result=result;
        params.start=0;
        params.end=m;
        params.total_start=0;
        params.total_end=total_num;
        params.n=n;
        params.m=m;
        params.symmetric=symmetric;
        params.verbose=true;
        get_kernel_matrix_helper<T>((void*) &params);
    }
    else
    {
        pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
        K_THREAD_PARAM<T>* params = SG_MALLOC(K_THREAD_PARAM<T>, num_threads);
        int64_t step= total_num/num_threads;

        int32_t t;

        num_threads--;
        for (t=0; t<num_threads; t++)
        {
            params[t].kernel = this;
            params[t].result = result;
            params[t].start = compute_row_start(t*step, n, symmetric);
            params[t].end = compute_row_start((t+1)*step, n, symmetric);
            params[t].total_start=t*step;
            params[t].total_end=(t+1)*step;
            params[t].n=n;
            params[t].m=m;
            params[t].symmetric=symmetric;
            params[t].verbose=false;

            int code=pthread_create(&threads[t], NULL,
                    CKernel::get_kernel_matrix_helper<T>, (void*)&params[t]);

            if (code != 0)
            {
                SG_WARNING("Thread creation failed (thread %d of %d) "
                        "with error:'%s'\n", t, num_threads, strerror(code));
                num_threads=t;
                break;
            }
        }

        // the main thread computes the last chunk itself
        params[t].kernel = this;
        params[t].result = result;
        params[t].start = compute_row_start(t*step, n, symmetric);
        params[t].end = m;
        params[t].total_start=t*step;
        params[t].total_end=total_num;
        params[t].n=n;
        params[t].m=m;
        params[t].symmetric=symmetric;
        params[t].verbose=true;
        get_kernel_matrix_helper<T>(&params[t]);

        for (t=0; t<num_threads; t++)
        {
            if (pthread_join(threads[t], NULL) != 0)
                SG_WARNING("pthread_join of thread %d/%d failed\n", t, num_threads)
        }

        SG_FREE(params);
        SG_FREE(threads);
    }

    SG_DONE()

    return SGMatrix<T>(result, m, n, true);
}

template SGMatrix<float64_t> CKernel::get_kernel_matrix<float64_t>();
template SGMatrix<float32_t> CKernel::get_kernel_matrix<float32_t>();

template void* CKernel::get_kernel_matrix_helper<float64_t>(void* p);
template void* CKernel::get_kernel_matrix_helper<float32_t>(void* p);
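
A minimal usage sketch of the API implemented above, for orientation only: it assumes the dense-feature and Gaussian-kernel classes from the wider Shogun 3.x toolbox (CDenseFeatures, CGaussianKernel, init_shogun_with_defaults), which are not defined in Kernel.cpp itself; the call to get_kernel_matrix() goes through the (possibly threaded) helper shown above.

// usage sketch (not part of Kernel.cpp)
#include <shogun/base/init.h>
#include <shogun/features/DenseFeatures.h>
#include <shogun/kernel/GaussianKernel.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // three 2-dimensional points, stored column-wise
    SGMatrix<float64_t> data(2, 3);
    for (index_t i=0; i<data.num_rows*data.num_cols; ++i)
        data.matrix[i]=i;

    CDenseFeatures<float64_t>* feats=new CDenseFeatures<float64_t>(data);
    SG_REF(feats);

    // cache size 10 MB, kernel width 2.0
    CGaussianKernel* kernel=new CGaussianKernel(10, 2.0);
    SG_REF(kernel);

    // CKernel::init() refs lhs/rhs and sets lhs_equals_rhs
    kernel->init(feats, feats);

    // computes the full 3x3 matrix via get_kernel_matrix_helper()
    SGMatrix<float64_t> km=kernel->get_kernel_matrix();
    km.display_matrix("K");

    SG_UNREF(kernel);
    SG_UNREF(feats);
    exit_shogun();
    return 0;
}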