SG_ERROR("Specified features are not of type CDotFeatures\n")
ASSERT(num_vec==num_train_labels)
t = 1 / (eta0 * lambda);

SG_INFO("lambda=%f, epochs=%d, eta0=%f\n", lambda, epochs, eta0)
SG_INFO("Training on %d vectors\n", num_vec)
</gr_INFO>
bool is_log_loss = false;

for (int32_t i=0; i<num_vec; i++)

// step only when the margin is violated (hinge-style losses);
// the log loss updates on every example
if (z < 1 || is_log_loss)

// optionally shrink the bias along with the weights
if (use_regularized_bias)
    bias *= 1 - eta * lambda * bscale;
bias += etd * y * bscale;

// weight shrink factor accumulated over `skip` examples
r = pow(1 - eta * lambda, skip);
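Taken together, these fragments outline one regularized SGD pass: compute the margin z, take a loss-driven step when needed, update the bias with its own scale bscale, and apply the L2 shrink lazily every `skip` examples using the accumulated factor r = (1 - eta*lambda)^skip. The sketch below reproduces that structure for the plain hinge loss on dense std::vector data; it is an illustration under those assumptions (no bias regularization, no log-loss branch), not the library's implementation.

#include <cmath>
#include <cstddef>
#include <vector>

// Hedged sketch of one SGD epoch with hinge loss and lazy L2 regularization.
void sgd_epoch(const std::vector<std::vector<double> >& X,
               const std::vector<double>& y,            // labels in {-1, +1}
               std::vector<double>& w, double& bias,
               double lambda, double eta0, int skip, double bscale)
{
    double t = 1.0 / (eta0 * lambda);  // step-size schedule, as sketched earlier
    int count = skip;

    for (std::size_t i = 0; i < X.size(); i++)
    {
        double eta = 1.0 / (lambda * t);

        // margin z = y * (w.x + bias)
        double wx = 0;
        for (std::size_t d = 0; d < w.size(); d++)
            wx += w[d] * X[i][d];
        double z = y[i] * (wx + bias);

        if (z < 1)                      // hinge loss: update only on violations
        {
            double etd = eta;           // step length times |dloss/dz| (= 1 here)
            for (std::size_t d = 0; d < w.size(); d++)
                w[d] += etd * y[i] * X[i][d];
            bias += etd * y[i] * bscale;
        }

        // lazy regularization: shrink w once every `skip` examples by the
        // factor accumulated over those steps, r = (1 - eta*lambda)^skip
        if (--count <= 0)
        {
            double r = std::pow(1 - eta * lambda, skip);
            for (std::size_t d = 0; d < w.size(); d++)
                w[d] *= r;
            count = skip;
        }

        t += 1.0;
    }
}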
SG_INFO("Estimating sparsity and bscale num_vec=%d num_feat=%d.\n", num_vec, c_dim)

// scan examples until the statistic m exceeds 1000
for (int32_t j=0; j<num_vec && m<=1000; j++, n++)

// larger r yields a smaller skip, i.e. more frequent weight shrinks
skip = (int32_t) ((16 * n * c_dim) / r);
SG_INFO("using %d examples. skip=%d bscale=%.6f\n", n, skip, bscale)
use_regularized_bias=false;
m_parameters->add(&use_bias, "use_bias", "Indicates if bias is used.");
m_parameters->add(&use_regularized_bias, "use_regularized_bias",
    "Indicates if bias is regularized.");
virtual int32_t get_nnz_features_for_vector(int32_t num)=0
virtual ELabelType get_label_type() const =0
Class CLossFunction is the base class of all loss functions.
The class Labels models labels, i.e. class assignments of objects.
virtual float64_t dense_dot(int32_t vec_idx1, const float64_t *vec2, int32_t vec2_len)=0
virtual int32_t get_num_labels() const =0
virtual int32_t get_num_vectors() const =0
virtual void add_to_dense_vec(float64_t alpha, int32_t vec_idx1, float64_t *vec2, int32_t vec2_len, bool abs_val=false)=0
Features that support dot products among other operations.
virtual int32_t get_dim_feature_space() const =0
static void scale_vector(T alpha, T *vec, int32_t len)
Scale vector in place.
void add(bool *param, const char *name, const char *description="")
virtual bool train_machine(CFeatures *data=NULL)
virtual ELossType get_loss_type()=0
static void clear_cancel()
virtual void set_features(CDotFeatures *feat)
static T max(T a, T b)
Return the maximum of two values.
Class LinearMachine is a generic interface for all kinds of linear machines like classifiers.
static bool cancel_computations()
static float64_t dot(const bool *v1, const bool *v2, int32_t n)
Compute dot product between v1 and v2 (BLAS optimized).
All classes and functions are contained in the shogun namespace.
The class Features is the base class of all feature objects.
void set_loss_function(CLossFunction *loss_func)
Binary Labels for binary classification.
CHingeLoss implements the hinge loss function.
virtual float64_t first_derivative(float64_t prediction, float64_t label)
bool has_property(EFeatureProperty p) const
virtual void set_labels(CLabels *lab)
ELossType
Shogun loss type.
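CHingeLoss and first_derivative above are the pieces of the loss interface consulted during training, and they explain the z < 1 test in the fragments earlier: the hinge loss is flat once the margin exceeds one, so its subgradient, and hence the update, vanishes there. The following is a standalone illustration of those formulas on a margin z = prediction*label; it does not reproduce the Shogun method signatures.

#include <algorithm>

// Hinge loss on the margin z = prediction*label, and its subgradient in z.
// Standalone illustration only; in the library these formulas sit behind the
// CLossFunction interface listed above.
double hinge_loss(double z)
{
    return std::max(0.0, 1.0 - z);     // zero once the margin exceeds 1
}

double hinge_first_derivative(double z)
{
    return (z < 1.0) ? -1.0 : 0.0;     // no update when the margin is satisfied
}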