#ifndef DOXYGEN_SHOULD_SKIP_THIS
struct S_THREAD_PARAM_KERNEL_MACHINE
{
    /* per-thread apply() parameters; see the sketch below */
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
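// A minimal sketch of the members S_THREAD_PARAM_KERNEL_MACHINE is assumed to
// carry, inferred from the assignments made further down in this file
// (kernel_machine, result, start, end, verbose, indices, indices_len).  The
// member types are assumptions, not taken from the original definition.
struct S_THREAD_PARAM_KERNEL_MACHINE_SKETCH
{
    CKernelMachine* kernel_machine; // machine whose apply_one() each thread calls
    float64_t* result;              // shared output buffer, one slot per example
    int32_t start;                  // first example handled by this thread
    int32_t end;                    // one past the last example handled
    bool verbose;                   // only one chunk reports progress
    index_t* indices;               // optional index subset (locked apply)
    index_t indices_len;            // length of that subset
};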
int32_t num_sv = svs.vlen;
// ...
int32_t num_sv = svs.vlen;
// init_kernel_optimization(): collect the support vector indices and weights
int32_t* sv_idx = SG_MALLOC(int32_t, num_sv);
// ...
for (int32_t i=0; i<num_sv; i++)
// ...
SG_ERROR("initialization of kernel optimization failed\n")
// ...
SG_ERROR("initialization of kernel optimization failed\n")
SG_DEBUG("entering %s::apply_get_outputs(%s at %p)\n",
// ...
REQUIRE(kernel, "%s::apply_get_outputs(): No kernel assigned!\n")
// ...
SG_ERROR("%s: No vectors on left hand side (%s). This is probably due to"
    " an implementation error in %s, where it was forgotten to set "
    "the data (m_svs) indices\n", get_name(),
// ...
REQUIRE(lhs, "%s::apply_get_outputs(): No left hand side specified\n",
SG_DEBUG("computing output on %d test examples\n", num_vectors)
// ...
SG_DEBUG("Batch evaluation enabled\n")
// ...
int32_t* idx = SG_MALLOC(int32_t, num_vectors);
// ...
for (int32_t i=0; i<num_vectors; i++)
// ...
    output.vector, get_num_support_vectors(), sv_idx, sv_weight); // tail of the kernel->compute_batch(...) call
// ...
for (int32_t i=0; i<num_vectors; i++) // adds the bias to each output; see the sketch below
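// A sketch of the batch path these fragments come from, assuming the usual
// linadd pattern: list the test indices, let the kernel score all of them
// against the stored support vectors in one compute_batch() call, then add the
// bias.  The function name, the explicit machine/kernel/bias arguments and the
// freeing of idx are illustrative, not verbatim from the original.
static void batch_apply_sketch(CKernelMachine* machine, CKernel* kernel,
    int32_t num_vectors, SGVector<float64_t>& output,
    int32_t* sv_idx, float64_t* sv_weight, float64_t bias)
{
    int32_t* idx = SG_MALLOC(int32_t, num_vectors);
    for (int32_t i=0; i<num_vectors; i++)
        idx[i] = i;                      // score every example on the kernel's rhs

    kernel->compute_batch(num_vectors, idx, output.vector,
        machine->get_num_support_vectors(), sv_idx, sv_weight);
    SG_FREE(idx);

    for (int32_t i=0; i<num_vectors; i++)
        output.vector[i] += bias;        // compute_batch() yields only the weighted kernel sums
}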
// single-threaded case: one parameter block covering all test examples
S_THREAD_PARAM_KERNEL_MACHINE params;
params.kernel_machine = this;
params.result = output.vector;
// ...
params.end = num_vectors;
// ...
params.indices = NULL;
params.indices_len = 0;
// multi-threaded case: the first num_threads-1 chunks run in worker threads,
// the final chunk runs in the calling thread and is the one that reports progress
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
S_THREAD_PARAM_KERNEL_MACHINE* params = SG_MALLOC(S_THREAD_PARAM_KERNEL_MACHINE, num_threads);
int32_t step = num_vectors/num_threads;
// ...
for (t=0; t<num_threads-1; t++)
{
    params[t].kernel_machine = this;
    params[t].result = output.vector;
    params[t].start = t*step;
    params[t].end = (t+1)*step;
    params[t].verbose = false;
    params[t].indices = NULL;
    params[t].indices_len = 0;
    pthread_create(&threads[t], NULL,
        CKernelMachine::apply_helper, (void*)&params[t]); // thread routine and argument reconstructed
}

// final chunk, handled by the calling thread
params[t].kernel_machine = this;
params[t].result = output.vector;
params[t].start = t*step;
params[t].end = num_vectors;
params[t].verbose = true;
params[t].indices = NULL;
params[t].indices_len = 0;
// ...
for (t=0; t<num_threads-1; t++)
    pthread_join(threads[t], NULL);
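// How the chunking above splits the work, as a small standalone example
// (plain C++, no Shogun types): each of the first num_threads-1 workers gets
// `step` examples and the calling thread absorbs the remainder.
#include <cstdio>

int main()
{
    int num_vectors = 10;
    int num_threads = 4;
    int step = num_vectors / num_threads;  // 2 examples per worker chunk

    for (int t = 0; t < num_threads; ++t)
    {
        int start = t * step;
        int end = (t == num_threads - 1) ? num_vectors : (t + 1) * step;
        std::printf("chunk %d: [%d, %d)\n", t, start, end);  // last chunk: [6, 10)
    }
    return 0;
}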
// ...
SG_INFO("prematurely stopped. \n")
// ...
SG_DEBUG("leaving %s::apply_get_outputs(%s at %p)\n",
// apply_helper(): body executed by each worker thread
S_THREAD_PARAM_KERNEL_MACHINE* params = (S_THREAD_PARAM_KERNEL_MACHINE*) p;
// ...
for (int32_t vec=params->start; vec<params->end; vec++)
// ...
for (int32_t vec=params->start; vec<params->end &&
// ...
int32_t num_vectors = params->end - params->start;
int32_t v = vec - params->start;
if ((v % (num_vectors/100+1)) == 0)
// ...
index_t idx = params->indices ? params->indices[vec] : vec;
result[vec] = kernel_machine->apply_one(idx);
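// A minimal sketch of how these fragments fit together in apply_helper(),
// assuming the usual pattern: cast the opaque pointer, then score the assigned
// range with apply_one(), honouring an optional index subset.  The progress
// reporting and cancellation checks of the verbose branch are omitted here.
static void* apply_helper_sketch(void* p)
{
    S_THREAD_PARAM_KERNEL_MACHINE* params = (S_THREAD_PARAM_KERNEL_MACHINE*) p;
    CKernelMachine* kernel_machine = params->kernel_machine;
    float64_t* result = params->result;

    for (int32_t vec=params->start; vec<params->end; vec++)
    {
        // with a locked index subset, vec selects into params->indices;
        // otherwise the example index is used directly
        index_t idx = params->indices ? params->indices[vec] : vec;
        result[vec] = kernel_machine->apply_one(idx);
    }
    return NULL;
}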
SG_ERROR("kernel is needed to store SV features.\n")
// ...
SG_ERROR("kernel lhs is needed to store SV features.\n")
// ...
SG_ERROR("CKernelMachine::train_locked() call data_lock() before!\n")
SG_ERROR("CKernelMachine::apply_locked() call data_lock() before!\n")
// ...
int32_t num_inds = indices.vlen;
// locked apply: same pattern as above, but restricted to the caller-supplied indices
S_THREAD_PARAM_KERNEL_MACHINE params;
params.kernel_machine = this;
params.result = output.vector;
// ...
params.indices = indices.vector;
params.indices_len = indices.vlen;
pthread_t* threads = SG_MALLOC(pthread_t, num_threads-1);
S_THREAD_PARAM_KERNEL_MACHINE* params = SG_MALLOC(S_THREAD_PARAM_KERNEL_MACHINE, num_threads);
int32_t step = num_inds/num_threads;
// ...
for (t=0; t<num_threads-1; t++)
{
    params[t].kernel_machine = this;
    params[t].result = output.vector;
    // ...
    params[t].start = t*step;
    params[t].end = (t+1)*step;
    params[t].indices = indices.vector;
    params[t].indices_len = indices.vlen;
    // ...
    params[t].verbose = false;
    // ...
}

// final chunk, handled by the calling thread
params[t].kernel_machine = this;
params[t].result = output.vector;
// ...
params[t].start = t*step;
params[t].end = num_inds;
params[t].indices = indices.vector;
params[t].indices_len = indices.vlen;
// ...
params[t].verbose = true;
// ...
for (t=0; t<num_threads-1; t++)
    pthread_join(threads[t], NULL);
SG_INFO("prematurely stopped.\n")
SG_ERROR("The kernel is not initialized\n")
// ...
SG_ERROR("Locking is not supported (yet) with combined kernel. Please disable it in cross validation")
void CKernelMachine::init()
// ...
    new SGParamInfo("custom_kernel", CT_SCALAR, ST_NONE, PT_SGOBJECT, 1),
// ...
    new SGParamInfo("kernel_backup", CT_SCALAR, ST_NONE, PT_SGOBJECT, 1),
// ...