Updated the example programs so that there isn't this confusing use of the
phrase "support vectors" all over the place.  Also fixed them to compile now
that I renamed the support_vectors field in decision_function to basis_vectors.

--HG--
extra : convert_revision : svn%3Afdd8eb12-d10e-0410-9acb-85c331704f74/trunk%403279
Davis King 2009-11-29 18:59:24 +00:00
parent eff11e2735
commit b812367930
5 changed files with 18 additions and 18 deletions

View File

@@ -48,9 +48,9 @@ int main()
// you need to set. The first argument to the constructor is the kernel we wish to
// use. The second is a parameter that determines the numerical accuracy with which
// the object will perform the centroid estimation. Generally, smaller values
- // give better results but cause the algorithm to attempt to use more support vectors
+ // give better results but cause the algorithm to attempt to use more dictionary vectors
// (and thus run slower and use more memory). The third argument, however, is the
- // maximum number of support vectors a kcentroid is allowed to use. So you can use
+ // maximum number of dictionary vectors a kcentroid is allowed to use. So you can use
// it to control the runtime complexity.
kcentroid<kernel_type> test(kernel_type(0.1),0.01, 15);
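For readers following along, here is a minimal, self-contained sketch of the kcentroid usage the comment above describes. The class and member names (kcentroid, train, dictionary_size) are dlib's; the sample/kernel typedefs and the numeric values are illustrative assumptions, not taken from this file.

#include <iostream>
#include <dlib/svm.h>
using namespace dlib;

int main()
{
    typedef matrix<double,2,1> sample_type;                 // assumed 2D samples
    typedef radial_basis_kernel<sample_type> kernel_type;

    // kernel, accuracy tolerance, and the cap on dictionary vectors described above
    kcentroid<kernel_type> test(kernel_type(0.1), 0.01, 15);

    sample_type s;
    s = 1.0, 2.0;        // dlib's comma initializer syntax
    test.train(s);       // fold the sample into the centroid estimate

    // distance from a point to the estimated centroid in kernel feature space
    std::cout << test(s) << std::endl;
    std::cout << "dictionary vectors used: " << test.dictionary_size() << std::endl;
    return 0;
}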

View File

@@ -46,9 +46,9 @@ int main()
// you need to set. The first argument to the constructor is the kernel we wish to
// use. The second is a parameter that determines the numerical accuracy with which
// the object will perform part of the learning algorithm. Generally, smaller values
- // give better results but cause the algorithm to attempt to use more support vectors
+ // give better results but cause the algorithm to attempt to use more dictionary vectors
// (and thus run slower and use more memory). The third argument, however, is the
- // maximum number of support vectors a kcentroid is allowed to use. So you can use
+ // maximum number of dictionary vectors a kcentroid is allowed to use. So you can use
// it to control the runtime complexity.
kcentroid<kernel_type> kc(kernel_type(0.1),0.01, 8);
@@ -133,13 +133,13 @@ int main()
cout << test(samples[i+2*num]) << "\n";
}
- // Now print out how many support vectors each center used. Note that
+ // Now print out how many dictionary vectors each center used. Note that
// the maximum number of 8 was reached. If you went back to the kcentroid
// constructor and changed the 8 to some bigger number you would see that these
// numbers would go up. However, 8 is all we need to correctly cluster this dataset.
cout << "num sv for center 0: " << test.get_kcentroid(0).dictionary_size() << endl;
cout << "num sv for center 1: " << test.get_kcentroid(1).dictionary_size() << endl;
cout << "num sv for center 2: " << test.get_kcentroid(2).dictionary_size() << endl;
cout << "num dictionary vectors for center 0: " << test.get_kcentroid(0).dictionary_size() << endl;
cout << "num dictionary vectors for center 1: " << test.get_kcentroid(1).dictionary_size() << endl;
cout << "num dictionary vectors for center 2: " << test.get_kcentroid(2).dictionary_size() << endl;
}
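As a reference, here is a condensed sketch of the kkmeans setup this file walks through. kkmeans, pick_initial_centers, get_kcentroid, and dictionary_size are dlib's real API; the toy samples and the kernel gamma are assumptions for illustration.

#include <iostream>
#include <vector>
#include <dlib/clustering.h>
using namespace dlib;

int main()
{
    typedef matrix<double,2,1> sample_type;                 // assumed 2D samples
    typedef radial_basis_kernel<sample_type> kernel_type;

    // the 8 caps the dictionary vectors each cluster center may use
    kcentroid<kernel_type> kc(kernel_type(0.1), 0.01, 8);
    kkmeans<kernel_type> test(kc);

    std::vector<sample_type> samples;                       // toy data for illustration
    sample_type s;
    s = 1, 1;   samples.push_back(s);
    s = 9, 9;   samples.push_back(s);
    s = -8, 4;  samples.push_back(s);

    std::vector<sample_type> initial_centers;
    test.set_number_of_centers(3);
    pick_initial_centers(3, initial_centers, samples, test.get_kernel());
    test.train(samples, initial_centers);

    // cluster index for a sample, and the dictionary size of one center
    std::cout << "sample 0 is in cluster " << test(samples[0]) << std::endl;
    std::cout << "num dictionary vectors for center 0: "
              << test.get_kcentroid(0).dictionary_size() << std::endl;
    return 0;
}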

View File

@@ -108,9 +108,9 @@ int main()
// you need to set. The first argument to the constructor is the kernel we wish to
// use. The second is a parameter that determines the numerical accuracy with which
// the object will perform part of the ranking algorithm. Generally, smaller values
- // give better results but cause the algorithm to attempt to use more support vectors
+ // give better results but cause the algorithm to attempt to use more dictionary vectors
// (and thus run slower and use more memory). The third argument, however, is the
- // maximum number of support vectors a kcentroid is allowed to use. So you can use
+ // maximum number of dictionary vectors a kcentroid is allowed to use. So you can use
// it to put an upper limit on the runtime complexity.
kcentroid<kernel_type> kc(kernel_type(gamma), 0.001, 25);
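For context, a minimal sketch of how a kcentroid configured this way feeds dlib's rank_features(). The 4-feature samples, labels, and gamma are illustrative assumptions; rank_features itself is dlib's real function, and its output pairs each feature index with a ranking value.

#include <iostream>
#include <vector>
#include <dlib/svm.h>
using namespace dlib;

int main()
{
    typedef matrix<double,4,1> sample_type;                 // assumed 4 features per sample
    typedef radial_basis_kernel<sample_type> kernel_type;

    std::vector<sample_type> samples;
    std::vector<double> labels;                             // +1/-1 class labels
    sample_type s;
    s =  1,  2,  3,  4;  samples.push_back(s); labels.push_back(+1);
    s =  2,  3,  4,  5;  samples.push_back(s); labels.push_back(+1);
    s = -1, -2, -3, -4;  samples.push_back(s); labels.push_back(-1);
    s = -2, -3, -4, -5;  samples.push_back(s); labels.push_back(-1);

    // the 25 caps the dictionary vectors used while measuring class separation
    kcentroid<kernel_type> kc(kernel_type(0.1), 0.001, 25);

    // each row of the output: a feature index and its ranking value
    std::cout << rank_features(kc, samples, labels) << std::endl;
    return 0;
}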

View File

@@ -136,9 +136,9 @@ int main()
learned_function.normalizer = normalizer; // save normalization information
learned_function.function = trainer.train(samples, labels); // perform the actual RVM training and save the results
- // print out the number of support vectors in the resulting decision function
- cout << "\nnumber of support vectors in our learned_function is "
- << learned_function.function.support_vectors.nr() << endl;
+ // print out the number of relevance vectors in the resulting decision function
+ cout << "\nnumber of relevance vectors in our learned_function is "
+ << learned_function.function.basis_vectors.nr() << endl;
// now lets try this decision_function on some samples we haven't seen before
sample_type sample;
@@ -171,10 +171,10 @@ int main()
learned_pfunct.function = train_probabilistic_decision_function(trainer, samples, labels, 3);
// Now we have a function that returns the probability that a given sample is of the +1 class.
- // print out the number of support vectors in the resulting decision function.
+ // print out the number of relevance vectors in the resulting decision function.
// (it should be the same as in the one above)
cout << "\nnumber of support vectors in our learned_pfunct is "
<< learned_pfunct.function.decision_funct.support_vectors.nr() << endl;
cout << "\nnumber of relevance vectors in our learned_pfunct is "
<< learned_pfunct.function.decision_funct.basis_vectors.nr() << endl;
sample(0) = 3.123;
sample(1) = 2;
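To see the renamed basis_vectors field in action with the RVM, here is a bare-bones sketch. rvm_trainer and decision_function are dlib's real types; the toy data and gamma are assumptions, and the sketch skips the normalizer bookkeeping the full example performs.

#include <iostream>
#include <vector>
#include <dlib/svm.h>
using namespace dlib;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    std::vector<sample_type> samples;
    std::vector<double> labels;
    sample_type s;
    s =  1,  1;  samples.push_back(s); labels.push_back(+1);
    s =  2,  2;  samples.push_back(s); labels.push_back(+1);
    s = -1, -1;  samples.push_back(s); labels.push_back(-1);
    s = -2, -2;  samples.push_back(s); labels.push_back(-1);

    rvm_trainer<kernel_type> trainer;
    trainer.set_kernel(kernel_type(0.5));                   // assumed gamma

    decision_function<kernel_type> df = trainer.train(samples, labels);

    // after this commit the stored vectors live in basis_vectors, not
    // support_vectors; for an RVM these are the relevance vectors
    std::cout << "number of relevance vectors: " << df.basis_vectors.nr() << std::endl;
    return 0;
}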

View File

@@ -144,7 +144,7 @@ int main()
// print out the number of support vectors in the resulting decision function
cout << "\nnumber of support vectors in our learned_function is "
- << learned_function.function.support_vectors.nr() << endl;
+ << learned_function.function.basis_vectors.nr() << endl;
// now lets try this decision_function on some samples we haven't seen before
sample_type sample;
@@ -180,7 +180,7 @@ int main()
// print out the number of support vectors in the resulting decision function.
// (it should be the same as in the one above)
cout << "\nnumber of support vectors in our learned_pfunct is "
- << learned_pfunct.function.decision_funct.support_vectors.nr() << endl;
+ << learned_pfunct.function.decision_funct.basis_vectors.nr() << endl;
sample(0) = 3.123;
sample(1) = 2;
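Finally, the SVM case: here the vectors genuinely are support vectors, but after the rename they are read from the basis_vectors field. A minimal sketch follows; svm_nu_trainer is dlib's real trainer, while the toy data, gamma, and nu values are assumptions.

#include <iostream>
#include <vector>
#include <dlib/svm.h>
using namespace dlib;

int main()
{
    typedef matrix<double,2,1> sample_type;
    typedef radial_basis_kernel<sample_type> kernel_type;

    std::vector<sample_type> samples;
    std::vector<double> labels;
    sample_type s;
    s =  2,  2;  samples.push_back(s); labels.push_back(+1);
    s =  3,  3;  samples.push_back(s); labels.push_back(+1);
    s = -2, -2;  samples.push_back(s); labels.push_back(-1);
    s = -3, -3;  samples.push_back(s); labels.push_back(-1);

    svm_nu_trainer<kernel_type> trainer;
    trainer.set_kernel(kernel_type(0.5));                   // assumed gamma
    trainer.set_nu(0.1);                                    // assumed nu

    decision_function<kernel_type> df = trainer.train(samples, labels);

    // the field rename applies here too: support vectors are read from basis_vectors
    std::cout << "number of support vectors: " << df.basis_vectors.nr() << std::endl;
    return 0;
}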