Mirror of https://github.com/davisking/dlib.git
Added a comment about playing around with the epsilon to improve training time.

--HG--
extra : convert_revision : svn%3Afdd8eb12-d10e-0410-9acb-85c331704f74/trunk%403778
parent 51831d20ac
commit 6ce7fd7416
@@ -99,6 +99,11 @@ int main()
     // here we make an instance of the rvm_trainer object that uses our kernel type.
     rvm_trainer<kernel_type> trainer;
 
+    // One thing you can do to reduce the RVM training time is to make its
+    // stopping epsilon bigger.  However, this might make the outputs less
+    // reliable.  But sometimes it works out well.  0.001 is the default.
+    trainer.set_epsilon(0.001);
+
     // Now we loop over some different gamma values to see how good they are.  Note
     // that this is a very simple way to try out a few possible parameter choices.  You
     // should look at the model_selection_ex.cpp program for examples of more sophisticated
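For reference, the simple gamma sweep the context lines above allude to could look roughly like the sketch below. This is not part of the commit: it assumes the kernel_type typedef and the samples/labels vectors from the surrounding example program are already in scope, and the particular gamma range is illustrative only.

    // Rough sketch of looping over a few gamma values and reporting 3-fold
    // cross-validation results for each one, so a good value can be picked.
    for (double gamma = 0.000001; gamma <= 1; gamma *= 5)
    {
        rvm_trainer<kernel_type> trainer;
        trainer.set_kernel(kernel_type(gamma));

        // cross_validate_trainer() reports the fraction of +1 and -1 examples
        // classified correctly, averaged over the folds.
        cout << "gamma: " << gamma << "     cross validation: "
             << cross_validate_trainer(trainer, samples, labels, 3);
    }

For anything beyond this kind of brute-force sweep, the model_selection_ex.cpp program mentioned in the diff is the place to look.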
@@ -61,6 +61,11 @@ int main()
     cout << "using gamma of " << gamma << endl;
     trainer.set_kernel(kernel_type(gamma));
 
+    // One thing you can do to reduce the RVM training time is to make its
+    // stopping epsilon bigger.  However, this might make the outputs less
+    // reliable.  But sometimes it works out well.  0.001 is the default.
+    trainer.set_epsilon(0.001);
+
     // now train a function based on our sample points
     decision_function<kernel_type> test = trainer.train(samples, labels);
 
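Since the whole point of the added comment is the trade-off between training time and output reliability, a hedged sketch of how one might compare two epsilon settings is shown below. It is illustrative only and not part of the commit: it assumes kernel_type, samples, and labels from the surrounding example, that <chrono> has been included, and the 0.1 gamma and 0.01 epsilon values are arbitrary choices.

    // Illustration only: train once with the default stopping epsilon and once
    // with a looser value, then compare wall-clock training times.  A bigger
    // epsilon generally finishes faster but may yield a less reliable function.
    rvm_trainer<kernel_type> trainer;
    trainer.set_kernel(kernel_type(0.1));   // gamma chosen arbitrarily for this sketch

    auto t0 = std::chrono::steady_clock::now();
    trainer.set_epsilon(0.001);             // the default
    decision_function<kernel_type> df_default = trainer.train(samples, labels);
    auto t1 = std::chrono::steady_clock::now();

    trainer.set_epsilon(0.01);              // looser stopping criterion
    decision_function<kernel_type> df_loose = trainer.train(samples, labels);
    auto t2 = std::chrono::steady_clock::now();

    cout << "default epsilon (0.001): "
         << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count() << " ms\n";
    cout << "looser epsilon  (0.01):  "
         << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << " ms\n";

Whether the faster setting is acceptable depends on how much the resulting decision_function's accuracy degrades on held-out data, which is worth checking alongside the timing.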