Fixed some compiler warnings and also a bug in the cached version of the batch_trainer.

--HG--
extra : convert_revision : svn%3Afdd8eb12-d10e-0410-9acb-85c331704f74/trunk%403146
Davis King 2009-07-31 00:12:27 +00:00
parent 3f1a7f5c05
commit a23e5f6224
2 changed files with 20 additions and 12 deletions

File 1 of 2:

@@ -313,7 +313,7 @@ namespace dlib
             cache.reset(new cache_type);
             cache->frequency_of_use.resize(samps.size());
-            for (unsigned long i = 0; i < samps.size(); ++i)
+            for (long i = 0; i < samps.size(); ++i)
                 cache->frequency_of_use[i] = std::make_pair(0, i);
             // Set the cache build/rebuild threshold so that we have to have
@@ -328,18 +328,18 @@ namespace dlib
             const sample_type& b
         ) const
         {
-            const long a_loc = cache->sample_location[a];
-            const long b_loc = cache->sample_location[b];
-            cache->frequency_of_use[a].first += 1;
-            cache->frequency_of_use[b].first += 1;
             // rebuild the cache every so often
             if (counter > counter_threshold )
             {
                 build_cache();
             }
+            const long a_loc = cache->sample_location[a];
+            const long b_loc = cache->sample_location[b];
+            cache->frequency_of_use[a].first += 1;
+            cache->frequency_of_use[b].first += 1;
             if (a_loc != -1)
             {
                 return cache->kernel(a_loc, b);
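
The hunk above is the batch_trainer cache bug fix mentioned in the commit message: the old code looked up each sample's cache slot (a_loc, b_loc) before the cache could be rebuilt, but build_cache() reassigns those slots, so the saved indices could go stale. A minimal, self-contained sketch of that hazard is below; the names are hypothetical and only illustrate the idea, they are not dlib's actual cache code.

    #include <vector>
    #include <cassert>

    // toy stand-in for the kernel cache: sample index -> cache slot, or -1 if uncached
    struct toy_cache
    {
        std::vector<long> sample_location;

        void build_cache()
        {
            // a rebuild may evict samples and hand their slots to other samples,
            // so any slot index looked up before this call can become stale
            sample_location.assign(sample_location.size(), -1);
            sample_location[2] = 0;  // e.g. after the rebuild, sample 2 owns slot 0
        }
    };

    int main()
    {
        toy_cache cache;
        cache.sample_location = {0, 1, -1};           // samples 0 and 1 are cached

        const long a_loc = cache.sample_location[0];  // looked up before the rebuild...
        cache.build_cache();                          // ...which reassigns every slot

        // a_loc still says "slot 0", but slot 0 now belongs to a different sample;
        // the fixed code therefore reads sample_location only after any rebuild.
        assert(a_loc == 0 && cache.sample_location[0] == -1);
        return 0;
    }
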
@@ -377,13 +377,13 @@ namespace dlib
             cache->sample_location.assign(samples->size(), -1);
             // loop over all the samples in the cache
-            for (unsigned long i = 0; i < cache_size; ++i)
+            for (long i = 0; i < cache_size; ++i)
             {
                 const long cur = cache->frequency_of_use[i].second;
                 cache->sample_location[cur] = i;
                 // now populate all possible kernel products with the current sample
-                for (unsigned long j = 0; j < samples->size(); ++j)
+                for (long j = 0; j < samples->size(); ++j)
                 {
                     cache->kernel(i, j) = real_kernel((*samples)(cur), (*samples)(j));
                 }
@@ -391,7 +391,7 @@ namespace dlib
             }
             // reset the frequency of use metrics
-            for (unsigned long i = 0; i < samples->size(); ++i)
+            for (long i = 0; i < samples->size(); ++i)
                 cache->frequency_of_use[i] = std::make_pair(0, i);
         }
@@ -547,7 +547,8 @@ namespace dlib
             ckernel_type ck(trainer.get_kernel(), x, cache_size);
             // now rebind the trainer to use the caching kernel
-            typename trainer_type::template rebind<ckernel_type>::other my_trainer;
+            typedef typename trainer_type::template rebind<ckernel_type>::other rebound_trainer_type;
+            rebound_trainer_type my_trainer;
             my_trainer.set_kernel(ck);
             replicate_settings(trainer, my_trainer);
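
An aside on the rebind change just above, presumably one of the compiler-warning fixes: instead of declaring my_trainer directly with the dependent type, the new code first names the rebound trainer type with a typedef and then declares the variable. The sketch below uses hypothetical names to show the member-template rebind idiom the diff relies on (the same pattern as std::allocator<T>::rebind); it is not dlib's actual trainer definition.

    #include <iostream>

    // a toy trainer parameterized on its kernel type
    template <typename kernel_type>
    struct toy_trainer
    {
        // rebind: the same trainer, but templated on a different kernel type
        template <typename K>
        struct rebind { typedef toy_trainer<K> other; };

        void set_kernel(const kernel_type&) { std::cout << "kernel set\n"; }
    };

    struct linear_kernel {};
    struct caching_kernel {};

    int main()
    {
        typedef toy_trainer<linear_kernel> trainer_type;

        // same shape as the diff: name the rebound type, then declare the variable
        // (inside a template, dlib needs the extra 'typename ... template' keywords
        // because trainer_type is a dependent type there)
        typedef trainer_type::rebind<caching_kernel>::other rebound_trainer_type;
        rebound_trainer_type my_trainer;
        my_trainer.set_kernel(caching_kernel());
        return 0;
    }
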

File 2 of 2:

@@ -359,16 +359,23 @@ namespace
             print_spinner();
             matrix<scalar_type> peg_cv = cross_validate_trainer_threaded(batch(pegasos_trainer,1.0), x,y, 4, 2);
             print_spinner();
+            matrix<scalar_type> peg_c_cv = cross_validate_trainer_threaded(batch_cached(pegasos_trainer,1.0), x,y, 4, 2);
+            print_spinner();
             dlog << LDEBUG << "rvm cv: " << rvm_cv;
             dlog << LDEBUG << "svm cv: " << svm_cv;
             dlog << LDEBUG << "rbf cv: " << rbf_cv;
             dlog << LDEBUG << "peg cv: " << peg_cv;
+            dlog << LDEBUG << "peg cached cv: " << peg_c_cv;
+            // make sure the cached version of pegasos computes the same result
+            DLIB_TEST(sum(abs(peg_cv - peg_c_cv)) < std::sqrt(std::numeric_limits<double>::epsilon()));
             DLIB_TEST_MSG(mean(rvm_cv) > 0.9, rvm_cv);
             DLIB_TEST_MSG(mean(svm_cv) > 0.9, svm_cv);
             DLIB_TEST_MSG(mean(rbf_cv) > 0.9, rbf_cv);
-            DLIB_TEST_MSG(mean(peg_cv) > 0.9, rbf_cv);
+            DLIB_TEST_MSG(mean(peg_cv) > 0.9, peg_cv);
+            DLIB_TEST_MSG(mean(peg_c_cv) > 0.9, peg_c_cv);
             const long num_sv = trainer.train(x,y).support_vectors.size();
             print_spinner();
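
For context on the new DLIB_TEST above: std::sqrt(std::numeric_limits<double>::epsilon()) is roughly 1.5e-8, so the cached and uncached Pegasos cross-validation matrices are required to agree essentially to rounding error. The batch_cached wrapper is expected to change only how fast the kernel evaluations run, not the cross-validation results themselves.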