Mirror of https://github.com/davisking/dlib.git (synced 2024-11-01 10:14:53 +08:00)
Fix trainer with unsupervised loss (#2436)
* Don't try to use labels in unsupervised losses
I hope that is the right way of fixing this...
* fix it by duplicating most code in send_job (works on my machine)
I will probably need to find a way to reuse the code
* try to fix it reusing the code... not sure though
* Revert "try to fix it reusing the code... not sure though"
This reverts commit f308cac6df.
* check the type of the training label to fix the issue instead
parent b9f04fdc45
commit 8a2c744207
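The fix boils down to a compile-time type check: dlib's unsupervised losses declare no_label_type as their training_label_type, so send_job can detect them with std::is_same and skip the label copy. The following is a minimal, self-contained sketch of that idea; no_label_type and the two loss structs here are illustrative stand-ins, not dlib's actual definitions.

#include <iostream>
#include <type_traits>

// Stand-in for dlib's sentinel label type used by unsupervised losses.
struct no_label_type {};

// Hypothetical loss traits, only to exercise the check below.
struct supervised_loss   { typedef unsigned long training_label_type; };
struct unsupervised_loss { typedef no_label_type training_label_type; };

template <typename loss_traits>
void send_job_sketch()
{
    // Same test the fix adds to send_job: true when the loss takes no labels.
    const bool has_unsupervised_loss =
        std::is_same<no_label_type, typename loss_traits::training_label_type>::value;

    if (!has_unsupervised_loss)
        std::cout << "supervised loss: copy the label range into the job\n";
    else
        std::cout << "unsupervised loss: skip the labels entirely\n";
}

int main()
{
    send_job_sketch<supervised_loss>();
    send_job_sketch<unsupervised_loss>();
}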
@@ -1199,6 +1199,8 @@ namespace dlib
             const auto prev_dev = dlib::cuda::get_device();
+            const bool has_unsupervised_loss = std::is_same<no_label_type, training_label_type>::value;
             double j = 0;
             for (size_t i = 0; i < devs; ++i)
@@ -1211,7 +1213,8 @@ namespace dlib
                 if (start < stop)
                 {
                     devices[i]->net.to_tensor(dbegin+start, dbegin+stop, job.t[i]);
-                    job.labels[i].assign(lbegin+start, lbegin+stop);
+                    if (!has_unsupervised_loss)
+                        job.labels[i].assign(lbegin+start, lbegin+stop);
                     job.have_data[i] = true;
                 }
                 else
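A note on the design: training_label_type is fixed at compile time, so has_unsupervised_loss is a compile-time constant and the dead branch costs nothing at runtime, which avoids the code duplication tried in the earlier commits. In C++17 the same intent could be expressed with if constexpr; the snippet below is a hypothetical comparison only, not dlib's code (the fix keeps a plain bool, which also builds with pre-C++17 compilers).

#include <iostream>
#include <type_traits>
#include <vector>

// Stand-in for dlib's sentinel label type (illustration only).
struct no_label_type {};

// Copies the label range only when the label type is a real label type.
// With if constexpr the unsupervised branch is discarded at compile time.
template <typename label_type, typename label_iterator>
void assign_labels_if_supervised(std::vector<label_type>& out,
                                 label_iterator lbegin, label_iterator lend)
{
    if constexpr (!std::is_same<no_label_type, label_type>::value)
        out.assign(lbegin, lend);   // supervised: copy the label range
    // unsupervised: leave `out` untouched, like the patched send_job
}

int main()
{
    std::vector<unsigned long> labels = {1, 2, 3};
    std::vector<unsigned long> supervised_out;
    assign_labels_if_supervised(supervised_out, labels.begin(), labels.end());

    std::vector<no_label_type> dummies(3), unsupervised_out;
    assign_labels_if_supervised(unsupervised_out, dummies.begin(), dummies.end());

    std::cout << supervised_out.size() << " " << unsupervised_out.size() << "\n"; // prints "3 0"
}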