From 8a2c7442074339ac9ffceff6ef5a49e0114222b9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adri=C3=A0=20Arrufat?= <1671644+arrufat@users.noreply.github.com>
Date: Mon, 27 Sep 2021 20:47:04 +0900
Subject: [PATCH] Fix trainer with unsupervised loss (#2436)

* Don't try to use labels in unsupervised losses

I hope that is the right way of fixing this...

* fix it by duplicating most code in send_job (works on my machine)

I will probably need to find a way to reuse the code

* try to fix it reusing the code... not sure though

* Revert "try to fix it reusing the code... not sure though"

This reverts commit f308cac6df712da3619fb05b14f3345f0ec07b9a.

* check the type of the training label to fix the issue instead
---
 dlib/dnn/trainer.h | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/dlib/dnn/trainer.h b/dlib/dnn/trainer.h
index 7c4432418..ee8e9a37b 100644
--- a/dlib/dnn/trainer.h
+++ b/dlib/dnn/trainer.h
@@ -1199,6 +1199,8 @@ namespace dlib
 
             const auto prev_dev = dlib::cuda::get_device();
 
+            const bool has_unsupervised_loss = std::is_same<no_label_type, training_label_type>::value;
+
             double j = 0;
 
             for (size_t i = 0; i < devs; ++i)
@@ -1211,7 +1213,8 @@ namespace dlib
                 if (start < stop)
                 {
                     devices[i]->net.to_tensor(dbegin+start, dbegin+stop, job.t[i]);
-                    job.labels[i].assign(lbegin+start, lbegin+stop);
+                    if (!has_unsupervised_loss)
+                        job.labels[i].assign(lbegin+start, lbegin+stop);
                     job.have_data[i] = true;
                 }
                 else
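
For context, the fix relies on the network's training_label_type resolving to no_label_type when the loss takes no labels, so a std::is_same test tells send_job at compile time whether the label range is meaningful. The standalone sketch below illustrates that pattern with hypothetical stand-in types (supervised_loss, unsupervised_loss) and a simplified send_job_sketch function; it is not dlib code, only a minimal illustration of the type check the patch uses.

// Minimal sketch of the compile-time label-type check.
// The loss types below are hypothetical stand-ins, not dlib classes.
#include <iostream>
#include <iterator>
#include <type_traits>
#include <vector>

// Tag type playing the role of dlib's no_label_type.
struct no_label_type {};

// A supervised loss exposes a real label type...
struct supervised_loss   { using training_label_type = unsigned long; };
// ...while an unsupervised loss exposes the no-label tag.
struct unsupervised_loss { using training_label_type = no_label_type; };

template <typename loss_type, typename label_iterator>
void send_job_sketch(label_iterator lbegin, label_iterator lend)
{
    using training_label_type = typename loss_type::training_label_type;

    // Same idea as the patch: only touch the label range when the loss
    // actually consumes labels.
    const bool has_unsupervised_loss =
        std::is_same<no_label_type, training_label_type>::value;

    if (!has_unsupervised_loss)
        std::cout << "copying " << std::distance(lbegin, lend) << " labels\n";
    else
        std::cout << "unsupervised loss: label range left untouched\n";
}

int main()
{
    std::vector<unsigned long> labels = {0, 1, 2};
    send_job_sketch<supervised_loss>(labels.begin(), labels.end());

    // An unsupervised caller may have no labels at all; an empty range is
    // enough to exercise the branch.
    std::vector<unsigned long> empty;
    send_job_sketch<unsupervised_loss>(empty.begin(), empty.end());
    return 0;
}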