From 516b744b43c6897af442c1f17ea10347c7b9f90b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adri=C3=A0=20Arrufat?= <1671644+arrufat@users.noreply.github.com>
Date: Thu, 10 Mar 2022 22:09:53 +0900
Subject: [PATCH] Add missing visitor implementations to visitors.h (#2539)

Notably, set_all_bn_running_stats_window_sizes and fuse_layers. I also
took the chance to remove the superfluous separators and to change the
attribute of upsample layers from stride to scale.
---
 dlib/dnn/layers.h   | 130 ---------------------
 dlib/dnn/visitors.h | 269 +++++++++++++++++++++++++++-----------------
 2 files changed, 163 insertions(+), 236 deletions(-)

diff --git a/dlib/dnn/layers.h b/dlib/dnn/layers.h
index 686e883bf..df9f3d3a6 100644
--- a/dlib/dnn/layers.h
+++ b/dlib/dnn/layers.h
@@ -1708,136 +1708,6 @@ namespace dlib
     template <typename SUBNET>
     using bn_fc = add_layer<bn_<FC_MODE>, SUBNET>;
 
-// ----------------------------------------------------------------------------------------
-
-    namespace impl
-    {
-        class visitor_bn_running_stats_window_size
-        {
-        public:
-
-            visitor_bn_running_stats_window_size(unsigned long new_window_size_) : new_window_size(new_window_size_) {}
-
-            template <typename T>
-            void set_window_size(T&) const
-            {
-                // ignore other layer detail types
-            }
-
-            template < layer_mode mode >
-            void set_window_size(bn_<mode>& l) const
-            {
-                l.set_running_stats_window_size(new_window_size);
-            }
-
-            template<typename input_layer_type>
-            void operator()(size_t , input_layer_type& ) const
-            {
-                // ignore other layers
-            }
-
-            template <typename T, typename U, typename E>
-            void operator()(size_t , add_layer<T, U, E>& l) const
-            {
-                set_window_size(l.layer_details());
-            }
-
-        private:
-
-            unsigned long new_window_size;
-        };
-
-        class visitor_disable_input_bias
-        {
-        public:
-
-            template <typename T>
-            void disable_input_bias(T&) const
-            {
-                // ignore other layer types
-            }
-
-            // handle the standard case
-            template <typename U, typename E>
-            void disable_input_bias(add_layer<layer_norm_, U, E>& l)
-            {
-                disable_bias(l.subnet().layer_details());
-                set_bias_learning_rate_multiplier(l.subnet().layer_details(), 0);
-                set_bias_weight_decay_multiplier(l.subnet().layer_details(), 0);
-            }
-
-            template <layer_mode mode, typename U, typename E>
-            void disable_input_bias(add_layer<bn_<mode>, U, E>& l)
-            {
-                disable_bias(l.subnet().layer_details());
-                set_bias_learning_rate_multiplier(l.subnet().layer_details(), 0);
-                set_bias_weight_decay_multiplier(l.subnet().layer_details(), 0);
-            }
-
-            // handle input repeat layer case
-            template <layer_mode mode, size_t N, template <typename> class R, typename U, typename E>
-            void disable_input_bias(add_layer<bn_<mode>, repeat<N, R, U>, E>& l)
-            {
-                disable_bias(l.subnet().get_repeated_layer(0).layer_details());
-                set_bias_learning_rate_multiplier(l.subnet().get_repeated_layer(0).layer_details(), 0);
-                set_bias_weight_decay_multiplier(l.subnet().get_repeated_layer(0).layer_details(), 0);
-            }
-
-            template <size_t N, template <typename> class R, typename U, typename E>
-            void disable_input_bias(add_layer<layer_norm_, repeat<N, R, U>, E>& l)
-            {
-                disable_bias(l.subnet().get_repeated_layer(0).layer_details());
-                set_bias_learning_rate_multiplier(l.subnet().get_repeated_layer(0).layer_details(), 0);
-                set_bias_weight_decay_multiplier(l.subnet().get_repeated_layer(0).layer_details(), 0);
-            }
-
-            // handle input repeat layer with tag case
-            template <layer_mode mode, unsigned long ID, typename E>
-            void disable_input_bias(add_layer<bn_<mode>, add_tag_layer<ID, impl::repeat_input_layer>, E>& )
-            {
-            }
-
-            template <unsigned long ID, typename E>
-            void disable_input_bias(add_layer<layer_norm_, add_tag_layer<ID, impl::repeat_input_layer>, E>& )
-            {
-            }
-
-            // handle tag layer case
-            template <layer_mode mode, unsigned long ID, typename U, typename E>
-            void disable_input_bias(add_layer<bn_<mode>, add_tag_layer<ID, U>, E>& )
-            {
-            }
-
-            template <unsigned long ID, typename U, typename E>
-            void disable_input_bias(add_layer<layer_norm_, add_tag_layer<ID, U>, E>& )
-            {
-            }
-
-            // handle skip layer case
-            template <layer_mode mode, template <typename> class TAG, typename U, typename E>
-            void disable_input_bias(add_layer<bn_<mode>, add_skip_layer<TAG, U>, E>& )
-            {
-            }
-
-            template
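
For reference, the visitors in this patch are normally reached through dlib's
public wrapper functions rather than by instantiating the impl classes
directly. Below is a minimal sketch of how they are driven: the toy network
architecture is invented for illustration, set_all_bn_running_stats_window_sizes
and fuse_layers are the wrappers named in the commit message, and
disable_duplicative_biases is the wrapper built on visitor_disable_input_bias.

    #include <dlib/dnn.h>

    using namespace dlib;

    // Toy architecture, invented for this example: conv -> batch-norm -> relu -> fc.
    using net_type = loss_multiclass_log<fc<10, relu<bn_con<con<16, 3, 3, 1, 1,
                     input<matrix<rgb_pixel>>>>>>>;

    int main()
    {
        net_type net;

        // Visits every bn_ layer and calls set_running_stats_window_size() on it,
        // which is what impl::visitor_bn_running_stats_window_size implements.
        set_all_bn_running_stats_window_sizes(net, 1000);

        // Disables the bias of every layer that feeds into a bn_ or layer_norm_
        // layer, since the normalization's own bias makes it redundant
        // (impl::visitor_disable_input_bias).
        disable_duplicative_biases(net);

        // After training, once the bn_ layers have been converted to affine
        // ones, fuse_layers(net) folds those affine transforms into the
        // preceding convolutions to speed up inference.
    }

These wrappers traverse the network with dlib's visit_layers machinery, which
is why the impl visitors expose an operator()(size_t, layer&) overload set.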