diff --git a/dlib/cmake b/dlib/cmake
index 33c2edfc3..e13d04cca 100644
--- a/dlib/cmake
+++ b/dlib/cmake
@@ -78,6 +78,12 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visu
       message(STATUS "Enabling SSE2 instructions")
       add_definitions(-DDLIB_HAVE_SSE2)
    endif()
+
+   # By default Visual Studio does not support .obj files with more than 65k sections.
+   # Code generated by file_to_code_ex and code using the DNN module can exceed this limit.
+   # The /bigobj flag enables more than 65k sections, but produces .obj files that will
+   # not be readable by VS 2005.
+   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
 endif()
 
diff --git a/dlib/dnn.h b/dlib/dnn.h
index 4dc6cf80e..7d8678cdc 100644
--- a/dlib/dnn.h
+++ b/dlib/dnn.h
@@ -3,6 +3,12 @@
 #ifndef DLIB_DNn_
 #define DLIB_DNn_
 
+// The DNN module uses template-based network declarations that lead to very long
+// type names. Visual Studio produces warning C4503 in such cases.
+#ifdef _MSC_VER
+#   pragma warning( disable: 4503 )
+#endif
+
 #include "dnn/tensor.h"
 #include "dnn/input.h"
 #include "dnn/layers.h"
diff --git a/dlib/dnn/core.h b/dlib/dnn/core.h
index 5d1ac529a..afccebfc5 100644
--- a/dlib/dnn/core.h
+++ b/dlib/dnn/core.h
@@ -208,6 +208,8 @@ namespace dlib
     };
 
     template <typename T> struct alwaysbool { typedef bool type; };
+    // one more structure for the VS 2015 UP3 support workaround
+    template <typename T> struct alwaysbool2 { typedef bool type; };
 
     resizable_tensor& rt();
 
@@ -254,7 +256,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
     {
         return false;
     }
@@ -263,7 +265,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward(rt(),sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward(rt(),sub,rt()))>::type
     {
         return false;
     }
@@ -272,7 +274,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
     {
         return true;
     }
@@ -281,7 +283,7 @@ namespace dlib
     constexpr auto has_inplace_backward(
         layer_type& layer,
         SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
     {
         return true;
     }
@@ -290,7 +292,7 @@ namespace dlib
     constexpr auto is_inplace_layer(
         layer_type& layer,
         const SUBNET& sub
-    ) -> typename alwaysbool<decltype(layer.forward(sub,rt()))>::type
+    ) -> typename alwaysbool2<decltype(layer.forward(sub,rt()))>::type
     {
         return false;
     }
@@ -1363,7 +1365,7 @@ namespace dlib
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
 
-        add_tag_layer() = default;
+        add_tag_layer() {};
         add_tag_layer(const add_tag_layer&) = default;
         add_tag_layer(add_tag_layer&&) = default;
         add_tag_layer& operator=(add_tag_layer&&) = default;
@@ -2552,7 +2554,7 @@ namespace dlib
         static_assert(sample_expansion_factor >= 1,
             "The input layer can't produce fewer output tensors than there are inputs.");
 
-        add_skip_layer() = default;
+        add_skip_layer() {};
         add_skip_layer(const add_skip_layer&) = default;
         add_skip_layer(add_skip_layer&&) = default;
         add_skip_layer& operator=(add_skip_layer&&) = default;
diff --git a/dlib/dnn/layers.h b/dlib/dnn/layers.h
index 617457b30..0eca1d75b 100644
--- a/dlib/dnn/layers.h
+++ b/dlib/dnn/layers.h
@@ -2025,45 +2025,15 @@ namespace dlib
     using softmax = add_layer<softmax_,SUBNET>;
 
 // ----------------------------------------------------------------------------------------
-    namespace impl{
-        // helper classes for layer concat processing
-        template <template<typename> class... TAG_TYPES>
-        struct concat_helper_impl {
-        };
-        template <template<typename> class TAG_TYPE>
-        struct concat_helper_impl<TAG_TYPE>{
-            constexpr static size_t tag_count() {return 1;}
-            static void list_tags(std::ostream& out)
-            {
-                out << tag_id<TAG_TYPE>::id;
-            }
-
-            template<typename SUBNET>
-            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
-            }
-            template<typename SUBNET>
-            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_output();
-                tt::copy_tensor(out, k_offset, t, 0, t.k());
-            }
-            template<typename SUBNET>
-            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
-            {
-                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
-                tt::copy_tensor(t, 0, input, k_offset, t.k());
-            }
-        };
+    namespace impl
+    {
         template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
-        struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
+        struct concat_helper_impl{
             constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
-            static void list_tags(std::ostream& out)
-            {
-                out << tag_id<TAG_TYPE>::id << ",";
+            static void list_tags(std::ostream& out)
+            {
+                out << tag_id<TAG_TYPE>::id << (tag_count() > 1 ? "," : "");
                 concat_helper_impl<TAG_TYPES...>::list_tags(out);
             }
 
@@ -2090,6 +2060,33 @@ namespace dlib
                 concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
             }
         };
+        template <template<typename> class TAG_TYPE>
+        struct concat_helper_impl<TAG_TYPE>{
+            constexpr static size_t tag_count() {return 1;}
+            static void list_tags(std::ostream& out)
+            {
+                out << tag_id<TAG_TYPE>::id;
+            }
+
+            template<typename SUBNET>
+            static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
+            }
+            template<typename SUBNET>
+            static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_output();
+                tt::copy_tensor(out, k_offset, t, 0, t.k());
+            }
+            template<typename SUBNET>
+            static void split(const tensor& input, SUBNET& sub, size_t k_offset)
+            {
+                auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
+                tt::copy_tensor(t, 0, input, k_offset, t.k());
+            }
+        };
     }
     // concat layer
     template<
diff --git a/dlib/use_cpp_11.cmake b/dlib/use_cpp_11.cmake
index 2203634de..cd284cc6d 100644
--- a/dlib/use_cpp_11.cmake
+++ b/dlib/use_cpp_11.cmake
@@ -61,7 +61,6 @@ else()
       ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_delegating_constructors;" AND
       ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_thread_local;" AND
       ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_constexpr;" AND
-      ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_decltype_incomplete_return_types;" AND
       ";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_auto_type;")
 
       set(COMPILER_CAN_DO_CPP_11 1)
diff --git a/examples/dnn_mnist_advanced_ex.cpp b/examples/dnn_mnist_advanced_ex.cpp
index 4da911b4f..6e4f0a844 100644
--- a/examples/dnn_mnist_advanced_ex.cpp
+++ b/examples/dnn_mnist_advanced_ex.cpp
@@ -10,7 +10,6 @@
         - Accessing and configuring layers in a network
 */
 
-
 #include <dlib/dnn.h>
 #include <iostream>
 #include <dlib/data_io.h>
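
Illustrative sketch (not part of the patch): a network type that concatenates two
tagged branches instantiates both of the reordered concat_helper_impl cases, the
variadic one for <tag1,tag2> and the single-tag base case for <tag2>. The layer
aliases used here (concat2, avg_pool, max_pool, skip3, tag1..tag3, input) are
assumed to match dlib's dnn layer names from this period.

    #include <dlib/dnn.h>
    using namespace dlib;

    // tag3 marks the input; tag2 marks a max-pooled branch; skip3 rewinds to
    // the input so tag1 can mark an average-pooled branch. Both branches pool
    // 2x2 with stride 2, so their spatial dimensions match, and
    // concat2<tag1,tag2,...> joins them along the channel (k) dimension.
    // (Layer aliases assumed from dlib's dnn API of this era.)
    using two_branch_net = concat2<tag1, tag2,
                           tag1<avg_pool<2,2,2,2,
                           skip3<
                           tag2<max_pool<2,2,2,2,
                           tag3<input<matrix<unsigned char>>
                           >>>>>>>;

With more tags (concat3, concat4, ...), the variadic case peels one tag per
recursion step before reaching the base case, which is the recursion the
reordering above rearranges for the VS 2015 compiler.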