Merge pull request #139 from e-fominov/dnn_vs2015_up3

DNN Visual Studio 2015 Update3 support
Davis E. King 8 years ago committed by GitHub
commit ea9cba7eeb

@@ -78,6 +78,12 @@ elseif (MSVC OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") # else if using Visu
message(STATUS "Enabling SSE2 instructions")
add_definitions(-DDLIB_HAVE_SSE2)
endif()
# By default Visual Studio does not support .obj files with more than 65k sections.
# Code generated by file_to_code_ex, and code using the DNN module, can exceed that limit.
# This flag enables > 65k sections, but produces .obj files that will not be readable by
# VS 2005.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
endif()
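The /bigobj flag added above matters because heavily templated code multiplies the number of sections MSVC writes into each .obj file; once a translation unit needs more than roughly 65k sections, the default object format overflows and the compiler stops with a fatal error. A rough, hypothetical sketch (not code from this PR) of the kind of nesting that drives the section count up:

#include <iostream>

// Each wrapper level forces extra template instantiations, and every
// instantiation contributes symbols/COMDAT sections to the .obj file.
// Real dlib network definitions nest far deeper than this toy chain.
template <typename SUBNET> struct relu_like { SUBNET subnet; };
template <typename SUBNET> struct con_like  { SUBNET subnet; };
struct input_like {};

using toy_net = relu_like<con_like<relu_like<con_like<input_like>>>>;

int main()
{
    toy_net net{};
    (void)net;
    std::cout << "deeply nested template type instantiated\n";
    return 0;
}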

@@ -3,6 +3,12 @@
#ifndef DLIB_DNn_
#define DLIB_DNn_
// The DNN module uses template-based network declarations that lead to very long
// type names, for which Visual Studio produces warning C4503.
#ifdef _MSC_VER
# pragma warning( disable: 4503 )
#endif
#include "dnn/tensor.h"
#include "dnn/input.h"
#include "dnn/layers.h"

@@ -208,6 +208,8 @@ namespace dlib
};
template <typename T> struct alwaysbool { typedef bool type; };
// One more helper structure, added as a workaround for VS 2015 Update 3.
template <typename T> struct alwaysbool2 { typedef bool type; };
resizable_tensor& rt();
@@ -254,7 +256,7 @@ namespace dlib
constexpr auto has_inplace_backward(
layer_type& layer,
SUBNET& sub
) -> typename alwaysbool<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
) -> typename alwaysbool2<decltype(layer.backward(rt(),rt(),sub,rt()))>::type
{
return false;
}
@@ -263,7 +265,7 @@ namespace dlib
constexpr auto has_inplace_backward(
layer_type& layer,
SUBNET& sub
) -> typename alwaysbool<decltype(layer.backward(rt(),sub,rt()))>::type
) -> typename alwaysbool2<decltype(layer.backward(rt(),sub,rt()))>::type
{
return false;
}
@@ -272,7 +274,7 @@ namespace dlib
constexpr auto has_inplace_backward(
layer_type& layer,
SUBNET& sub
) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),rt(),sub.get_gradient_input(),rt()))>::type
{
return true;
}
@@ -281,7 +283,7 @@ namespace dlib
constexpr auto has_inplace_backward(
layer_type& layer,
SUBNET& sub
) -> typename alwaysbool<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
) -> typename alwaysbool2<decltype(layer.backward_inplace(rt(),sub.get_gradient_input(),rt()))>::type
{
return true;
}
@@ -290,7 +292,7 @@ namespace dlib
constexpr auto is_inplace_layer(
layer_type& layer,
const SUBNET& sub
) -> typename alwaysbool<decltype(layer.forward(sub,rt()))>::type
) -> typename alwaysbool2<decltype(layer.forward(sub,rt()))>::type
{
return false;
}
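The switch from alwaysbool to alwaysbool2 in the overloads above keeps the usual expression-SFINAE detection idiom intact; the duplicated helper simply gives VS 2015 Update 3 a textually distinct return type, which appears to sidestep a compiler bug in telling these overload sets apart. A minimal, self-contained sketch of the idiom (hypothetical layer types, not dlib's real layer interface):

#include <iostream>

// Both helpers map any type T to bool; two copies exist only as the
// VS 2015 Update 3 workaround described above.
template <typename T> struct alwaysbool  { typedef bool type; };
template <typename T> struct alwaysbool2 { typedef bool type; };

struct inplace_layer      { void  forward_inplace(float& data) { data += 1; } };
struct out_of_place_layer { float forward(const float& data)   { return data + 1; } };

// Chosen when layer.forward_inplace(data) is a valid expression.
template <typename LAYER>
constexpr auto is_inplace(LAYER& layer, float& data)
    -> typename alwaysbool2<decltype(layer.forward_inplace(data))>::type
{ return true; }

// Chosen when layer.forward(data) is a valid expression.
template <typename LAYER>
constexpr auto is_inplace(LAYER& layer, float& data)
    -> typename alwaysbool<decltype(layer.forward(data))>::type
{ return false; }

int main()
{
    inplace_layer a;
    out_of_place_layer b;
    float x = 0;
    std::cout << is_inplace(a, x) << " " << is_inplace(b, x) << "\n";   // prints "1 0"
}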
@@ -1363,7 +1365,7 @@ namespace dlib
static_assert(sample_expansion_factor >= 1,
"The input layer can't produce fewer output tensors than there are inputs.");
add_tag_layer() = default;
add_tag_layer() {};
add_tag_layer(const add_tag_layer&) = default;
add_tag_layer(add_tag_layer&&) = default;
add_tag_layer& operator=(add_tag_layer&&) = default;
@@ -2552,7 +2554,7 @@ namespace dlib
static_assert(sample_expansion_factor >= 1,
"The input layer can't produce fewer output tensors than there are inputs.");
add_skip_layer() = default;
add_skip_layer() {};
add_skip_layer(const add_skip_layer&) = default;
add_skip_layer(add_skip_layer&&) = default;
add_skip_layer& operator=(add_skip_layer&&) = default;
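The two constructor edits above follow the same pattern: an explicitly defaulted default constructor is replaced by an empty user-provided one, which some VS 2015 Update 3 builds handle more reliably for these class templates while leaving behavior essentially unchanged. A hypothetical miniature of the pattern (names invented, not dlib code):

template <unsigned long ID, typename SUBNET>
class tag_like
{
public:
    tag_like() {}                        // workaround spelling used above
    // tag_like() = default;             // original spelling that tripped VS 2015 Update 3
    tag_like(const tag_like&)            = default;
    tag_like(tag_like&&)                 = default;
    tag_like& operator=(tag_like&&)      = default;
    tag_like& operator=(const tag_like&) = default;
private:
    SUBNET subnet;
};

struct dummy_subnet {};

int main()
{
    tag_like<1, dummy_subnet> t;    // default-constructs through the empty-body constructor
    tag_like<1, dummy_subnet> u(t); // the defaulted copy operations still work as before
    u = t;
    (void)u;
    return 0;
}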

@@ -2025,69 +2025,66 @@ namespace dlib
using softmax = add_layer<softmax_, SUBNET>;
// ----------------------------------------------------------------------------------------
namespace impl{
// helper classes for layer concat processing
template <template<typename> class... TAG_TYPES>
struct concat_helper_impl {
};
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>{
constexpr static size_t tag_count() {return 1;}
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id;
namespace impl
{
template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
struct concat_helper_impl{
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id << (tag_count() > 1 ? "," : "");
concat_helper_impl<TAG_TYPES...>::list_tags(out);
}
template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{
auto& t = layer<TAG_TYPE>(sub).get_output();
out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
}
template<typename SUBNET>
static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
{
auto& t = layer<TAG_TYPE>(sub).get_output();
tt::copy_tensor(out, k_offset, t, 0, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
}
template<typename SUBNET>
static void split(const tensor& input, SUBNET& sub, size_t k_offset)
{
auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
tt::copy_tensor(t, 0, input, k_offset, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
}
};
template <template<typename> class TAG_TYPE, template<typename> class... TAG_TYPES>
struct concat_helper_impl<TAG_TYPE, TAG_TYPES...>{
constexpr static size_t tag_count() {return 1 + concat_helper_impl<TAG_TYPES...>::tag_count();}
template <template<typename> class TAG_TYPE>
struct concat_helper_impl<TAG_TYPE>{
constexpr static size_t tag_count() {return 1;}
static void list_tags(std::ostream& out)
{
out << tag_id<TAG_TYPE>::id << ",";
concat_helper_impl<TAG_TYPES...>::list_tags(out);
out << tag_id<TAG_TYPE>::id;
}
template<typename SUBNET>
static void resize_out(resizable_tensor& out, const SUBNET& sub, long sum_k)
{
auto& t = layer<TAG_TYPE>(sub).get_output();
concat_helper_impl<TAG_TYPES...>::resize_out(out, sub, sum_k + t.k());
out.set_size(t.num_samples(), t.k() + sum_k, t.nr(), t.nc());
}
template<typename SUBNET>
static void concat(tensor& out, const SUBNET& sub, size_t k_offset)
{
auto& t = layer<TAG_TYPE>(sub).get_output();
tt::copy_tensor(out, k_offset, t, 0, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::concat(out, sub, k_offset);
}
template<typename SUBNET>
static void split(const tensor& input, SUBNET& sub, size_t k_offset)
{
auto& t = layer<TAG_TYPE>(sub).get_gradient_input();
tt::copy_tensor(t, 0, input, k_offset, t.k());
k_offset += t.k();
concat_helper_impl<TAG_TYPES...>::split(input, sub, k_offset);
}
};
}
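The restructuring above drops the empty primary template and makes the general case itself carry the variadic recursion: the first tag is processed, the helper recurses on the remaining tags, and a single-tag specialization terminates the recursion, an ordering that VS 2015 Update 3 accepts. A stripped-down, self-contained sketch of the same recursion pattern, with plain integer IDs standing in for dlib's tag layers (the real helper also recurses through resize_out, concat, and split in the same way):

#include <cstddef>
#include <iostream>

// General case: handle the first ID, then recurse on the rest.
template <int ID, int... IDS>
struct concat_helper
{
    constexpr static std::size_t tag_count() { return 1 + concat_helper<IDS...>::tag_count(); }
    static void list_tags(std::ostream& out)
    {
        out << ID << ",";                      // more IDs always follow in this branch
        concat_helper<IDS...>::list_tags(out);
    }
};

// Single-ID specialization terminates the recursion.
template <int ID>
struct concat_helper<ID>
{
    constexpr static std::size_t tag_count() { return 1; }
    static void list_tags(std::ostream& out) { out << ID; }
};

int main()
{
    std::cout << concat_helper<1, 2, 3>::tag_count() << " tags: ";
    concat_helper<1, 2, 3>::list_tags(std::cout);
    std::cout << "\n";                         // prints "3 tags: 1,2,3"
}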

@@ -61,7 +61,6 @@ else()
";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_delegating_constructors;" AND
";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_thread_local;" AND
";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_constexpr;" AND
";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_decltype_incomplete_return_types;" AND
";${CMAKE_CXX_COMPILE_FEATURES};" MATCHES ";cxx_auto_type;")
set(COMPILER_CAN_DO_CPP_11 1)

@@ -10,7 +10,6 @@
- Accessing and configuring layers in a network
*/
#include <dlib/dnn.h>
#include <iostream>
#include <dlib/data_io.h>
