Renamed iterator types to correctly reflect their requirements (i.e. not input iterators in the sense implied by the C++ standard, but rather at least forward iterators).
Branch: pull/169/head
Author: Davis King
Parent: 4b05b4d4bb
Commit: 22758268fc
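For context on the rename: a C++ input iterator only guarantees a single pass over a range, while a forward iterator guarantees the range can be traversed repeatedly. The sketch below is hypothetical illustration code, not taken from dlib; it shows the kind of two-pass usage (measuring a range with std::distance and then copying it) that the standard only guarantees for forward iterators and stronger categories, which is presumably why the stricter parameter name is the more honest one.

// Minimal sketch, not dlib code: a helper that is only correct for forward
// iterators because it walks the range twice.
#include <iterator>
#include <vector>

template <typename forward_iterator>
std::vector<typename std::iterator_traits<forward_iterator>::value_type>
collect(forward_iterator ibegin, forward_iterator iend)
{
    using value_type = typename std::iterator_traits<forward_iterator>::value_type;
    std::vector<value_type> out;
    out.reserve(std::distance(ibegin, iend));  // first pass: count the elements
    for (auto i = ibegin; i != iend; ++i)      // second pass: copy them
        out.push_back(*i);
    return out;
}

// With a true single-pass input iterator such as std::istream_iterator<int>,
// the std::distance call above would consume the stream and the second pass
// would see an empty range, so advertising "input_iterator" in the template
// parameter name would overstate what such code actually supports.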

@@ -75,10 +75,10 @@ namespace dlib
 typedef int input_type;
 const static unsigned int sample_expansion_factor = 1;
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ,
-input_iterator ,
+forward_iterator ,
+forward_iterator ,
 resizable_tensor&
 ) const
 {
@@ -739,20 +739,20 @@ namespace dlib
 T&& ...args
 ) : add_layer(layer_det, args...) { }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
 subnetwork->to_tensor(ibegin,iend,data);
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 to_tensor(ibegin,iend,temp_tensor);
@@ -1101,10 +1101,10 @@ namespace dlib
 INPUT_LAYER il
 ) : add_layer(tuple_head(layer_det),il) {}
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
@@ -1115,10 +1115,10 @@ namespace dlib
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 to_tensor(ibegin,iend,temp_tensor);
@@ -1385,20 +1385,20 @@ namespace dlib
 {
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
 subnetwork.to_tensor(ibegin,iend,data);
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 return subnetwork(ibegin,iend);
@@ -1637,20 +1637,20 @@ namespace dlib
 {
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
 subnetwork.to_tensor(ibegin,iend,data);
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 to_tensor(ibegin,iend,temp_tensor);
@@ -1866,20 +1866,20 @@ namespace dlib
 gradient_input_is_stale(true)
 {}
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
 input_layer.to_tensor(ibegin,iend,data);
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 input_layer.to_tensor(ibegin,iend,cached_output);
@@ -2143,10 +2143,10 @@ namespace dlib
 {
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
@@ -2164,10 +2164,10 @@ namespace dlib
 loss.to_label(x, wsub, obegin);
 }
-template <typename input_iterator, typename output_iterator>
+template <typename forward_iterator, typename output_iterator>
 void operator() (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 output_iterator obegin
 )
 {
@@ -2208,10 +2208,10 @@ namespace dlib
 return loss.compute_loss_value_and_gradient(x, lbegin, wsub);
 }
-template <typename input_iterator, typename label_iterator>
+template <typename forward_iterator, typename label_iterator>
 double compute_loss (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 label_iterator lbegin
 )
 {
@@ -2228,10 +2228,10 @@ namespace dlib
 return loss.compute_loss_value_and_gradient(x, wsub);
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 double compute_loss (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 to_tensor(ibegin,iend,temp_tensor);
@@ -2250,10 +2250,10 @@ namespace dlib
 subnetwork.back_propagate_error(x);
 return l;
 }
-template <typename input_iterator, typename label_iterator>
+template <typename forward_iterator, typename label_iterator>
 double compute_parameter_gradients (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 label_iterator lbegin
 )
 {
@@ -2270,10 +2270,10 @@ namespace dlib
 subnetwork.back_propagate_error(x);
 return l;
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 double compute_parameter_gradients (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 to_tensor(ibegin,iend,temp_tensor);
@@ -2574,20 +2574,20 @@ namespace dlib
 {
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
 subnetwork.to_tensor(ibegin,iend,data);
 }
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 )
 {
 subnetwork(ibegin,iend);

@@ -288,10 +288,10 @@ namespace dlib
 - #subnet() == subnet_type(args)
 !*/
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const;
 /*!
@@ -343,10 +343,10 @@ namespace dlib
 than the input layer.
 !*/
-template <typename input_iterator>
+template <typename forward_iterator>
 const tensor& operator() (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 );
 /*!
 requires
@@ -696,10 +696,10 @@ namespace dlib
 loss layer used by this network.
 !*/
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const;
 /*!
@@ -736,10 +736,10 @@ namespace dlib
 obegin.
 !*/
-template <typename input_iterator, typename label_iterator>
+template <typename forward_iterator, typename label_iterator>
 void operator() (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 label_iterator obegin
 );
 /*!
@@ -811,10 +811,10 @@ namespace dlib
 - This function does not update the network parameters.
 !*/
-template <typename input_iterator, typename label_iterator>
+template <typename forward_iterator, typename label_iterator>
 double compute_loss (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 label_iterator lbegin
 );
 /*!
@@ -846,10 +846,10 @@ namespace dlib
 - This function does not update the network parameters.
 !*/
-template <typename input_iterator>
+template <typename forward_iterator>
 double compute_loss (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 );
 /*!
 requires
@@ -885,10 +885,10 @@ namespace dlib
 - returns compute_loss(x,lbegin)
 !*/
-template <typename input_iterator, typename label_iterator>
+template <typename forward_iterator, typename label_iterator>
 double compute_parameter_gradients (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 label_iterator lbegin
 );
 /*!
@@ -924,10 +924,10 @@ namespace dlib
 - returns compute_loss(x)
 !*/
-template <typename input_iterator>
+template <typename forward_iterator>
 double compute_parameter_gradients (
-input_iterator ibegin,
-input_iterator iend
+forward_iterator ibegin,
+forward_iterator iend
 );
 /*!
 requires

@@ -58,10 +58,10 @@ namespace dlib
 float get_avg_green() const { return avg_green; }
 float get_avg_blue() const { return avg_blue; }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
@@ -182,10 +182,10 @@ namespace dlib
 float get_avg_green() const { return avg_green; }
 float get_avg_blue() const { return avg_blue; }
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
@@ -301,10 +301,10 @@ namespace dlib
 template <typename mm>
 input(const input<array2d<T,mm>>&) {}
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {
@@ -395,10 +395,10 @@ namespace dlib
 template <long NR, long NC, typename mm, typename L>
 input(const input<matrix<T,NR,NC,mm,L>>&) {}
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const
 {

@@ -65,10 +65,10 @@ namespace dlib
 const static unsigned int sample_expansion_factor;
 typedef whatever_type_to_tensor_expects input_type;
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const;
 /*!
@@ -123,10 +123,10 @@ namespace dlib
 const static unsigned int sample_expansion_factor = 1;
 typedef T input_type;
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const;
 /*!
@@ -210,10 +210,10 @@ namespace dlib
 - returns the value subtracted from the blue color channel.
 !*/
-template <typename input_iterator>
+template <typename forward_iterator>
 void to_tensor (
-input_iterator ibegin,
-input_iterator iend,
+forward_iterator ibegin,
+forward_iterator iend,
 resizable_tensor& data
 ) const;
 /*!
