#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H
#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H

namespace Eigen {

/** \class TensorConcatenationOp
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor concatenation class.
  *
  * Concatenates two tensor expressions along the given axis.
  */
namespace internal {
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct traits<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >
{
  // Type promotion to handle the case where the types of the lhs and the rhs are different.
  typedef typename promote_storage_type<typename LhsXprType::Scalar,
                                        typename RhsXprType::Scalar>::ret Scalar;
  typedef typename promote_storage_type<typename traits<LhsXprType>::StorageKind,
                                        typename traits<RhsXprType>::StorageKind>::ret StorageKind;
  typedef typename promote_index_type<typename traits<LhsXprType>::Index,
                                      typename traits<RhsXprType>::Index>::type Index;
  typedef typename LhsXprType::Nested LhsNested;
  typedef typename RhsXprType::Nested RhsNested;
  typedef typename remove_reference<LhsNested>::type _LhsNested;
  typedef typename remove_reference<RhsNested>::type _RhsNested;
  static const int NumDimensions = traits<LhsXprType>::NumDimensions;
  static const int Layout = traits<LhsXprType>::Layout;
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, Eigen::Dense>
{
  typedef const TensorConcatenationOp<Axis, LhsXprType, RhsXprType>& type;
};
template<typename Axis, typename LhsXprType, typename RhsXprType>
struct nested<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, 1, typename eval<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >::type>
{
  typedef TensorConcatenationOp<Axis, LhsXprType, RhsXprType> type;
};

}  // end namespace internal
template<typename Axis, typename LhsXprType, typename RhsXprType>
class TensorConcatenationOp : public TensorBase<TensorConcatenationOp<Axis, LhsXprType, RhsXprType>, WriteAccessors>
{
  public:
    typedef typename internal::traits<TensorConcatenationOp>::Scalar Scalar;
    typedef typename internal::traits<TensorConcatenationOp>::StorageKind StorageKind;
    typedef typename internal::traits<TensorConcatenationOp>::Index Index;
    typedef typename internal::nested<TensorConcatenationOp>::type Nested;
    typedef typename internal::promote_storage_type<typename LhsXprType::CoeffReturnType,
                                                    typename RhsXprType::CoeffReturnType>::ret CoeffReturnType;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis)
        : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {}
    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename LhsXprType::Nested>::type&
    lhsExpression() const { return m_lhs_xpr; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename RhsXprType::Nested>::type&
    rhsExpression() const { return m_rhs_xpr; }

    EIGEN_DEVICE_FUNC const Axis& axis() const { return m_axis; }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const TensorConcatenationOp& other)
    {
      typedef TensorAssignOp<TensorConcatenationOp, const TensorConcatenationOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorConcatenationOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorConcatenationOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
  protected:
    typename LhsXprType::Nested m_lhs_xpr;
    typename RhsXprType::Nested m_rhs_xpr;
    const Axis m_axis;
};
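// Usage sketch (illustrative, not part of the original header): a TensorConcatenationOp
// is normally obtained through TensorBase::concatenate() rather than constructed directly.
// Assuming two rank-2 float tensors:
//
//   Eigen::Tensor<float, 2> a(2, 3), b(5, 3);
//   a.setRandom();
//   b.setRandom();
//   Eigen::Tensor<float, 2> c = a.concatenate(b, 0);  // 7 x 3: a stacked on top of b along axis 0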
// Eval as rvalue
template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
{
  typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<typename TensorEvaluator<LeftArgType, Device>::Dimensions>::value;
  static const int RightNumDims = internal::array_size<typename TensorEvaluator<RightArgType, Device>::Dimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess &
                   TensorEvaluator<RightArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<LeftArgType, Device>::Layout,
    RawAccess = false
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis())
  {
    EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE);
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(0 <= m_axis && m_axis < NumDims);

    // Compute the output dimensions: every dimension must match except along the
    // concatenation axis, where the sizes add up.
    const Dimensions& lhs_dims = m_leftImpl.dimensions();
    const Dimensions& rhs_dims = m_rightImpl.dimensions();
    int i = 0;
    for (; i < m_axis; ++i) {
      eigen_assert(lhs_dims[i] > 0);
      eigen_assert(lhs_dims[i] == rhs_dims[i]);
      m_dimensions[i] = lhs_dims[i];
    }
    eigen_assert(lhs_dims[i] > 0);
    eigen_assert(rhs_dims[i] > 0);
    m_dimensions[i] = lhs_dims[i] + rhs_dims[i];
    for (++i; i < NumDims; ++i) {
      eigen_assert(lhs_dims[i] > 0);
      eigen_assert(lhs_dims[i] == rhs_dims[i]);
      m_dimensions[i] = lhs_dims[i];
    }

    // Precompute strides for the lhs, rhs and output so that coeff() can map a flat
    // output index back to a flat index in the proper operand.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_leftStrides[0] = 1;
      m_rightStrides[0] = 1;
      m_outputStrides[0] = 1;

      for (int j = 1; j < NumDims; ++j) {
        m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1];
        m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1];
        m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1];
      }
    } else {
      m_leftStrides[NumDims - 1] = 1;
      m_rightStrides[NumDims - 1] = 1;
      m_outputStrides[NumDims - 1] = 1;

      for (int j = NumDims - 2; j >= 0; --j) {
        m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1];
        m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1];
        m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1];
      }
    }
  }
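  // Worked stride example (illustrative, not from the original source): concatenating a
  // ColMajor 2x3 lhs with a 3x3 rhs along axis 0 yields a 5x3 output with
  //   m_leftStrides  = {1, 2},  m_rightStrides = {1, 3},  m_outputStrides = {1, 5}.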
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/)
  {
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    m_rightImpl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup()
  {
    m_leftImpl.cleanup();
    m_rightImpl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Decompose the flat output index into per-dimension indices.
    array<Index, NumDims> subs;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        subs[i] = index / m_outputStrides[i];
        index -= subs[i] * m_outputStrides[i];
      }
      subs[0] = index;
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        subs[i] = index / m_outputStrides[i];
        index -= subs[i] * m_outputStrides[i];
      }
      subs[NumDims - 1] = index;
    }

    const Dimensions& left_dims = m_leftImpl.dimensions();
    if (subs[m_axis] < left_dims[m_axis]) {
      Index left_index;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        left_index = subs[0];
        for (int i = 1; i < NumDims; ++i) {
          left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
        }
      } else {
        left_index = subs[NumDims - 1];
        for (int i = NumDims - 2; i >= 0; --i) {
          left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
        }
      }
      return m_leftImpl.coeff(left_index);
    }

    // The coefficient falls past the lhs along the concatenation axis: shift into the rhs.
    subs[m_axis] -= left_dims[m_axis];
    const Dimensions& right_dims = m_rightImpl.dimensions();
    Index right_index;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      right_index = subs[0];
      for (int i = 1; i < NumDims; ++i) {
        right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
      }
    } else {
      right_index = subs[NumDims - 1];
      for (int i = NumDims - 2; i >= 0; --i) {
        right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
      }
    }
    return m_rightImpl.coeff(right_index);
  }
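  // Index-mapping example (illustrative, not from the original source): concatenating a
  // ColMajor 2x3 lhs with a 3x3 rhs along axis 0 gives a 5x3 output. Output index 7
  // decomposes to subs = {2, 1}; since subs[0] >= left_dims[0] == 2 it falls in the rhs,
  // subs[0] -= 2 gives {0, 1}, i.e. rhs flat index 0 + 1 * 3 = 3, which is rhs(0, 1).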
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + packetSize - 1 < dimensions().TotalSize());

    // Gather coefficient by coefficient: a packet may straddle the lhs/rhs boundary.
    EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
    for (int i = 0; i < packetSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                           2 * TensorOpCost::MulCost<Index>() +
                                           TensorOpCost::DivCost<Index>() +
                                           TensorOpCost::ModCost<Index>());
    const double lhs_size = m_leftImpl.dimensions().TotalSize();
    const double rhs_size = m_rightImpl.dimensions().TotalSize();
    return (lhs_size / (lhs_size + rhs_size)) *
               m_leftImpl.costPerCoeff(vectorized) +
           (rhs_size / (lhs_size + rhs_size)) *
               m_rightImpl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost);
  }
  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

  protected:
    Dimensions m_dimensions;
    array<Index, NumDims> m_outputStrides;
    array<Index, NumDims> m_leftStrides;
    array<Index, NumDims> m_rightStrides;
    TensorEvaluator<LeftArgType, Device> m_leftImpl;
    TensorEvaluator<RightArgType, Device> m_rightImpl;
    const Axis m_axis;
};
// Eval as lvalue
template<typename Axis, typename LeftArgType, typename RightArgType, typename Device>
struct TensorEvaluator<TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
  : public TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device>
{
  typedef TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgType>, Device> Base;
  typedef TensorConcatenationOp<Axis, LeftArgType, RightArgType> XprType;
  typedef typename Base::Dimensions Dimensions;
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess &
                   TensorEvaluator<RightArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<LeftArgType, Device>::Layout,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device)
    : Base(op, device)
  {
    EIGEN_STATIC_ASSERT((static_cast<int>(Layout) == static_cast<int>(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE);
  }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    // Decompose the flat output index (ColMajor only, per the static assert above).
    array<Index, Base::NumDims> subs;
    for (int i = Base::NumDims - 1; i > 0; --i) {
      subs[i] = index / this->m_outputStrides[i];
      index -= subs[i] * this->m_outputStrides[i];
    }
    subs[0] = index;

    const Dimensions& left_dims = this->m_leftImpl.dimensions();
    if (subs[this->m_axis] < left_dims[this->m_axis]) {
      Index left_index = subs[0];
      for (int i = 1; i < Base::NumDims; ++i) {
        left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i];
      }
      return this->m_leftImpl.coeffRef(left_index);
    }

    subs[this->m_axis] -= left_dims[this->m_axis];
    const Dimensions& right_dims = this->m_rightImpl.dimensions();
    Index right_index = subs[0];
    for (int i = 1; i < Base::NumDims; ++i) {
      right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i];
    }
    return this->m_rightImpl.coeffRef(right_index);
  }
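  // Write-side sketch (illustrative, assuming the non-const concatenate() overload of
  // TensorBase): because this evaluator exposes coeffRef(), a concatenation can also be
  // assigned to, scattering the right-hand side back into the two operands:
  //
  //   Eigen::Tensor<float, 2> a(2, 3), b(3, 3), c(5, 3);
  //   c.setRandom();
  //   a.concatenate(b, 0) = c;  // rows 0-1 of c go to a, rows 2-4 go to b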
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index + packetSize - 1 < this->dimensions().TotalSize());

    // Scatter coefficient by coefficient: a packet may straddle the lhs/rhs boundary.
    EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    for (int i = 0; i < packetSize; ++i) {
      coeffRef(index+i) = values[i];
    }
  }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H