Doxygen Book
gsttensor_transform.c
1 
46 #ifdef HAVE_CONFIG_H
47 #include <config.h>
48 #endif
49 
50 #include <string.h>
51 #include <math.h>
52 #include <nnstreamer_log.h>
53 #include <nnstreamer_util.h>
54 #include "gsttensor_transform.h"
55 
56 #ifdef HAVE_ORC
57 #include "nnstreamer-orc.h"
58 #endif
59 
63 #ifndef DBG
64 #define DBG (!filter->silent)
65 #endif
66 
67 GST_DEBUG_CATEGORY_STATIC (gst_tensor_transform_debug);
68 #define GST_CAT_DEFAULT gst_tensor_transform_debug
69 #define CAPS_STRING GST_TENSOR_CAP_DEFAULT ";" GST_TENSORS_CAP_MAKE ("{ static, flexible }")
70 #define REGEX_DIMCHG_OPTION "^([0-9]|1[0-5]):([0-9]|1[0-5])$"
71 #define REGEX_TYPECAST_OPTION "(^[u]?int(8|16|32|64)$|^float(16|32|64)$)"
72 #define REGEX_TRANSPOSE_OPTION "^(?:([0-2]):(?!.*\\1)){3}3$"
73 #define REGEX_STAND_OPTION "^(default|dc-average)(:([u]?int(8|16|32|64)|float(16|32|64)))?(,per-channel:(true|false))?$"
74 #define REGEX_CLAMP_OPTION "^((([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?))):"\
75  "((([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)))$"
76 #define REGEX_PADDING_OPTION "^((left|right|top|bottom|front|back):(\\d)(,)?)+(layout:(NCHW|NHWC))?$"
77 #define REGEX_ARITH_OPTION "^(typecast:([u]?int(8|16|32|64)|float(16|32|64)),)?"\
78  "(per-channel:(false|true@[0-9]+),)?"\
79  "(((add|mul|div)(:([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?))+(@[0-9]+)?)(,|))+$"
80 
81 #define REGEX_ARITH_OPTION_TYPECAST "(typecast:([u]?int(8|16|32|64)|float(16|32|64)))"
82 
87 #define NNS_TENSOR_TRANSPOSE_RANK_LIMIT (4)
88 
93 #define NNS_TENSOR_PADDING_RANK_LIMIT (3)
94 
98 enum
99 {
100  PROP_0,
101  PROP_SILENT,
102  PROP_MODE,
103  PROP_OPTION,
104  PROP_ACCELERATION,
105  PROP_APPLY,
106  PROP_TRANSPOSE_RANK_LIMIT
107 };
108 
112 #ifdef HAVE_ORC
113 #define DEFAULT_ACCELERATION TRUE
114 #else
115 #define DEFAULT_ACCELERATION FALSE
116 #endif
117 
118 static const gchar *gst_tensor_transform_stand_string[] = {
119  [STAND_DEFAULT] = "default",
120  [STAND_DC_AVERAGE] = "dc-average",
121  [STAND_END] = NULL
122 };
123 
124 static const gchar *gst_tensor_transform_operator_string[] = {
125  [GTT_OP_TYPECAST] = "typecast",
126  [GTT_OP_ADD] = "add",
127  [GTT_OP_MUL] = "mul",
128  [GTT_OP_DIV] = "div",
129  [GTT_OP_UNKNOWN] = NULL
130 };
131 
135 static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
136  GST_PAD_SINK,
137  GST_PAD_ALWAYS,
138  GST_STATIC_CAPS (CAPS_STRING));
139 
143 static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
144  GST_PAD_SRC,
145  GST_PAD_ALWAYS,
146  GST_STATIC_CAPS (CAPS_STRING));
147 
148 #define gst_tensor_transform_parent_class parent_class
149 G_DEFINE_TYPE (GstTensorTransform, gst_tensor_transform,
150  GST_TYPE_BASE_TRANSFORM);
151 
152 /* GObject vmethod implementations */
153 static void gst_tensor_transform_set_property (GObject * object, guint prop_id,
154  const GValue * value, GParamSpec * pspec);
155 static void gst_tensor_transform_get_property (GObject * object, guint prop_id,
156  GValue * value, GParamSpec * pspec);
157 static void gst_tensor_transform_finalize (GObject * object);
158 
159 /* GstBaseTransformer vmethod implementations */
160 static GstFlowReturn gst_tensor_transform_transform (GstBaseTransform * trans,
161  GstBuffer * inbuf, GstBuffer * outbuf);
162 static GstCaps *gst_tensor_transform_transform_caps (GstBaseTransform * trans,
163  GstPadDirection direction, GstCaps * caps, GstCaps * filter);
164 static GstCaps *gst_tensor_transform_fixate_caps (GstBaseTransform * trans,
165  GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
166 static gboolean gst_tensor_transform_set_caps (GstBaseTransform * trans,
167  GstCaps * incaps, GstCaps * outcaps);
168 static gboolean gst_tensor_transform_transform_size (GstBaseTransform * trans,
169  GstPadDirection direction, GstCaps * caps, gsize size,
170  GstCaps * othercaps, gsize * othersize);
171 
172 static gboolean gst_tensor_transform_convert_dimension (GstTensorTransform *
173  filter, GstPadDirection direction, guint idx, const GstTensorInfo * in_info,
174  GstTensorInfo * out_info);
175 
176 #define GST_TYPE_TENSOR_TRANSFORM_MODE (gst_tensor_transform_mode_get_type ())
177 
181 static GType
182 gst_tensor_transform_mode_get_type (void)
183 {
184  static GType mode_type = 0;
185 
186  if (mode_type == 0) {
187  static GEnumValue mode_types[] = {
188  {GTT_DIMCHG, "Mode for changing tensor dimensions, "
189  "option=FROM_DIM:TO_DIM (with a regex, " REGEX_DIMCHG_OPTION
190  ", where NNS_TENSOR_RANK_LIMIT is 16)",
191  "dimchg"},
192  {GTT_TYPECAST, "Mode for casting type of tensor, "
193  "option=" REGEX_TYPECAST_OPTION, "typecast"},
194  {GTT_ARITHMETIC, "Mode for arithmetic operations with tensor, "
195  "option=[typecast:TYPE,][per-channel:(false|true@DIM),]add|mul|div:NUMBER[@CH_IDX], ...",
196  "arithmetic"},
197  {GTT_TRANSPOSE, "Mode for transposing shape of tensor, "
198  "option=D1\':D2\':D3\':D4 (fixed to 3)",
199  "transpose"},
200  {GTT_STAND, "Mode for statistical standardization of tensor, "
201  "option=(default|dc-average)[:TYPE][,per-channel:(false|true)]",
202  "stand"},
203  {GTT_CLAMP, "Mode for clamping all elements of tensor into the range, "
204  "option=CLAMP_MIN:CLAMP_MAX",
205  "clamp"},
206  {GTT_PADDING, "Mode for padding of tensor, "
207  "option=left|right|top|bottom|front|back:NUMBER[,layout:(NCHW|NHWC)]",
208  "padding"},
209  {GTT_UNKNOWN, "Unknown or not-implemented-yet mode",
210  "unknown"},
211  {0, NULL, NULL},
212  };
213 
214  mode_type = g_enum_register_static ("gtt_mode_type", mode_types);
215  }
216 
217  return mode_type;
218 }
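/*
 * Editor's note (illustrative, not part of the original source): option strings
 * that satisfy the mode descriptions and REGEX_* constraints registered above,
 * one example per mode.
 *   dimchg     "0:2"                                     (move dim 0 to position 2)
 *   typecast   "float32"
 *   arithmetic "typecast:float32,add:-127.5,div:127.5"
 *   transpose  "1:0:2:3"                                 (last index fixed to 3)
 *   stand      "default:float32,per-channel:true"
 *   clamp      "0:255"
 *   padding    "left:1,right:1,top:1,bottom:1,layout:NCHW"
 */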
219 
223 static void
224 gst_tensor_transform_class_init (GstTensorTransformClass * klass)
225 {
226  GObjectClass *gobject_class;
227  GstElementClass *gstelement_class;
228  GstBaseTransformClass *trans_class;
229 
230  GST_DEBUG_CATEGORY_INIT (gst_tensor_transform_debug, "tensor_transform", 0,
231  "Element to transforms tensor dimension or type");
232 
233  trans_class = (GstBaseTransformClass *) klass;
234  gstelement_class = (GstElementClass *) trans_class;
235  gobject_class = (GObjectClass *) gstelement_class;
236 
237  gobject_class->set_property = gst_tensor_transform_set_property;
238  gobject_class->get_property = gst_tensor_transform_get_property;
239  gobject_class->finalize = gst_tensor_transform_finalize;
240 
241  g_object_class_install_property (gobject_class, PROP_SILENT,
242  g_param_spec_boolean ("silent", "Silent", "Produce verbose output ?",
243  FALSE, G_PARAM_READWRITE));
244  g_object_class_install_property (gobject_class, PROP_MODE,
245  g_param_spec_enum ("mode", "Mode", "Mode used for transforming tensor",
246  GST_TYPE_TENSOR_TRANSFORM_MODE, GTT_UNKNOWN,
247  G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
248  g_object_class_install_property (gobject_class, PROP_OPTION,
249  g_param_spec_string ("option", "Option",
250  "Option for the tensor transform mode ?", "", G_PARAM_READWRITE));
251  g_object_class_install_property (gobject_class, PROP_ACCELERATION,
252  g_param_spec_boolean ("acceleration", "Acceleration", "Orc acceleration",
253  DEFAULT_ACCELERATION, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
254  g_object_class_install_property (gobject_class, PROP_APPLY,
255  g_param_spec_string ("apply", "Apply", "Select tensors to apply, "
256  "separated with ',' in case of multiple tensors. Default to apply all tensors.",
257  "", G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
258  g_object_class_install_property (gobject_class, PROP_TRANSPOSE_RANK_LIMIT,
259  g_param_spec_uint ("transpose-rank-limit", "Transpose rank limit",
260  "The rank limit of transpose, which varies per version of nnstreamer and may be lower than the global rank limit if it is over 4.",
262  G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
263 
264  gst_element_class_set_details_simple (gstelement_class,
265  "TensorTransform",
266  "Filter/Tensor",
267  "Transforms other/tensor dimensions for different models or frameworks",
268  "MyungJoo Ham <myungjoo.ham@samsung.com>");
269 
270  gst_element_class_add_pad_template (gstelement_class,
271  gst_static_pad_template_get (&src_factory));
272  gst_element_class_add_pad_template (gstelement_class,
273  gst_static_pad_template_get (&sink_factory));
274  /* Refer: https://gstreamer.freedesktop.org/documentation/design/element-transform.html */
275  trans_class->passthrough_on_same_caps = FALSE;
276 
277  /* Processing units */
278  trans_class->transform = GST_DEBUG_FUNCPTR (gst_tensor_transform_transform);
279 
280  /* Negotiation units */
281  trans_class->transform_caps =
282  GST_DEBUG_FUNCPTR (gst_tensor_transform_transform_caps);
283  trans_class->fixate_caps =
284  GST_DEBUG_FUNCPTR (gst_tensor_transform_fixate_caps);
285  trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_tensor_transform_set_caps);
286 
287  /* Allocation units */
288  trans_class->transform_size =
289  GST_DEBUG_FUNCPTR (gst_tensor_transform_transform_size);
290 }
291 
298 static void
299 gst_tensor_transform_init (GstTensorTransform * filter)
300 {
301  filter->silent = TRUE;
302  filter->mode = GTT_UNKNOWN;
303  filter->option = NULL;
304  filter->loaded = FALSE;
305  filter->operators = NULL;
306  filter->acceleration = DEFAULT_ACCELERATION;
307  filter->apply = NULL;
308 
309  gst_tensors_config_init (&filter->in_config);
310  gst_tensors_config_init (&filter->out_config);
311 }
312 
318 static tensor_transform_operator
319 gst_tensor_transform_get_operator (const gchar * str)
320 {
321  int index;
322 
323  index = find_key_strv (gst_tensor_transform_operator_string, str);
324 
325  return (index < 0) ? GTT_OP_UNKNOWN : index;
326 }
327 
333 static tensor_transform_stand_mode
334 gst_tensor_transform_get_stand_mode (const gchar * str)
335 {
336  int index;
337 
338  index = find_key_strv (gst_tensor_transform_stand_string, str);
339 
340  return (index < 0) ? STAND_END : index;
341 }
342 
343 #ifndef FLOAT16_SUPPORT
344 
347 static void
348 float16_not_supported (void)
349 {
350  ml_loge
351  ("Tensor_transform does not support float16 operators. Apply -Denable-float16=true for meson build option if your architecture support float16. Note that tensor-transform's float16 is adhoc and does NOT perform good (slow!).\n");
352  g_assert (0);
353 }
354 #endif
355 
356 #ifdef FLOAT16_SUPPORT
357 
361 static void
362 refrain_from_heavy_op_on_float16 (gulong n)
363 {
364  static int warned = 0;
365  /* 1 million */
366  if (n > 1000000) {
367  if (warned)
368  return;
369  ml_logw
370  ("Tensor_transform implementation for float16 does not support SIMD. Heavy tensor-transform operations of float16 is not recommended. Try to apply heavy ops with other types (e.g., float32) and convert it to float16 at the time when it's really needed.\n");
371  warned = 1;
372  }
373 }
374 
376 #define _conv_to_f16(intype, o, i, n) \
377  do { \
378  float16 *op = (gpointer) (o); \
379  intype *ip = (gpointer) (i); \
380  gulong idx; \
381  refrain_from_heavy_op_on_float16 (n); \
382  for (idx = 0; idx < n; idx++) \
383  *(op + idx) = (float16) *(ip + idx); \
384  } while (0)
385 
387 #define _conv_from_f16_action(n, op, ip, otypename) \
388  do { \
389  gulong idx; \
390  for (idx = 0; idx < n; idx++) \
391  *(op + idx) = (otypename) *(ip + idx); \
392  } while (0)
393 
395 #define _conv_from_f16(otype, o, i, n) \
396  do { \
397  float16 *ip = (gpointer) (i); \
398  refrain_from_heavy_op_on_float16 (n); \
399  switch (otype) { \
400  case _NNS_INT32: { \
401  int32_t *op = (gpointer) (o); \
402  _conv_from_f16_action (n, op, ip, int32_t); \
403  break; } \
404  case _NNS_UINT32: { \
405  uint32_t *op = (gpointer) (o); \
406  _conv_from_f16_action (n, op, ip, uint32_t); \
407  break; } \
408  case _NNS_INT16: { \
409  int16_t *op = (gpointer) (o); \
410  _conv_from_f16_action (n, op, ip, int16_t); \
411  break; } \
412  case _NNS_UINT16: { \
413  uint16_t *op = (gpointer) (o); \
414  _conv_from_f16_action (n, op, ip, uint16_t); \
415  break; } \
416  case _NNS_INT8: { \
417  int8_t *op = (gpointer) (o); \
418  _conv_from_f16_action (n, op, ip, int8_t); \
419  break; } \
420  case _NNS_UINT8: { \
421  uint8_t *op = (gpointer) (o); \
422  _conv_from_f16_action (n, op, ip, uint8_t); \
423  break; } \
424  case _NNS_FLOAT64: { \
425  double *op = (gpointer) (o); \
426  _conv_from_f16_action (n, op, ip, double); \
427  break; } \
428  case _NNS_FLOAT32: { \
429  float *op = (gpointer) (o); \
430  _conv_from_f16_action (n, op, ip, float); \
431  break; } \
432  case _NNS_FLOAT16: { \
433  float16 *op = (gpointer) (o); \
434  _conv_from_f16_action (n, op, ip, float16); \
435  break; } \
436  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", (otype)); g_assert (0); \
437  } \
438  } while (0)
439 
441 #define _op_float16(i, n, v, op) \
442  do { \
443  gulong idx; \
444  float16 *data_in = (float16 *) (i); \
445  refrain_from_heavy_op_on_float16 (n); \
446  switch (op) { \
447  case GTT_OP_ADD: \
448  for (idx = 0; idx < n; idx++) \
449  data_in[idx] = data_in[idx] + (v); \
450  break; \
451  case GTT_OP_MUL: \
452  for (idx = 0; idx < n; idx++) \
453  data_in[idx] = data_in[idx] * (v); \
454  break; \
455  case GTT_OP_DIV: \
456  for (idx = 0; idx < n; idx++) \
457  data_in[idx] = data_in[idx] / (v); \
458  break; \
459  default: GST_ERROR_OBJECT (filter, "Unknown operator for float16: %d", op); break; \
460  } \
461  } while (0)
462 
463 #else /* ! FLOAT16_SUPPORT */
464 #define _conv_to_f16(intype, o, i, n) do { float16_not_supported (); } while (0)
465 #define _conv_from_f16(otype, o, i, n) do { float16_not_supported (); } while (0)
466 #define _op_float16(i, n, v, op) do { float16_not_supported (); } while (0)
467 #endif /* FLOAT16_SUPPORT */
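/*
 * Editor's note (sketch of behavior, derived from the macros above): with
 * FLOAT16_SUPPORT, float16 conversion and arithmetic fall back to plain scalar
 * loops, e.g. _conv_to_f16 (uint8_t, out, in, n) expands roughly to
 *   for (idx = 0; idx < n; idx++) out[idx] = (float16) in[idx];
 * and refrain_from_heavy_op_on_float16() logs a one-time warning once n exceeds
 * 1,000,000 elements. Without FLOAT16_SUPPORT, every float16 path aborts via
 * float16_not_supported().
 */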
468 
469 #ifdef HAVE_ORC
470 /* define macros for orc */
471 #define orc_func_conv(intype,outtype) nns_orc_conv_ ## intype ## _to_ ## outtype
472 #define orc_func_add(intype) nns_orc_add_c_ ## intype
473 #define orc_func_mul(intype) nns_orc_mul_c_ ## intype
474 #define orc_func_div(intype) nns_orc_div_c_ ## intype
475 
476 #define orc_typecast_to(i,o,n,intype,otype,intypename) do { \
477  switch (otype) { \
478  case _NNS_INT32: orc_func_conv (intype, s32) ((gpointer) o, (gpointer) i, n); break; \
479  case _NNS_UINT32: orc_func_conv (intype, u32) ((gpointer) o, (gpointer) i, n); break; \
480  case _NNS_INT16: orc_func_conv (intype, s16) ((gpointer) o, (gpointer) i, n); break; \
481  case _NNS_UINT16: orc_func_conv (intype, u16) ((gpointer) o, (gpointer) i, n); break; \
482  case _NNS_INT8: orc_func_conv (intype, s8) ((gpointer) o, (gpointer) i, n); break; \
483  case _NNS_UINT8: orc_func_conv (intype, u8) ((gpointer) o, (gpointer) i, n); break; \
484  case _NNS_FLOAT64: orc_func_conv (intype, f64) ((gpointer) o, (gpointer) i, n); break; \
485  case _NNS_FLOAT32: orc_func_conv (intype, f32) ((gpointer) o, (gpointer) i, n); break; \
486  case _NNS_INT64: orc_func_conv (intype, s64) ((gpointer) o, (gpointer) i, n); break; \
487  case _NNS_UINT64: orc_func_conv (intype, u64) ((gpointer) o, (gpointer) i, n); break; \
488  case _NNS_FLOAT16: _conv_to_f16 (intypename, o, i, n); break; \
489  default: GST_ERROR_OBJECT (filter, "Unsupported output type %d", otype); g_assert (0); break; \
490  } \
491  } while (0)
492 
493 #define orc_typecast(i,o,n,itype,otype) do { \
494  switch (itype) { \
495  case _NNS_INT32: orc_typecast_to (i, o, n, s32, otype, int32_t); break; \
496  case _NNS_UINT32: orc_typecast_to (i, o, n, u32, otype, uint32_t); break; \
497  case _NNS_INT16: orc_typecast_to (i, o, n, s16, otype, int16_t); break; \
498  case _NNS_UINT16: orc_typecast_to (i, o, n, u16, otype, uint16_t); break; \
499  case _NNS_INT8: orc_typecast_to (i, o, n, s8, otype, int8_t); break; \
500  case _NNS_UINT8: orc_typecast_to (i, o, n, u8, otype, uint8_t); break; \
501  case _NNS_FLOAT64: orc_typecast_to (i, o, n, f64, otype, double); break; \
502  case _NNS_FLOAT32: orc_typecast_to (i, o, n, f32, otype, float); break; \
503  case _NNS_INT64: orc_typecast_to (i, o, n, s64, otype, int64_t); break; \
504  case _NNS_UINT64: orc_typecast_to (i, o, n, u64, otype, uint64_t); break; \
505  case _NNS_FLOAT16: _conv_from_f16 (otype, o, i, n); break; \
506  default: GST_ERROR_OBJECT (filter, "Unsupported input type %d", itype); g_assert (0); break; \
507  } \
508  } while (0)
509 
510 #define orc_typesize(size, type) do { \
511  switch (type) { \
512  case _NNS_INT32: size = sizeof(int32_t); break; \
513  case _NNS_UINT32: size = sizeof(uint32_t); break; \
514  case _NNS_INT16: size = sizeof(int16_t); break; \
515  case _NNS_UINT16: size = sizeof(uint16_t); break; \
516  case _NNS_INT8: size = sizeof(int8_t); break; \
517  case _NNS_UINT8: size = sizeof(uint8_t); break; \
518  case _NNS_FLOAT64: size = sizeof(double); break; \
519  case _NNS_FLOAT32: size = sizeof(float); break; \
520  case _NNS_INT64: size = sizeof(int64_t); break; \
521  case _NNS_UINT64: size = sizeof(uint64_t); break; \
522  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", type); g_assert (0); break; \
523  } \
524  } while (0)
525 
526 #define orc_operator_func(i,n,v,opfunc,op) do { \
527  switch ((v)->type) { \
528  case _NNS_INT32: opfunc (s32) ((gpointer) i, (v)->data._int32_t, n); break; \
529  case _NNS_UINT32: opfunc (u32) ((gpointer) i, (v)->data._uint32_t, n); break; \
530  case _NNS_INT16: opfunc (s16) ((gpointer) i, (v)->data._int16_t, n); break; \
531  case _NNS_UINT16: opfunc (u16) ((gpointer) i, (v)->data._uint16_t, n); break; \
532  case _NNS_INT8: opfunc (s8) ((gpointer) i, (v)->data._int8_t, n); break; \
533  case _NNS_UINT8: opfunc (u8) ((gpointer) i, (v)->data._uint8_t, n); break; \
534  case _NNS_FLOAT64: opfunc (f64) ((gpointer) i, (v)->data._double, n); break; \
535  case _NNS_FLOAT32: opfunc (f32) ((gpointer) i, (v)->data._float, n); break; \
536  case _NNS_INT64: opfunc (s64) ((gpointer) i, (v)->data._int64_t, n); break; \
537  case _NNS_UINT64: opfunc (u64) ((gpointer) i, (v)->data._uint64_t, n); break; \
538  case _NNS_FLOAT16: _op_float16 (i, n, (v)->data._float16, op); break; \
539  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", (v)->type); g_assert (0); break; \
540  } \
541  } while (0)
542 
543 #define orc_operator_div_loop(i,n,val,typename) do { \
544  gsize idx_div; \
545  typename *data_in = (typename *) (i); \
546  for (idx_div = 0; idx_div < (n); ++idx_div) { \
547  data_in[idx_div] = data_in[idx_div] / (val); \
548  } \
549  } while (0)
550 
551 #define orc_operator(i,n,v,op) do { \
552  switch (op) { \
553  case GTT_OP_ADD: orc_operator_func (i, n, v, orc_func_add, op); break; \
554  case GTT_OP_MUL: orc_operator_func (i, n, v, orc_func_mul, op); break; \
555  case GTT_OP_DIV: \
556  switch ((v)->type) { \
557  case _NNS_INT32: orc_operator_div_loop (i, n, (v)->data._int32_t, int32_t); break; \
558  case _NNS_UINT32: orc_operator_div_loop (i, n, (v)->data._uint32_t, uint32_t); break; \
559  case _NNS_INT16: orc_operator_div_loop (i, n, (v)->data._int16_t, int16_t); break; \
560  case _NNS_UINT16: orc_operator_div_loop (i, n, (v)->data._uint16_t, uint16_t); break; \
561  case _NNS_INT8: orc_operator_div_loop (i, n, (v)->data._int8_t, int8_t); break; \
562  case _NNS_UINT8: orc_operator_div_loop (i, n, (v)->data._uint8_t, uint8_t); break; \
563  case _NNS_FLOAT64: orc_func_div (f64) ((gpointer) i, (v)->data._double, n); break; \
564  case _NNS_FLOAT32: orc_func_div (f32) ((gpointer) i, (v)->data._float, n); break; \
565  case _NNS_INT64: orc_operator_div_loop (i, n, (v)->data._int64_t, int64_t); break; \
566  case _NNS_UINT64: orc_operator_div_loop (i, n, (v)->data._uint64_t, uint64_t); break; \
567  case _NNS_FLOAT16: _op_float16 (i, n, (v)->data._float16, op); break; \
568  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", (v)->type); g_assert (0); break; \
569  } \
570  break; \
571  default: GST_ERROR_OBJECT (filter, "Unknown operator %d", op); break; \
572  } \
573  } while (0)
574 #endif /* HAVE_ORC */
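/*
 * Editor's note (illustrative expansion, assuming the nns_orc_* kernels from
 * nnstreamer-orc.h): with Orc acceleration enabled,
 *   orc_operator (outptr, num, &value, GTT_OP_ADD)
 * with value.type == _NNS_FLOAT32 dispatches to
 *   nns_orc_add_c_f32 ((gpointer) outptr, value.data._float, num);
 * i.e. the type switch only selects a pre-generated kernel. Division on integer
 * types stays a plain C loop (orc_operator_div_loop) and float16 always goes
 * through _op_float16.
 */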
575 
579 #define handle_operator(d,v,oper,vtype) do { \
580  switch (oper) { \
581  case GTT_OP_ADD: \
582  (d)->data._##vtype += (v)->data._##vtype; \
583  break; \
584  case GTT_OP_MUL: \
585  (d)->data._##vtype *= (v)->data._##vtype; \
586  break; \
587  case GTT_OP_DIV: \
588  if ((v)->data._##vtype == 0) { \
589  GST_ERROR_OBJECT (filter, "Invalid state, denominator is 0."); \
590  return FALSE; \
591  } \
592  (d)->data._##vtype /= (v)->data._##vtype; \
593  break; \
594  default: \
595  GST_ERROR_OBJECT (filter, "Unknown operator %d", oper); \
596  return FALSE; \
597  } \
598  } while (0)
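/*
 * Editor's note: handle_operator() relies on token pasting against the
 * tensor_data_s union members, e.g. handle_operator (desc, val, op, int32_t)
 * expands (for the GTT_OP_ADD case) to
 *   (desc)->data._int32_t += (val)->data._int32_t;
 * with an explicit zero-denominator check before GTT_OP_DIV.
 */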
599 
608 static gboolean
609 gst_tensor_transform_do_operator (GstTensorTransform * filter,
610  tensor_data_s * desc, const tensor_data_s * val,
611  tensor_transform_operator op)
612 {
613  g_return_val_if_fail (desc != NULL, FALSE);
614  g_return_val_if_fail (val != NULL, FALSE);
615  g_return_val_if_fail (desc->type == val->type, FALSE);
616 
617  switch (desc->type) {
618  case _NNS_INT32:
619  handle_operator (desc, val, op, int32_t);
620  break;
621  case _NNS_UINT32:
622  handle_operator (desc, val, op, uint32_t);
623  break;
624  case _NNS_INT16:
625  handle_operator (desc, val, op, int16_t);
626  break;
627  case _NNS_UINT16:
628  handle_operator (desc, val, op, uint16_t);
629  break;
630  case _NNS_INT8:
631  handle_operator (desc, val, op, int8_t);
632  break;
633  case _NNS_UINT8:
634  handle_operator (desc, val, op, uint8_t);
635  break;
636  case _NNS_FLOAT64:
637  handle_operator (desc, val, op, double);
638  break;
639  case _NNS_FLOAT32:
640  handle_operator (desc, val, op, float);
641  break;
642  case _NNS_FLOAT16:
643 #ifdef FLOAT16_SUPPORT
644  handle_operator (desc, val, op, float16);
645 #else
646  float16_not_supported ();
647 #endif
648  break;
649  case _NNS_INT64:
650  handle_operator (desc, val, op, int64_t);
651  break;
652  case _NNS_UINT64:
653  handle_operator (desc, val, op, uint64_t);
654  break;
655  default:
656  GST_ERROR_OBJECT (filter, "Unknown tensor type %d", desc->type);
657  return FALSE;
658  }
659 
660  return TRUE;
661 }
662 
668 static gboolean
669 gst_tensor_transform_set_option_data (GstTensorTransform * filter)
670 {
671  gchar *filter_name;
672  gboolean ret = FALSE;
673 
674  if (filter->mode == GTT_UNKNOWN || filter->option == NULL)
675  return TRUE;
676 
677  filter_name = gst_object_get_name ((GstObject *) filter);
678 
679  switch (filter->mode) {
680  case GTT_DIMCHG:
681  {
682  gchar **strv = NULL;
683 
684  if (!g_regex_match_simple (REGEX_DIMCHG_OPTION, filter->option,
685  G_REGEX_CASELESS, 0)) {
686  ml_loge
687  ("%s: dimchg: \'%s\' is not valid option string: it should be in the form of IDX_DIM_FROM:IDX_DIM_TO: with a regex, "
688  REGEX_DIMCHG_OPTION "\n", filter_name, filter->option);
689  break;
690  }
691 
692  strv = g_strsplit (filter->option, ":", 2);
693 
694  filter->data_dimchg.from = (int) g_ascii_strtoll (strv[0], NULL, 10);
695  filter->data_dimchg.to = (int) g_ascii_strtoll (strv[1], NULL, 10);
696  ret = filter->loaded = TRUE;
697  g_strfreev (strv);
698  break;
699  }
700  case GTT_TYPECAST:
701  {
702  if (g_regex_match_simple (REGEX_TYPECAST_OPTION, filter->option,
703  G_REGEX_CASELESS, 0)) {
704  filter->data_typecast.to = gst_tensor_get_type (filter->option);
705  ret = filter->loaded = TRUE;
706  } else {
707  ml_loge
708  ("%s: typecast: \'%s\' is not valid data type for tensor: data type of tensor should be one of %s\n",
709  filter_name, filter->option, GST_TENSOR_TYPE_ALL);
710  }
711  break;
712  }
713  case GTT_ARITHMETIC:
714  {
715  gchar *str_option;
716  gchar **str_operators;
717  gchar **str_op;
718  tensor_transform_operator_s *op_s;
719  guint i, num_operators, num_op;
720  GRegex *regex_option_tc;
721 
722  filter->data_arithmetic.out_type = _NNS_END;
723  filter->data_arithmetic.per_channel_arith = FALSE;
724 
725  if (filter->operators) {
726  GST_WARNING_OBJECT (filter,
727  "There exists pre-defined operators (total %d), now reset these.",
728  g_slist_length (filter->operators));
729 
730  g_slist_free_full (filter->operators, g_free);
731  filter->operators = NULL;
732  }
733 
734  regex_option_tc = g_regex_new (REGEX_ARITH_OPTION_TYPECAST,
735  G_REGEX_CASELESS, 0, 0);
736 
737  if (!regex_option_tc) {
738  GST_ERROR_OBJECT (filter,
739  "arithmetic: failed to create a GRegex structure for %s\n",
740  REGEX_ARITH_OPTION_TYPECAST);
741  break;
742  }
743 
744  if (g_regex_match_full (regex_option_tc, filter->option, -1,
745  1, 0, NULL, NULL)) {
746  str_option = g_regex_replace (regex_option_tc, filter->option, -1, 1,
747  "", 0, 0);
748  ml_loge
749  ("%s: arithmetic: [typecast:TYPE,] should be located at the first to prevent memory re-allocation: typecast(s) in the middle of \'%s\' will be ignored\n",
750  filter_name, filter->option);
751  } else {
752  str_option = g_strdup (filter->option);
753  }
754  g_regex_unref (regex_option_tc);
755 
756  if (!g_regex_match_simple (REGEX_ARITH_OPTION, str_option,
757  G_REGEX_CASELESS, 0)) {
758  ml_loge
759  ("%s: arithmetic: \'%s\' is not valid option string: it should be in the form of [typecast:TYPE,][per-channel:(false|true@DIM),]add|mul|div:NUMBER[@CH_IDX]..., ...\n",
760  filter_name, str_option);
761  g_free (str_option);
762  break;
763  }
764  str_operators = g_strsplit (str_option, ",", -1);
765  num_operators = g_strv_length (str_operators);
766 
767  for (i = 0; i < num_operators; ++i) {
768  str_op = g_strsplit (str_operators[i], ":", -1);
769  num_op = g_strv_length (str_op);
770 
771  if (str_op[0]) {
772  gchar **values = g_strsplit (str_op[1], "@", -1);
773  guint num_values = g_strv_length (values);
774 
775  /* check whether per-channel */
776  if (g_ascii_strcasecmp (str_op[0], "per-channel") == 0) {
777  if (num_values > 1 && g_ascii_strcasecmp (values[0], "true") == 0) {
778  ml_logi
779  ("Set per-channel for arithmetic and assume that %s-th dim is the channel",
780  values[1]);
781  filter->data_arithmetic.per_channel_arith = TRUE;
782  filter->data_arithmetic.ch_dim =
783  (guint) g_ascii_strtoull (values[1], NULL, 10);
784  }
785 
786  g_strfreev (values);
787  g_strfreev (str_op);
788  continue;
789  }
790 
791  op_s = g_new0 (tensor_transform_operator_s, 1);
792  g_assert (op_s);
793 
794  op_s->op = gst_tensor_transform_get_operator (str_op[0]);
795  op_s->applying_ch = -1; /* -1 means applying to all channels */
796  switch (op_s->op) {
797  case GTT_OP_TYPECAST:
798  if (num_op > 1 && str_op[1]) {
799  op_s->value.type = gst_tensor_get_type (values[0]);
800  filter->data_arithmetic.out_type = op_s->value.type;
801  } else {
802  GST_WARNING_OBJECT (filter, "Invalid option for typecast %s",
803  str_operators[i]);
804  op_s->op = GTT_OP_UNKNOWN;
805  }
806  break;
807  case GTT_OP_ADD:
808  case GTT_OP_MUL:
809  case GTT_OP_DIV:
810  if (num_op > 1 && str_op[1]) {
811  /* get operand */
812  if (strchr (values[0], '.') || strchr (values[0], 'e') ||
813  strchr (values[0], 'E')) {
814  double val;
815 
816  val = g_ascii_strtod (values[0], NULL);
817  gst_tensor_data_set (&op_s->value, _NNS_FLOAT64, &val);
818  } else {
819  int64_t val;
820 
821  val = g_ascii_strtoll (values[0], NULL, 10);
822  gst_tensor_data_set (&op_s->value, _NNS_INT64, &val);
823  }
824 
825  if (filter->data_arithmetic.per_channel_arith && num_values > 1) {
826  op_s->applying_ch = g_ascii_strtoll (values[1], NULL, 10);
827  }
828 
829  } else {
830  GST_WARNING_OBJECT (filter,
831  "Invalid option for arithmetic %s", str_operators[i]);
832  op_s->op = GTT_OP_UNKNOWN;
833  }
834  break;
835  default:
836  GST_WARNING_OBJECT (filter, "Unknown operator %s", str_op[0]);
837  break;
838  }
839 
840  /* append operator */
841  if (op_s->op != GTT_OP_UNKNOWN) {
842  filter->operators = g_slist_append (filter->operators, op_s);
843  } else {
844  g_free (op_s);
845  }
846 
847  g_strfreev (values);
848  } else {
849  GST_WARNING_OBJECT (filter, "Invalid option %s", str_operators[i]);
850  }
851 
852  g_strfreev (str_op);
853  }
854 
855  ret = filter->loaded = (filter->operators != NULL);
856  g_strfreev (str_operators);
857  g_free (str_option);
858  break;
859  }
860  case GTT_TRANSPOSE:
861  {
862  int i;
863  gchar **strv = NULL;
864 
865  if (!g_regex_match_simple (REGEX_TRANSPOSE_OPTION, filter->option,
866  G_REGEX_CASELESS, 0)) {
867  ml_loge
868  ("%s: transpose: \'%s\' is not valid option string: it should be in the form of NEW_IDX_DIM0:NEW_IDX_DIM1:NEW_IDX_DIM2:3 (Now transpose mode's rank is fixed to 3. Note that the index of the last dim is always fixed to 3)\n",
869  filter_name, filter->option);
870  break;
871  }
872 
873  strv = g_strsplit (filter->option, ":", NNS_TENSOR_TRANSPOSE_RANK_LIMIT);
874  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
875  filter->data_transpose.trans_order[i] =
876  (uint8_t) g_ascii_strtoull (strv[i], NULL, 10);
877  }
878 
879  ret = filter->loaded = TRUE;
880  g_strfreev (strv);
881  break;
882  }
883  case GTT_STAND:
884  {
885  gchar **options = NULL;
886  guint i, num_options;
887 
888  if (!g_regex_match_simple (REGEX_STAND_OPTION, filter->option,
889  G_REGEX_CASELESS, 0)) {
890  ml_loge
891  ("%s: stand: \'%s\' is not a valid option string: it should be in the form of (default|dc-average)[:TYPE][,per-channel:(false|true)]\n",
892  filter_name, filter->option);
893  break;
894  }
895 
896  filter->data_stand.out_type = _NNS_END;
897  filter->data_stand.per_channel = FALSE;
898 
899  options = g_strsplit (filter->option, ",", -1);
900  num_options = g_strv_length (options);
901 
902  for (i = 0; i < num_options; i++) {
903  gchar **strv = g_strsplit (options[i], ":", -1);
904 
905  if (g_ascii_strcasecmp (strv[0], "default") == 0 ||
906  g_ascii_strcasecmp (strv[0], "dc-average") == 0) {
907  filter->data_stand.mode =
908  gst_tensor_transform_get_stand_mode (strv[0]);
909  if (g_strv_length (strv) > 1)
910  filter->data_stand.out_type = gst_tensor_get_type (strv[1]);
911  } else if (g_ascii_strcasecmp (strv[0], "per-channel") == 0) {
912  if (g_strv_length (strv) > 1 &&
913  g_ascii_strcasecmp (strv[1], "true") == 0)
914  filter->data_stand.per_channel = TRUE;
915  } else {
916  filter->data_stand.mode = STAND_END;
917  ml_logw ("Unknown option for stand mode: %s", strv[0]);
918  }
919 
920  g_strfreev (strv);
921  }
922 
923  g_strfreev (options);
924  ret = filter->loaded = TRUE;
925  break;
926  }
927  case GTT_CLAMP:
928  {
929  gchar **strv = NULL;
930 
931  if (!g_regex_match_simple (REGEX_CLAMP_OPTION, filter->option,
932  G_REGEX_CASELESS, 0)) {
933  ml_loge
934  ("%s: clamp: \'%s\' is not valid option string: it should be in the form of [CLAMP_MIN:CLAMP_MAX]\n",
935  filter_name, filter->option);
936  break;
937  }
938 
939  strv = g_strsplit (filter->option, ":", 2);
940 
941  filter->data_clamp.min = g_ascii_strtod (strv[0], NULL);
942  if (errno == ERANGE) {
943  ml_loge ("%s: clamp: CLAMP_MIN value has an invalid range\n",
944  filter_name);
945  g_strfreev (strv);
946  break;
947  }
948  filter->data_clamp.max = g_ascii_strtod (strv[1], NULL);
949  if (errno == ERANGE) {
950  ml_loge ("%s: clamp: CLAMP_MAX value has an invalid range\n",
951  filter_name);
952  g_strfreev (strv);
953  break;
954  }
955 
956  g_strfreev (strv);
957 
958  if (filter->data_clamp.min > filter->data_clamp.max) {
959  ml_loge ("%s: clamp: CLAMP_MIN is larger than CLAMP_MAX\n",
960  filter_name);
961  break;
962  }
963 
964  ret = filter->loaded = TRUE;
965  break;
966  }
967  case GTT_PADDING:
968  {
969  gchar **options = NULL;
970  guint i, num_options;
971 
972  if (!g_regex_match_simple (REGEX_PADDING_OPTION, filter->option,
973  G_REGEX_CASELESS, 0)) {
974  ml_loge
975  ("%s: padding: \'%s\' is not valid option string: it should be in the form of left|right|top|bottom|front|back:PADDING,[layout:(NCHW|NHWC)]\n",
976  filter_name, filter->option);
977  break;
978  }
979 
980  for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
981  filter->data_padding.pad[i] = 0;
982  filter->data_padding.layout = _NNS_LAYOUT_ANY;
983 
984  options = g_strsplit (filter->option, ",", -1);
985  num_options = g_strv_length (options);
986 
987  for (i = 0; i < num_options; i++) {
988  gchar **strv = g_strsplit (options[i], ":", 2);
989  if (g_ascii_strcasecmp (strv[0], "left") == 0) {
990  filter->data_padding.pad[PADDING_LEFT] =
991  (guint) g_ascii_strtoull (strv[1], NULL, 10);
992  } else if (g_ascii_strcasecmp (strv[0], "right") == 0) {
993  filter->data_padding.pad[PADDING_RIGHT] =
994  (guint) g_ascii_strtoull (strv[1], NULL, 10);
995  } else if (g_ascii_strcasecmp (strv[0], "top") == 0) {
996  filter->data_padding.pad[PADDING_TOP] =
997  (guint) g_ascii_strtoull (strv[1], NULL, 10);
998  } else if (g_ascii_strcasecmp (strv[0], "bottom") == 0) {
999  filter->data_padding.pad[PADDING_BOTTOM] =
1000  (guint) g_ascii_strtoull (strv[1], NULL, 10);
1001  } else if (g_ascii_strcasecmp (strv[0], "front") == 0) {
1002  filter->data_padding.pad[PADDING_FRONT] =
1003  (guint) g_ascii_strtoull (strv[1], NULL, 10);
1004  } else if (g_ascii_strcasecmp (strv[0], "back") == 0) {
1005  filter->data_padding.pad[PADDING_BACK] =
1006  (guint) g_ascii_strtoull (strv[1], NULL, 10);
1007  } else if (g_ascii_strcasecmp (strv[0], "layout") == 0) {
1008  if (g_ascii_strcasecmp (strv[1], "NHWC") == 0)
1009  filter->data_padding.layout = _NNS_LAYOUT_NHWC;
1010  else
1011  filter->data_padding.layout = _NNS_LAYOUT_NCHW;
1012  } else {
1013  ml_logw ("Unknown option for padding mode: %s", strv[0]);
1014  }
1015  g_strfreev (strv);
1016  }
1017  g_strfreev (options);
1018 
1019  if (filter->data_padding.layout == _NNS_LAYOUT_NHWC) {
1020  guint prev_left = filter->data_padding.pad[PADDING_LEFT],
1021  prev_right = filter->data_padding.pad[PADDING_RIGHT];
1022  filter->data_padding.pad[PADDING_LEFT] =
1023  filter->data_padding.pad[PADDING_FRONT];
1024  filter->data_padding.pad[PADDING_RIGHT] =
1025  filter->data_padding.pad[PADDING_BACK];
1026  filter->data_padding.pad[PADDING_FRONT] = prev_left;
1027  filter->data_padding.pad[PADDING_BACK] = prev_right;
1028  }
1029 
1030  ret = filter->loaded = TRUE;
1031  break;
1032  }
1033  default:
1034  GST_ERROR_OBJECT (filter, "Cannot identify mode\n");
1035  ret = FALSE;
1036  }
1037 
1038  g_free (filter_name);
1039  return ret;
1040 }
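/*
 * Editor's note (illustrative pipeline, not from this file): a typical use of
 * the options parsed above is input normalization before an inference element,
 * e.g.
 *   gst-launch-1.0 videotestsrc ! video/x-raw,format=RGB ! tensor_converter ! \
 *     tensor_transform mode=arithmetic option=typecast:float32,div:255.0 ! \
 *     tensor_sink
 * The surrounding elements (tensor_converter, tensor_sink) and caps are
 * assumptions about a larger NNStreamer pipeline, sketched for context only.
 */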
1041 
1045 static void
1046 gst_tensor_transform_set_property (GObject * object, guint prop_id,
1047  const GValue * value, GParamSpec * pspec)
1048 {
1049  GstTensorTransform *filter = GST_TENSOR_TRANSFORM (object);
1050 
1051  switch (prop_id) {
1052  case PROP_SILENT:
1053  filter->silent = g_value_get_boolean (value);
1054  break;
1055  case PROP_MODE:
1056  filter->mode = g_value_get_enum (value);
1058  break;
1059  case PROP_OPTION:
1060  {
1061  gchar *backup_option = filter->option;
1062  filter->option = g_value_dup_string (value);
1063  if (gst_tensor_transform_set_option_data (filter)) {
1064  silent_debug (filter, "Option = %s --> %s\n", backup_option,
1065  filter->option);
1066  g_free (backup_option);
1067  } else {
1068  /* ERROR! Revert the change! */
1069  g_free (filter->option);
1070  filter->option = backup_option;
1072  }
1073  break;
1074  }
1075  case PROP_ACCELERATION:
1076 #ifdef HAVE_ORC
1077  filter->acceleration = g_value_get_boolean (value);
1078  silent_debug (filter, "acceleration = %d\n", filter->acceleration);
1079 #else
1080  GST_WARNING_OBJECT (filter, "Orc acceleration is not supported");
1081  filter->acceleration = FALSE;
1082 #endif
1083  break;
1084  case PROP_APPLY:
1085  {
1086  gint64 val;
1087  const gchar *param = g_value_get_string (value);
1088  gchar **strv = g_strsplit_set (param, ",", -1);
1089  guint i, num = g_strv_length (strv);
1090  gchar *endptr = NULL;
1091 
1092  for (i = 0; i < num; i++) {
1093  errno = 0;
1094  val = g_ascii_strtoll (strv[i], &endptr, 10);
1095  if (errno == ERANGE || errno == EINVAL || (endptr == strv[i])) {
1096  ml_loge ("Cannot convert string %s to a gint64 value", strv[i]);
1097  }
1098  filter->apply = g_list_append (filter->apply, GINT_TO_POINTER (val));
1099  }
1100  g_strfreev (strv);
1101  break;
1102  }
1103  default:
1104  G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1105  break;
1106  }
1107 }
1108 
1112 static void
1113 gst_tensor_transform_get_property (GObject * object, guint prop_id,
1114  GValue * value, GParamSpec * pspec)
1115 {
1116  GstTensorTransform *filter = GST_TENSOR_TRANSFORM (object);
1117 
1118  switch (prop_id) {
1119  case PROP_SILENT:
1120  g_value_set_boolean (value, filter->silent);
1121  break;
1122  case PROP_MODE:
1123  g_value_set_enum (value, filter->mode);
1124  break;
1125  case PROP_OPTION:
1126  g_value_set_string (value, filter->option);
1127  break;
1128  case PROP_ACCELERATION:
1129  g_value_set_boolean (value, filter->acceleration);
1130  break;
1131  case PROP_APPLY:
1132  {
1133  GList *list;
1134  gchar *p;
1135  GPtrArray *arr;
1136  gchar **strings;
1137 
1138  if (filter->apply == NULL) {
1139  g_value_set_string (value, "");
1140  return;
1141  }
1142 
1143  arr = g_ptr_array_new ();
1144  for (list = filter->apply; list != NULL; list = list->next) {
1145  g_ptr_array_add (arr, g_strdup_printf ("%i",
1146  GPOINTER_TO_INT (list->data)));
1147  }
1148  g_ptr_array_add (arr, NULL);
1149  strings = (gchar **) g_ptr_array_free (arr, FALSE);
1150  p = g_strjoinv (",", strings);
1151 
1152  g_strfreev (strings);
1153  g_value_take_string (value, p);
1154  break;
1155  }
1156  case PROP_TRANSPOSE_RANK_LIMIT:
1157  g_value_set_uint (value, NNS_TENSOR_TRANSPOSE_RANK_LIMIT);
1158  break;
1159  default:
1160  G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1161  break;
1162  }
1163 }
1164 
1168 static void
1169 gst_tensor_transform_finalize (GObject * object)
1170 {
1171  GstTensorTransform *filter;
1172 
1173  filter = GST_TENSOR_TRANSFORM (object);
1174 
1175  if (filter->option) {
1176  g_free (filter->option);
1177  filter->option = NULL;
1178  }
1179 
1180  if (filter->operators) {
1181  g_slist_free_full (filter->operators, g_free);
1182  filter->operators = NULL;
1183  }
1184 
1185  if (filter->apply) {
1186  g_list_free (filter->apply);
1187  filter->apply = NULL;
1188  }
1189 
1190  G_OBJECT_CLASS (parent_class)->finalize (object);
1191 }
1192 
1202 static GstFlowReturn
1203 gst_tensor_transform_dimchg (GstTensorTransform * filter,
1204  GstTensorInfo * in_info, GstTensorInfo * out_info,
1205  const uint8_t * inptr, uint8_t * outptr)
1206 {
1207  uint32_t *fromDim = in_info->dimension;
1208  uint32_t *toDim = out_info->dimension;
1209  unsigned int from = filter->data_dimchg.from;
1210  unsigned int to = filter->data_dimchg.to;
1211  unsigned int i, j, k;
1212  unsigned int loopLimit = 1;
1213  gsize loopBlockSize, copyblocksize, copyblocklimit;
1214 
1215  if (from == to) {
1217  nns_memcpy (outptr, inptr, gst_tensor_info_get_size (in_info));
1218  GST_WARNING_OBJECT (filter,
1219  "Calling tensor_transform with high memcpy overhead WITHOUT any effects! Check your stream whether you really need tensor_transform.\n");
1220  return GST_FLOW_OK;
1221  }
1222 
1225  g_assert (fromDim[from] == toDim[to]);
1226 
1227  loopBlockSize = copyblocksize = gst_tensor_get_element_size (in_info->type);
1228  copyblocklimit = 1;
1229 
1230  if (from < to) {
1237  for (i = NNS_TENSOR_RANK_LIMIT - 1; i > to; i--) {
1238  if (toDim[i] == 0)
1239  continue;
1240  loopLimit *= toDim[i];
1241  }
1242 
1243  for (i = 0; i < to; i++) {
1244  if (toDim[i] == 0)
1245  break;
1246  loopBlockSize *= toDim[i];
1247  }
1248 
1249  for (i = 0; i < from; i++) {
1250  if (fromDim[i] == 0)
1251  break;
1252  copyblocksize *= fromDim[i];
1253  }
1254  for (i = 0; i < to; i++) {
1255  if (toDim[i] == 0)
1256  break;
1257  copyblocklimit *= toDim[i];
1258  }
1259 
1260  for (i = 0; i < loopLimit; i++) {
1261  /* [i1][i2][...][iN][b][...] i = i1 x i2 x ... x iN */
1262  uint8_t *destptr = outptr + loopBlockSize * toDim[to] * i;
1263  const uint8_t *srcptr = inptr + loopBlockSize * toDim[to] * i;
1264 
1265  for (j = 0; j < toDim[to]; j++) {
1266  uint8_t *j_destptr = destptr + loopBlockSize * j;
1267  for (k = 0; k < copyblocklimit; k++) {
1268  nns_memcpy (j_destptr + copyblocksize * k,
1269  srcptr + k * copyblocksize * toDim[to] + j * copyblocksize,
1270  copyblocksize);
1271  }
1272  }
1273  }
1274  } else {
1280  ml_loge
1281  ("tensor-transform/dimchg operation is not permitted if from >= to.\n");
1282  return GST_FLOW_ERROR;
1283  }
1284 
1285  return GST_FLOW_OK;
1286 }
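/*
 * Editor's note (worked example for the from < to branch above): with option
 * "0:2" on a uint8 tensor of dimension 3:640:480:1 (interleaved RGB), from=0
 * and to=2, so copyblocksize stays one element, copyblocklimit becomes 640*480
 * and toDim[to] is 3; the inner loops therefore gather every 3rd byte, turning
 * RGBRGB... (interleaved) into RRR...GGG...BBB... (planar).
 */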
1287 
1297 static GstFlowReturn
1298 gst_tensor_transform_typecast (GstTensorTransform * filter,
1299  GstTensorInfo * in_info, GstTensorInfo * out_info,
1300  const uint8_t * inptr, uint8_t * outptr)
1301 {
1302  gulong i, num;
1303  gsize in_element_size, out_element_size;
1304 
1305  num = gst_tensor_get_element_count (in_info->dimension);
1306 
1307 #ifdef HAVE_ORC
1308  if (filter->acceleration) {
1309  orc_typecast (inptr, outptr, num, in_info->type, out_info->type);
1310  return GST_FLOW_OK;
1311  }
1312 #endif
1313 
1314  in_element_size = gst_tensor_get_element_size (in_info->type);
1315  out_element_size = gst_tensor_get_element_size (out_info->type);
1316 
1317  for (i = 0; i < num; ++i) {
1318  gst_tensor_data_raw_typecast (
1319  (gpointer) (inptr + in_element_size * i), in_info->type,
1320  (gpointer) (outptr + out_element_size * i), out_info->type);
1321  }
1322 
1323  return GST_FLOW_OK;
1324 }
1325 
1335 static GstFlowReturn
1336 gst_tensor_transform_arithmetic (GstTensorTransform * filter,
1337  GstTensorInfo * in_info, GstTensorInfo * out_info,
1338  const uint8_t * inptr, uint8_t * outptr)
1339 {
1340  gulong i, num, j, ch;
1341  gsize in_element_size, out_element_size;
1342 
1343  GSList *walk;
1344  tensor_transform_operator_s *op_s;
1345  tensor_data_s value;
1346 
1347  num = gst_tensor_get_element_count (in_info->dimension);
1348 
1349 #ifdef HAVE_ORC
1350  if (filter->acceleration) {
1351  walk = filter->operators;
1356  orc_typecast (inptr, outptr, num, in_info->type, out_info->type);
1357 
1358  if (!filter->data_arithmetic.per_channel_arith) {
1359  while (walk) {
1360  op_s = (tensor_transform_operator_s *) walk->data;
1361 
1362  if (op_s->op != GTT_OP_TYPECAST) {
1363  gst_tensor_data_typecast (&op_s->value, out_info->type);
1364  orc_operator (outptr, num, &op_s->value, op_s->op);
1365  }
1366 
1367  walk = g_slist_next (walk);
1368  }
1369  } else {
1370  gsize typesize = 0;
1371  guint ch_dim = filter->data_arithmetic.ch_dim;
1372  gsize ch_offset, ch_size = 1;
1373  uint8_t *tmp_outptr = NULL;
1374 
1375  for (i = 0; i < ch_dim; ++i) {
1376  ch_size *= in_info->dimension[i];
1377  }
1378  ch_offset = ch_size * in_info->dimension[ch_dim];
1379  orc_typesize (typesize, out_info->type);
1380 
1381  while (walk) {
1382  op_s = (tensor_transform_operator_s *) walk->data;
1383  if (op_s->op == GTT_OP_TYPECAST) {
1384  walk = g_slist_next (walk);
1385  continue;
1386  }
1387 
1388  if (op_s->applying_ch == -1) {
1389  gst_tensor_data_typecast (&op_s->value, out_info->type);
1390  orc_operator (outptr, num, &op_s->value, op_s->op);
1391  } else {
1392  for (i = 0; i < num / ch_offset; ++i) {
1393  tmp_outptr =
1394  outptr + (ch_size * op_s->applying_ch +
1395  ch_offset * i) * typesize;
1396  gst_tensor_data_typecast (&op_s->value, out_info->type);
1397  orc_operator (tmp_outptr, ch_size, &op_s->value, op_s->op);
1398  }
1399  }
1400  walk = g_slist_next (walk);
1401  }
1402  }
1403  return GST_FLOW_OK;
1404  }
1405 #endif
1406 
1407  in_element_size = gst_tensor_get_element_size (in_info->type);
1408  out_element_size = gst_tensor_get_element_size (out_info->type);
1409 
1410  /* per-channel */
1411  if (filter->data_arithmetic.per_channel_arith) {
1412  guint ch_dim = filter->data_arithmetic.ch_dim;
1413  gsize ch_offset, ch_size = 1;
1414  for (i = 0; i < ch_dim; ++i) {
1415  ch_size *= in_info->dimension[i];
1416  }
1417  ch_offset = ch_size * in_info->dimension[ch_dim];
1418 
1426  for (i = 0; i < num / ch_offset; ++i) {
1427  for (ch = 0; ch < in_info->dimension[ch_dim]; ++ch) {
1428  for (j = 0; j < ch_size; ++j) {
1429  gulong data_idx = (i * ch_offset) + (ch * ch_size) + j;
1430  gst_tensor_data_set (&value, in_info->type,
1431  (gpointer) (inptr + in_element_size * data_idx));
1432 
1433  walk = filter->operators;
1434  while (walk) {
1435  op_s = (tensor_transform_operator_s *) walk->data;
1436  switch (op_s->op) {
1437  case GTT_OP_TYPECAST:
1438  gst_tensor_data_typecast (&value, op_s->value.type);
1439  break;
1440  case GTT_OP_ADD:
1441  case GTT_OP_MUL:
1442  case GTT_OP_DIV:
1443  {
1444  gst_tensor_data_typecast (&op_s->value, value.type);
1445 
1446  if (op_s->applying_ch == (int) ch || op_s->applying_ch == -1) {
1447  gst_tensor_transform_do_operator (filter, &value,
1448  &op_s->value, op_s->op);
1449  }
1450  break;
1451  }
1452  default:
1453  g_assert (0);
1454  return GST_FLOW_ERROR;
1455  }
1456 
1457  walk = g_slist_next (walk);
1458  }
1459 
1460  /* set output value */
1461  g_assert (out_info->type == value.type);
1462  gst_tensor_data_get (&value, outptr + out_element_size * data_idx);
1463  }
1464  }
1465  }
1466 
1467  return GST_FLOW_OK;
1468  }
1469 
1470  for (i = 0; i < num; ++i) {
1471  /* init value with input tensor type */
1472  gst_tensor_data_set (&value, in_info->type,
1473  (gpointer) (inptr + in_element_size * i));
1474 
1475  walk = filter->operators;
1476  while (walk) {
1477  op_s = (tensor_transform_operator_s *) walk->data;
1478 
1482  switch (op_s->op) {
1483  case GTT_OP_TYPECAST:
1484  gst_tensor_data_typecast (&value, op_s->value.type);
1485  break;
1486  case GTT_OP_ADD:
1487  case GTT_OP_MUL:
1488  case GTT_OP_DIV:
1489  gst_tensor_data_typecast (&op_s->value, value.type);
1490  gst_tensor_transform_do_operator (filter, &value, &op_s->value,
1491  op_s->op);
1492  break;
1493  default:
1494  g_assert (0);
1495  return GST_FLOW_ERROR;
1496  }
1497 
1498  walk = g_slist_next (walk);
1499  }
1500 
1501  /* set output value */
1502  g_assert (out_info->type == value.type);
1503  gst_tensor_data_get (&value, outptr + out_element_size * i);
1504  }
1505 
1506  return GST_FLOW_OK;
1507 }
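/*
 * Editor's note: operators run in the order given in the option string, per
 * element. For option "typecast:float32,add:-127.5,div:127.5" each input x
 * becomes ((float) x - 127.5) / 127.5. With per-channel:true@DIM and operands
 * suffixed @CH_IDX, an add/mul/div is applied only where the index along
 * dimension DIM equals CH_IDX (applying_ch == -1 means all channels).
 */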
1508 
1512 #define transposeloop(cl,ck,cj,ci,sl,sk,sj,si,typesize) do { \
1513  size_t i, j, k, l; \
1514  int inidx = 0, outidx=0; \
1515  for(cl=0;cl<sl;cl++) \
1516  for(ci=0;ci<si;ci++) \
1517  for(cj=0;cj<sj;cj++) \
1518  for(ck=0;ck<sk;ck++){ \
1519  const uint8_t *_in; \
1520  uint8_t *_out; \
1521  outidx = si*sj*sk*cl + sj*sk*ci + sk*cj + ck; \
1522  inidx = SK*SJ*SI*l + SJ*SI*k + SI*j + i; \
1523  _in = inptr + inidx * typesize; \
1524  _out = outptr + outidx * typesize; \
1525  nns_memcpy(_out, _in, typesize); \
1526  } \
1527  } while(0);
1528 
1538 static GstFlowReturn
1539 gst_tensor_transform_transpose (GstTensorTransform * filter,
1540  GstTensorInfo * in_info, GstTensorInfo * out_info,
1541  const uint8_t * inptr, uint8_t * outptr)
1542 {
1543  int i, from, to;
1544  gboolean checkdim = FALSE;
1545  uint32_t *fromDim = in_info->dimension;
1546  gsize type_size = gst_tensor_get_element_size (in_info->type);
1547  gsize indexI, indexJ, SL, SI, SJ, SK;
1548  UNUSED (out_info);
1549 
1550  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
1551  from = i;
1552  to = filter->data_transpose.trans_order[i];
1553  if (from != to) {
1554  checkdim = TRUE;
1555  break;
1556  }
1557  }
1558 
1559  if (!checkdim) {
1560  nns_memcpy (outptr, inptr, gst_tensor_info_get_size (in_info));
1561  GST_WARNING_OBJECT (filter,
1562  "Calling tensor_transform with high memcpy overhead WITHOUT any effects!");
1563  return GST_FLOW_OK;
1564  }
1565 
1566  indexI = filter->data_transpose.trans_order[0];
1567  indexJ = filter->data_transpose.trans_order[1];
1568  SL = fromDim[3] > 0 ? fromDim[3] : 1;
1569  SI = fromDim[0] > 0 ? fromDim[0] : 1;
1570  SJ = fromDim[1] > 0 ? fromDim[1] : 1;
1571  SK = fromDim[2] > 0 ? fromDim[2] : 1;
1572 
1573  switch (indexI) {
1574  case 0:
1575  if (indexJ == 1) {
1576  transposeloop (l, i, j, k, SL, SI, SJ, SK, type_size);
1577  } else {
1578  transposeloop (l, i, k, j, SL, SI, SK, SJ, type_size);
1579  }
1580  break;
1581  case 1:
1582  if (indexJ == 0) {
1583  transposeloop (l, j, i, k, SL, SJ, SI, SK, type_size);
1584  } else {
1585  transposeloop (l, j, k, i, SL, SJ, SK, SI, type_size);
1586  }
1587  break;
1588  case 2:
1589  if (indexJ == 0) {
1590  transposeloop (l, k, i, j, SL, SK, SI, SJ, type_size);
1591  } else {
1592  transposeloop (l, k, j, i, SL, SK, SJ, SI, type_size);
1593  }
1594  break;
1595  }
1596 
1597  return GST_FLOW_OK;
1598 }
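/*
 * Editor's note (worked example): option "1:0:2:3" gives trans_order {1,0,2,3},
 * so indexI=1, indexJ=0 and the "case 1 / indexJ == 0" branch runs
 * transposeloop (l, j, i, k, SL, SJ, SI, SK, type_size), i.e. dimensions 0 and
 * 1 of the input are swapped while dimensions 2 and 3 are kept as-is.
 */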
1599 
1610 static GstFlowReturn
1611 gst_tensor_transform_stand (GstTensorTransform * filter,
1612  GstTensorInfo * in_info, GstTensorInfo * out_info,
1613  const uint8_t * inptr, uint8_t * outptr)
1614 {
1615  GstFlowReturn ret = GST_FLOW_OK;
1616  gsize in_element_size, out_element_size, data_size, ch_size;
1617  gulong i, num, data_idx, ch;
1618  gdouble tmp, *average, *std;
1619 
1620  in_element_size = gst_tensor_get_element_size (in_info->type);
1621  out_element_size = gst_tensor_get_element_size (out_info->type);
1622  num = gst_tensor_get_element_count (in_info->dimension);
1623 
1624  data_size = gst_tensor_info_get_size (in_info);
1625  ch_size = in_info->dimension[0];
1626 
1627  /* calc average and std */
1628  average = std = NULL;
1629  if (filter->data_stand.per_channel) {
1630  gst_tensor_data_raw_average_per_channel ((gpointer) inptr, data_size,
1631  in_info->type, in_info->dimension, &average);
1632  /* calculate std only for default mode */
1633  if (filter->data_stand.mode == STAND_DEFAULT)
1634  gst_tensor_data_raw_std_per_channel ((gpointer) inptr, data_size,
1635  in_info->type, in_info->dimension, average, &std);
1636  } else {
1637  gst_tensor_data_raw_average ((gpointer) inptr, data_size,
1638  in_info->type, &average);
1639  /* calculate std only for default mode */
1640  if (filter->data_stand.mode == STAND_DEFAULT)
1641  gst_tensor_data_raw_std ((gpointer) inptr, data_size, in_info->type,
1642  average, &std);
1643  }
1644 
1645  switch (filter->data_stand.mode) {
1646  case STAND_DEFAULT:
1647  {
1648  if (!filter->data_stand.per_channel) {
1649  for (i = 0; i < num; i++) {
1650  data_idx = in_element_size * i;
1651  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1652  in_info->type, &tmp, _NNS_FLOAT64);
1653 
1654  tmp = fabs ((tmp - *average) / *std);
1655 
1656  data_idx = out_element_size * i;
1657  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1658  (gpointer) (outptr + data_idx), out_info->type);
1659  }
1660  } else {
1661  for (ch = 0; ch < ch_size; ++ch) {
1662  for (i = 0; i < num / ch_size; i++) {
1663  data_idx = in_element_size * ((i * ch_size) + ch);
1664  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1665  in_info->type, &tmp, _NNS_FLOAT64);
1666 
1667  tmp = fabs ((tmp - average[ch]) / std[ch]);
1668 
1669  data_idx = out_element_size * ((i * ch_size) + ch);
1670  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1671  (gpointer) (outptr + data_idx), out_info->type);
1672  }
1673  }
1674  }
1675  break;
1676  }
1677  case STAND_DC_AVERAGE:
1678  {
1679  if (!filter->data_stand.per_channel) {
1680  for (i = 0; i < num; i++) {
1681  data_idx = in_element_size * i;
1682  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1683  in_info->type, &tmp, _NNS_FLOAT64);
1684 
1685  tmp -= *average;
1686 
1687  data_idx = out_element_size * i;
1688  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1689  (gpointer) (outptr + data_idx), out_info->type);
1690  }
1691  } else {
1692  for (ch = 0; ch < ch_size; ++ch) {
1693  for (i = 0; i < num / ch_size; i++) {
1694  data_idx = in_element_size * ((i * ch_size) + ch);
1695  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1696  in_info->type, &tmp, _NNS_FLOAT64);
1697 
1698  tmp -= average[ch];
1699 
1700  data_idx = out_element_size * ((i * ch_size) + ch);
1701  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1702  (gpointer) (outptr + data_idx), out_info->type);
1703  }
1704  }
1705  }
1706  break;
1707  }
1708  default:
1709  GST_ERROR_OBJECT (filter, "Cannot identify mode\n");
1710  ret = GST_FLOW_ERROR;
1711  }
1712 
1713  g_free (average);
1714  g_free (std);
1715 
1716  return ret;
1717 }
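/*
 * Editor's note: in formula form the two modes above are
 *   default:    out = |in - mean| / std
 *   dc-average: out = in - mean
 * where mean/std are computed over the whole tensor, or per channel along
 * dimension 0 when per-channel:true is given.
 */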
1718 
1730 static GstFlowReturn
1731 gst_tensor_transform_clamp (GstTensorTransform * filter,
1732  GstTensorInfo * in_info, GstTensorInfo * out_info,
1733  const uint8_t * inptr, uint8_t * outptr)
1734 {
1735  gsize in_element_size, out_element_size;
1736  gulong i, num, data_idx;
1737  gdouble tmp;
1738 
1739  in_element_size = gst_tensor_get_element_size (in_info->type);
1740  out_element_size = gst_tensor_get_element_size (out_info->type);
1741  num = gst_tensor_get_element_count (in_info->dimension);
1742 
1743  for (i = 0; i < num; ++i) {
1744  data_idx = in_element_size * i;
1745  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx), in_info->type,
1746  &tmp, _NNS_FLOAT64);
1747 
1748  tmp = CLAMP (tmp, filter->data_clamp.min, filter->data_clamp.max);
1749 
1750  data_idx = out_element_size * i;
1751  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64, outptr + data_idx,
1752  out_info->type);
1753  }
1754 
1755  return GST_FLOW_OK;
1756 }
1757 
1767 static GstFlowReturn
1768 gst_tensor_transform_padding (GstTensorTransform * filter,
1769  GstTensorInfo * in_info, GstTensorInfo * out_info, const uint8_t * inptr,
1770  uint8_t * outptr)
1771 {
1772  gsize element_size, in_loop_size, out_loop_size, copy_block_size;
1773  guint i, j, k, left, top, front, loop_limit = 1;
1774  element_size = gst_tensor_get_element_size (in_info->type);
1775 
1776  in_loop_size = (gsize) in_info->dimension[2] * in_info->dimension[1]
1777  * in_info->dimension[0] * element_size;
1778  out_loop_size = (gsize) out_info->dimension[2] * out_info->dimension[1]
1779  * out_info->dimension[0] * element_size;
1780  copy_block_size = in_info->dimension[0] * element_size;
1781 
1782  for (i = 3; i < NNS_TENSOR_RANK_LIMIT; i++) {
1783  if (in_info->dimension[i] == 0)
1784  break;
1785  loop_limit *= in_info->dimension[i];
1786  }
1787 
1788  left = filter->data_padding.pad[PADDING_LEFT];
1789  top = filter->data_padding.pad[PADDING_TOP];
1790  front = filter->data_padding.pad[PADDING_FRONT];
1791 
1793  memset (outptr, 0, out_loop_size * loop_limit);
1794 
1795  for (i = 0; i < loop_limit; i++)
1796  for (j = 0; j < in_info->dimension[2]; j++)
1797  for (k = 0; k < in_info->dimension[1]; k++) {
1798  guint in_idx = j * in_info->dimension[1] * in_info->dimension[0]
1799  + k * in_info->dimension[0];
1800  guint out_idx = j * out_info->dimension[1] * out_info->dimension[0]
1801  + k * out_info->dimension[0];
1802 
1803  out_idx += left + top * out_info->dimension[0]
1804  + front * out_info->dimension[1] * out_info->dimension[0];
1805 
1806  memcpy (outptr + out_idx * element_size + out_loop_size * i,
1807  inptr + in_idx * element_size + in_loop_size * i, copy_block_size);
1808  }
1809 
1810  return GST_FLOW_OK;
1811 }
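/*
 * Editor's note: pad[PADDING_LEFT]/[PADDING_RIGHT] grow dimension 0, top/bottom
 * grow dimension 1 and front/back grow dimension 2 (out_info->dimension is
 * expected to already hold the padded sizes). With layout:NHWC the left/right
 * and front/back pairs were swapped earlier in set_option_data, so "left" and
 * "right" still refer to the width axis of an NHWC tensor whose innermost
 * dimension is the channel.
 */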
1812 
1820 static GstFlowReturn
1821 gst_tensor_transform_transform (GstBaseTransform * trans,
1822  GstBuffer * inbuf, GstBuffer * outbuf)
1823 {
1824  GstTensorTransform *filter;
1825  GstTensorInfo *in_info, *out_info;
1826  GstFlowReturn res = GST_FLOW_ERROR;
1827  GstMemory *in_mem[NNS_TENSOR_SIZE_LIMIT] = { 0, };
1828  GstMemory *out_mem[NNS_TENSOR_SIZE_LIMIT] = { 0, };
1829  GstMapInfo in_map[NNS_TENSOR_SIZE_LIMIT];
1830  GstMapInfo out_map[NNS_TENSOR_SIZE_LIMIT];
1831  uint8_t *inptr, *outptr;
1832  guint i, num_tensors, num_mems;
1833  gsize buf_size, hsize;
1834  GstTensorMetaInfo meta;
1835  GstTensorInfo in_flex_info, out_flex_info;
1836  gboolean in_flexible, out_flexible;
1837 
1838  filter = GST_TENSOR_TRANSFORM_CAST (trans);
1839 
1840  g_return_val_if_fail (filter->loaded, GST_FLOW_ERROR);
1841  inbuf = gst_tensor_buffer_from_config (inbuf, &filter->in_config);
1842 
1843  in_flexible =
1844  gst_tensor_pad_caps_is_flexible (GST_BASE_TRANSFORM_SINK_PAD (trans));
1845  out_flexible =
1846  gst_tensor_pad_caps_is_flexible (GST_BASE_TRANSFORM_SRC_PAD (trans));
1847 
1848  num_mems = gst_tensor_buffer_get_count (inbuf);
1849  if (in_flexible) {
1850  num_tensors = num_mems;
1851  g_return_val_if_fail (out_flexible, GST_FLOW_ERROR);
1852  } else {
1853  num_tensors = filter->in_config.info.num_tensors;
1854  g_return_val_if_fail (num_mems == num_tensors, GST_FLOW_ERROR);
1855  }
1856 
1857  for (i = 0; i < num_tensors; i++) {
1858  in_info = gst_tensors_info_get_nth_info (&filter->in_config.info, i);
1859  out_info = gst_tensors_info_get_nth_info (&filter->out_config.info, i);
1860 
1861  if (filter->apply && !g_list_find (filter->apply, GINT_TO_POINTER (i))) {
1862  GstMemory *mem = gst_tensor_buffer_get_nth_memory (inbuf, i);
1863 
1864  if (!in_flexible && out_flexible) {
1865  GstMemory *old = mem;
1866 
1867  /* append meta */
1868  gst_tensor_info_convert_to_meta (out_info, &meta);
1869  mem = gst_tensor_meta_info_append_header (&meta, old);
1870  gst_memory_unref (old);
1871  }
1872 
1873  gst_tensor_buffer_append_memory (outbuf, mem, out_info);
1874  continue;
1875  }
1876 
1877  /* parse input buffer */
1878  in_mem[i] = gst_tensor_buffer_get_nth_memory (inbuf, i);
1879  if (!gst_memory_map (in_mem[i], &in_map[i], GST_MAP_READ)) {
1880  ml_loge ("Cannot map input buffer to gst-buf at tensor-transform.\n");
1881  res = GST_FLOW_ERROR;
1882  goto done;
1883  }
1884  inptr = in_map[i].data;
1885 
1886  if (in_flexible) {
1887  in_info = &in_flex_info;
1888  out_info = &out_flex_info;
1889 
1890  gst_tensor_meta_info_parse_header (&meta, inptr);
1892  if (!gst_tensor_meta_info_convert (&meta, in_info)) {
1893  res = GST_FLOW_ERROR;
1894  goto done;
1895  }
1896 
1897  gst_tensor_transform_convert_dimension (filter, GST_PAD_SINK,
1898  i, in_info, out_info);
1899 
1900  hsize = gst_tensor_meta_info_get_header_size (&meta);
1901  inptr += hsize;
1902  }
1903 
1904  /* prepare output buffer */
1905  buf_size = gst_tensor_info_get_size (out_info);
1906  if (out_flexible) {
1907  gst_tensor_info_convert_to_meta (out_info, &meta);
1908  hsize = gst_tensor_meta_info_get_header_size (&meta);
1909  buf_size += hsize;
1910  }
1911 
1912  out_mem[i] = gst_allocator_alloc (NULL, buf_size, NULL);
1913  gst_tensor_buffer_append_memory (outbuf, out_mem[i], out_info);
1914 
1915  if (!gst_memory_map (out_mem[i], &out_map[i], GST_MAP_WRITE)) {
1916  ml_loge ("Cannot map output buffer to gst-buf at tensor-transform.\n");
1917  res = GST_FLOW_ERROR;
1918  goto done;
1919  }
1920  outptr = out_map[i].data;
1921 
1922  if (out_flexible) {
1923  gst_tensor_meta_info_update_header (&meta, outptr);
1924  outptr += hsize;
1925  }
1926 
1927  switch (filter->mode) {
1928  case GTT_DIMCHG:
1929  res = gst_tensor_transform_dimchg (filter, in_info, out_info,
1930  inptr, outptr);
1931  break;
1932  case GTT_TYPECAST:
1933  res = gst_tensor_transform_typecast (filter, in_info, out_info,
1934  inptr, outptr);
1935  break;
1936  case GTT_ARITHMETIC:
1937  res = gst_tensor_transform_arithmetic (filter, in_info, out_info,
1938  inptr, outptr);
1939  break;
1940  case GTT_TRANSPOSE:
1941  res = gst_tensor_transform_transpose (filter, in_info, out_info,
1942  inptr, outptr);
1943  break;
1944  case GTT_STAND:
1945  res = gst_tensor_transform_stand (filter, in_info, out_info,
1946  inptr, outptr);
1947  break;
1948  case GTT_CLAMP:
1949  res = gst_tensor_transform_clamp (filter, in_info, out_info,
1950  inptr, outptr);
1951  break;
1952  case GTT_PADDING:
1953  res = gst_tensor_transform_padding (filter, in_info, out_info,
1954  inptr, outptr);
1955  break;
1956  default:
1957  ml_loge ("Not supported tensor transform mode");
1958  res = GST_FLOW_NOT_SUPPORTED;
1959  goto done;
1960  }
1961  }
1962 
1963 done:
1964  for (i = 0; i < num_tensors; i++) {
1965  if (in_mem[i]) {
1966  gst_memory_unmap (in_mem[i], &in_map[i]);
1967  gst_memory_unref (in_mem[i]);
1968  }
1969  if (out_mem[i])
1970  gst_memory_unmap (out_mem[i], &out_map[i]);
1971  }
1972 
1973  return res;
1974 }
1975 
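A minimal usage sketch, not part of this file: driving the transform above from application code with gst_parse_launch(). The surrounding elements (videotestsrc, tensor_converter, tensor_sink) and the pipeline string are illustrative assumptions; mode=typecast option=float32 casts each incoming tensor to float32 before it reaches the sink.

  /* sketch: build and run a pipeline that exercises tensor_transform */
  #include <gst/gst.h>

  int
  main (int argc, char *argv[])
  {
    GstElement *pipeline;
    GError *err = NULL;

    gst_init (&argc, &argv);

    pipeline = gst_parse_launch (
        "videotestsrc num-buffers=10 ! video/x-raw,format=RGB,width=224,height=224 ! "
        "tensor_converter ! tensor_transform mode=typecast option=float32 ! "
        "tensor_sink", &err);
    if (pipeline == NULL) {
      g_printerr ("pipeline error: %s\n", err ? err->message : "unknown");
      g_clear_error (&err);
      return 1;
    }

    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    /* a real application would wait for EOS or ERROR on the bus here */
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (pipeline);
    return 0;
  }
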
1983 static gboolean
1984 gst_tensor_transform_read_caps (GstTensorTransform * filter,
1985  const GstCaps * caps, GstTensorsConfig * config)
1986 {
1987  GstStructure *structure;
1988  g_return_val_if_fail (config != NULL, FALSE);
1989 
1990  structure = gst_caps_get_structure (caps, 0);
1991 
1992  if (!gst_tensors_config_from_structure (config, structure)) {
1993  GST_WARNING_OBJECT (filter, "caps is not tensor %s\n",
1994  gst_structure_get_name (structure));
1995  return FALSE;
1996  }
1997 
1998  return gst_tensors_config_validate (config);
1999 }
2000 
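For reference, a sketch (assumption, not taken from this file) of the kind of caps gst_tensor_transform_read_caps() consumes: a static other/tensors caps whose first structure carries the tensor count, dimensions, types, and framerate. The concrete field values below are illustrative only.

  /* sketch: a static other/tensors caps with one float32 tensor */
  #include <gst/gst.h>

  static GstCaps *
  example_tensor_caps (void)
  {
    return gst_caps_from_string (
        "other/tensors,format=static,num_tensors=1,"
        "dimensions=3:224:224:1,types=float32,framerate=30/1");
  }
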
2010 static gboolean
2011 gst_tensor_transform_convert_dimension (GstTensorTransform * filter,
2012  GstPadDirection direction, guint idx, const GstTensorInfo * in_info,
2013  GstTensorInfo * out_info)
2014 {
2015  guint i;
2016 
2017  /* copy input info first, then update output info */
2018  gst_tensor_info_copy (out_info, in_info);
2019 
2020  if (filter->apply && !g_list_find (filter->apply, GINT_TO_POINTER (idx)))
2021  return TRUE;
2022 
2023  switch (filter->mode) {
2024  case GTT_DIMCHG:
2025  {
2026  unsigned int from = filter->data_dimchg.from;
2027  unsigned int to = filter->data_dimchg.to;
2028 
2029  if (direction == GST_PAD_SINK) {
2030  for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
2031  if ((i < from && i < to) || (i > from && i > to) || from == to) {
2032  out_info->dimension[i] = in_info->dimension[i];
2033  } else if (i == to) {
2034  out_info->dimension[i] = in_info->dimension[from];
2035  } else if (from > to) {
2036  g_assert (i > 0 && i > to);
2037  out_info->dimension[i] = in_info->dimension[i - 1];
2038  } else {
2039  g_assert (i < to && i < (NNS_TENSOR_RANK_LIMIT - 1));
2040  out_info->dimension[i] = in_info->dimension[i + 1];
2041  }
2042  }
2043  } else {
2044  for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
2045  if ((i < from && i < to) || (i > from && i > to) || from == to) {
2046  out_info->dimension[i] = in_info->dimension[i];
2047  } else if (i == from) {
2048  out_info->dimension[i] = in_info->dimension[to];
2049  } else if (from > to) {
2050  g_assert (i < from && i < (NNS_TENSOR_RANK_LIMIT - 1));
2051  out_info->dimension[i] = in_info->dimension[i + 1];
2052  } else {
2053  g_assert (i > 0 && i > from);
2054  out_info->dimension[i] = in_info->dimension[i - 1];
2055  }
2056  }
2057  }
2058  break;
2059  }
2060  case GTT_TYPECAST:
2062  if (direction == GST_PAD_SINK) {
2064  out_info->type = filter->data_typecast.to;
2065  } else {
2066  /* cannot get the incoming data type on sink pad */
2067  out_info->type = _NNS_END;
2068  }
2069  break;
2070 
2071  case GTT_ARITHMETIC:
2072  /* check arith mode option has typecast operator */
2073  if (filter->data_arithmetic.out_type != _NNS_END) {
2074  if (direction == GST_PAD_SINK) {
2075  out_info->type = filter->data_arithmetic.out_type;
2076  } else {
2077  /* cannot get the incoming data type on sink pad */
2078  out_info->type = _NNS_END;
2079  }
2080  }
2081  break;
2082 
2083  case GTT_TRANSPOSE:
2084  if (direction == GST_PAD_SINK) {
2085  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
2086  out_info->dimension[i] =
2087  in_info->dimension[filter->data_transpose.trans_order[i]];
2088  }
2089  } else {
2090  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
2091  g_assert (filter->data_transpose.trans_order[i] <
2092  NNS_TENSOR_TRANSPOSE_RANK_LIMIT);
2093  out_info->dimension[filter->data_transpose.trans_order[i]] =
2094  in_info->dimension[i];
2095  }
2096  }
2097  break;
2098 
2099  case GTT_STAND:
2101  if (direction == GST_PAD_SINK) {
2102  if (filter->data_stand.out_type != _NNS_END)
2103  out_info->type = filter->data_stand.out_type;
2104  } else {
2105  /* cannot get the incoming data type on sink pad */
2106  out_info->type = _NNS_END;
2107  }
2108  break;
2109 
2110  case GTT_CLAMP:
2111  /* same tensors info, do nothing. */
2112  break;
2113 
2114  case GTT_PADDING:
2115  if (direction == GST_PAD_SINK) {
2116  out_info->dimension[0] +=
2117  filter->data_padding.pad[PADDING_LEFT] +
2118  filter->data_padding.pad[PADDING_RIGHT];
2119  out_info->dimension[1] +=
2120  filter->data_padding.pad[PADDING_TOP] +
2121  filter->data_padding.pad[PADDING_BOTTOM];
2122  out_info->dimension[2] +=
2123  filter->data_padding.pad[PADDING_FRONT] +
2124  filter->data_padding.pad[PADDING_BACK];
2125  }
2126  break;
2127  default:
2128  return FALSE;
2129  }
2130 
2131  return TRUE;
2132 }
2133 
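To make the GTT_DIMCHG index arithmetic above easier to follow, here is a standalone sketch of the sink-direction remapping, reduced to rank 4 for brevity (the element itself iterates up to NNS_TENSOR_RANK_LIMIT). It is illustrative only and mirrors the branch structure shown in the function.

  /* sketch: move dimension `from` to position `to`, shifting those between */
  #include <stdio.h>

  static void
  dimchg_forward (const unsigned int in[4], unsigned int out[4],
      unsigned int from, unsigned int to)
  {
    unsigned int i;

    for (i = 0; i < 4; i++) {
      if ((i < from && i < to) || (i > from && i > to) || from == to)
        out[i] = in[i];
      else if (i == to)
        out[i] = in[from];
      else if (from > to)
        out[i] = in[i - 1];
      else
        out[i] = in[i + 1];
    }
  }

  int
  main (void)
  {
    /* dimchg option "0:2" on an RGB frame: 3:640:480:1 becomes 640:480:3:1 */
    const unsigned int in[4] = { 3, 640, 480, 1 };
    unsigned int out[4];

    dimchg_forward (in, out, 0, 2);
    printf ("%u:%u:%u:%u\n", out[0], out[1], out[2], out[3]);
    return 0;
  }
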
2144 static GstCaps *
2145 gst_tensor_transform_transform_caps (GstBaseTransform * trans,
2146  GstPadDirection direction, GstCaps * caps, GstCaps * filtercap)
2147 {
2148  GstTensorTransform *filter;
2149  GstCaps *result = NULL;
2150  GstStructure *structure;
2151  guint i, j;
2152 
2153  filter = GST_TENSOR_TRANSFORM_CAST (trans);
2154 
2155  silent_debug (filter, "Calling TransformCaps, direction = %d\n", direction);
2156  silent_debug_caps (filter, caps, "from");
2157  silent_debug_caps (filter, filtercap, "filter");
2158 
2159  result = gst_caps_new_empty ();
2160  for (i = 0; i < gst_caps_get_size (caps); i++) {
2161  GstTensorsConfig in_config, out_config;
2162  GstTensorInfo *in_info, *out_info;
2163  gboolean is_types_not_fixed = FALSE;
2164  GstCaps *result_aux = gst_caps_new_empty ();
2165 
2166  gst_tensors_config_init (&out_config);
2167 
2168  structure = gst_caps_get_structure (caps, i);
2169  gst_tensors_config_from_structure (&in_config, structure);
2170 
2171  if (gst_tensors_config_is_flexible (&in_config)) {
2172  /* output caps is also flexible */
2173  out_config.info.format = _NNS_TENSOR_FORMAT_FLEXIBLE;
2174  } else {
2175  for (j = 0; j < in_config.info.num_tensors; j++) {
2176  in_info = gst_tensors_info_get_nth_info (&in_config.info, j);
2177  out_info = gst_tensors_info_get_nth_info (&out_config.info, j);
2178 
2179  gst_tensor_transform_convert_dimension (filter, direction,
2180  j, in_info, out_info);
2181  if (out_info->type == _NNS_END) {
2182  /* types cannot be specified */
2183  is_types_not_fixed = TRUE;
2184  }
2185  }
2186  }
2187 
2188  out_config.rate_d = in_config.rate_d;
2189  out_config.rate_n = in_config.rate_n;
2190  out_config.info.num_tensors = in_config.info.num_tensors;
2191 
2192  if (gst_structure_has_name (structure, NNS_MIMETYPE_TENSOR)) {
2193  gst_caps_append (result_aux, gst_tensor_caps_from_config (&out_config));
2194  } else {
2195  gst_caps_append (result_aux, gst_tensors_caps_from_config (&out_config));
2196 
2197  /* remove `types` field from caps */
2198  if (is_types_not_fixed) {
2199  GstStructure *s = gst_caps_get_structure (result_aux, 0);
2200  gst_structure_remove_field (s, "types");
2201  }
2202  }
2203 
2204  gst_caps_append (result, result_aux);
2205 
2206  gst_tensors_config_free (&in_config);
2207  gst_tensors_config_free (&out_config);
2208  }
2209 
2210  if (filtercap && gst_caps_get_size (filtercap) > 0) {
2211  GstCaps *intersection;
2212 
2213  intersection =
2214  gst_caps_intersect_full (result, filtercap, GST_CAPS_INTERSECT_FIRST);
2215 
2216  gst_caps_unref (result);
2217  result = intersection;
2218  }
2219 
2220  silent_debug_caps (filter, result, "to");
2221  return result;
2222 }
2223 
2227 static GstCaps *
2228 gst_tensor_transform_fixate_caps (GstBaseTransform * trans,
2229  GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
2230 {
2231  GstTensorTransform *filter;
2232  GstCaps *result;
2233 
2234  filter = GST_TENSOR_TRANSFORM_CAST (trans);
2235 
2236  silent_debug (filter, "Calling FixateCaps, direction = %d\n", direction);
2237  silent_debug_caps (filter, caps, "caps");
2238  silent_debug_caps (filter, othercaps, "othercaps");
2239 
2240  result =
2241  gst_tensor_transform_transform_caps (trans, direction, caps, othercaps);
2242  gst_caps_unref (othercaps);
2243 
2244  result = gst_caps_make_writable (result);
2245  result = gst_caps_fixate (result);
2246 
2247  silent_debug_caps (filter, result, "result");
2248  return result;
2249 }
2250 
2254 static gboolean
2255 gst_tensor_transform_set_caps (GstBaseTransform * trans,
2256  GstCaps * incaps, GstCaps * outcaps)
2257 {
2258  GstTensorTransform *filter;
2259  GstTensorsConfig in_config, out_config;
2260  GstTensorsConfig config;
2261  GstTensorInfo *in_info, *out_info;
2262  gboolean in_flexible, out_flexible;
2263  gboolean allowed = FALSE;
2264  guint i;
2265 
2266  filter = GST_TENSOR_TRANSFORM_CAST (trans);
2267 
2268  silent_debug (filter, "Calling SetCaps\n");
2269  silent_debug_caps (filter, incaps, "incaps");
2270  silent_debug_caps (filter, outcaps, "outcaps");
2271 
2272  if (!gst_tensor_transform_read_caps (filter, incaps, &in_config)) {
2273  GST_ERROR_OBJECT (filter, "Cannot read cap of incaps\n");
2274  goto error;
2275  }
2276 
2277  if (!gst_tensor_transform_read_caps (filter, outcaps, &out_config)) {
2278  GST_ERROR_OBJECT (filter, "Cannot read cap of outcaps\n");
2279  goto error;
2280  }
2281 
2282  in_flexible = gst_tensors_config_is_flexible (&in_config);
2283  out_flexible = gst_tensors_config_is_flexible (&out_config);
2284 
2285  /* compare type and dimension */
2286  gst_tensors_config_init (&config);
2287  config.info.format = out_config.info.format;
2288 
2289  config.rate_n = in_config.rate_n;
2290  config.rate_d = in_config.rate_d;
2291  config.info.num_tensors = in_config.info.num_tensors;
2292 
2293  if (!in_flexible) {
2294  for (i = 0; i < in_config.info.num_tensors; i++) {
2295  in_info = gst_tensors_info_get_nth_info (&in_config.info, i);
2296  out_info = gst_tensors_info_get_nth_info (&config.info, i);
2297 
2298  if (!gst_tensor_transform_convert_dimension (filter, GST_PAD_SINK,
2299  i, in_info, out_info)) {
2300  GST_ERROR_OBJECT (filter,
2301  "Tensor info is not matched with given properties.");
2302  goto error;
2303  }
2304  }
2305  }
2306 
2307  if (out_flexible) {
2308  GST_INFO_OBJECT (filter, "Output tensor is flexible.");
2309 
2310  /* set output configuration if input is static */
2311  if (!in_flexible)
2312  out_config = config;
2313  } else if (!gst_tensors_config_is_equal (&out_config, &config)) {
2314  GST_ERROR_OBJECT (filter,
2315  "Tensor info is not matched with given properties.\n");
2316  goto error;
2317  }
2318 
2319  /* set in/out tensor info */
2320  filter->in_config = in_config;
2321  filter->out_config = out_config;
2322  allowed = TRUE;
2323 
2324 error:
2325  if (!allowed)
2326  GST_ERROR_OBJECT (filter, "Set Caps Failed!\n");
2327 
2328  return allowed;
2329 }
2330 
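Caps negotiation above only succeeds when the configured mode and option describe a consistent conversion, so the element is normally configured before the pipeline reaches PLAYING. A sketch (assumption, not from this file) of doing so from code, using the common arithmetic normalization option; the option string must satisfy REGEX_ARITH_OPTION.

  /* sketch: configure tensor_transform for float32 normalization */
  #include <gst/gst.h>

  static GstElement *
  make_normalizer (void)
  {
    GstElement *t = gst_element_factory_make ("tensor_transform", NULL);

    if (t == NULL)
      return NULL;

    /* "mode" is an enum property; gst_util_set_object_arg() accepts its nick */
    gst_util_set_object_arg (G_OBJECT (t), "mode", "arithmetic");
    /* cast to float32, then map [0,255] approximately onto [-1,1] */
    g_object_set (t, "option", "typecast:float32,add:-127.5,div:127.5", NULL);
    return t;
  }
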
2334 static gboolean
2335 gst_tensor_transform_transform_size (GstBaseTransform * trans,
2336  GstPadDirection direction, GstCaps * caps, gsize size, GstCaps * othercaps,
2337  gsize * othersize)
2338 {
2339  UNUSED (trans);
2340  UNUSED (direction);
2341  UNUSED (caps);
2342  UNUSED (size);
2343  UNUSED (othercaps);
2348  *othersize = 0;
2349 
2350  return TRUE;
2351 }