gsttensor_transform.c
1 
46 #ifdef HAVE_CONFIG_H
47 #include <config.h>
48 #endif
49 
50 #include <string.h>
51 #include <math.h>
52 #include <nnstreamer_log.h>
53 #include <nnstreamer_util.h>
54 #include "gsttensor_transform.h"
55 
56 #ifdef HAVE_ORC
57 #include "nnstreamer-orc.h"
58 #endif
59 
63 #ifndef DBG
64 #define DBG (!filter->silent)
65 #endif
66 
67 GST_DEBUG_CATEGORY_STATIC (gst_tensor_transform_debug);
68 #define GST_CAT_DEFAULT gst_tensor_transform_debug
69 #define CAPS_STRING GST_TENSOR_CAP_DEFAULT ";" GST_TENSORS_CAP_MAKE ("{ static, flexible }")
70 #define REGEX_DIMCHG_OPTION "^([0-9]|1[0-5]):([0-9]|1[0-5])$"
71 #define REGEX_TYPECAST_OPTION "(^[u]?int(8|16|32|64)$|^float(16|32|64)$)"
72 #define REGEX_TRANSPOSE_OPTION "^(?:([0-2]):(?!.*\\1)){3}3$"
73 #define REGEX_STAND_OPTION "^(default|dc-average)(:([u]?int(8|16|32|64)|float(16|32|64)))?(,per-channel:(true|false))?$"
74 #define REGEX_CLAMP_OPTION "^((([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?))):"\
75  "((([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)))$"
76 #define REGEX_PADDING_OPTION "^((left|right|top|bottom|front|back):(\\d)(,)?)+(layout:(NCHW|NHWC))?$"
77 #define REGEX_ARITH_OPTION "^(typecast:([u]?int(8|16|32|64)|float(16|32|64)),)?"\
78  "(per-channel:(false|true@[0-9]+),)?"\
79  "(((add|mul|div)(:([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?))+(@[0-9]+)?)(,|))+$"
80 
81 #define REGEX_ARITH_OPTION_TYPECAST "(typecast:([u]?int(8|16|32|64)|float(16|32|64)))"
82 
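/* Editor's note (illustrative, not part of the original source): option strings
 * that satisfy the regular expressions above, for quick reference:
 *   dimchg     "0:2"
 *   typecast   "uint8", "float32"
 *   transpose  "1:2:0:3"
 *   stand      "default:float32,per-channel:true"
 *   clamp      "-1.0:1.0"
 *   padding    "left:1,right:1,layout:NHWC"
 *   arithmetic "typecast:float32,add:-127.5,div:127.5"
 */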
87 #define NNS_TENSOR_TRANSPOSE_RANK_LIMIT (4)
88 
93 #define NNS_TENSOR_PADDING_RANK_LIMIT (3)
94 
98 enum
99 {
100  PROP_0,
101  PROP_SILENT,
102  PROP_MODE,
103  PROP_OPTION,
104  PROP_ACCELERATION,
105  PROP_APPLY,
106  PROP_TRANSPOSE_RANK_LIMIT
107 };
108 
112 #ifdef HAVE_ORC
113 #define DEFAULT_ACCELERATION TRUE
114 #else
115 #define DEFAULT_ACCELERATION FALSE
116 #endif
117 
118 static const gchar *gst_tensor_transform_stand_string[] = {
119  [STAND_DEFAULT] = "default",
120  [STAND_DC_AVERAGE] = "dc-average",
121  [STAND_END] = NULL
122 };
123 
124 static const gchar *gst_tensor_transform_operator_string[] = {
125  [GTT_OP_TYPECAST] = "typecast",
126  [GTT_OP_ADD] = "add",
127  [GTT_OP_MUL] = "mul",
128  [GTT_OP_DIV] = "div",
129  [GTT_OP_UNKNOWN] = NULL
130 };
131 
135 static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
136  GST_PAD_SINK,
137  GST_PAD_ALWAYS,
138  GST_STATIC_CAPS (CAPS_STRING));
139 
143 static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
144  GST_PAD_SRC,
145  GST_PAD_ALWAYS,
146  GST_STATIC_CAPS (CAPS_STRING));
147 
148 #define gst_tensor_transform_parent_class parent_class
149 G_DEFINE_TYPE (GstTensorTransform, gst_tensor_transform,
150  GST_TYPE_BASE_TRANSFORM);
151 
152 /* GObject vmethod implementations */
153 static void gst_tensor_transform_set_property (GObject * object, guint prop_id,
154  const GValue * value, GParamSpec * pspec);
155 static void gst_tensor_transform_get_property (GObject * object, guint prop_id,
156  GValue * value, GParamSpec * pspec);
157 static void gst_tensor_transform_finalize (GObject * object);
158 
159 /* GstBaseTransformer vmethod implementations */
160 static GstFlowReturn gst_tensor_transform_transform (GstBaseTransform * trans,
161  GstBuffer * inbuf, GstBuffer * outbuf);
162 static GstCaps *gst_tensor_transform_transform_caps (GstBaseTransform * trans,
163  GstPadDirection direction, GstCaps * caps, GstCaps * filter);
164 static GstCaps *gst_tensor_transform_fixate_caps (GstBaseTransform * trans,
165  GstPadDirection direction, GstCaps * caps, GstCaps * othercaps);
166 static gboolean gst_tensor_transform_set_caps (GstBaseTransform * trans,
167  GstCaps * incaps, GstCaps * outcaps);
168 static gboolean gst_tensor_transform_transform_size (GstBaseTransform * trans,
169  GstPadDirection direction, GstCaps * caps, gsize size,
170  GstCaps * othercaps, gsize * othersize);
171 
172 static gboolean gst_tensor_transform_convert_dimension (GstTensorTransform *
173  filter, GstPadDirection direction, guint idx, const GstTensorInfo * in_info,
174  GstTensorInfo * out_info);
175 
176 #define GST_TYPE_TENSOR_TRANSFORM_MODE (gst_tensor_transform_mode_get_type ())
177 
181 static GType
182 gst_tensor_transform_mode_get_type (void)
183 {
184  static GType mode_type = 0;
185 
186  if (mode_type == 0) {
187  static GEnumValue mode_types[] = {
188  {GTT_DIMCHG, "Mode for changing tensor dimensions, "
189  "option=FROM_DIM:TO_DIM (with a regex, " REGEX_DIMCHG_OPTION
190  ", where NNS_TENSOR_RANK_LIMIT is 16)",
191  "dimchg"},
192  {GTT_TYPECAST, "Mode for casting type of tensor, "
193  "option=" REGEX_TYPECAST_OPTION, "typecast"},
194  {GTT_ARITHMETIC, "Mode for arithmetic operations with tensor, "
195  "option=[typecast:TYPE,][per-channel:(false|true@DIM),]add|mul|div:NUMBER[@CH_IDX], ...",
196  "arithmetic"},
197  {GTT_TRANSPOSE, "Mode for transposing shape of tensor, "
198  "option=D1\':D2\':D3\':D4 (fixed to 3)",
199  "transpose"},
200  {GTT_STAND, "Mode for statistical standardization of tensor, "
201  "option=(default|dc-average)[:TYPE][,per-channel:(false|true)]",
202  "stand"},
203  {GTT_CLAMP, "Mode for clamping all elements of tensor into the range, "
204  "option=CLAMP_MIN:CLAMP_MAX",
205  "clamp"},
206  {GTT_PADDING, "Mode for padding of tensor, "
207  "option=left|right|top|bottom|front|back:NUMBER[,layout:(NCHW|NHWC)]",
208  "padding"},
209  {GTT_UNKNOWN, "Unknown or not-implemented-yet mode",
210  "unknown"},
211  {0, NULL, NULL},
212  };
213 
214  mode_type = g_enum_register_static ("gtt_mode_type", mode_types);
215  }
216 
217  return mode_type;
218 }
219 
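/* Editor's note (illustrative, not part of the original source): the mode and
 * option values registered above are usually supplied in a pipeline
 * description, e.g.
 *
 *   gst-launch-1.0 videotestsrc ! video/x-raw,format=RGB ! tensor_converter ! \
 *       tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! \
 *       tensor_sink
 *
 * or programmatically, e.g. g_object_set (trans, "mode", GTT_TYPECAST,
 * "option", "uint8", NULL). The surrounding elements (tensor_converter,
 * tensor_sink) are assumptions for illustration; only the properties of this
 * element are defined in this file. */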
223 static void
224 gst_tensor_transform_class_init (GstTensorTransformClass * klass)
225 {
226  GObjectClass *gobject_class;
227  GstElementClass *gstelement_class;
228  GstBaseTransformClass *trans_class;
229 
230  GST_DEBUG_CATEGORY_INIT (gst_tensor_transform_debug, "tensor_transform", 0,
231  "Element to transforms tensor dimension or type");
232 
233  trans_class = (GstBaseTransformClass *) klass;
234  gstelement_class = (GstElementClass *) trans_class;
235  gobject_class = (GObjectClass *) gstelement_class;
236 
237  gobject_class->set_property = gst_tensor_transform_set_property;
238  gobject_class->get_property = gst_tensor_transform_get_property;
239  gobject_class->finalize = gst_tensor_transform_finalize;
240 
241  g_object_class_install_property (gobject_class, PROP_SILENT,
242  g_param_spec_boolean ("silent", "Silent", "Produce verbose output ?",
243  FALSE, G_PARAM_READWRITE));
244  g_object_class_install_property (gobject_class, PROP_MODE,
245  g_param_spec_enum ("mode", "Mode", "Mode used for transforming tensor",
246  GST_TYPE_TENSOR_TRANSFORM_MODE, GTT_UNKNOWN,
247  G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
248  g_object_class_install_property (gobject_class, PROP_OPTION,
249  g_param_spec_string ("option", "Option",
250  "Option for the tensor transform mode ?", "", G_PARAM_READWRITE));
251  g_object_class_install_property (gobject_class, PROP_ACCELERATION,
252  g_param_spec_boolean ("acceleration", "Acceleration", "Orc acceleration",
253  DEFAULT_ACCELERATION, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
254  g_object_class_install_property (gobject_class, PROP_APPLY,
255  g_param_spec_string ("apply", "Apply", "Select tensors to apply, "
256  "separated with ',' in case of multiple tensors. Default to apply all tensors.",
257  "", G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
258  g_object_class_install_property (gobject_class, PROP_TRANSPOSE_RANK_LIMIT,
259  g_param_spec_uint ("transpose-rank-limit", "Transpose rank limit",
260  "The rank limit of transpose, which varies per version of nnstreamer and may be lower than the global rank limit if it is over 4.",
261  0, NNS_TENSOR_RANK_LIMIT, NNS_TENSOR_TRANSPOSE_RANK_LIMIT,
262  G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
263 
264  gst_element_class_set_details_simple (gstelement_class,
265  "TensorTransform",
266  "Filter/Tensor",
267  "Transforms other/tensor dimensions for different models or frameworks",
268  "MyungJoo Ham <myungjoo.ham@samsung.com>");
269 
270  gst_element_class_add_pad_template (gstelement_class,
271  gst_static_pad_template_get (&src_factory));
272  gst_element_class_add_pad_template (gstelement_class,
273  gst_static_pad_template_get (&sink_factory));
274  /* Refer: https://gstreamer.freedesktop.org/documentation/design/element-transform.html */
275  trans_class->passthrough_on_same_caps = FALSE;
276 
277  /* Processing units */
278  trans_class->transform = GST_DEBUG_FUNCPTR (gst_tensor_transform_transform);
279 
280  /* Negotiation units */
281  trans_class->transform_caps =
282  GST_DEBUG_FUNCPTR (gst_tensor_transform_transform_caps);
283  trans_class->fixate_caps =
284  GST_DEBUG_FUNCPTR (gst_tensor_transform_fixate_caps);
285  trans_class->set_caps = GST_DEBUG_FUNCPTR (gst_tensor_transform_set_caps);
286 
287  /* Allocation units */
288  trans_class->transform_size =
289  GST_DEBUG_FUNCPTR (gst_tensor_transform_transform_size);
290 }
291 
298 static void
299 gst_tensor_transform_init (GstTensorTransform * filter)
300 {
301  filter->silent = TRUE;
302  filter->mode = GTT_UNKNOWN;
303  filter->option = NULL;
304  filter->loaded = FALSE;
305  filter->operators = NULL;
306  filter->acceleration = DEFAULT_ACCELERATION;
307  filter->apply = NULL;
308 
309  gst_tensors_config_init (&filter->in_config);
310  gst_tensors_config_init (&filter->out_config);
311 }
312 
318 static tensor_transform_operator
319 gst_tensor_transform_get_operator (const gchar * str)
320 {
321  int index;
322 
323  index = find_key_strv (gst_tensor_transform_operator_string, str);
324 
325  return (index < 0) ? GTT_OP_UNKNOWN : index;
326 }
327 
333 static tensor_transform_stand_mode
334 gst_tensor_transform_get_stand_mode (const gchar * str)
335 {
336  int index;
337 
338  index = find_key_strv (gst_tensor_transform_stand_string, str);
339 
340  return (index < 0) ? STAND_END : index;
341 }
342 
343 #ifndef FLOAT16_SUPPORT
344 
347 static void
348 float16_not_supported (void)
349 {
350  ml_loge
351  ("Tensor_transform does not support float16 operators. Apply -Denable-float16=true for meson build option if your architecture support float16. Note that tensor-transform's float16 is adhoc and does NOT perform good (slow!).\n");
352  g_assert (0);
353 }
354 #endif
355 
356 #ifdef FLOAT16_SUPPORT
357 
361 static void
362 refrain_from_heavy_op_on_float16 (gulong n)
363 {
364  static int warned = 0;
365  /* 1 million */
366  if (n > 1000000) {
367  if (warned)
368  return;
369  ml_logw
370  ("Tensor_transform implementation for float16 does not support SIMD. Heavy tensor-transform operations of float16 is not recommended. Try to apply heavy ops with other types (e.g., float32) and convert it to float16 at the time when it's really needed.\n");
371  warned = 1;
372  }
373 }
374 
376 #define _conv_to_f16(intype, o, i, n) \
377  do { \
378  float16 *op = (gpointer) (o); \
379  intype *ip = (gpointer) (i); \
380  gulong idx; \
381  refrain_from_heavy_op_on_float16 (n); \
382  for (idx = 0; idx < n; idx++) \
383  *(op + idx) = (float16) *(ip + idx); \
384  } while (0)
385 
387 #define _conv_from_f16_action(n, op, ip, otypename) \
388  do { \
389  gulong idx; \
390  for (idx = 0; idx < n; idx++) \
391  *(op + idx) = (otypename) *(ip + idx); \
392  } while (0)
393 
395 #define _conv_from_f16(otype, o, i, n) \
396  do { \
397  float16 *ip = (gpointer) (i); \
398  refrain_from_heavy_op_on_float16 (n); \
399  switch (otype) { \
400  case _NNS_INT32: { \
401  int32_t *op = (gpointer) (o); \
402  _conv_from_f16_action (n, op, ip, int32_t); \
403  break; } \
404  case _NNS_UINT32: { \
405  uint32_t *op = (gpointer) (o); \
406  _conv_from_f16_action (n, op, ip, uint32_t); \
407  break; } \
408  case _NNS_INT16: { \
409  int16_t *op = (gpointer) (o); \
410  _conv_from_f16_action (n, op, ip, int16_t); \
411  break; } \
412  case _NNS_UINT16: { \
413  uint16_t *op = (gpointer) (o); \
414  _conv_from_f16_action (n, op, ip, uint16_t); \
415  break; } \
416  case _NNS_INT8: { \
417  int8_t *op = (gpointer) (o); \
418  _conv_from_f16_action (n, op, ip, int8_t); \
419  break; } \
420  case _NNS_UINT8: { \
421  uint8_t *op = (gpointer) (o); \
422  _conv_from_f16_action (n, op, ip, uint8_t); \
423  break; } \
424  case _NNS_FLOAT64: { \
425  double *op = (gpointer) (o); \
426  _conv_from_f16_action (n, op, ip, double); \
427  break; } \
428  case _NNS_FLOAT32: { \
429  float *op = (gpointer) (o); \
430  _conv_from_f16_action (n, op, ip, float); \
431  break; } \
432  case _NNS_FLOAT16: { \
433  float16 *op = (gpointer) (o); \
434  _conv_from_f16_action (n, op, ip, float16); \
435  break; } \
436  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", (otype)); g_assert (0); \
437  } \
438  } while (0)
439 
441 #define _op_float16(i, n, v, op) \
442  do { \
443  gulong idx; \
444  float16 *data_in = (float16 *) (i); \
445  refrain_from_heavy_op_on_float16 (n); \
446  switch (op) { \
447  case GTT_OP_ADD: \
448  for (idx = 0; idx < n; idx++) \
449  data_in[idx] = data_in[idx] + (v); \
450  break; \
451  case GTT_OP_MUL: \
452  for (idx = 0; idx < n; idx++) \
453  data_in[idx] = data_in[idx] * (v); \
454  break; \
455  case GTT_OP_DIV: \
456  for (idx = 0; idx < n; idx++) \
457  data_in[idx] = data_in[idx] / (v); \
458  break; \
459  default: GST_ERROR_OBJECT (filter, "Unknown operator for float16: %d", op); break; \
460  } \
461  } while (0)
462 
463 #else /* ! FLOAT16_SUPPORT */
464 #define _conv_to_f16(intype, o, i, n) do { float16_not_supported (); } while (0)
465 #define _conv_from_f16(otype, o, i, n) do { float16_not_supported (); } while (0)
466 #define _op_float16(i, n, v, op) do { float16_not_supported (); } while (0)
467 #endif /* FLOAT16_SUPPORT */
468 
469 #ifdef HAVE_ORC
470 /* define macros for orc */
472 #define type_64bit_integer(t) ((t) == _NNS_INT64 || (t) == _NNS_UINT64)
473 #define orc_supported(f,itype,otype) ((f)->acceleration && !(type_64bit_integer (itype) || type_64bit_integer (otype)))
474 
475 #define orc_func_conv(intype,outtype) nns_orc_conv_
476 #define orc_func_add(intype) nns_orc_add_c_
477 #define orc_func_mul(intype) nns_orc_mul_c_
478 #define orc_func_div(intype) nns_orc_div_c_
479 
480 #define orc_typecast_to(i,o,n,intype,otype,intypename) do { \
481  switch (otype) { \
482  case _NNS_INT32: orc_func_conv (intype, s32) ((gpointer) o, (gpointer) i, n); break; \
483  case _NNS_UINT32: orc_func_conv (intype, u32) ((gpointer) o, (gpointer) i, n); break; \
484  case _NNS_INT16: orc_func_conv (intype, s16) ((gpointer) o, (gpointer) i, n); break; \
485  case _NNS_UINT16: orc_func_conv (intype, u16) ((gpointer) o, (gpointer) i, n); break; \
486  case _NNS_INT8: orc_func_conv (intype, s8) ((gpointer) o, (gpointer) i, n); break; \
487  case _NNS_UINT8: orc_func_conv (intype, u8) ((gpointer) o, (gpointer) i, n); break; \
488  case _NNS_FLOAT64: orc_func_conv (intype, f64) ((gpointer) o, (gpointer) i, n); break; \
489  case _NNS_FLOAT32: orc_func_conv (intype, f32) ((gpointer) o, (gpointer) i, n); break; \
490  case _NNS_FLOAT16: _conv_to_f16 (intypename, o, i, n); break; \
491  default: GST_ERROR_OBJECT (filter, "Unsupported output type %d", otype); g_assert (0); break; \
492  } \
493  } while (0)
494 
495 #define orc_typecast(i,o,n,itype,otype) do { \
496  switch (itype) { \
497  case _NNS_INT32: orc_typecast_to (i, o, n, s32, otype, int32_t); break; \
498  case _NNS_UINT32: orc_typecast_to (i, o, n, u32, otype, uint32_t); break; \
499  case _NNS_INT16: orc_typecast_to (i, o, n, s16, otype, int16_t); break; \
500  case _NNS_UINT16: orc_typecast_to (i, o, n, u16, otype, uint16_t); break; \
501  case _NNS_INT8: orc_typecast_to (i, o, n, s8, otype, int8_t); break; \
502  case _NNS_UINT8: orc_typecast_to (i, o, n, u8, otype, uint8_t); break; \
503  case _NNS_FLOAT64: orc_typecast_to (i, o, n, f64, otype, double); break; \
504  case _NNS_FLOAT32: orc_typecast_to (i, o, n, f32, otype, float); break; \
505  case _NNS_FLOAT16: _conv_from_f16 (otype, o, i, n); break; \
506  default: GST_ERROR_OBJECT (filter, "Unsupported input type %d", itype); g_assert (0); break; \
507  } \
508  } while (0)
509 
510 #define orc_typesize(size, type) do { \
511  switch (type) { \
512  case _NNS_INT32: size = sizeof(int32_t); break; \
513  case _NNS_UINT32: size = sizeof(uint32_t); break; \
514  case _NNS_INT16: size = sizeof(int16_t); break; \
515  case _NNS_UINT16: size = sizeof(uint16_t); break; \
516  case _NNS_INT8: size = sizeof(int8_t); break; \
517  case _NNS_UINT8: size = sizeof(uint8_t); break; \
518  case _NNS_FLOAT64: size = sizeof(double); break; \
519  case _NNS_FLOAT32: size = sizeof(float); break; \
520  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", type); g_assert (0); break; \
521  } \
522  } while (0)
523 
524 #define orc_operator_func(i,n,v,opfunc,op) do { \
525  switch ((v)->type) { \
526  case _NNS_INT32: opfunc (s32) ((gpointer) i, (v)->data._int32_t, n); break; \
527  case _NNS_UINT32: opfunc (u32) ((gpointer) i, (v)->data._uint32_t, n); break; \
528  case _NNS_INT16: opfunc (s16) ((gpointer) i, (v)->data._int16_t, n); break; \
529  case _NNS_UINT16: opfunc (u16) ((gpointer) i, (v)->data._uint16_t, n); break; \
530  case _NNS_INT8: opfunc (s8) ((gpointer) i, (v)->data._int8_t, n); break; \
531  case _NNS_UINT8: opfunc (u8) ((gpointer) i, (v)->data._uint8_t, n); break; \
532  case _NNS_FLOAT64: opfunc (f64) ((gpointer) i, (v)->data._double, n); break; \
533  case _NNS_FLOAT32: opfunc (f32) ((gpointer) i, (v)->data._float, n); break; \
534  case _NNS_FLOAT16: _op_float16 (i, n, (v)->data._float16, op); break; \
535  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", (v)->type); g_assert (0); break; \
536  } \
537  } while (0)
538 
539 #define orc_operator_div_loop(i,n,val,typename) do { \
540  gsize idx_div; \
541  typename *data_in = (typename *) (i); \
542  for (idx_div = 0; idx_div < (n); ++idx_div) { \
543  data_in[idx_div] = data_in[idx_div] / (val); \
544  } \
545  } while (0)
546 
547 #define orc_operator(i,n,v,op) do { \
548  switch (op) { \
549  case GTT_OP_ADD: orc_operator_func (i, n, v, orc_func_add, op); break; \
550  case GTT_OP_MUL: orc_operator_func (i, n, v, orc_func_mul, op); break; \
551  case GTT_OP_DIV: \
552  switch ((v)->type) { \
553  case _NNS_INT32: orc_operator_div_loop (i, n, (v)->data._int32_t, int32_t); break; \
554  case _NNS_UINT32: orc_operator_div_loop (i, n, (v)->data._uint32_t, uint32_t); break; \
555  case _NNS_INT16: orc_operator_div_loop (i, n, (v)->data._int16_t, int16_t); break; \
556  case _NNS_UINT16: orc_operator_div_loop (i, n, (v)->data._uint16_t, uint16_t); break; \
557  case _NNS_INT8: orc_operator_div_loop (i, n, (v)->data._int8_t, int8_t); break; \
558  case _NNS_UINT8: orc_operator_div_loop (i, n, (v)->data._uint8_t, uint8_t); break; \
559  case _NNS_FLOAT64: orc_func_div (f64) ((gpointer) i, (v)->data._double, n); break; \
560  case _NNS_FLOAT32: orc_func_div (f32) ((gpointer) i, (v)->data._float, n); break; \
561  case _NNS_FLOAT16: _op_float16 (i, n, (v)->data._float16, op); break; \
562  default: GST_ERROR_OBJECT (filter, "Unsupported type %d", (v)->type); g_assert (0); break; \
563  } \
564  break; \
565  default: GST_ERROR_OBJECT (filter, "Unknown operator %d", op); break; \
566  } \
567  } while (0)
568 #endif /* HAVE_ORC */
569 
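/* Editor's note (not part of the original source): orc_supported() above gates
 * every Orc fast path; it requires the "acceleration" property to be TRUE and
 * neither the input nor the output type to be a 64-bit integer. When it
 * evaluates to FALSE, the transform falls back to the scalar per-element loops
 * implemented further below, and float16 is always handled by the adhoc
 * _conv_*_f16 / _op_float16 helpers rather than by Orc. */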
573 #define handle_operator(d,v,oper,vtype) do { \
574  switch (oper) { \
575  case GTT_OP_ADD: \
576  (d)->data._##vtype += (v)->data._##vtype; \
577  break; \
578  case GTT_OP_MUL: \
579  (d)->data._##vtype *= (v)->data._##vtype; \
580  break; \
581  case GTT_OP_DIV: \
582  if ((v)->data._##vtype == 0) { \
583  GST_ERROR_OBJECT (filter, "Invalid state, denominator is 0."); \
584  return FALSE; \
585  } \
586  (d)->data._##vtype /= (v)->data._##vtype; \
587  break; \
588  default: \
589  GST_ERROR_OBJECT (filter, "Unknown operator %d", oper); \
590  return FALSE; \
591  } \
592  } while (0)
593 
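/* Editor's note (illustrative, not part of the original source): a sketch of
 * how the token pasting in handle_operator() resolves for one concrete type.
 * With vtype = int32_t and oper = GTT_OP_DIV, the macro expands roughly to:
 *
 *   if ((v)->data._int32_t == 0) {
 *     GST_ERROR_OBJECT (filter, "Invalid state, denominator is 0.");
 *     return FALSE;
 *   }
 *   (d)->data._int32_t /= (v)->data._int32_t;
 *
 * i.e. the _##vtype member of the tensor_data_s union is selected at compile
 * time for each supported tensor type. */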
602 static gboolean
603 gst_tensor_transform_do_operator (GstTensorTransform * filter,
604  tensor_data_s * desc, const tensor_data_s * val,
605  tensor_transform_operator op)
606 {
607  g_return_val_if_fail (desc != NULL, FALSE);
608  g_return_val_if_fail (val != NULL, FALSE);
609  g_return_val_if_fail (desc->type == val->type, FALSE);
610 
611  switch (desc->type) {
612  case _NNS_INT32:
613  handle_operator (desc, val, op, int32_t);
614  break;
615  case _NNS_UINT32:
616  handle_operator (desc, val, op, uint32_t);
617  break;
618  case _NNS_INT16:
619  handle_operator (desc, val, op, int16_t);
620  break;
621  case _NNS_UINT16:
622  handle_operator (desc, val, op, uint16_t);
623  break;
624  case _NNS_INT8:
625  handle_operator (desc, val, op, int8_t);
626  break;
627  case _NNS_UINT8:
628  handle_operator (desc, val, op, uint8_t);
629  break;
630  case _NNS_FLOAT64:
631  handle_operator (desc, val, op, double);
632  break;
633  case _NNS_FLOAT32:
634  handle_operator (desc, val, op, float);
635  break;
636  case _NNS_FLOAT16:
637 #ifdef FLOAT16_SUPPORT
638  handle_operator (desc, val, op, float16);
639 #else
640  float16_not_supported ();
641 #endif
642  break;
643  case _NNS_INT64:
644  handle_operator (desc, val, op, int64_t);
645  break;
646  case _NNS_UINT64:
647  handle_operator (desc, val, op, uint64_t);
648  break;
649  default:
650  GST_ERROR_OBJECT (filter, "Unknown tensor type %d", desc->type);
651  return FALSE;
652  }
653 
654  return TRUE;
655 }
656 
662 static gboolean
663 gst_tensor_transform_set_option_data (GstTensorTransform * filter)
664 {
665  gchar *filter_name;
666  gboolean ret = FALSE;
667 
668  if (filter->mode == GTT_UNKNOWN || filter->option == NULL)
669  return TRUE;
670 
671  filter_name = gst_object_get_name ((GstObject *) filter);
672 
673  switch (filter->mode) {
674  case GTT_DIMCHG:
675  {
676  gchar **strv = NULL;
677 
678  if (!g_regex_match_simple (REGEX_DIMCHG_OPTION, filter->option,
679  G_REGEX_CASELESS, 0)) {
680  ml_loge
681  ("%s: dimchg: \'%s\' is not valid option string: it should be in the form of IDX_DIM_FROM:IDX_DIM_TO: with a regex, "
682  REGEX_DIMCHG_OPTION "\n", filter_name, filter->option);
683  break;
684  }
685 
686  strv = g_strsplit (filter->option, ":", 2);
687 
688  filter->data_dimchg.from = (int) g_ascii_strtoll (strv[0], NULL, 10);
689  filter->data_dimchg.to = (int) g_ascii_strtoll (strv[1], NULL, 10);
690  ret = filter->loaded = TRUE;
691  g_strfreev (strv);
692  break;
693  }
694  case GTT_TYPECAST:
695  {
696  if (g_regex_match_simple (REGEX_TYPECAST_OPTION, filter->option,
697  G_REGEX_CASELESS, 0)) {
698  filter->data_typecast.to = gst_tensor_get_type (filter->option);
699  ret = filter->loaded = TRUE;
700  } else {
701  ml_loge
702  ("%s: typecast: \'%s\' is not valid data type for tensor: data type of tensor should be one of %s\n",
703  filter_name, filter->option, GST_TENSOR_TYPE_ALL);
704  }
705  break;
706  }
707  case GTT_ARITHMETIC:
708  {
709  gchar *str_option;
710  gchar **str_operators;
711  gchar **str_op;
712  tensor_transform_operator_s *op_s;
713  guint i, num_operators, num_op;
714  GRegex *regex_option_tc;
715 
716  filter->data_arithmetic.out_type = _NNS_END;
717  filter->data_arithmetic.per_channel_arith = FALSE;
718 
719  if (filter->operators) {
720  GST_WARNING_OBJECT (filter,
721  "There exists pre-defined operators (total %d), now reset these.",
722  g_slist_length (filter->operators));
723 
724  g_slist_free_full (filter->operators, g_free);
725  filter->operators = NULL;
726  }
727 
728  regex_option_tc = g_regex_new (REGEX_ARITH_OPTION_TYPECAST,
729  G_REGEX_CASELESS, 0, 0);
730 
731  if (!regex_option_tc) {
732  GST_ERROR_OBJECT (filter,
733  "arithmetic: failed to create a GRegex structure for %s\n",
734  REGEX_ARITH_OPTION_TYPECAST);
735  break;
736  }
737 
738  if (g_regex_match_full (regex_option_tc, filter->option, -1,
739  1, 0, NULL, NULL)) {
740  str_option = g_regex_replace (regex_option_tc, filter->option, -1, 1,
741  "", 0, 0);
742  ml_loge
743  ("%s: arithmetic: [typecast:TYPE,] should be located at the first to prevent memory re-allocation: typecast(s) in the middle of \'%s\' will be ignored\n",
744  filter_name, filter->option);
745  } else {
746  str_option = g_strdup (filter->option);
747  }
748  g_regex_unref (regex_option_tc);
749 
750  if (!g_regex_match_simple (REGEX_ARITH_OPTION, str_option,
751  G_REGEX_CASELESS, 0)) {
752  ml_loge
753  ("%s: arithmetic: \'%s\' is not valid option string: it should be in the form of [typecast:TYPE,][per-channel:(false|true@DIM),]add|mul|div:NUMBER[@CH_IDX]..., ...\n",
754  filter_name, str_option);
755  g_free (str_option);
756  break;
757  }
758  str_operators = g_strsplit (str_option, ",", -1);
759  num_operators = g_strv_length (str_operators);
760 
761  for (i = 0; i < num_operators; ++i) {
762  str_op = g_strsplit (str_operators[i], ":", -1);
763  num_op = g_strv_length (str_op);
764 
765  if (str_op[0]) {
766  gchar **values = g_strsplit (str_op[1], "@", -1);
767  guint num_values = g_strv_length (values);
768 
769  /* check whether per-channel */
770  if (g_ascii_strcasecmp (str_op[0], "per-channel") == 0) {
771  if (num_values > 1 && g_ascii_strcasecmp (values[0], "true") == 0) {
772  ml_logi
773  ("Set per-channel for arithmetic and assume that %s-th dim is the channel",
774  values[1]);
775  filter->data_arithmetic.per_channel_arith = TRUE;
776  filter->data_arithmetic.ch_dim =
777  (guint) g_ascii_strtoull (values[1], NULL, 10);
778  }
779 
780  g_strfreev (values);
781  g_strfreev (str_op);
782  continue;
783  }
784 
785  op_s = g_new0 (tensor_transform_operator_s, 1);
786  g_assert (op_s);
787 
788  op_s->op = gst_tensor_transform_get_operator (str_op[0]);
789  op_s->applying_ch = -1; /* -1 means applying to all channels */
790  switch (op_s->op) {
791  case GTT_OP_TYPECAST:
792  if (num_op > 1 && str_op[1]) {
793  op_s->value.type = gst_tensor_get_type (values[0]);
794  filter->data_arithmetic.out_type = op_s->value.type;
795  } else {
796  GST_WARNING_OBJECT (filter, "Invalid option for typecast %s",
797  str_operators[i]);
798  op_s->op = GTT_OP_UNKNOWN;
799  }
800  break;
801  case GTT_OP_ADD:
802  case GTT_OP_MUL:
803  case GTT_OP_DIV:
804  if (num_op > 1 && str_op[1]) {
805  /* get operand */
806  if (strchr (values[0], '.') || strchr (values[0], 'e') ||
807  strchr (values[0], 'E')) {
808  double val;
809 
810  val = g_ascii_strtod (values[0], NULL);
811  gst_tensor_data_set (&op_s->value, _NNS_FLOAT64, &val);
812  } else {
813  int64_t val;
814 
815  val = g_ascii_strtoll (values[0], NULL, 10);
816  gst_tensor_data_set (&op_s->value, _NNS_INT64, &val);
817  }
818 
819  if (filter->data_arithmetic.per_channel_arith && num_values > 1) {
820  op_s->applying_ch = g_ascii_strtoll (values[1], NULL, 10);
821  }
822 
823  } else {
824  GST_WARNING_OBJECT (filter,
825  "Invalid option for arithmetic %s", str_operators[i]);
826  op_s->op = GTT_OP_UNKNOWN;
827  }
828  break;
829  default:
830  GST_WARNING_OBJECT (filter, "Unknown operator %s", str_op[0]);
831  break;
832  }
833 
834  /* append operator */
835  if (op_s->op != GTT_OP_UNKNOWN) {
836  filter->operators = g_slist_append (filter->operators, op_s);
837  } else {
838  g_free (op_s);
839  }
840 
841  g_strfreev (values);
842  } else {
843  GST_WARNING_OBJECT (filter, "Invalid option %s", str_operators[i]);
844  }
845 
846  g_strfreev (str_op);
847  }
848 
849  ret = filter->loaded = (filter->operators != NULL);
850  g_strfreev (str_operators);
851  g_free (str_option);
852  break;
853  }
854  case GTT_TRANSPOSE:
855  {
856  int i;
857  gchar **strv = NULL;
858 
859  if (!g_regex_match_simple (REGEX_TRANSPOSE_OPTION, filter->option,
860  G_REGEX_CASELESS, 0)) {
861  ml_loge
862  ("%s: transpose: \'%s\' is not valid option string: it should be in the form of NEW_IDX_DIM0:NEW_IDX_DIM1:NEW_IDX_DIM2:3 (Now transpose mode's rank is fixed to 3. Note that the index of the last dim is always fixed to 3)\n",
863  filter_name, filter->option);
864  break;
865  }
866 
867  strv = g_strsplit (filter->option, ":", NNS_TENSOR_TRANSPOSE_RANK_LIMIT);
868  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
869  filter->data_transpose.trans_order[i] =
870  (uint8_t) g_ascii_strtoull (strv[i], NULL, 10);
871  }
872 
873  ret = filter->loaded = TRUE;
874  g_strfreev (strv);
875  break;
876  }
877  case GTT_STAND:
878  {
879  gchar **options = NULL;
880  guint i, num_options;
881 
882  if (!g_regex_match_simple (REGEX_STAND_OPTION, filter->option,
883  G_REGEX_CASELESS, 0)) {
884  ml_loge
885  ("%s: stand: \'%s\' is not a valid option string: it should be in the form of (default|dc-average)[:TYPE][,per-channel:(false|true)]\n",
886  filter_name, filter->option);
887  break;
888  }
889 
890  filter->data_stand.out_type = _NNS_END;
891  filter->data_stand.per_channel = FALSE;
892 
893  options = g_strsplit (filter->option, ",", -1);
894  num_options = g_strv_length (options);
895 
896  for (i = 0; i < num_options; i++) {
897  gchar **strv = g_strsplit (options[i], ":", -1);
898 
899  if (g_ascii_strcasecmp (strv[0], "default") == 0 ||
900  g_ascii_strcasecmp (strv[0], "dc-average") == 0) {
901  filter->data_stand.mode =
902  gst_tensor_transform_get_stand_mode (strv[0]);
903  if (g_strv_length (strv) > 1)
904  filter->data_stand.out_type = gst_tensor_get_type (strv[1]);
905  } else if (g_ascii_strcasecmp (strv[0], "per-channel") == 0) {
906  if (g_strv_length (strv) > 1 &&
907  g_ascii_strcasecmp (strv[1], "true") == 0)
908  filter->data_stand.per_channel = TRUE;
909  } else {
910  filter->data_stand.mode = STAND_END;
911  ml_logw ("Unknown option for stand mode: %s", strv[0]);
912  }
913 
914  g_strfreev (strv);
915  }
916 
917  g_strfreev (options);
918  ret = filter->loaded = TRUE;
919  break;
920  }
921  case GTT_CLAMP:
922  {
923  gchar **strv = NULL;
924 
925  if (!g_regex_match_simple (REGEX_CLAMP_OPTION, filter->option,
926  G_REGEX_CASELESS, 0)) {
927  ml_loge
928  ("%s: clamp: \'%s\' is not valid option string: it should be in the form of [CLAMP_MIN:CLAMP_MAX]\n",
929  filter_name, filter->option);
930  break;
931  }
932 
933  strv = g_strsplit (filter->option, ":", 2);
934 
935  filter->data_clamp.min = g_ascii_strtod (strv[0], NULL);
936  if (errno == ERANGE) {
937  ml_loge ("%s: clamp: CLAMP_MIN value has an invalid range\n",
938  filter_name);
939  g_strfreev (strv);
940  break;
941  }
942  filter->data_clamp.max = g_ascii_strtod (strv[1], NULL);
943  if (errno == ERANGE) {
944  ml_loge ("%s: clamp: CLAMP_MAX value has an invalid range\n",
945  filter_name);
946  g_strfreev (strv);
947  break;
948  }
949 
950  g_strfreev (strv);
951 
952  if (filter->data_clamp.min > filter->data_clamp.max) {
953  ml_loge ("%s: clamp: CLAMP_MIN is larger than CLAMP_MAX\n",
954  filter_name);
955  break;
956  }
957 
958  ret = filter->loaded = TRUE;
959  break;
960  }
961  case GTT_PADDING:
962  {
963  gchar **options = NULL;
964  guint i, num_options;
965 
966  if (!g_regex_match_simple (REGEX_PADDING_OPTION, filter->option,
967  G_REGEX_CASELESS, 0)) {
968  ml_loge
969  ("%s: padding: \'%s\' is not valid option string: it should be in the form of left|right|top|bottom|front|back:PADDING,[layout:(NCHW|NHWC)]\n",
970  filter_name, filter->option);
971  break;
972  }
973 
974  for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++)
975  filter->data_padding.pad[i] = 0;
976  filter->data_padding.layout = _NNS_LAYOUT_ANY;
977 
978  options = g_strsplit (filter->option, ",", -1);
979  num_options = g_strv_length (options);
980 
981  for (i = 0; i < num_options; i++) {
982  gchar **strv = g_strsplit (options[i], ":", 2);
983  if (g_ascii_strcasecmp (strv[0], "left") == 0) {
984  filter->data_padding.pad[PADDING_LEFT] =
985  (guint) g_ascii_strtoull (strv[1], NULL, 10);
986  } else if (g_ascii_strcasecmp (strv[0], "right") == 0) {
987  filter->data_padding.pad[PADDING_RIGHT] =
988  (guint) g_ascii_strtoull (strv[1], NULL, 10);
989  } else if (g_ascii_strcasecmp (strv[0], "top") == 0) {
990  filter->data_padding.pad[PADDING_TOP] =
991  (guint) g_ascii_strtoull (strv[1], NULL, 10);
992  } else if (g_ascii_strcasecmp (strv[0], "bottom") == 0) {
993  filter->data_padding.pad[PADDING_BOTTOM] =
994  (guint) g_ascii_strtoull (strv[1], NULL, 10);
995  } else if (g_ascii_strcasecmp (strv[0], "front") == 0) {
996  filter->data_padding.pad[PADDING_FRONT] =
997  (guint) g_ascii_strtoull (strv[1], NULL, 10);
998  } else if (g_ascii_strcasecmp (strv[0], "back") == 0) {
999  filter->data_padding.pad[PADDING_BACK] =
1000  (guint) g_ascii_strtoull (strv[1], NULL, 10);
1001  } else if (g_ascii_strcasecmp (strv[0], "layout") == 0) {
1002  if (g_ascii_strcasecmp (strv[1], "NHWC") == 0)
1003  filter->data_padding.layout = _NNS_LAYOUT_NHWC;
1004  else
1005  filter->data_padding.layout = _NNS_LAYOUT_NCHW;
1006  } else {
1007  ml_logw ("Unknown option for padding mode: %s", strv[0]);
1008  }
1009  g_strfreev (strv);
1010  }
1011  g_strfreev (options);
1012 
1013  if (filter->data_padding.layout == _NNS_LAYOUT_NHWC) {
1014  guint prev_left = filter->data_padding.pad[PADDING_LEFT],
1015  prev_right = filter->data_padding.pad[PADDING_RIGHT];
1016  filter->data_padding.pad[PADDING_LEFT] =
1017  filter->data_padding.pad[PADDING_FRONT];
1018  filter->data_padding.pad[PADDING_RIGHT] =
1019  filter->data_padding.pad[PADDING_BACK];
1020  filter->data_padding.pad[PADDING_FRONT] = prev_left;
1021  filter->data_padding.pad[PADDING_BACK] = prev_right;
1022  }
1023 
1024  ret = filter->loaded = TRUE;
1025  break;
1026  }
1027  default:
1028  GST_ERROR_OBJECT (filter, "Cannot identify mode\n");
1029  ret = FALSE;
1030  }
1031 
1032  g_free (filter_name);
1033  return ret;
1034 }
1035 
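/* Editor's note (illustrative, not part of the original source): for
 * mode=arithmetic with option="typecast:float32,add:-127.5,div:127.5", the
 * parser above sets data_arithmetic.out_type to _NNS_FLOAT32 and builds the
 * operator list [typecast(float32), add(-127.5), div(127.5)], which is applied
 * element-wise later in gst_tensor_transform_arithmetic(). A per-channel form
 * such as "per-channel:true@3,add:1@0,add:2@1" treats dimension 3 as the
 * channel axis and applies each operand only to the channel index given after
 * '@'. */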
1039 static void
1040 gst_tensor_transform_set_property (GObject * object, guint prop_id,
1041  const GValue * value, GParamSpec * pspec)
1042 {
1043  GstTensorTransform *filter = GST_TENSOR_TRANSFORM (object);
1044 
1045  switch (prop_id) {
1046  case PROP_SILENT:
1047  filter->silent = g_value_get_boolean (value);
1048  break;
1049  case PROP_MODE:
1050  filter->mode = g_value_get_enum (value);
1052  break;
1053  case PROP_OPTION:
1054  {
1055  gchar *backup_option = filter->option;
1056  filter->option = g_value_dup_string (value);
1057  if (gst_tensor_transform_set_option_data (filter)) {
1058  silent_debug (filter, "Option = %s --> %s\n", backup_option,
1059  filter->option);
1060  g_free (backup_option);
1061  } else {
1062  /* ERROR! Revert the change! */
1063  g_free (filter->option);
1064  filter->option = backup_option;
1066  }
1067  break;
1068  }
1069  case PROP_ACCELERATION:
1070 #ifdef HAVE_ORC
1071  filter->acceleration = g_value_get_boolean (value);
1072  silent_debug (filter, "acceleration = %d\n", filter->acceleration);
1073 #else
1074  GST_WARNING_OBJECT (filter, "Orc acceleration is not supported");
1075  filter->acceleration = FALSE;
1076 #endif
1077  break;
1078  case PROP_APPLY:
1079  {
1080  gint64 val;
1081  const gchar *param = g_value_get_string (value);
1082  gchar **strv = g_strsplit_set (param, ",", -1);
1083  guint i, num = g_strv_length (strv);
1084  gchar *endptr = NULL;
1085 
1086  for (i = 0; i < num; i++) {
1087  errno = 0;
1088  val = g_ascii_strtoll (strv[i], &endptr, 10);
1089  if (errno == ERANGE || errno == EINVAL || (endptr == strv[i])) {
1090  ml_loge ("Cannot convert string %s to a gint64 value", strv[i]);
1091  }
1092  filter->apply = g_list_append (filter->apply, GINT_TO_POINTER (val));
1093  }
1094  g_strfreev (strv);
1095  break;
1096  }
1097  default:
1098  G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1099  break;
1100  }
1101 }
1102 
1106 static void
1107 gst_tensor_transform_get_property (GObject * object, guint prop_id,
1108  GValue * value, GParamSpec * pspec)
1109 {
1110  GstTensorTransform *filter = GST_TENSOR_TRANSFORM (object);
1111 
1112  switch (prop_id) {
1113  case PROP_SILENT:
1114  g_value_set_boolean (value, filter->silent);
1115  break;
1116  case PROP_MODE:
1117  g_value_set_enum (value, filter->mode);
1118  break;
1119  case PROP_OPTION:
1120  g_value_set_string (value, filter->option);
1121  break;
1122  case PROP_ACCELERATION:
1123  g_value_set_boolean (value, filter->acceleration);
1124  break;
1125  case PROP_APPLY:
1126  {
1127  GList *list;
1128  gchar *p;
1129  GPtrArray *arr;
1130  gchar **strings;
1131 
1132  if (filter->apply == NULL) {
1133  g_value_set_string (value, "");
1134  return;
1135  }
1136 
1137  arr = g_ptr_array_new ();
1138  for (list = filter->apply; list != NULL; list = list->next) {
1139  g_ptr_array_add (arr, g_strdup_printf ("%i",
1140  GPOINTER_TO_INT (list->data)));
1141  }
1142  g_ptr_array_add (arr, NULL);
1143  strings = (gchar **) g_ptr_array_free (arr, FALSE);
1144  p = g_strjoinv (",", strings);
1145 
1146  g_strfreev (strings);
1147  g_value_take_string (value, p);
1148  break;
1149  }
1150  case PROP_TRANSPOSE_RANK_LIMIT:
1151  g_value_set_uint (value, NNS_TENSOR_TRANSPOSE_RANK_LIMIT);
1152  break;
1153  default:
1154  G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
1155  break;
1156  }
1157 }
1158 
1162 static void
1163 gst_tensor_transform_finalize (GObject * object)
1164 {
1165  GstTensorTransform *filter;
1166 
1167  filter = GST_TENSOR_TRANSFORM (object);
1168 
1169  if (filter->option) {
1170  g_free (filter->option);
1171  filter->option = NULL;
1172  }
1173 
1174  if (filter->operators) {
1175  g_slist_free_full (filter->operators, g_free);
1176  filter->operators = NULL;
1177  }
1178 
1179  if (filter->apply) {
1180  g_list_free (filter->apply);
1181  filter->apply = NULL;
1182  }
1183 
1184  G_OBJECT_CLASS (parent_class)->finalize (object);
1185 }
1186 
1196 static GstFlowReturn
1197 gst_tensor_transform_dimchg (GstTensorTransform * filter,
1198  GstTensorInfo * in_info, GstTensorInfo * out_info,
1199  const uint8_t * inptr, uint8_t * outptr)
1200 {
1201  uint32_t *fromDim = in_info->dimension;
1202  uint32_t *toDim = out_info->dimension;
1203  unsigned int from = filter->data_dimchg.from;
1204  unsigned int to = filter->data_dimchg.to;
1205  unsigned int i, j, k;
1206  unsigned int loopLimit = 1;
1207  gsize loopBlockSize, copyblocksize, copyblocklimit;
1208 
1209  if (from == to) {
1211  nns_memcpy (outptr, inptr, gst_tensor_info_get_size (in_info));
1212  GST_WARNING_OBJECT (filter,
1213  "Calling tensor_transform with high memcpy overhead WITHOUT any effects! Check your stream whether you really need tensor_transform.\n");
1214  return GST_FLOW_OK;
1215  }
1216 
1219  g_assert (fromDim[from] == toDim[to]);
1220 
1221  loopBlockSize = copyblocksize = gst_tensor_get_element_size (in_info->type);
1222  copyblocklimit = 1;
1223 
1224  if (from < to) {
1231  for (i = NNS_TENSOR_RANK_LIMIT - 1; i > to; i--) {
1232  if (toDim[i] == 0)
1233  continue;
1234  loopLimit *= toDim[i];
1235  }
1236 
1237  for (i = 0; i < to; i++) {
1238  if (toDim[i] == 0)
1239  break;
1240  loopBlockSize *= toDim[i];
1241  }
1242 
1243  for (i = 0; i < from; i++) {
1244  if (fromDim[i] == 0)
1245  break;
1246  copyblocksize *= fromDim[i];
1247  }
1248  for (i = 0; i < to; i++) {
1249  if (toDim[i] == 0)
1250  break;
1251  copyblocklimit *= toDim[i];
1252  }
1253 
1254  for (i = 0; i < loopLimit; i++) {
1255  /* [i1][i2][...][iN][b][...] i = i1 x i2 x ... x iN */
1256  uint8_t *destptr = outptr + loopBlockSize * toDim[to] * i;
1257  const uint8_t *srcptr = inptr + loopBlockSize * toDim[to] * i;
1258 
1259  for (j = 0; j < toDim[to]; j++) {
1260  uint8_t *j_destptr = destptr + loopBlockSize * j;
1261  for (k = 0; k < copyblocklimit; k++) {
1262  nns_memcpy (j_destptr + copyblocksize * k,
1263  srcptr + k * copyblocksize * toDim[to] + j * copyblocksize,
1264  copyblocksize);
1265  }
1266  }
1267  }
1268  } else {
1274  ml_loge
1275  ("tensor-transform/dimchg operation is not permitted if from >= to.\n");
1276  return GST_FLOW_ERROR;
1277  }
1278 
1279  return GST_FLOW_OK;
1280 }
1281 
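/* Editor's note (illustrative, not part of the original source): with
 * option="0:2" (from=0, to=2) an input of dimension 3:640:480 becomes
 * 640:480:3, i.e. the innermost axis is moved outward (interleaved RGB to
 * planar). The from >= to case is rejected above because this copy strategy
 * only supports moving an axis toward the outer dimensions. */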
1291 static GstFlowReturn
1292 gst_tensor_transform_typecast (GstTensorTransform * filter,
1293  GstTensorInfo * in_info, GstTensorInfo * out_info,
1294  const uint8_t * inptr, uint8_t * outptr)
1295 {
1296  gulong i, num;
1297  gsize in_element_size, out_element_size;
1298 
1299  num = gst_tensor_get_element_count (in_info->dimension);
1300 
1301 #ifdef HAVE_ORC
1302  if (orc_supported (filter, in_info->type, out_info->type)) {
1303  orc_typecast (inptr, outptr, num, in_info->type, out_info->type);
1304  return GST_FLOW_OK;
1305  }
1306 #endif
1307 
1308  in_element_size = gst_tensor_get_element_size (in_info->type);
1309  out_element_size = gst_tensor_get_element_size (out_info->type);
1310 
1311  for (i = 0; i < num; ++i) {
1312  gst_tensor_data_raw_typecast (
1313  (gpointer) (inptr + in_element_size * i), in_info->type,
1314  (gpointer) (outptr + out_element_size * i), out_info->type);
1315  }
1316 
1317  return GST_FLOW_OK;
1318 }
1319 
1329 static GstFlowReturn
1330 gst_tensor_transform_arithmetic (GstTensorTransform * filter,
1331  GstTensorInfo * in_info, GstTensorInfo * out_info,
1332  const uint8_t * inptr, uint8_t * outptr)
1333 {
1334  gulong i, num, j, ch;
1335  gsize in_element_size, out_element_size;
1336 
1337  GSList *walk;
1338  tensor_transform_operator_s *op_s;
1339  tensor_data_s value;
1340 
1341  num = gst_tensor_get_element_count (in_info->dimension);
1342 
1343 #ifdef HAVE_ORC
1344  if (orc_supported (filter, in_info->type, out_info->type)) {
1345  walk = filter->operators;
1350  orc_typecast (inptr, outptr, num, in_info->type, out_info->type);
1351 
1352  if (!filter->data_arithmetic.per_channel_arith) {
1353  while (walk) {
1354  op_s = (tensor_transform_operator_s *) walk->data;
1355 
1356  if (op_s->op != GTT_OP_TYPECAST) {
1357  gst_tensor_data_typecast (&op_s->value, out_info->type);
1358  orc_operator (outptr, num, &op_s->value, op_s->op);
1359  }
1360 
1361  walk = g_slist_next (walk);
1362  }
1363  } else {
1364  gsize typesize = 0;
1365  guint ch_dim = filter->data_arithmetic.ch_dim;
1366  gsize ch_offset, ch_size = 1;
1367  uint8_t *tmp_outptr = NULL;
1368 
1369  for (i = 0; i < ch_dim; ++i) {
1370  ch_size *= in_info->dimension[i];
1371  }
1372  ch_offset = ch_size * in_info->dimension[ch_dim];
1373  orc_typesize (typesize, out_info->type);
1374 
1375  while (walk) {
1376  op_s = (tensor_transform_operator_s *) walk->data;
1377  if (op_s->op == GTT_OP_TYPECAST) {
1378  walk = g_slist_next (walk);
1379  continue;
1380  }
1381 
1382  if (op_s->applying_ch == -1) {
1383  gst_tensor_data_typecast (&op_s->value, out_info->type);
1384  orc_operator (outptr, num, &op_s->value, op_s->op);
1385  } else {
1386  for (i = 0; i < num / ch_offset; ++i) {
1387  tmp_outptr =
1388  outptr + (ch_size * op_s->applying_ch +
1389  ch_offset * i) * typesize;
1390  gst_tensor_data_typecast (&op_s->value, out_info->type);
1391  orc_operator (tmp_outptr, ch_size, &op_s->value, op_s->op);
1392  }
1393  }
1394  walk = g_slist_next (walk);
1395  }
1396  }
1397  return GST_FLOW_OK;
1398  }
1399 #endif
1400 
1401  in_element_size = gst_tensor_get_element_size (in_info->type);
1402  out_element_size = gst_tensor_get_element_size (out_info->type);
1403 
1404  /* per-channel */
1405  if (filter->data_arithmetic.per_channel_arith) {
1406  guint ch_dim = filter->data_arithmetic.ch_dim;
1407  gsize ch_offset, ch_size = 1;
1408  for (i = 0; i < ch_dim; ++i) {
1409  ch_size *= in_info->dimension[i];
1410  }
1411  ch_offset = ch_size * in_info->dimension[ch_dim];
1412 
1420  for (i = 0; i < num / ch_offset; ++i) {
1421  for (ch = 0; ch < in_info->dimension[ch_dim]; ++ch) {
1422  for (j = 0; j < ch_size; ++j) {
1423  gulong data_idx = (i * ch_offset) + (ch * ch_size) + j;
1424  gst_tensor_data_set (&value, in_info->type,
1425  (gpointer) (inptr + in_element_size * data_idx));
1426 
1427  walk = filter->operators;
1428  while (walk) {
1429  op_s = (tensor_transform_operator_s *) walk->data;
1430  switch (op_s->op) {
1431  case GTT_OP_TYPECAST:
1432  gst_tensor_data_typecast (&value, op_s->value.type);
1433  break;
1434  case GTT_OP_ADD:
1435  case GTT_OP_MUL:
1436  case GTT_OP_DIV:
1437  {
1438  gst_tensor_data_typecast (&op_s->value, value.type);
1439 
1440  if (op_s->applying_ch == (int) ch || op_s->applying_ch == -1) {
1441  gst_tensor_transform_do_operator (filter, &value,
1442  &op_s->value, op_s->op);
1443  }
1444  break;
1445  }
1446  default:
1447  g_assert (0);
1448  return GST_FLOW_ERROR;
1449  }
1450 
1451  walk = g_slist_next (walk);
1452  }
1453 
1454  /* set output value */
1455  g_assert (out_info->type == value.type);
1456  gst_tensor_data_get (&value, outptr + out_element_size * data_idx);
1457  }
1458  }
1459  }
1460 
1461  return GST_FLOW_OK;
1462  }
1463 
1464  for (i = 0; i < num; ++i) {
1465  /* init value with input tensor type */
1466  gst_tensor_data_set (&value, in_info->type,
1467  (gpointer) (inptr + in_element_size * i));
1468 
1469  walk = filter->operators;
1470  while (walk) {
1471  op_s = (tensor_transform_operator_s *) walk->data;
1472 
1476  switch (op_s->op) {
1477  case GTT_OP_TYPECAST:
1478  gst_tensor_data_typecast (&value, op_s->value.type);
1479  break;
1480  case GTT_OP_ADD:
1481  case GTT_OP_MUL:
1482  case GTT_OP_DIV:
1483  gst_tensor_data_typecast (&op_s->value, value.type);
1484  gst_tensor_transform_do_operator (filter, &value, &op_s->value,
1485  op_s->op);
1486  break;
1487  default:
1488  g_assert (0);
1489  return GST_FLOW_ERROR;
1490  }
1491 
1492  walk = g_slist_next (walk);
1493  }
1494 
1495  /* set output value */
1496  g_assert (out_info->type == value.type);
1497  gst_tensor_data_get (&value, outptr + out_element_size * i);
1498  }
1499 
1500  return GST_FLOW_OK;
1501 }
1502 
1506 #define transposeloop(cl,ck,cj,ci,sl,sk,sj,si,typesize) do { \
1507  size_t i, j, k, l; \
1508  int inidx = 0, outidx=0; \
1509  for(cl=0;cl<sl;cl++) \
1510  for(ci=0;ci<si;ci++) \
1511  for(cj=0;cj<sj;cj++) \
1512  for(ck=0;ck<sk;ck++){ \
1513  const uint8_t *_in; \
1514  uint8_t *_out; \
1515  outidx = si*sj*sk*cl + sj*sk*ci + sk*cj + ck; \
1516  inidx = SK*SJ*SI*l + SJ*SI*k + SI*j + i; \
1517  _in = inptr + inidx * typesize; \
1518  _out = outptr + outidx * typesize; \
1519  nns_memcpy(_out, _in, typesize); \
1520  } \
1521  } while(0);
1522 
1532 static GstFlowReturn
1533 gst_tensor_transform_transpose (GstTensorTransform * filter,
1534  GstTensorInfo * in_info, GstTensorInfo * out_info,
1535  const uint8_t * inptr, uint8_t * outptr)
1536 {
1537  int i, from, to;
1538  gboolean checkdim = FALSE;
1539  uint32_t *fromDim = in_info->dimension;
1540  gsize type_size = gst_tensor_get_element_size (in_info->type);
1541  gsize indexI, indexJ, SL, SI, SJ, SK;
1542  UNUSED (out_info);
1543 
1544  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
1545  from = i;
1546  to = filter->data_transpose.trans_order[i];
1547  if (from != to) {
1548  checkdim = TRUE;
1549  break;
1550  }
1551  }
1552 
1553  if (!checkdim) {
1554  nns_memcpy (outptr, inptr, gst_tensor_info_get_size (in_info));
1555  GST_WARNING_OBJECT (filter,
1556  "Calling tensor_transform with high memcpy overhead WITHOUT any effects!");
1557  return GST_FLOW_OK;
1558  }
1559 
1560  indexI = filter->data_transpose.trans_order[0];
1561  indexJ = filter->data_transpose.trans_order[1];
1562  SL = fromDim[3] > 0 ? fromDim[3] : 1;
1563  SI = fromDim[0] > 0 ? fromDim[0] : 1;
1564  SJ = fromDim[1] > 0 ? fromDim[1] : 1;
1565  SK = fromDim[2] > 0 ? fromDim[2] : 1;
1566 
1567  switch (indexI) {
1568  case 0:
1569  if (indexJ == 1) {
1570  transposeloop (l, i, j, k, SL, SI, SJ, SK, type_size);
1571  } else {
1572  transposeloop (l, i, k, j, SL, SI, SK, SJ, type_size);
1573  }
1574  break;
1575  case 1:
1576  if (indexJ == 0) {
1577  transposeloop (l, j, i, k, SL, SJ, SI, SK, type_size);
1578  } else {
1579  transposeloop (l, j, k, i, SL, SJ, SK, SI, type_size);
1580  }
1581  break;
1582  case 2:
1583  if (indexJ == 0) {
1584  transposeloop (l, k, i, j, SL, SK, SI, SJ, type_size);
1585  } else {
1586  transposeloop (l, k, j, i, SL, SK, SJ, SI, type_size);
1587  }
1588  break;
1589  }
1590 
1591  return GST_FLOW_OK;
1592 }
1593 
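/* Editor's note (illustrative, not part of the original source): the output
 * dimension at index i is the input dimension at trans_order[i], so with
 * option="1:2:0:3" an input of 3:640:480:1 becomes 640:480:3:1. The rank is
 * fixed to NNS_TENSOR_TRANSPOSE_RANK_LIMIT (4) and the last index in the
 * option string must be 3. */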
1604 static GstFlowReturn
1605 gst_tensor_transform_stand (GstTensorTransform * filter,
1606  GstTensorInfo * in_info, GstTensorInfo * out_info,
1607  const uint8_t * inptr, uint8_t * outptr)
1608 {
1609  GstFlowReturn ret = GST_FLOW_OK;
1610  gsize in_element_size, out_element_size, data_size, ch_size;
1611  gulong i, num, data_idx, ch;
1612  gdouble tmp, *average, *std;
1613 
1614  in_element_size = gst_tensor_get_element_size (in_info->type);
1615  out_element_size = gst_tensor_get_element_size (out_info->type);
1616  num = gst_tensor_get_element_count (in_info->dimension);
1617 
1618  data_size = gst_tensor_info_get_size (in_info);
1619  ch_size = in_info->dimension[0];
1620 
1621  /* calc average and std */
1622  average = std = NULL;
1623  if (filter->data_stand.per_channel) {
1624  gst_tensor_data_raw_average_per_channel ((gpointer) inptr, data_size,
1625  in_info->type, in_info->dimension, &average);
1626  /* calculate std only for default mode */
1627  if (filter->data_stand.mode == STAND_DEFAULT)
1628  gst_tensor_data_raw_std_per_channel ((gpointer) inptr, data_size,
1629  in_info->type, in_info->dimension, average, &std);
1630  } else {
1631  gst_tensor_data_raw_average ((gpointer) inptr, data_size,
1632  in_info->type, &average);
1633  /* calculate std only for default mode */
1634  if (filter->data_stand.mode == STAND_DEFAULT)
1635  gst_tensor_data_raw_std ((gpointer) inptr, data_size, in_info->type,
1636  average, &std);
1637  }
1638 
1639  switch (filter->data_stand.mode) {
1640  case STAND_DEFAULT:
1641  {
1642  if (!filter->data_stand.per_channel) {
1643  for (i = 0; i < num; i++) {
1644  data_idx = in_element_size * i;
1645  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1646  in_info->type, &tmp, _NNS_FLOAT64);
1647 
1648  tmp = fabs ((tmp - *average) / *std);
1649 
1650  data_idx = out_element_size * i;
1651  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1652  (gpointer) (outptr + data_idx), out_info->type);
1653  }
1654  } else {
1655  for (ch = 0; ch < ch_size; ++ch) {
1656  for (i = 0; i < num / ch_size; i++) {
1657  data_idx = in_element_size * ((i * ch_size) + ch);
1658  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1659  in_info->type, &tmp, _NNS_FLOAT64);
1660 
1661  tmp = fabs ((tmp - average[ch]) / std[ch]);
1662 
1663  data_idx = out_element_size * ((i * ch_size) + ch);
1664  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1665  (gpointer) (outptr + data_idx), out_info->type);
1666  }
1667  }
1668  }
1669  break;
1670  }
1671  case STAND_DC_AVERAGE:
1672  {
1673  if (!filter->data_stand.per_channel) {
1674  for (i = 0; i < num; i++) {
1675  data_idx = in_element_size * i;
1676  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1677  in_info->type, &tmp, _NNS_FLOAT64);
1678 
1679  tmp -= *average;
1680 
1681  data_idx = out_element_size * i;
1682  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1683  (gpointer) (outptr + data_idx), out_info->type);
1684  }
1685  } else {
1686  for (ch = 0; ch < ch_size; ++ch) {
1687  for (i = 0; i < num / ch_size; i++) {
1688  data_idx = in_element_size * ((i * ch_size) + ch);
1689  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx),
1690  in_info->type, &tmp, _NNS_FLOAT64);
1691 
1692  tmp -= average[ch];
1693 
1694  data_idx = out_element_size * ((i * ch_size) + ch);
1695  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64,
1696  (gpointer) (outptr + data_idx), out_info->type);
1697  }
1698  }
1699  }
1700  break;
1701  }
1702  default:
1703  GST_ERROR_OBJECT (filter, "Cannot identify mode\n");
1704  ret = GST_FLOW_ERROR;
1705  }
1706 
1707  g_free (average);
1708  g_free (std);
1709 
1710  return ret;
1711 }
1712 
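/* Editor's note (not part of the original source): per element x, the two
 * standardization modes above compute
 *   default:    y = |x - mean| / std
 *   dc-average: y = x - mean
 * where mean and std are computed over the whole tensor, or per channel
 * (dimension 0) when per-channel:true is given, and y is then cast to the
 * output type. */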
1724 static GstFlowReturn
1725 gst_tensor_transform_clamp (GstTensorTransform * filter,
1726  GstTensorInfo * in_info, GstTensorInfo * out_info,
1727  const uint8_t * inptr, uint8_t * outptr)
1728 {
1729  gsize in_element_size, out_element_size;
1730  gulong i, num, data_idx;
1731  gdouble tmp;
1732 
1733  in_element_size = gst_tensor_get_element_size (in_info->type);
1734  out_element_size = gst_tensor_get_element_size (out_info->type);
1735  num = gst_tensor_get_element_count (in_info->dimension);
1736 
1737  for (i = 0; i < num; ++i) {
1738  data_idx = in_element_size * i;
1739  gst_tensor_data_raw_typecast ((gpointer) (inptr + data_idx), in_info->type,
1740  &tmp, _NNS_FLOAT64);
1741 
1742  tmp = CLAMP (tmp, filter->data_clamp.min, filter->data_clamp.max);
1743 
1744  data_idx = out_element_size * i;
1745  gst_tensor_data_raw_typecast (&tmp, _NNS_FLOAT64, outptr + data_idx,
1746  out_info->type);
1747  }
1748 
1749  return GST_FLOW_OK;
1750 }
1751 
1761 static GstFlowReturn
1762 gst_tensor_transform_padding (GstTensorTransform * filter,
1763  GstTensorInfo * in_info, GstTensorInfo * out_info, const uint8_t * inptr,
1764  uint8_t * outptr)
1765 {
1766  gsize element_size, in_loop_size, out_loop_size, copy_block_size;
1767  guint i, j, k, left, top, front, loop_limit = 1;
1768  element_size = gst_tensor_get_element_size (in_info->type);
1769 
1770  in_loop_size = (gsize) in_info->dimension[2] * in_info->dimension[1]
1771  * in_info->dimension[0] * element_size;
1772  out_loop_size = (gsize) out_info->dimension[2] * out_info->dimension[1]
1773  * out_info->dimension[0] * element_size;
1774  copy_block_size = in_info->dimension[0] * element_size;
1775 
1776  for (i = NNS_TENSOR_PADDING_RANK_LIMIT; i < NNS_TENSOR_RANK_LIMIT; i++) {
1777  if (in_info->dimension[i] == 0)
1778  break;
1779  loop_limit *= in_info->dimension[i];
1780  }
1781 
1782  left = filter->data_padding.pad[PADDING_LEFT];
1783  top = filter->data_padding.pad[PADDING_TOP];
1784  front = filter->data_padding.pad[PADDING_FRONT];
1785 
1787  memset (outptr, 0, out_loop_size * loop_limit);
1788 
1789  for (i = 0; i < loop_limit; i++)
1790  for (j = 0; j < in_info->dimension[2]; j++)
1791  for (k = 0; k < in_info->dimension[1]; k++) {
1792  guint in_idx = j * in_info->dimension[1] * in_info->dimension[0]
1793  + k * in_info->dimension[0];
1794  guint out_idx = j * out_info->dimension[1] * out_info->dimension[0]
1795  + k * out_info->dimension[0];
1796 
1797  out_idx += left + top * out_info->dimension[0]
1798  + front * out_info->dimension[1] * out_info->dimension[0];
1799 
1800  memcpy (outptr + out_idx * element_size + out_loop_size * i,
1801  inptr + in_idx * element_size + in_loop_size * i, copy_block_size);
1802  }
1803 
1804  return GST_FLOW_OK;
1805 }
1806 
1814 static GstFlowReturn
1815 gst_tensor_transform_transform (GstBaseTransform * trans,
1816  GstBuffer * inbuf, GstBuffer * outbuf)
1817 {
1818  GstTensorTransform *filter;
1819  GstTensorInfo *in_info, *out_info;
1820  GstFlowReturn res = GST_FLOW_ERROR;
1821  GstMemory *in_mem[NNS_TENSOR_SIZE_LIMIT] = { 0, };
1822  GstMemory *out_mem[NNS_TENSOR_SIZE_LIMIT] = { 0, };
1823  GstMapInfo in_map[NNS_TENSOR_SIZE_LIMIT];
1824  GstMapInfo out_map[NNS_TENSOR_SIZE_LIMIT];
1825  uint8_t *inptr, *outptr;
1826  guint i, num_tensors, num_mems;
1827  gsize buf_size, hsize;
1828  GstTensorMetaInfo meta;
1829  GstTensorInfo in_flex_info, out_flex_info;
1830  gboolean in_flexible, out_flexible;
1831 
1832  filter = GST_TENSOR_TRANSFORM_CAST (trans);
1833 
1834  g_return_val_if_fail (filter->loaded, GST_FLOW_ERROR);
1835  inbuf = gst_tensor_buffer_from_config (inbuf, &filter->in_config);
1836 
1837  in_flexible =
1838  gst_tensor_pad_caps_is_flexible (GST_BASE_TRANSFORM_SINK_PAD (trans));
1839  out_flexible =
1840  gst_tensor_pad_caps_is_flexible (GST_BASE_TRANSFORM_SRC_PAD (trans));
1841 
1842  num_mems = gst_tensor_buffer_get_count (inbuf);
1843  if (in_flexible) {
1844  num_tensors = num_mems;
1845  g_return_val_if_fail (out_flexible, GST_FLOW_ERROR);
1846  } else {
1847  num_tensors = filter->in_config.info.num_tensors;
1848  g_return_val_if_fail (num_mems == num_tensors, GST_FLOW_ERROR);
1849  }
1850 
1851  for (i = 0; i < num_tensors; i++) {
1852  in_info = gst_tensors_info_get_nth_info (&filter->in_config.info, i);
1853  out_info = gst_tensors_info_get_nth_info (&filter->out_config.info, i);
1854 
1855  if (filter->apply && !g_list_find (filter->apply, GINT_TO_POINTER (i))) {
1856  GstMemory *mem = gst_tensor_buffer_get_nth_memory (inbuf, i);
1857 
1858  if (!in_flexible && out_flexible) {
1859  GstMemory *old = mem;
1860 
1861  /* append meta */
1862  gst_tensor_info_convert_to_meta (out_info, &meta);
1863  mem = gst_tensor_meta_info_append_header (&meta, old);
1864  gst_memory_unref (old);
1865  }
1866 
1867  gst_tensor_buffer_append_memory (outbuf, mem, out_info);
1868  continue;
1869  }
1870 
1871  /* parse input buffer */
1872  in_mem[i] = gst_tensor_buffer_get_nth_memory (inbuf, i);
1873  if (!gst_memory_map (in_mem[i], &in_map[i], GST_MAP_READ)) {
1874  ml_loge ("Cannot map input buffer to gst-buf at tensor-transform.\n");
1875  res = GST_FLOW_ERROR;
1876  goto done;
1877  }
1878  inptr = in_map[i].data;
1879 
1880  if (in_flexible) {
1881  in_info = &in_flex_info;
1882  out_info = &out_flex_info;
1883 
1884  gst_tensor_meta_info_parse_header (&meta, inptr);
1886  if (!gst_tensor_meta_info_convert (&meta, in_info)) {
1887  res = GST_FLOW_ERROR;
1888  goto done;
1889  }
1890 
1891  gst_tensor_transform_convert_dimension (filter, GST_PAD_SINK,
1892  i, in_info, out_info);
1893 
1894  hsize = gst_tensor_meta_info_get_header_size (&meta);
1895  inptr += hsize;
1896  }
1897 
1898  /* prepare output buffer */
1899  buf_size = gst_tensor_info_get_size (out_info);
1900  if (out_flexible) {
1901  gst_tensor_info_convert_to_meta (out_info, &meta);
1902  hsize = gst_tensor_meta_info_get_header_size (&meta);
1903  buf_size += hsize;
1904  }
1905 
1906  out_mem[i] = gst_allocator_alloc (NULL, buf_size, NULL);
1907  gst_tensor_buffer_append_memory (outbuf, out_mem[i], out_info);
1908 
1909  if (!gst_memory_map (out_mem[i], &out_map[i], GST_MAP_WRITE)) {
1910  ml_loge ("Cannot map output buffer to gst-buf at tensor-transform.\n");
1911  res = GST_FLOW_ERROR;
1912  goto done;
1913  }
1914  outptr = out_map[i].data;
1915 
1916  if (out_flexible) {
1917  gst_tensor_meta_info_update_header (&meta, outptr);
1918  outptr += hsize;
1919  }
1920 
1921  switch (filter->mode) {
1922  case GTT_DIMCHG:
1923  res = gst_tensor_transform_dimchg (filter, in_info, out_info,
1924  inptr, outptr);
1925  break;
1926  case GTT_TYPECAST:
1927  res = gst_tensor_transform_typecast (filter, in_info, out_info,
1928  inptr, outptr);
1929  break;
1930  case GTT_ARITHMETIC:
1931  res = gst_tensor_transform_arithmetic (filter, in_info, out_info,
1932  inptr, outptr);
1933  break;
1934  case GTT_TRANSPOSE:
1935  res = gst_tensor_transform_transpose (filter, in_info, out_info,
1936  inptr, outptr);
1937  break;
1938  case GTT_STAND:
1939  res = gst_tensor_transform_stand (filter, in_info, out_info,
1940  inptr, outptr);
1941  break;
1942  case GTT_CLAMP:
1943  res = gst_tensor_transform_clamp (filter, in_info, out_info,
1944  inptr, outptr);
1945  break;
1946  case GTT_PADDING:
1947  res = gst_tensor_transform_padding (filter, in_info, out_info,
1948  inptr, outptr);
1949  break;
1950  default:
1951  ml_loge ("Not supported tensor transform mode");
1952  res = GST_FLOW_NOT_SUPPORTED;
1953  goto done;
1954  }
1955  }
1956 
1957 done:
1958  for (i = 0; i < num_tensors; i++) {
1959  if (in_mem[i]) {
1960  gst_memory_unmap (in_mem[i], &in_map[i]);
1961  gst_memory_unref (in_mem[i]);
1962  }
1963  if (out_mem[i])
1964  gst_memory_unmap (out_mem[i], &out_map[i]);
1965  }
1966 
1967  return res;
1968 }
1969 
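For reference, a minimal stand-alone usage sketch of the transform path implemented above. This is illustrative only: it assumes GStreamer plus the NNStreamer plugins (tensor_converter and this tensor_transform element) are installed, and the pipeline string, frame size, and normalization constants are an example configuration, not something mandated by the element.

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *pipeline;
  GstBus *bus;
  GstMessage *msg;
  GError *err = NULL;

  gst_init (&argc, &argv);

  /* Convert RGB frames to a tensor stream, then cast to float32 and
   * normalize roughly to [-1, 1] with mode=arithmetic. */
  pipeline = gst_parse_launch (
      "videotestsrc num-buffers=30 ! videoconvert ! "
      "video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! "
      "tensor_transform mode=arithmetic "
      "option=typecast:float32,add:-127.5,div:127.5 ! fakesink", &err);
  if (pipeline == NULL) {
    g_printerr ("Pipeline error: %s\n", err ? err->message : "unknown");
    g_clear_error (&err);
    return -1;
  }

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Block until EOS or an error is posted on the bus. */
  bus = gst_element_get_bus (pipeline);
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      GST_MESSAGE_EOS | GST_MESSAGE_ERROR);
  if (msg != NULL)
    gst_message_unref (msg);
  gst_object_unref (bus);

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (pipeline);
  return 0;
}

The option string casts each element to float32, then applies add and div per element; it is one valid instance of the REGEX_ARITH_OPTION pattern checked earlier in this file.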
1977 static gboolean
1978 gst_tensor_transform_read_caps (GstTensorTransform * filter,
1979  const GstCaps * caps, GstTensorsConfig * config)
1980 {
1981  GstStructure *structure;
1982  g_return_val_if_fail (config != NULL, FALSE);
1983 
1984  structure = gst_caps_get_structure (caps, 0);
1985 
1986  if (!gst_tensors_config_from_structure (config, structure)) {
1987  GST_WARNING_OBJECT (filter, "caps is not tensor %s\n",
1988  gst_structure_get_name (structure));
1989  return FALSE;
1990  }
1991 
1992  return gst_tensors_config_validate (config);
1993 }
1994 
2004 static gboolean
2005 gst_tensor_transform_convert_dimension (GstTensorTransform * filter,
2006  GstPadDirection direction, guint idx, const GstTensorInfo * in_info,
2007  GstTensorInfo * out_info)
2008 {
2009  guint i;
2010 
2011  /* copy input info first, then update output info */
2012  gst_tensor_info_copy (out_info, in_info);
2013 
2014  if (filter->apply && !g_list_find (filter->apply, GINT_TO_POINTER (idx)))
2015  return TRUE;
2016 
2017  switch (filter->mode) {
2018  case GTT_DIMCHG:
2019  {
2020  unsigned int from = filter->data_dimchg.from;
2021  unsigned int to = filter->data_dimchg.to;
2022 
2023  if (direction == GST_PAD_SINK) {
2024  for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
2025  if ((i < from && i < to) || (i > from && i > to) || from == to) {
2026  out_info->dimension[i] = in_info->dimension[i];
2027  } else if (i == to) {
2028  out_info->dimension[i] = in_info->dimension[from];
2029  } else if (from > to) {
2030  g_assert (i > 0 && i > to);
2031  out_info->dimension[i] = in_info->dimension[i - 1];
2032  } else {
2033  g_assert (i < to && i < (NNS_TENSOR_RANK_LIMIT - 1));
2034  out_info->dimension[i] = in_info->dimension[i + 1];
2035  }
2036  }
2037  } else {
2038  for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
2039  if ((i < from && i < to) || (i > from && i > to) || from == to) {
2040  out_info->dimension[i] = in_info->dimension[i];
2041  } else if (i == from) {
2042  out_info->dimension[i] = in_info->dimension[to];
2043  } else if (from > to) {
2044  g_assert (i < from && i < (NNS_TENSOR_RANK_LIMIT - 1));
2045  out_info->dimension[i] = in_info->dimension[i + 1];
2046  } else {
2047  g_assert (i > 0 && i > from);
2048  out_info->dimension[i] = in_info->dimension[i - 1];
2049  }
2050  }
2051  }
2052  break;
2053  }
2054  case GTT_TYPECAST:
2056  if (direction == GST_PAD_SINK) {
2058  out_info->type = filter->data_typecast.to;
2059  } else {
2060  /* cannot get the incoming data type on sink pad */
2061  out_info->type = _NNS_END;
2062  }
2063  break;
2064 
2065  case GTT_ARITHMETIC:
2066  /* check whether the arithmetic mode option includes a typecast operator */
2067  if (filter->data_arithmetic.out_type != _NNS_END) {
2068  if (direction == GST_PAD_SINK) {
2069  out_info->type = filter->data_arithmetic.out_type;
2070  } else {
2071  /* cannot get the incoming data type on sink pad */
2072  out_info->type = _NNS_END;
2073  }
2074  }
2075  break;
2076 
2077  case GTT_TRANSPOSE:
2078  if (direction == GST_PAD_SINK) {
2079  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
2080  out_info->dimension[i] =
2081  in_info->dimension[filter->data_transpose.trans_order[i]];
2082  }
2083  } else {
2084  for (i = 0; i < NNS_TENSOR_TRANSPOSE_RANK_LIMIT; i++) {
2085  g_assert (filter->data_transpose.trans_order[i] <
2086  NNS_TENSOR_TRANSPOSE_RANK_LIMIT);
2087  out_info->dimension[filter->data_transpose.trans_order[i]] =
2088  in_info->dimension[i];
2089  }
2090  }
2091  break;
2092 
2093  case GTT_STAND:
2095  if (direction == GST_PAD_SINK) {
2096  if (filter->data_stand.out_type != _NNS_END)
2097  out_info->type = filter->data_stand.out_type;
2098  } else {
2099  /* cannot get the incoming data type on sink pad */
2100  out_info->type = _NNS_END;
2101  }
2102  break;
2103 
2104  case GTT_CLAMP:
2105  /* same tensors info, do nothing. */
2106  break;
2107 
2108  case GTT_PADDING:
2109  if (direction == GST_PAD_SINK) {
2110  out_info->dimension[0] +=
2111  filter->data_padding.pad[PADDING_LEFT] +
2112  filter->data_padding.pad[PADDING_RIGHT];
2113  out_info->dimension[1] +=
2114  filter->data_padding.pad[PADDING_TOP] +
2115  filter->data_padding.pad[PADDING_BOTTOM];
2116  out_info->dimension[2] +=
2117  filter->data_padding.pad[PADDING_FRONT] +
2118  filter->data_padding.pad[PADDING_BACK];
2119  }
2120  break;
2121  default:
2122  return FALSE;
2123  }
2124 
2125  return TRUE;
2126 }
2127 
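To make the index arithmetic in the GTT_DIMCHG branch above concrete, here is a small self-contained sketch. The helper dimchg_sink is hypothetical (it only mirrors the sink-direction loop for illustration and is not part of this element); it shows how option=0:2 moves the first dimension to the third position, so an interleaved RGB frame with dimension 3:640:480:1 becomes 640:480:3:1.

#include <stdio.h>

#define RANK 4

/* Hypothetical helper mirroring the sink-direction "dimchg" mapping above:
 * move dimension `from` to position `to`, shifting the dimensions between
 * them by one slot. */
static void
dimchg_sink (const unsigned int in[RANK], unsigned int out[RANK],
    unsigned int from, unsigned int to)
{
  unsigned int i;

  for (i = 0; i < RANK; i++) {
    if ((i < from && i < to) || (i > from && i > to) || from == to)
      out[i] = in[i];
    else if (i == to)
      out[i] = in[from];
    else if (from > to)
      out[i] = in[i - 1];
    else
      out[i] = in[i + 1];
  }
}

int
main (void)
{
  /* e.g. interleaved RGB 640x480 (3:640:480:1) with option=0:2 */
  const unsigned int in[RANK] = { 3, 640, 480, 1 };
  unsigned int out[RANK];

  dimchg_sink (in, out, 0, 2);
  printf ("%u:%u:%u:%u\n", out[0], out[1], out[2], out[3]); /* 640:480:3:1 */
  return 0;
}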
2138 static GstCaps *
2139 gst_tensor_transform_transform_caps (GstBaseTransform * trans,
2140  GstPadDirection direction, GstCaps * caps, GstCaps * filtercap)
2141 {
2142  GstTensorTransform *filter;
2143  GstCaps *result = NULL;
2144  GstStructure *structure;
2145  guint i, j;
2146 
2147  filter = GST_TENSOR_TRANSFORM_CAST (trans);
2148 
2149  silent_debug (filter, "Calling TransformCaps, direction = %d\n", direction);
2150  silent_debug_caps (filter, caps, "from");
2151  silent_debug_caps (filter, filtercap, "filter");
2152 
2153  result = gst_caps_new_empty ();
2154  for (i = 0; i < gst_caps_get_size (caps); i++) {
2155  GstTensorsConfig in_config, out_config;
2156  GstTensorInfo *in_info, *out_info;
2157  gboolean is_types_not_fixed = FALSE;
2158  GstCaps *result_aux = gst_caps_new_empty ();
2159 
2160  gst_tensors_config_init (&out_config);
2161 
2162  structure = gst_caps_get_structure (caps, i);
2163  gst_tensors_config_from_structure (&in_config, structure);
2164 
2165  if (gst_tensors_config_is_flexible (&in_config)) {
2166  /* output caps is also flexible */
2167  out_config.info.format = _NNS_TENSOR_FORMAT_FLEXIBLE;
2168  } else {
2169  for (j = 0; j < in_config.info.num_tensors; j++) {
2170  in_info = gst_tensors_info_get_nth_info (&in_config.info, j);
2171  out_info = gst_tensors_info_get_nth_info (&out_config.info, j);
2172 
2173  gst_tensor_transform_convert_dimension (filter, direction,
2174  j, in_info, out_info);
2175  if (out_info->type == _NNS_END) {
2176  /* types cannot be specified */
2177  is_types_not_fixed = TRUE;
2178  }
2179  }
2180  }
2181 
2182  out_config.rate_d = in_config.rate_d;
2183  out_config.rate_n = in_config.rate_n;
2184  out_config.info.num_tensors = in_config.info.num_tensors;
2185 
2186  if (gst_structure_has_name (structure, NNS_MIMETYPE_TENSOR)) {
2187  gst_caps_append (result_aux, gst_tensor_caps_from_config (&out_config));
2188  } else {
2189  gst_caps_append (result_aux, gst_tensors_caps_from_config (&out_config));
2190 
2191  /* remove `types` field from caps */
2192  if (is_types_not_fixed) {
2193  GstStructure *s = gst_caps_get_structure (result_aux, 0);
2194  gst_structure_remove_field (s, "types");
2195  }
2196  }
2197 
2198  gst_caps_append (result, result_aux);
2199 
2200  gst_tensors_config_free (&in_config);
2201  gst_tensors_config_free (&out_config);
2202  }
2203 
2204  if (filtercap && gst_caps_get_size (filtercap) > 0) {
2205  GstCaps *intersection;
2206 
2207  intersection =
2208  gst_caps_intersect_full (result, filtercap, GST_CAPS_INTERSECT_FIRST);
2209 
2210  gst_caps_unref (result);
2211  result = intersection;
2212  }
2213 
2214  silent_debug_caps (filter, result, "to");
2215  return result;
2216 }
2217 
2221 static GstCaps *
2222 gst_tensor_transform_fixate_caps (GstBaseTransform * trans,
2223  GstPadDirection direction, GstCaps * caps, GstCaps * othercaps)
2224 {
2225  GstTensorTransform *filter;
2226  GstCaps *result;
2227 
2228  filter = GST_TENSOR_TRANSFORM_CAST (trans);
2229 
2230  silent_debug (filter, "Calling FixateCaps, direction = %d\n", direction);
2231  silent_debug_caps (filter, caps, "caps");
2232  silent_debug_caps (filter, othercaps, "othercaps");
2233 
2234  result =
2235  gst_tensor_transform_transform_caps (trans, direction, caps, othercaps);
2236  gst_caps_unref (othercaps);
2237 
2238  result = gst_caps_make_writable (result);
2239  result = gst_caps_fixate (result);
2240 
2241  silent_debug_caps (filter, result, "result");
2242  return result;
2243 }
2244 
2248 static gboolean
2249 gst_tensor_transform_set_caps (GstBaseTransform * trans,
2250  GstCaps * incaps, GstCaps * outcaps)
2251 {
2252  GstTensorTransform *filter;
2253  GstTensorsConfig in_config, out_config;
2254  GstTensorsConfig config;
2255  GstTensorInfo *in_info, *out_info;
2256  gboolean in_flexible, out_flexible;
2257  gboolean allowed = FALSE;
2258  guint i;
2259 
2260  filter = GST_TENSOR_TRANSFORM_CAST (trans);
2261 
2262  silent_debug (filter, "Calling SetCaps\n");
2263  silent_debug_caps (filter, incaps, "incaps");
2264  silent_debug_caps (filter, outcaps, "outcaps");
2265 
2266  if (!gst_tensor_transform_read_caps (filter, incaps, &in_config)) {
2267  GST_ERROR_OBJECT (filter, "Cannot read cap of incaps\n");
2268  goto error;
2269  }
2270 
2271  if (!gst_tensor_transform_read_caps (filter, outcaps, &out_config)) {
2272  GST_ERROR_OBJECT (filter, "Cannot read cap of outcaps\n");
2273  goto error;
2274  }
2275 
2276  in_flexible = gst_tensors_config_is_flexible (&in_config);
2277  out_flexible = gst_tensors_config_is_flexible (&out_config);
2278 
2279  /* compare type and dimension */
2280  gst_tensors_config_init (&config);
2281  config.info.format = out_config.info.format;
2282 
2283  config.rate_n = in_config.rate_n;
2284  config.rate_d = in_config.rate_d;
2285  config.info.num_tensors = in_config.info.num_tensors;
2286 
2287  if (!in_flexible) {
2288  for (i = 0; i < in_config.info.num_tensors; i++) {
2289  in_info = gst_tensors_info_get_nth_info (&in_config.info, i);
2290  out_info = gst_tensors_info_get_nth_info (&config.info, i);
2291 
2292  if (!gst_tensor_transform_convert_dimension (filter, GST_PAD_SINK,
2293  i, in_info, out_info)) {
2294  GST_ERROR_OBJECT (filter,
2295  "Tensor info is not matched with given properties.");
2296  goto error;
2297  }
2298  }
2299  }
2300 
2301  if (out_flexible) {
2302  GST_INFO_OBJECT (filter, "Output tensor is flexible.");
2303 
2304  /* set output configuration if input is static */
2305  if (!in_flexible)
2306  out_config = config;
2307  } else if (!gst_tensors_config_is_equal (&out_config, &config)) {
2308  GST_ERROR_OBJECT (filter,
2309  "Tensor info is not matched with given properties.\n");
2310  goto error;
2311  }
2312 
2313  /* set in/out tensor info */
2314  filter->in_config = in_config;
2315  filter->out_config = out_config;
2316  allowed = TRUE;
2317 
2318 error:
2319  if (!allowed)
2320  GST_ERROR_OBJECT (filter, "Set Caps Failed!\n");
2321 
2322  return allowed;
2323 }
2324 
2328 static gboolean
2329 gst_tensor_transform_transform_size (GstBaseTransform * trans,
2330  GstPadDirection direction, GstCaps * caps, gsize size, GstCaps * othercaps,
2331  gsize * othersize)
2332 {
2333  UNUSED (trans);
2334  UNUSED (direction);
2335  UNUSED (caps);
2336  UNUSED (size);
2337  UNUSED (othercaps);
2342  *othersize = 0;
2343 
2344  return TRUE;
2345 }
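Finally, a short sketch of configuring the element from application code instead of a gst-launch string. It assumes the factory is registered as "tensor_transform" and that the mode enum accepts its string form (e.g. "transpose"), as used in gst-launch examples; the property names follow the PROP_MODE, PROP_OPTION, and PROP_SILENT ids declared earlier in this file.

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
  GstElement *trans;

  gst_init (&argc, &argv);

  trans = gst_element_factory_make ("tensor_transform", "trans");
  if (trans == NULL)
    return -1;

  /* Select the transpose mode by its string form and pass the order option;
   * "1:0:2:3" swaps the first two dimensions. */
  gst_util_set_object_arg (G_OBJECT (trans), "mode", "transpose");
  g_object_set (trans, "option", "1:0:2:3", "silent", FALSE, NULL);

  /* The element would normally be added to a pipeline and linked here. */
  gst_object_unref (trans);
  return 0;
}

gst_util_set_object_arg is used for the enum-typed mode property so its string form can be reused; plain g_object_set suffices for the string and boolean properties.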