/**
 * Copyright (C) 2020 Samsung Electronics Co., Ltd. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file nntrainer_internal.h
 * @date 13 April 2020
 * @brief NNTrainer C-API Internal Header.
 *        This allows constructing and controlling the NNTrainer model.
 * @see https://github.com/nnstreamer/nntrainer
 * @author Jijoong Moon <jijoong.moon@samsung.com>
 * @author Parichay Kapoor <pk.kapoor@samsung.com>
 * @bug No known bugs except for NYI items
 */

#ifndef __TIZEN_MACHINELEARNING_NNTRAINER_INTERNAL_H__
#define __TIZEN_MACHINELEARNING_NNTRAINER_INTERNAL_H__

#include <array>
#include <mutex>
#include <string>
#include <unordered_map>

#include <nntrainer.h>

#include <dataset.h>
#include <layer.h>
#include <model.h>
#include <optimizer.h>
#include <tensor_dim.h>

#include <nntrainer_log.h>

/**
 * @brief Magic number of nntrainer.
 * @since_tizen 6.0
 */
#define ML_NNTRAINER_MAGIC 0x777F888F

/* Tizen ML feature */
#if defined(__TIZEN__)

/**
 * @brief Define enum for ML feature.
 * @since_tizen 7.0
 */
typedef enum {
  ML_FEATURE = 0,       /**< default option for ml feature */
  ML_FEATURE_INFERENCE, /**< inference option for ml feature */
  ML_FEATURE_TRAINING,  /**< training option for ml feature */
  ML_FEATURE_SERVICE,   /**< service option for ml feature */
  ML_FEATURE_MAX        /**< max option for ml feature */
} ml_feature_e;

/**
 * @brief Define enum for ML feature state.
 * @since_tizen 6.0
 */
typedef enum {
  NOT_CHECKED_YET = -1, /**< not checked option for feature state */
  NOT_SUPPORTED = 0,    /**< not supported option for feature state */
  SUPPORTED = 1         /**< supported option for feature state */
} feature_state_t;

#if defined(__FEATURE_CHECK_SUPPORT__)
/**
 * @brief Check whether the ML feature is enabled, and return the error code
 * from the calling function if it is not.
 * @since_tizen 6.0
 * @return Error type
 */
#define check_feature_state()                         \
  do {                                                \
    int feature_ret = ml_tizen_get_feature_enabled(); \
    if (ML_ERROR_NONE != feature_ret)                 \
      return feature_ret;                             \
  } while (0);

/**
 * @brief Set the ML feature state.
 * @since_tizen 6.0
 */
#define set_feature_state(...) ml_train_tizen_set_feature_state(__VA_ARGS__)
#else /* __FEATURE_CHECK_SUPPORT__ @since_tizen 6.0 */
#define check_feature_state()
#define set_feature_state(...)
#endif /* __FEATURE_CHECK_SUPPORT__ */

#else /* __TIZEN__ */
#define check_feature_state()
#define set_feature_state(...)
#endif /* __TIZEN__ */
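
/*
 * Illustrative sketch (not part of this header's contract): a public API entry
 * point is expected to call check_feature_state() before doing any other work,
 * so that it returns the feature error code on devices where the ml training
 * feature is disabled. The function body below is a simplified placeholder,
 * not the actual implementation.
 *
 *   int ml_train_model_construct(ml_train_model_h *model) {
 *     check_feature_state();
 *
 *     if (model == NULL)
 *       return ML_ERROR_INVALID_PARAMETER;
 *
 *     // ... allocate the ml_train_model wrapper and set its magic number ...
 *     return ML_ERROR_NONE;
 *   }
 */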

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @brief Struct to wrap neural network layer for the API.
 * @since_tizen 6.0
 * @note model mutex must be locked before layer lock, if model lock is needed
 */
typedef struct {
  uint magic;                              /**< magic number */
  std::shared_ptr<ml::train::Layer> layer; /**< layer object */
  bool in_use;                             /**< in_use flag */
  std::mutex m;                            /**< mutex for the layer */
} ml_train_layer;

/**
 * @brief Struct to wrap learning rate scheduler for the API
 * @note optimizer mutex must be locked before learning rate scheduler lock, if
 * optimizer lock is needed
 */
typedef struct {
  uint magic; /**< magic number */
  std::shared_ptr<ml::train::LearningRateScheduler>
    lr_scheduler; /**< learning rate scheduler object */
  bool in_use;    /**< in_use flag */
  std::mutex m;   /**< mutex for the learning rate scheduler */
} ml_train_lr_scheduler;

/**
 * @brief Struct to wrap neural network optimizer for the API
 * @note model mutex must be locked before optimizer lock, if model lock is
 * needed
 */
typedef struct {
  uint magic;                                      /**< magic number */
  std::shared_ptr<ml::train::Optimizer> optimizer; /**< optimizer object */
  ml_train_lr_scheduler *lr_scheduler; /**< learning rate scheduler object */
  bool in_use;                         /**< in_use flag */
  std::mutex m;                        /**< mutex for the optimizer */
} ml_train_optimizer;

/**
 * @brief Struct to wrap data buffer for the API.
 * @since_tizen 6.0
 * @note model mutex must be locked before dataset lock, if model lock is needed
 */
typedef struct {
  uint magic; /**< magic number */
  std::array<std::shared_ptr<ml::train::Dataset>, 3>
    dataset;    /**< dataset objects, one per dataset mode (train/valid/test) */
  bool in_use;  /**< in_use flag */
  std::mutex m; /**< mutex for the dataset */
} ml_train_dataset;

/**
 * @brief Struct to wrap neural network model for the API.
 * @since_tizen 6.0
 */
typedef struct {
  uint magic;                              /**< magic number */
  std::shared_ptr<ml::train::Model> model; /**< model object */
  std::unordered_map<std::string, ml_train_layer *>
    layers_map;                  /**< layers map */
  ml_train_optimizer *optimizer; /**< optimizer object */
  ml_train_dataset *dataset;     /**< dataset object */
  std::mutex m;                  /**< mutex for the model */
} ml_train_model;

/**
 * @brief Check validity of handle to be not NULL.
 * @since_tizen 6.0
 */
#define ML_TRAIN_VERIFY_VALID_HANDLE(obj_h)                     \
  do {                                                          \
    if (!obj_h) {                                               \
      ml_loge("Error: Invalid Parameter : argument is empty."); \
      return ML_ERROR_INVALID_PARAMETER;                        \
    }                                                           \
  } while (0)

/**
 * @brief Check validity of the user passed arguments
 */
#define ML_TRAIN_GET_VALID_HANDLE(obj, obj_h, obj_type, obj_name)     \
  do {                                                                \
    obj = (obj_type *)obj_h;                                          \
    if (obj->magic != ML_NNTRAINER_MAGIC) {                           \
      ml_loge("Error: Invalid Parameter : %s is invalid.", obj_name); \
      return ML_ERROR_INVALID_PARAMETER;                              \
    }                                                                 \
  } while (0)

/**
 * @brief Get handle to lock the passed object.
 * @since_tizen 6.0
 * @note Check validity of the user passed arguments and lock the object.
 */
#define ML_TRAIN_GET_VALID_HANDLE_LOCKED(obj, obj_h, obj_type, obj_name) \
  do {                                                                   \
    ML_TRAIN_VERIFY_VALID_HANDLE(obj_h);                                 \
    std::lock_guard<std::mutex> ml_train_lock(GLOCK);                    \
    ML_TRAIN_GET_VALID_HANDLE(obj, obj_h, obj_type, obj_name);           \
    obj->m.lock();                                                       \
  } while (0)

/**
 * @brief Check validity of the user passed arguments, reset magic if the
 * object is not in use, and lock the object
 */
#define ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(obj, obj_h, obj_type, obj_name) \
  do {                                                                         \
    ML_TRAIN_VERIFY_VALID_HANDLE(obj_h);                                       \
    std::lock_guard<std::mutex> ml_train_lock(GLOCK);                          \
    ML_TRAIN_GET_VALID_HANDLE(obj, obj_h, obj_type, obj_name);                 \
    if (!obj->in_use)                                                          \
      obj->magic = 0;                                                          \
    obj->m.lock();                                                             \
  } while (0)

/**
 * @brief Reset object magic
 */
#define ML_TRAIN_RESET_VALIDATED_HANDLE(obj)          \
  do {                                                \
    std::lock_guard<std::mutex> ml_train_lock(GLOCK); \
    obj->magic = 0;                                   \
  } while (0)
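
/*
 * Illustrative sketch of how the macros above compose inside an API
 * implementation (the function name and body are hypothetical placeholders,
 * not the actual code): validate the handle, lock it while working on the
 * wrapped object, then unlock it.
 *
 *   int ml_train_layer_example_op(ml_train_layer_h layer) {
 *     ml_train_layer *nnlayer;
 *
 *     check_feature_state();
 *
 *     ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnlayer, layer, ml_train_layer,
 *                                      "layer");
 *     // ... operate on nnlayer->layer while holding nnlayer->m ...
 *     nnlayer->m.unlock();
 *
 *     return ML_ERROR_NONE;
 *   }
 */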

/**
 * @brief Check validity of passed model and lock the object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model) \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnmodel, model, ml_train_model, "model")

/**
 * @brief Check validity of passed model, reset magic and lock the object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_MODEL_LOCKED_RESET(nnmodel, model)           \
  do {                                                                  \
    ML_TRAIN_VERIFY_VALID_HANDLE(model);                                \
    std::lock_guard<std::mutex> ml_train_lock(GLOCK);                   \
    ML_TRAIN_GET_VALID_HANDLE(nnmodel, model, ml_train_model, "model"); \
    nnmodel->magic = 0;                                                 \
    nnmodel->m.lock();                                                  \
  } while (0)

/**
 * @brief Check validity of passed layer and lock the object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_LAYER_LOCKED(nnlayer, layer) \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnlayer, layer, ml_train_layer, "layer")

/**
 * @brief Check validity of passed layer, reset magic and lock the object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_LAYER_LOCKED_RESET(nnlayer, layer)            \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(nnlayer, layer, ml_train_layer, \
                                         "layer")

/**
 * @brief Check validity of passed optimizer and lock the object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, opt) \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnopt, opt, ml_train_optimizer, "optimizer")

/**
 * @brief Check validity of passed optimizer, reset magic and lock the
 * object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_OPT_LOCKED_RESET(nnopt, opt)                    \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(nnopt, opt, ml_train_optimizer,   \
                                         "optimizer")

/**
 * @brief Check validity of passed lr_scheduler and lock the object
 */
#define ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED(nnlrscheduler, lrscheduler) \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnlrscheduler, lrscheduler,             \
                                   ml_train_lr_scheduler, "lr_scheduler")

/**
 * @brief Check validity of passed lr_scheduler, reset magic and lock the
 * object
 */
#define ML_TRAIN_GET_VALID_LR_SCHEDULER_LOCKED_RESET(nnlrscheduler, \
                                                     lrscheduler)   \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(                           \
    nnlrscheduler, lrscheduler, ml_train_lr_scheduler, "lr_scheduler")

/**
 * @brief Check validity of passed dataset and lock the object
 */
#define ML_TRAIN_GET_VALID_DATASET_LOCKED(nndataset, dataset)            \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED(nndataset, dataset, ml_train_dataset, \
                                   "dataset")

/**
 * @brief Check validity of passed dataset, reset magic and lock the object.
 * @since_tizen 6.0
 */
#define ML_TRAIN_GET_VALID_DATASET_LOCKED_RESET(nndataset, dataset)            \
  ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(nndataset, dataset, ml_train_dataset, \
                                         "dataset")
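
/*
 * Illustrative sketch of a destroy-style flow (simplified placeholder, not the
 * actual implementation): the *_LOCKED_RESET variants clear the magic number
 * of objects that are not currently owned by a model, so another thread can no
 * longer re-validate the handle while it is being torn down.
 *
 *   int ml_train_layer_destroy(ml_train_layer_h layer) {
 *     ml_train_layer *nnlayer;
 *
 *     check_feature_state();
 *
 *     ML_TRAIN_GET_VALID_LAYER_LOCKED_RESET(nnlayer, layer);
 *     if (nnlayer->in_use) {
 *       nnlayer->m.unlock();
 *       return ML_ERROR_INVALID_PARAMETER; // still attached to a model
 *     }
 *     nnlayer->m.unlock();
 *
 *     delete nnlayer;
 *     return ML_ERROR_NONE;
 *   }
 */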

/**
 * @brief Get all neural network layer names from the model.
 * @details Use this function to get the names of the already created neural
 * network layers. This can be used to obtain the layers when the model is
 * defined with an ini file.
 * @since_tizen 6.x
 * @note The caller must free the list of the layer names.
 * @param[in] model The NNTrainer model handler from the given description.
 * @param[out] layers_name List of names of layers in the model ended with NULL.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_model_get_all_layer_names(ml_train_model_h model,
                                       const char **layers_name[]);
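
/*
 * Illustrative usage sketch (assumes a constructed `model` handle; error
 * handling and the exact ownership of the individual strings are simplified,
 * following the note above):
 *
 *   const char **names = NULL;
 *   if (ml_train_model_get_all_layer_names(model, &names) == ML_ERROR_NONE) {
 *     for (const char **n = names; *n != NULL; ++n)
 *       printf("layer: %s\n", *n);
 *     free(names); // the caller owns the returned list
 *   }
 */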

/**
 * @brief Callback function to notify completion of training of the model.
 * @since_tizen 6.0
 * @param[in] model The NNTrainer model handler.
 * @param[in] data Internal data to be given to the callback, cb.
 */
typedef void (*ml_train_run_cb)(ml_train_model_h model, void *data);

/**
 * @brief Train the neural network model asynchronously.
 * @details Use this function to train the compiled neural network model with
 * the passed training hyperparameters. The callback will be called once the
 * requested training, validation and testing is completed.
 * @since_tizen 6.x
 * @param[in] model The NNTrainer model handler.
 * @param[in] cb The callback handler to be called after training finishes.
 * @param[in] data Internal data to be given to the callback, cb.
 * @param[in] ... Hyperparameters for training the model.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter.
 */
int ml_train_model_run_async(ml_train_model_h model, ml_train_run_cb cb,
                             void *data, ...);
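
/*
 * Illustrative sketch of asynchronous training. It assumes the trailing
 * hyperparameter list is NULL-terminated, like ml_train_model_run(); the
 * callback name and property strings are placeholders.
 *
 *   static void on_train_done(ml_train_model_h model, void *data) {
 *     // training, validation and testing requested by run_async are finished
 *   }
 *
 *   int status = ml_train_model_run_async(model, on_train_done, NULL,
 *                                         "epochs=2", "batch_size=16", NULL);
 */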

/**
 * @brief Insert a layer at a specific location among the existing layers of
 * the neural network model.
 * @details Use this function to insert a layer into the model.
 * @since_tizen 6.x
 * @param[in] model The NNTrainer model handler from the given description.
 * @param[in] layer The NNTrainer layer handler.
 * @param[in] input_layer_names List of layers ended with NULL, which will
 * provide input to the layer being inserted.
 * @param[in] output_layer_names List of layers ended with NULL, which will
 * receive the output of the layer being inserted.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter.
 * @note If the length of @a input_layer_names is more than 1, the layer to be
 * inserted should support multiple inputs. Otherwise
 * #ML_ERROR_INVALID_PARAMETER is returned. If the layers in @a
 * output_layer_names already have an input connection, then they should
 * support multiple inputs. Otherwise #ML_ERROR_INVALID_PARAMETER is returned.
 * If the length of @a output_layer_names is 0, then this layer will be treated
 * as one of the output layers, and a loss will be attached to it based on the
 * network configuration. If both @a input_layer_names and @a
 * output_layer_names are empty, then this layer is attached at the end of the
 * output layer of the network. In case of multiple output layers, this layer
 * is attached next to the last created output layer.
 */
int ml_train_model_insert_layer(ml_train_model_h model, ml_train_layer_h layer,
                                const char *input_layer_names[],
                                const char *output_layer_names[]);
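
/*
 * Illustrative sketch: insert a fully connected layer between two existing
 * layers. The layer names "conv1" and "out1" are placeholders and must match
 * layers already present in the model.
 *
 *   ml_train_layer_h fc;
 *   const char *inputs[] = {"conv1", NULL};
 *   const char *outputs[] = {"out1", NULL};
 *
 *   ml_train_layer_create(&fc, ML_TRAIN_LAYER_TYPE_FC);
 *   ml_train_layer_set_property(fc, "name=fc_new", "unit=10", NULL);
 *   int status = ml_train_model_insert_layer(model, fc, inputs, outputs);
 */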

/**
 * @brief Compiles and finalizes the neural network model with a single param.
 * @details Use this function to initialize the neural network model. Various
 * hyperparameters can be set before compiling the model. Once compiled, any
 * modification to the properties of the model or of its
 * layers/dataset/optimizer will be restricted. Further, addition of layers or
 * changing the optimizer/dataset of the model will not be permitted.
 * This API exists to work around the va_list issue of DllImport in the C#
 * interop. The input format of single_param must be 'key = value' format, and
 * parameters are separated by the '|' delimiter, e.g.
 * ml_train_model_compile_with_single_param(model, "loss=cross|batch_size=9")
 * @since_tizen 7.0
 * @param[in] model The NNTrainer model handle.
 * @param[in] single_param Hyperparameters for compiling the model.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_model_compile_with_single_param(ml_train_model_h model,
                                             const char *single_param);
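
/*
 * Illustrative usage sketch, reusing the example string from the description
 * above:
 *
 *   int status = ml_train_model_compile_with_single_param(
 *     model, "loss=cross|batch_size=9");
 *   if (status != ML_ERROR_NONE)
 *     ml_loge("failed to compile the model");
 */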

/**
 * @brief Trains the neural network model with a single param.
 * @details Use this function to train the compiled neural network model with
 * the passed training hyperparameters. This function will return once the
 * training, along with requested validation and testing, is completed.
 * This API exists to work around the va_list issue of DllImport in the C#
 * interop. The input format of single_param must be 'key = value' format, and
 * parameters are separated by the '|' delimiter, e.g.
 * ml_train_model_run_with_single_param(model, "epochs=2|batch_size=16")
 * @since_tizen 7.0
 * @param[in] model The NNTrainer model handle.
 * @param[in] single_param Hyperparameters for training the model.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_model_run_with_single_param(ml_train_model_h model,
                                         const char *single_param);
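
/*
 * Illustrative usage sketch (the model is assumed to be compiled already):
 *
 *   int status = ml_train_model_run_with_single_param(
 *     model, "epochs=2|batch_size=16");
 *   if (status != ML_ERROR_NONE)
 *     ml_loge("training failed");
 */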

/**
 * @brief Sets the neural network layer property with a single param.
 * @details Use this function to set a neural network layer property.
 * This API exists to work around the va_list issue of DllImport in the C#
 * interop. The input format of single_param must be 'key = value' format, and
 * parameters are separated by the '|' delimiter, e.g.
 * ml_train_layer_set_property_with_single_param(layer,
 * "unit=10|activation=softmax")
 * @since_tizen 7.0
 * @param[in] layer The NNTrainer layer handle.
 * @param[in] single_param Property values.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_layer_set_property_with_single_param(ml_train_layer_h layer,
                                                  const char *single_param);
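
/*
 * Illustrative usage sketch, reusing the example property string from the
 * description above:
 *
 *   ml_train_layer_h layer;
 *   ml_train_layer_create(&layer, ML_TRAIN_LAYER_TYPE_FC);
 *   int status = ml_train_layer_set_property_with_single_param(
 *     layer, "unit=10|activation=softmax");
 */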

/**
 * @brief Sets the neural network optimizer property with a single param.
 * @details Use this function to set a neural network optimizer property.
 * This API exists to work around the va_list issue of DllImport in the C#
 * interop. The input format of single_param must be 'key = value' format, and
 * parameters are separated by the '|' delimiter, e.g.
 * ml_train_optimizer_set_property_with_single_param(optimizer,
 * "beta1=0.002 | beta2=0.001 | epsilon=1e-7");
 * @since_tizen 7.0
 * @param[in] optimizer The NNTrainer optimizer handle.
 * @param[in] single_param Property values.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_optimizer_set_property_with_single_param(
  ml_train_optimizer_h optimizer, const char *single_param);
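
/*
 * Illustrative usage sketch (an Adam optimizer is assumed; whitespace around
 * '=' and '|' is accepted, as in the example above):
 *
 *   ml_train_optimizer_h optimizer;
 *   ml_train_optimizer_create(&optimizer, ML_TRAIN_OPTIMIZER_TYPE_ADAM);
 *   int status = ml_train_optimizer_set_property_with_single_param(
 *     optimizer, "beta1=0.002 | beta2=0.001 | epsilon=1e-7");
 */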

/**
 * @brief Sets the learning rate scheduler property with a single param.
 * @details Use this function to set a learning rate scheduler property.
 * This API exists to work around the va_list issue of DllImport in the C#
 * interop. The input format of single_param must be 'key = value' format, and
 * parameters are separated by the '|' delimiter, e.g.
 * ml_train_lr_scheduler_set_property_with_single_param(lr_scheduler,
 * "learning_rate=0.01 | decay_rate=0.5 | decay_steps=1000");
 * @since_tizen 7.5
 * @param[in] lr_scheduler The learning rate scheduler handle.
 * @param[in] single_param Property values.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_lr_scheduler_set_property_with_single_param(
  ml_train_lr_scheduler_h lr_scheduler, const char *single_param);
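
/*
 * Illustrative usage sketch (assumes an exponential scheduler created through
 * the public lr scheduler API and later attached to an optimizer):
 *
 *   ml_train_lr_scheduler_h lr_scheduler;
 *   ml_train_lr_scheduler_create(&lr_scheduler,
 *                                ML_TRAIN_LR_SCHEDULER_TYPE_EXPONENTIAL);
 *   int status = ml_train_lr_scheduler_set_property_with_single_param(
 *     lr_scheduler, "learning_rate=0.01 | decay_rate=0.5 | decay_steps=1000");
 *   ml_train_optimizer_set_lr_scheduler(optimizer, lr_scheduler);
 */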

/**
 * @brief Sets the neural network dataset property for a mode with a single
 * param.
 * @details Use this function to set a dataset property for a specific mode.
 * This API exists to work around the va_list issue of DllImport in the C#
 * interop. The input format of single_param must be 'key = value' format, and
 * parameters are separated by the '|' delimiter, e.g.
 * ml_train_dataset_set_property_for_mode_with_single_param(dataset,
 * ML_TRAIN_DATASET_MODE_TEST, "key1=value1 | key2=value2");
 * @since_tizen 7.0
 * @param[in] dataset The NNTrainer dataset handle.
 * @param[in] mode The mode to set the property for.
 * @param[in] single_param Property values.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful.
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
 * @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
 */
int ml_train_dataset_set_property_for_mode_with_single_param(
  ml_train_dataset_h dataset, ml_train_dataset_mode_e mode,
  const char *single_param);
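
/*
 * Illustrative usage sketch for the test mode (the data file path and the
 * buffer_size property are placeholders):
 *
 *   ml_train_dataset_h dataset;
 *   ml_train_dataset_create(&dataset);
 *   ml_train_dataset_add_file(dataset, ML_TRAIN_DATASET_MODE_TEST, "test.dat");
 *   int status = ml_train_dataset_set_property_for_mode_with_single_param(
 *     dataset, ML_TRAIN_DATASET_MODE_TEST, "buffer_size=100");
 */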

#if defined(__TIZEN__)
/**
 * @brief Checks whether the machine_learning.training feature is enabled.
 * @since_tizen 6.0
 * @return @c ML_ERROR_NONE if the feature is enabled, otherwise a negative
 * error value.
 */
int ml_tizen_get_feature_enabled(void);

/**
 * @brief Sets the feature state of machine_learning.training.
 * This is used only for unit tests.
 * @since_tizen 7.0
 * @param[in] feature The feature to be set.
 * @param[in] state The state to be set.
 */
void ml_train_tizen_set_feature_state(ml_feature_e feature,
                                      feature_state_t state);
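
/*
 * Illustrative unit-test sketch (assumes the cached feature state consulted by
 * ml_tizen_get_feature_enabled() is the one set here):
 *
 *   ml_train_tizen_set_feature_state(ML_FEATURE_TRAINING, SUPPORTED);
 *   assert(ml_tizen_get_feature_enabled() == ML_ERROR_NONE);
 *
 *   ml_train_tizen_set_feature_state(ML_FEATURE_TRAINING, NOT_CHECKED_YET);
 */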
#endif /* __TIZEN__ */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* __TIZEN_MACHINELEARNING_NNTRAINER_INTERNAL_H__ */