#include <tensorflow/c/c_api.h>

/* From read_graph(): slurp the serialized GraphDef file into a TF_Buffer.
 * Error-handling bodies are elided throughout this excerpt. */
unsigned char *graph_data = NULL;
long size, bytes_read;

bytes_read = avio_read(model_file_context, graph_data, size);
if (bytes_read != size){ /* ... */ }

graph_buf = TF_NewBuffer();
graph_buf->data = (void *)graph_data;
graph_buf->length = size;
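/*
 * A hedged sketch of how read_graph() plausibly fits together end to end,
 * assuming the avio-based I/O shown above and a free_buffer() deallocator;
 * it is not a verbatim copy of the original function.
 */
#include "libavformat/avio.h"
#include "libavutil/mem.h"

static void free_buffer(void *data, size_t length)
{
    av_freep(&data);
}

static TF_Buffer *read_graph_sketch(const char *model_filename)
{
    TF_Buffer *graph_buf;
    unsigned char *graph_data = NULL;
    AVIOContext *model_file_context;
    long size, bytes_read;

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0)
        return NULL;
    size = avio_size(model_file_context);

    graph_data = av_malloc(size);
    if (!graph_data){
        avio_closep(&model_file_context);
        return NULL;
    }
    bytes_read = avio_read(model_file_context, graph_data, size);
    avio_closep(&model_file_context);
    if (bytes_read != size){
        av_freep(&graph_data);
        return NULL;
    }

    /* Hand the bytes to TensorFlow; free_buffer() releases them when the
     * buffer is deleted. */
    graph_buf = TF_NewBuffer();
    graph_buf->data = (void *)graph_data;
    graph_buf->length = size;
    graph_buf->data_deallocator = free_buffer;
    return graph_buf;
}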
/* From allocate_input_tensor(): one 4-D NHWC tensor sized from the input dims. */
return TF_AllocateTensor(dt, input_dims, 4,
                         input_dims[1] * input_dims[2] * input_dims[3] * size);
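/*
 * Hedged illustration of the allocation above, with made-up dimensions: for a
 * 1 x 480 x 640 x 3 float input the byte count is H * W * C times the element
 * size (the leading batch dimension is 1, so it drops out of the product).
 */
int64_t input_dims[4] = {1, 480, 640, 3};   /* N, H, W, C (example values) */
TF_Tensor *input_tensor = TF_AllocateTensor(TF_FLOAT, input_dims, 4,
                                            input_dims[1] * input_dims[2] *
                                            input_dims[3] * sizeof(float));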
/* From set_input_output_tf(): resolve the named input and output operations,
 * then set up the session and the optional "init" op. */
TF_SessionOptions *sess_opts;
const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");

tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, input_name);
if (!tf_model->input.oper){ /* ... */ }
tf_model->input.index = 0;

/* (inside the loop over output_names) */
tf_model->outputs[i].oper = TF_GraphOperationByName(tf_model->graph, output_names[i]);
if (!tf_model->outputs[i].oper){ /* ... */ }

sess_opts = TF_NewSessionOptions();
TF_DeleteSessionOptions(sess_opts);
if (TF_GetCode(tf_model->status) != TF_OK)
    /* ... */;

if (TF_GetCode(tf_model->status) != TF_OK)
    /* ... */;
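/*
 * A hedged sketch of the session setup the excerpt above hints at: create the
 * session, then run the graph's optional "init" operation once. The
 * tf_model->session field and the DNN_ERROR/DNN_SUCCESS return values come
 * from the dnn interface and are assumptions here, not shown in the excerpt.
 */
sess_opts = TF_NewSessionOptions();
tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
TF_DeleteSessionOptions(sess_opts);
if (TF_GetCode(tf_model->status) != TF_OK)
    return DNN_ERROR;

if (init_op){
    /* No feeds, no fetches: just execute the "init" target operation. */
    TF_SessionRun(tf_model->session, NULL,
                  NULL, NULL, 0,
                  NULL, NULL, 0,
                  &init_op, 1,
                  NULL, tf_model->status);
    if (TF_GetCode(tf_model->status) != TF_OK)
        return DNN_ERROR;
}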
/* From load_tf_model(): import the GraphDef read from disk into a fresh
 * graph, cleaning up if the import fails. */
TF_Buffer *graph_def;
TF_ImportGraphDefOptions *graph_opts;

tf_model->graph = TF_NewGraph();
tf_model->status = TF_NewStatus();
graph_opts = TF_NewImportGraphDefOptions();
TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
TF_DeleteImportGraphDefOptions(graph_opts);
TF_DeleteBuffer(graph_def);
if (TF_GetCode(tf_model->status) != TF_OK){
    TF_DeleteGraph(tf_model->graph);
    TF_DeleteStatus(tf_model->status);
    /* ... */
}
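/*
 * Hedged sketch of the whole routine, assuming the DNN_SUCCESS / DNN_ERROR
 * values from the dnn interface header and the read_graph_sketch() helper
 * shown earlier.
 */
static DNNReturnType load_tf_model_sketch(TFModel *tf_model, const char *model_filename)
{
    TF_Buffer *graph_def;
    TF_ImportGraphDefOptions *graph_opts;

    graph_def = read_graph_sketch(model_filename);
    if (!graph_def)
        return DNN_ERROR;

    tf_model->graph  = TF_NewGraph();
    tf_model->status = TF_NewStatus();
    graph_opts = TF_NewImportGraphDefOptions();
    TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
    TF_DeleteImportGraphDefOptions(graph_opts);
    TF_DeleteBuffer(graph_def);
    if (TF_GetCode(tf_model->status) != TF_OK){
        TF_DeleteGraph(tf_model->graph);
        TF_DeleteStatus(tf_model->status);
        return DNN_ERROR;
    }
    return DNN_SUCCESS;
}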
#define NAME_BUFFER_SIZE 256

/* From add_conv_layer(): each native CONV layer becomes a small chain of
 * TensorFlow nodes: kernel Const, Transpose, Conv2D, bias Const, BiasAdd,
 * and the activation op. */
TF_OperationDescription *op_desc;
int64_t strides[] = {1, 1, 1, 1};

/* Kernel constant. */
op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, size * sizeof(float));
memcpy(TF_TensorData(tensor), params->kernel, size * sizeof(float));
TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

/* Transpose the kernel into the layout Conv2D expects, using the shared
 * transpose_perm constant built in load_native_model(). */
op_desc = TF_NewOperation(tf_model->graph, "Transpose", name_buffer);
TF_AddInput(op_desc, input);
input.oper = transpose_op;
TF_AddInput(op_desc, input);
TF_SetAttrType(op_desc, "T", TF_FLOAT);
TF_SetAttrType(op_desc, "Tperm", TF_INT32);
op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

/* Conv2D over the current output (*cur_op) with the transposed kernel. */
op_desc = TF_NewOperation(tf_model->graph, "Conv2D", name_buffer);
input.oper = *cur_op;
TF_AddInput(op_desc, input);
/* (second input: the transposed kernel; assignment elided in this excerpt) */
TF_AddInput(op_desc, input);
TF_SetAttrType(op_desc, "T", TF_FLOAT);
TF_SetAttrIntList(op_desc, "strides", strides, 4);
TF_SetAttrString(op_desc, "padding", "VALID", 5);
*cur_op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

/* Bias constant. */
op_desc = TF_NewOperation(tf_model->graph, "Const", name_buffer);
TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
tensor = TF_AllocateTensor(TF_FLOAT, dims, dims_len, params->output_num * sizeof(float));
memcpy(TF_TensorData(tensor), params->biases, params->output_num * sizeof(float));
TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

/* BiasAdd. */
op_desc = TF_NewOperation(tf_model->graph, "BiasAdd", name_buffer);
input.oper = *cur_op;
TF_AddInput(op_desc, input);
/* (second input: the bias constant; assignment elided in this excerpt) */
TF_AddInput(op_desc, input);
TF_SetAttrType(op_desc, "T", TF_FLOAT);
*cur_op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

/* Activation: one of Relu, Tanh or Sigmoid, selected by params->activation
 * (the switch itself is elided in this excerpt). */
op_desc = TF_NewOperation(tf_model->graph, "Relu", name_buffer);

op_desc = TF_NewOperation(tf_model->graph, "Tanh", name_buffer);

op_desc = TF_NewOperation(tf_model->graph, "Sigmoid", name_buffer);

input.oper = *cur_op;
TF_AddInput(op_desc, input);
TF_SetAttrType(op_desc, "T", TF_FLOAT);
*cur_op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
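/*
 * A hedged sketch of the four-step recipe every block above repeats, and of
 * how per-layer node names are built into name_buffer. The "activation%d"
 * format string and the surrounding variables (layer, cur_op) are
 * illustrative assumptions; `input` in the excerpt is a TF_Output, i.e. an
 * (operation, output index) pair.
 */
char name_buffer[NAME_BUFFER_SIZE];
TF_Output edge;

snprintf(name_buffer, NAME_BUFFER_SIZE, "activation%d", layer);
op_desc = TF_NewOperation(tf_model->graph, "Relu", name_buffer);  /* 1. describe the node  */
edge.oper  = *cur_op;                                             /* 2. wire its input(s)  */
edge.index = 0;
TF_AddInput(op_desc, edge);
TF_SetAttrType(op_desc, "T", TF_FLOAT);                           /* 3. set its attributes */
*cur_op = TF_FinishOperation(op_desc, tf_model->status);          /* 4. finish and check   */
if (TF_GetCode(tf_model->status) != TF_OK)
    return DNN_ERROR;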
/* From add_depth_to_space_layer(): a single DepthToSpace node. With
 * block_size b, an NHWC tensor of shape 1 x H x W x (b*b*C) becomes
 * 1 x bH x bW x C, trading channel depth for spatial resolution. */
TF_OperationDescription *op_desc;

op_desc = TF_NewOperation(tf_model->graph, "DepthToSpace", name_buffer);
input.oper = *cur_op;
TF_AddInput(op_desc, input);
TF_SetAttrType(op_desc, "T", TF_FLOAT);
TF_SetAttrInt(op_desc, "block_size", params->block_size);
*cur_op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
/* From calculate_pad(): walk the native network once to work out how much the
 * stacked VALID convolutions will shrink the frame. */
for (layer = 0; layer < conv_network->layers_num; ++layer){
    /* ... per-layer accumulation elided; see the sketch below ... */
}
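/*
 * A hedged sketch of that loop body: each VALID k x k convolution trims
 * (k - 1) rows and columns in total, roughly half the kernel size per side,
 * so the pads simply accumulate over the conv layers. The layers[].type,
 * CONV and kernel_size names follow the native backend and are assumptions.
 */
int pad = 0;
for (layer = 0; layer < conv_network->layers_num; ++layer){
    if (conv_network->layers[layer].type == CONV){
        ConvolutionalParams *conv_params =
            (ConvolutionalParams *)conv_network->layers[layer].params;
        pad += conv_params->kernel_size >> 1;
    }
}
/* add_pad_op() then mirrors this many pixels onto each border so the final
 * output keeps the input resolution. */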
/* From add_pad_op(): build a constant 4x2 "pads" tensor, then a MirrorPad
 * node that mirrors the borders so the following VALID convolutions do not
 * shrink the frame. */
TF_OperationDescription *op_desc;
int64_t pads_shape[] = {4, 2};

op_desc = TF_NewOperation(tf_model->graph, "Const", "pads");
TF_SetAttrType(op_desc, "dtype", TF_INT32);
tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 * sizeof(int32_t));
pads = (int32_t *)TF_TensorData(tensor);
pads[0] = 0;   pads[1] = 0;     /* batch:    no padding             */
pads[2] = pad; pads[3] = pad;   /* height:   pad rows top and bottom */
pads[4] = pad; pads[5] = pad;   /* width:    pad columns left and right */
pads[6] = 0;   pads[7] = 0;     /* channels: no padding             */
TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

op_desc = TF_NewOperation(tf_model->graph, "MirrorPad", "mirror_pad");
input.oper = *cur_op;
TF_AddInput(op_desc, input);
/* (second input: the "pads" constant; assignment elided in this excerpt) */
TF_AddInput(op_desc, input);
TF_SetAttrType(op_desc, "T", TF_FLOAT);
TF_SetAttrType(op_desc, "Tpaddings", TF_INT32);
TF_SetAttrString(op_desc, "mode", "SYMMETRIC", 9);
*cur_op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
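/*
 * Worked example of the MirrorPad above, with illustrative numbers: for
 * pad = 2 the constant is pads = [[0,0], [2,2], [2,2], [0,0]], so a
 * 1 x 100 x 100 x 3 NHWC input grows to 1 x 104 x 104 x 3, since each padded
 * dimension gains pads[d][0] + pads[d][1] elements. SYMMETRIC mode fills the
 * new border by reflecting the edge pixels.
 */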
/* From load_native_model(): convert the native ConvolutionalNetwork into a
 * TensorFlow graph with a Placeholder "x" as input and an Identity "y" as
 * output. */
TF_OperationDescription *op_desc;
TF_Operation *transpose_op;
int64_t transpose_perm_shape[] = {4};
int64_t input_shape[] = {1, -1, -1, -1};   /* batch 1; dynamic H, W, C */

tf_model->graph = TF_NewGraph();
tf_model->status = TF_NewStatus();

#define CLEANUP_ON_ERROR(tf_model) \
    TF_DeleteGraph(tf_model->graph); \
    TF_DeleteStatus(tf_model->status);
    /* (remainder of the cleanup macro elided in this excerpt) */

/* Input placeholder. */
op_desc = TF_NewOperation(tf_model->graph, "Placeholder", "x");
TF_SetAttrType(op_desc, "dtype", TF_FLOAT);
TF_SetAttrShape(op_desc, "shape", input_shape, 4);
op = TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

/* Kernel permutation constant, shared by every conv layer's Transpose. */
op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
TF_SetAttrType(op_desc, "dtype", TF_INT32);
tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
transpose_perm = (int32_t *)TF_TensorData(tensor);
transpose_perm[0] = 1;
transpose_perm[1] = 2;
transpose_perm[2] = 3;
transpose_perm[3] = 0;
TF_SetAttrTensor(op_desc, "value", tensor, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
transpose_op = TF_FinishOperation(op_desc, tf_model->status);

/* One add_*_layer() call per native layer. */
for (layer = 0; layer < conv_network->layers_num; ++layer){
    /* ... dispatch per layer type elided; see the sketch below ... */
}

/* Final output node. */
op_desc = TF_NewOperation(tf_model->graph, "Identity", "y");
TF_AddInput(op_desc, input);
TF_FinishOperation(op_desc, tf_model->status);
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }
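/*
 * A hedged sketch of the per-layer loop elided above, using the
 * add_conv_layer() / add_depth_to_space_layer() signatures of this backend.
 * The layer-type names (CONV, DEPTH_TO_SPACE) and the layers[] field layout
 * follow the native backend and are assumptions here; `op` tracks the current
 * end of the graph, starting at the "x" placeholder.
 */
for (layer = 0; layer < conv_network->layers_num; ++layer){
    switch (conv_network->layers[layer].type){
    case CONV:
        if (add_conv_layer(tf_model, transpose_op, &op,
                           (ConvolutionalParams *)conv_network->layers[layer].params,
                           layer) != DNN_SUCCESS){
            CLEANUP_ON_ERROR(tf_model);
        }
        break;
    case DEPTH_TO_SPACE:
        if (add_depth_to_space_layer(tf_model, &op,
                                     (DepthToSpaceParams *)conv_network->layers[layer].params,
                                     layer) != DNN_SUCCESS){
            CLEANUP_ON_ERROR(tf_model);
        }
        break;
    default:
        CLEANUP_ON_ERROR(tf_model);
    }
}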
/* From ff_dnn_load_model_tf(): the backend-specific TFModel is stashed in the
 * generic DNNModel handle. */
model->model = (void *)tf_model;
/* From ff_dnn_execute_model_tf(): check the execution status, then walk the
 * requested outputs. */
if (TF_GetCode(tf_model->status) != TF_OK){ /* ... */ }

for (uint32_t i = 0; i < nb; ++i) {
    /* ... per-output handling elided ... */
}
/* From ff_dnn_free_model_tf(): tear down the TensorFlow objects owned by the
 * model. */
tf_model = (TFModel *)(*model)->model;
TF_DeleteGraph(tf_model->graph);
/* ... */
TF_DeleteStatus(tf_model->status);
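/*
 * A hedged usage sketch of the public entry points above, roughly how a
 * filter such as vf_sr drives this backend. The DNNInputData/DNNData field
 * names, the DNN_FLOAT constant and the "x"/"y" tensor names are assumptions
 * for illustration, not confirmed by this excerpt.
 */
DNNModel *model = ff_dnn_load_model_tf("model.pb");   /* hypothetical path */
DNNInputData input;
DNNData output;
const char *output_name = "y";

if (!model)
    return AVERROR(EINVAL);

/* Declare the input layout; the backend allocates the input tensor and is
 * assumed to report where to write the pixels (input.data). */
input.dt       = DNN_FLOAT;
input.width    = 640;     /* example values */
input.height   = 480;
input.channels = 1;
if (model->set_input_output(model->model, &input, "x", &output_name, 1) != DNN_SUCCESS){
    ff_dnn_free_model_tf(&model);
    return AVERROR(EINVAL);
}

/* Fill input.data for each frame, then run the graph. */
if (ff_dnn_execute_model_tf(model, &output, 1) != DNN_SUCCESS){
    ff_dnn_free_model_tf(&model);
    return AVERROR(EINVAL);
}
/* output now describes the network result. */

ff_dnn_free_model_tf(&model);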
Symbols referenced above (cleaned from the Doxygen cross-reference):

static void free_buffer(void *data, size_t length)
static TF_Buffer *read_graph(const char *model_filename)
static TF_Tensor *allocate_input_tensor(const DNNInputData *input)
static DNNReturnType set_input_output_tf(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op, ConvolutionalParams *params, const int layer)
static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op, DepthToSpaceParams *params, const int layer)
static int calculate_pad(const ConvolutionalNetwork *conv_network)
static DNNReturnType add_pad_op(TFModel *tf_model, TF_Operation **cur_op, const int32_t pad)
static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
#define CLEANUP_ON_ERROR(tf_model)
TF_Tensor **output_tensors
DNNActivationFunc activation

DNNModel *ff_dnn_load_model_tf(const char *model_filename)
DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
void ff_dnn_free_model_tf(DNNModel **model)
DNNReturnType (*set_input_output)(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
    DNN inference functions interface for TensorFlow backend.
DNNModel *ff_dnn_load_model_native(const char *model_filename)
void ff_dnn_free_model_native(DNNModel **model)
    DNN inference functions interface for native backend.

int avio_open(AVIOContext **s, const char *url, int flags)
    Create and initialize an AVIOContext for accessing the resource indicated by url.
int64_t avio_size(AVIOContext *s)
    Get the file size.
int avio_read(AVIOContext *s, unsigned char *buf, int size)
    Read size bytes from AVIOContext into buf.
int avio_closep(AVIOContext **s)
    Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL.
#define AVIO_FLAG_READ
    read-only
void *av_mallocz(size_t size)
    Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
void *av_mallocz_array(size_t nmemb, size_t size)
    Allocate a memory block for an array with av_mallocz().
#define av_malloc_array(a, b)
#define av_assert0(cond)
    assert() equivalent, that is always enabled.