From patchwork Sun Jun 28 15:34:41 2020
X-Patchwork-Submitter: "Fu, Ting" <ting.fu@intel.com>
X-Patchwork-Id: 20672
From: Ting Fu <ting.fu@intel.com>
To: ffmpeg-devel@ffmpeg.org
Date: Sun, 28 Jun 2020 23:34:41 +0800
Message-Id: <20200628153442.29074-11-ting.fu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200628153442.29074-1-ting.fu@intel.com>
References: <20200628153442.29074-1-ting.fu@intel.com>
Subject: [FFmpeg-devel] [PATCH 11/12] dnn_backend_native_layer_mathunary: add atanh support

It can be tested with the model generated with the python script below:

import tensorflow as tf
import numpy as np
import imageio

in_img = imageio.imread('input.jpeg')
in_img = in_img.astype(np.float32)/255.0
in_data = in_img[np.newaxis, :]

x = tf.placeholder(tf.float32, shape=[1, None, None, 3], name='dnn_in')

# please uncomment the part you want to test

x_sinh_1 = tf.sinh(x)
x_out = tf.divide(x_sinh_1, 1.176)   # sinh(1.0)

x_cosh_1 = tf.cosh(x)
x_out = tf.divide(x_cosh_1, 1.55)   # cosh(1.0)

x_tanh_1 = tf.tanh(x)
x_out = tf.divide(x_tanh_1, 0.77)   # tanh(1.0)

x_asinh_1 = tf.asinh(x)
x_out = tf.divide(x_asinh_1, 0.89)   # asinh(1.0/1.1)

x_acosh_1 = tf.add(x, 1.1)
x_acosh_2 = tf.acosh(x_acosh_1)   # accepts (1, inf)
x_out = tf.divide(x_acosh_2, 1.4)   # acosh(2.1)

x_atanh_1 = tf.divide(x, 1.1)
x_atanh_2 = tf.atanh(x_atanh_1)   # accepts (-1, 1)
x_out = tf.divide(x_atanh_2, 1.55)   # atanh(1.0/1.1)

y = tf.identity(x_out, name='dnn_out')   # please only preserve the x_out you want to test
sess = tf.Session()
sess.run(tf.global_variables_initializer())
graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['dnn_out'])
tf.train.write_graph(graph_def, '.', 'image_process.pb', as_text=False)
print("image_process.pb generated, please use \
path_to_ffmpeg/tools/python/convert.py to generate image_process.model\n")
output = sess.run(y, feed_dict={x: in_data})
imageio.imsave("out.jpg", np.squeeze(output))
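
For reference, converting the generated model and running it through the native
backend might look roughly like the commands below; the dnn_processing options
are assumptions based on the filter's current documentation and are not changed
by this patch:

python path_to_ffmpeg/tools/python/convert.py image_process.pb
ffmpeg -i input.jpeg -vf dnn_processing=dnn_backend=native:model=image_process.model:input=dnn_in:output=dnn_out -y out.native.jpg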

Signed-off-by: Ting Fu <ting.fu@intel.com>
---
 libavfilter/dnn/dnn_backend_native_layer_mathunary.c | 4 ++++
 libavfilter/dnn/dnn_backend_native_layer_mathunary.h | 1 +
 tools/python/convert_from_tensorflow.py              | 2 +-
 tools/python/convert_header.py                       | 2 +-
 4 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
index b77b84a794..c83d50db64 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
@@ -124,6 +124,10 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
         for (int i = 0; i < dims_count; ++i)
             dst[i] = acosh(src[i]);
         return 0;
+    case DMUO_ATANH:
+        for (int i = 0; i < dims_count; ++i)
+            dst[i] = atanh(src[i]);
+        return 0;
     default:
         return -1;
     }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
index eb30231549..8076356ba4 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
@@ -42,6 +42,7 @@ typedef enum {
     DMUO_TANH = 9,
     DMUO_ASINH = 10,
     DMUO_ACOSH = 11,
+    DMUO_ATANH = 12,
     DMUO_COUNT
 } DNNMathUnaryOperation;
 
diff --git a/tools/python/convert_from_tensorflow.py b/tools/python/convert_from_tensorflow.py
index 1e73e3aefe..85db7bf710 100644
--- a/tools/python/convert_from_tensorflow.py
+++ b/tools/python/convert_from_tensorflow.py
@@ -72,7 +72,7 @@ class TFConverter:
         self.conv2d_scopename_inputname_dict = {}
         self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4, 'MathBinary':5, 'MathUnary':6}
         self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4}
-        self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4, 'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10, 'Acosh':11}
+        self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4, 'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10, 'Acosh':11, 'Atanh':12}
         self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
         self.name_operand_dict = {}
 
diff --git a/tools/python/convert_header.py b/tools/python/convert_header.py
index 8fc3438552..9851d84144 100644
--- a/tools/python/convert_header.py
+++ b/tools/python/convert_header.py
@@ -23,4 +23,4 @@ str = 'FFMPEGDNNNATIVE'
 major = 1
 
 # increase minor when we don't have to re-convert the model file
-minor = 17
+minor = 18
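
For reference only, not part of the patch: a minimal standalone C sketch of what
the new DMUO_ATANH case computes per element. atanh() is the libm function used
in the hunk above; the identity in the comment is standard math, and the sample
inputs mirror the test script, which divides by 1.1 to keep values inside
atanh's (-1, 1) domain.

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* the new case applies atanh element-wise, like the other unary ops */
    float src[] = {0.0f, 0.5f, 1.0f / 1.1f};
    float dst[3];

    for (int i = 0; i < 3; ++i) {
        dst[i] = atanh(src[i]);
        /* atanh(x) == 0.5 * log((1 + x) / (1 - x)) for |x| < 1 */
        printf("atanh(%f) = %f (log form: %f)\n",
               src[i], dst[i], 0.5 * log((1.0 + src[i]) / (1.0 - src[i])));
    }
    return 0;
}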