[FFmpeg-devel,2/6] avcodec/x86: cleanup simple_idct10

Submitted by James Darnley on June 15, 2017, 1:34 p.m.

Details

Message ID 20170615133426.4484-3-jdarnley@obe.tv
State Superseded
Headers show

Commit Message

James Darnley June 15, 2017, 1:34 p.m.
Use named arguments for the functions so we can remove a define.  The
stride/linesize argument is now of type ptrdiff_t, so we no longer need
to sign-extend the register.
---
 libavcodec/x86/proresdsp.asm              |  2 +-
 libavcodec/x86/simple_idct10.asm          |  8 ++--
 libavcodec/x86/simple_idct10_template.asm | 80 ++++++++++++++-----------------
 3 files changed, 41 insertions(+), 49 deletions(-)

Patch hide | download patch | download mbox

diff --git a/libavcodec/x86/proresdsp.asm b/libavcodec/x86/proresdsp.asm
index 16fc262aeb..8318a81c5e 100644
--- a/libavcodec/x86/proresdsp.asm
+++ b/libavcodec/x86/proresdsp.asm
@@ -51,7 +51,7 @@  cextern w7_min_w5
 SECTION .text
 
 %macro idct_fn 0
-cglobal prores_idct_put_10, 4, 4, 15
+cglobal prores_idct_put_10, 4, 4, 15, pixels, lsize, block, qmat
     IDCT_FN    pw_1, 15, pw_88, 18, pw_4, pw_1019, r3
     RET
 %endmacro
diff --git a/libavcodec/x86/simple_idct10.asm b/libavcodec/x86/simple_idct10.asm
index 5dee533de0..7cfd33eaa3 100644
--- a/libavcodec/x86/simple_idct10.asm
+++ b/libavcodec/x86/simple_idct10.asm
@@ -68,21 +68,21 @@  CONST_DEC  w7_min_w5,    W7sh2, -W5sh2
 SECTION .text
 
 %macro idct_fn 0
-cglobal simple_idct10, 1, 1, 16
+cglobal simple_idct10, 1, 1, 16, block
     IDCT_FN    "", 12, "", 19
     RET
 
-cglobal simple_idct10_put, 3, 3, 16
+cglobal simple_idct10_put, 3, 3, 16, pixels, lsize, block
     IDCT_FN    "", 12, "", 19, 0, pw_1023
     RET
 
-cglobal simple_idct12, 1, 1, 16
+cglobal simple_idct12, 1, 1, 16, block
     ; coeffs are already 15bits, adding the offset would cause
     ; overflow in the input
     IDCT_FN    "", 15, pw_2, 16
     RET
 
-cglobal simple_idct12_put, 3, 3, 16
+cglobal simple_idct12_put, 3, 3, 16, pixels, lsize, block
     ; range isn't known, so the C simple_idct range is used
     ; Also, using a bias on input overflows, so use the bias
     ; on output of the first butterfly instead
diff --git a/libavcodec/x86/simple_idct10_template.asm b/libavcodec/x86/simple_idct10_template.asm
index 9d323d99b3..3f398985a5 100644
--- a/libavcodec/x86/simple_idct10_template.asm
+++ b/libavcodec/x86/simple_idct10_template.asm
@@ -115,18 +115,18 @@ 
     psubd       m3,  m9            ; a1[4-7] intermediate
 
     ; load/store
-    mova   [COEFFS+  0], m0
-    mova   [COEFFS+ 32], m2
-    mova   [COEFFS+ 64], m4
-    mova   [COEFFS+ 96], m6
-    mova        m10,[COEFFS+ 16]       ; { row[1] }[0-7]
-    mova        m8, [COEFFS+ 48]       ; { row[3] }[0-7]
-    mova        m13,[COEFFS+ 80]       ; { row[5] }[0-7]
-    mova        m14,[COEFFS+112]       ; { row[7] }[0-7]
-    mova   [COEFFS+ 16], m1
-    mova   [COEFFS+ 48], m3
-    mova   [COEFFS+ 80], m5
-    mova   [COEFFS+112], m7
+    mova   [blockq+  0], m0
+    mova   [blockq+ 32], m2
+    mova   [blockq+ 64], m4
+    mova   [blockq+ 96], m6
+    mova        m10,[blockq+ 16]       ; { row[1] }[0-7]
+    mova        m8, [blockq+ 48]       ; { row[3] }[0-7]
+    mova        m13,[blockq+ 80]       ; { row[5] }[0-7]
+    mova        m14,[blockq+112]       ; { row[7] }[0-7]
+    mova   [blockq+ 16], m1
+    mova   [blockq+ 48], m3
+    mova   [blockq+ 80], m5
+    mova   [blockq+112], m7
 %if %0 == 3
     pmullw      m10,[%3+ 16]
     pmullw      m8, [%3+ 48]
@@ -197,17 +197,17 @@ 
     ; row[5] = (a2 - b2) >> 15;
     ; row[3] = (a3 + b3) >> 15;
     ; row[4] = (a3 - b3) >> 15;
-    mova        m8, [COEFFS+ 0]        ; a0[0-3]
-    mova        m9, [COEFFS+16]        ; a0[4-7]
+    mova        m8, [blockq+ 0]        ; a0[0-3]
+    mova        m9, [blockq+16]        ; a0[4-7]
     SUMSUB_SHPK m8,  m9,  m10, m11, m0,  m1,  %2
-    mova        m0, [COEFFS+32]        ; a1[0-3]
-    mova        m1, [COEFFS+48]        ; a1[4-7]
+    mova        m0, [blockq+32]        ; a1[0-3]
+    mova        m1, [blockq+48]        ; a1[4-7]
     SUMSUB_SHPK m0,  m1,  m9,  m11, m2,  m3,  %2
-    mova        m1, [COEFFS+64]        ; a2[0-3]
-    mova        m2, [COEFFS+80]        ; a2[4-7]
+    mova        m1, [blockq+64]        ; a2[0-3]
+    mova        m2, [blockq+80]        ; a2[4-7]
     SUMSUB_SHPK m1,  m2,  m11, m3,  m4,  m5,  %2
-    mova        m2, [COEFFS+96]        ; a3[0-3]
-    mova        m3, [COEFFS+112]       ; a3[4-7]
+    mova        m2, [blockq+96]        ; a3[0-3]
+    mova        m3, [blockq+112]       ; a3[4-7]
     SUMSUB_SHPK m2,  m3,  m4,  m5,  m6,  m7,  %2
 %endmacro
 
@@ -223,20 +223,12 @@ 
 ; %7 = qmat (for prores)
 
 %macro IDCT_FN 4-7
-%if %0 == 4
-    ; No clamping, means pure idct
-%xdefine COEFFS r0
-%else
-    movsxd      r1,  r1d
-%xdefine COEFFS r2
-%endif
-
     ; for (i = 0; i < 8; i++)
     ;     idctRowCondDC(block + i*8);
-    mova        m10,[COEFFS+ 0]        ; { row[0] }[0-7]
-    mova        m8, [COEFFS+32]        ; { row[2] }[0-7]
-    mova        m13,[COEFFS+64]        ; { row[4] }[0-7]
-    mova        m12,[COEFFS+96]        ; { row[6] }[0-7]
+    mova        m10,[blockq+ 0]        ; { row[0] }[0-7]
+    mova        m8, [blockq+32]        ; { row[2] }[0-7]
+    mova        m13,[blockq+64]        ; { row[4] }[0-7]
+    mova        m12,[blockq+96]        ; { row[6] }[0-7]
 
 %if %0 == 7
     pmullw      m10,[%7+ 0]
@@ -251,10 +243,10 @@ 
 
     ; transpose for second part of IDCT
     TRANSPOSE8x8W 8, 0, 1, 2, 4, 11, 9, 10, 3
-    mova   [COEFFS+ 16], m0
-    mova   [COEFFS+ 48], m2
-    mova   [COEFFS+ 80], m11
-    mova   [COEFFS+112], m10
+    mova   [blockq+ 16], m0
+    mova   [blockq+ 48], m2
+    mova   [blockq+ 80], m11
+    mova   [blockq+112], m10
     SWAP         8,  10
     SWAP         1,   8
     SWAP         4,  13
@@ -267,14 +259,14 @@ 
     ; clip/store
 %if %0 == 4
     ; No clamping, means pure idct
-    mova  [r0+  0], m8
-    mova  [r0+ 16], m0
-    mova  [r0+ 32], m1
-    mova  [r0+ 48], m2
-    mova  [r0+ 64], m4
-    mova  [r0+ 80], m11
-    mova  [r0+ 96], m9
-    mova  [r0+112], m10
+    mova  [blockq+  0], m8
+    mova  [blockq+ 16], m0
+    mova  [blockq+ 32], m1
+    mova  [blockq+ 48], m2
+    mova  [blockq+ 64], m4
+    mova  [blockq+ 80], m11
+    mova  [blockq+ 96], m9
+    mova  [blockq+112], m10
 %else
 %ifidn %5, 0
     pxor        m3, m3