[FFmpeg-devel] avcodec: add native iLBC decoder

Submitted by Paul B Mahol on Sept. 13, 2018, 11:03 a.m.

Details

Message ID 20180913110303.7429-1-onemda@gmail.com
State New

Commit Message

Paul B Mahol Sept. 13, 2018, 11:03 a.m.
Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
 libavcodec/Makefile    |    1 +
 libavcodec/allcodecs.c |    1 +
 libavcodec/ilbcdata.h  |  263 +++++++
 libavcodec/ilbcdec.c   | 1507 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 1772 insertions(+)
 create mode 100644 libavcodec/ilbcdata.h
 create mode 100644 libavcodec/ilbcdec.c

Comments

Carl Eugen Hoyos Sept. 13, 2018, 3:18 p.m.
2018-09-13 13:03 GMT+02:00, Paul B Mahol <onemda@gmail.com>:

> @@ -0,0 +1,263 @@
> +/*
> + * Copyright (c) 2013, The WebRTC project authors. All rights reserved.
> + *
> + * Redistribution and use in source and binary forms, with or without
> + * modification, are permitted provided that the following conditions are
> + * met:
> + *
> + *   * Redistributions of source code must retain the above copyright
> + *     notice, this list of conditions and the following disclaimer.
> + *
> + *   * Redistributions in binary form must reproduce the above copyright
> + *     notice, this list of conditions and the following disclaimer in
> + *     the documentation and/or other materials provided with the
> + *     distribution.
> + *
> + *   * Neither the name of Google nor the names of its contributors may
> + *     be used to endorse or promote products derived from this software
> + *     without specific prior written permission.
> + *
> + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> + */
> +
> +/*
> + * iLBC decoder
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */

Is the additional license necessary?
There is nothing wrong with it; I just wonder whether you are OK with anybody
else using your additions to the code under the original license, or whether
you intended to make that impossible.

Carl Eugen


diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 3e16a13004..f619f300e7 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -384,6 +384,7 @@  OBJS-$(CONFIG_HUFFYUV_ENCODER)         += huffyuv.o huffyuvenc.o
 OBJS-$(CONFIG_IDCIN_DECODER)           += idcinvideo.o
 OBJS-$(CONFIG_IDF_DECODER)             += bintext.o cga_data.o
 OBJS-$(CONFIG_IFF_ILBM_DECODER)        += iff.o
+OBJS-$(CONFIG_ILBC_DECODER)            += ilbcdec.o
 OBJS-$(CONFIG_IMC_DECODER)             += imc.o
 OBJS-$(CONFIG_IMM4_DECODER)            += imm4.o
 OBJS-$(CONFIG_INDEO2_DECODER)          += indeo2.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 7dbfcb3dda..c0b4d56d0d 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -424,6 +424,7 @@  extern AVCodec ff_g729_decoder;
 extern AVCodec ff_gsm_decoder;
 extern AVCodec ff_gsm_ms_decoder;
 extern AVCodec ff_iac_decoder;
+extern AVCodec ff_ilbc_decoder;
 extern AVCodec ff_imc_decoder;
 extern AVCodec ff_interplay_acm_decoder;
 extern AVCodec ff_mace3_decoder;
diff --git a/libavcodec/ilbcdata.h b/libavcodec/ilbcdata.h
new file mode 100644
index 0000000000..62b32a541f
--- /dev/null
+++ b/libavcodec/ilbcdata.h
@@ -0,0 +1,263 @@ 
+/*
+ * Copyright (c) 2013, The WebRTC project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *
+ *   * Neither the name of Google nor the names of its contributors may
+ *     be used to endorse or promote products derived from this software
+ *     without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * iLBC decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_ILBCDATA_H
+#define AVCODEC_ILBCDATA_H
+
+#include "libavutil/common.h"
+
+static const uint8_t lsf_dim_codebook[] = { 3, 3, 4 };
+static const uint8_t lsf_size_codebook[] = { 64, 128, 128 };
+static const int16_t lsf_weight_20ms[] = { 12288, 8192, 4096, 0 };
+static const int16_t lsf_weight_30ms[] = { 8192, 16384, 10923, 5461, 0, 0 };
+
+static const int16_t hp_out_coeffs[] = { 3849, -7699, 3849, 7918, -3833 };
+
+static const int16_t kPlcPfSlope[] = { 26667, 18729, 13653, 10258, 7901, 6214 };
+
+static const int16_t kPlcPitchFact[] = { 0, 5462, 10922, 16384, 21846, 27306 };
+
+static const int16_t kCbFiltersRev[] = {
+    -140, 446, -755, 3302, 2922, -590, 343, -138
+};
+
+static const int16_t kPlcPerSqr[] = { 839, 1343, 2048, 2998, 4247, 5849 };
+
+static const int16_t alpha[] = {
+    6554, 13107, 19661, 26214
+};
+
+static const int16_t kLpcChirpSyntDenum[] = {
+    32767, 29573, 26690, 24087, 21739, 19619, 17707, 15980, 14422, 13016, 11747
+};
+
+static const int16_t LpcChirpWeightDenum[] = {
+    32767, 13835, 5841, 2466, 1041, 440, 186, 78,  33,  14,  6
+};
+
+static const int16_t cos_tbl[64] = {
+    32767,  32729,  32610,  32413,  32138,  31786,  31357,   30853,
+    30274,  29622,  28899,  28106,  27246,  26320,  25330,   24279,
+    23170,  22006,  20788,  19520,  18205,  16846,  15447,   14010,
+    12540,  11039,   9512,   7962,   6393,   4808,   3212,    1608,
+    0,      -1608,  -3212,  -4808,  -6393,  -7962,  -9512,  -11039,
+    -12540, -14010, -15447, -16846, -18205, -19520, -20788, -22006,
+    -23170, -24279, -25330, -26320, -27246, -28106, -28899, -29622,
+    -30274, -30853, -31357, -31786, -32138, -32413, -32610, -32729,
+};
+
+static const int16_t cos_derivative_tbl[64] = {
+    -632,  -1893,  -3150,  -4399,  -5638,  -6863,  -8072,  -9261,
+    -10428, -11570, -12684, -13767, -14817, -15832, -16808, -17744,
+    -18637, -19486, -20287, -21039, -21741, -22390, -22986, -23526,
+    -24009, -24435, -24801, -25108, -25354, -25540, -25664, -25726,
+    -25726, -25664, -25540, -25354, -25108, -24801, -24435, -24009,
+    -23526, -22986, -22390, -21741, -21039, -20287, -19486, -18637,
+    -17744, -16808, -15832, -14817, -13767, -12684, -11570, -10428,
+    -9261,  -8072,  -6863,  -5638,  -4399,  -3150,  -1893,   -632
+};
+
+static const int16_t lsf_codebook[64 * 3 + 128 * 3 + 128 * 4] = {
+    1273, 2238, 3696, 3199, 5309, 8209, 3606, 5671, 7829,
+    2815, 5262, 8778, 2608, 4027, 5493, 1582, 3076, 5945,
+    2983, 4181, 5396, 2437, 4322, 6902, 1861, 2998, 4613,
+    2007, 3250, 5214, 1388, 2459, 4262, 2563, 3805, 5269,
+    2036, 3522, 5129, 1935, 4025, 6694, 2744, 5121, 7338,
+    2810, 4248, 5723, 3054, 5405, 7745, 1449, 2593, 4763,
+    3411, 5128, 6596, 2484, 4659, 7496, 1668, 2879, 4818,
+    1812, 3072, 5036, 1638, 2649, 3900, 2464, 3550, 4644,
+    1853, 2900, 4158, 2458, 4163, 5830, 2556, 4036, 6254,
+    2703, 4432, 6519, 3062, 4953, 7609, 1725, 3703, 6187,
+    2221, 3877, 5427, 2339, 3579, 5197, 2021, 4633, 7037,
+    2216, 3328, 4535, 2961, 4739, 6667, 2807, 3955, 5099,
+    2788, 4501, 6088, 1642, 2755, 4431, 3341, 5282, 7333,
+    2414, 3726, 5727, 1582, 2822, 5269, 2259, 3447, 4905,
+    3117, 4986, 7054, 1825, 3491, 5542, 3338, 5736, 8627,
+    1789, 3090, 5488, 2566, 3720, 4923, 2846, 4682, 7161,
+    1950, 3321, 5976, 1834, 3383, 6734, 3238, 4769, 6094,
+    2031, 3978, 5903, 1877, 4068, 7436, 2131, 4644, 8296,
+    2764, 5010, 8013, 2194, 3667, 6302, 2053, 3127, 4342,
+    3523, 6595, 10010, 3134, 4457, 5748, 3142, 5819, 9414,
+    2223, 4334, 6353, 2022, 3224, 4822, 2186, 3458, 5544,
+    2552, 4757, 6870, 10905, 12917, 14578, 9503, 11485, 14485,
+    9518, 12494, 14052, 6222, 7487, 9174, 7759, 9186, 10506,
+    8315, 12755, 14786, 9609, 11486, 13866, 8909, 12077, 13643,
+    7369, 9054, 11520, 9408, 12163, 14715, 6436, 9911, 12843,
+    7109, 9556, 11884, 7557, 10075, 11640, 6482, 9202, 11547,
+    6463, 7914, 10980, 8611, 10427, 12752, 7101, 9676, 12606,
+    7428, 11252, 13172, 10197, 12955, 15842, 7487, 10955, 12613,
+    5575, 7858, 13621, 7268, 11719, 14752, 7476, 11744, 13795,
+    7049, 8686, 11922, 8234, 11314, 13983, 6560, 11173, 14984,
+    6405, 9211, 12337, 8222, 12054, 13801, 8039, 10728, 13255,
+    10066, 12733, 14389, 6016, 7338, 10040, 6896, 8648, 10234,
+    7538, 9170, 12175, 7327, 12608, 14983, 10516, 12643, 15223,
+    5538, 7644, 12213, 6728, 12221, 14253, 7563, 9377, 12948,
+    8661, 11023, 13401, 7280, 8806, 11085, 7723, 9793, 12333,
+    12225, 14648, 16709, 8768, 13389, 15245, 10267, 12197, 13812,
+    5301, 7078, 11484, 7100, 10280, 11906, 8716, 12555, 14183,
+    9567, 12464, 15434, 7832, 12305, 14300, 7608, 10556, 12121,
+    8913, 11311, 12868, 7414, 9722, 11239, 8666, 11641, 13250,
+    9079, 10752, 12300, 8024, 11608, 13306, 10453, 13607, 16449,
+    8135, 9573, 10909, 6375, 7741, 10125, 10025, 12217, 14874,
+    6985, 11063, 14109, 9296, 13051, 14642, 8613, 10975, 12542,
+    6583, 10414, 13534, 6191, 9368, 13430, 5742, 6859, 9260,
+    7723, 9813, 13679, 8137, 11291, 12833, 6562, 8973, 10641,
+    6062, 8462, 11335, 6928, 8784, 12647, 7501, 8784, 10031,
+    8372, 10045, 12135, 8191, 9864, 12746, 5917, 7487, 10979,
+    5516, 6848, 10318, 6819, 9899, 11421, 7882, 12912, 15670,
+    9558, 11230, 12753, 7752, 9327, 11472, 8479, 9980, 11358,
+    11418, 14072, 16386, 7968, 10330, 14423, 8423, 10555, 12162,
+    6337, 10306, 14391, 8850, 10879, 14276, 6750, 11885, 15710,
+    7037, 8328, 9764, 6914, 9266, 13476, 9746, 13949, 15519,
+    11032, 14444, 16925, 8032, 10271, 11810, 10962, 13451, 15833,
+    10021, 11667, 13324, 6273, 8226, 12936, 8543, 10397, 13496,
+    7936, 10302, 12745, 6769, 8138, 10446, 6081, 7786, 11719,
+    8637, 11795, 14975, 8790, 10336, 11812, 7040, 8490, 10771,
+    7338, 10381, 13153, 6598, 7888, 9358, 6518, 8237, 12030,
+    9055, 10763, 12983, 6490, 10009, 12007, 9589, 12023, 13632,
+    6867, 9447, 10995, 7930, 9816, 11397, 10241, 13300, 14939,
+    5830, 8670, 12387, 9870, 11915, 14247, 9318, 11647, 13272,
+    6721, 10836, 12929, 6543, 8233, 9944, 8034, 10854, 12394,
+    9112, 11787, 14218, 9302, 11114, 13400, 9022, 11366, 13816,
+    6962, 10461, 12480, 11288, 13333, 15222, 7249, 8974, 10547,
+    10566, 12336, 14390, 6697, 11339, 13521, 11851, 13944, 15826,
+    6847, 8381, 11349, 7509, 9331, 10939, 8029, 9618, 11909,
+    13973, 17644, 19647, 22474, 14722, 16522, 20035, 22134, 16305, 18179, 21106, 23048,
+    15150, 17948, 21394, 23225, 13582, 15191, 17687, 22333, 11778, 15546, 18458, 21753,
+    16619, 18410, 20827, 23559, 14229, 15746, 17907, 22474, 12465, 15327, 20700, 22831,
+    15085, 16799, 20182, 23410, 13026, 16935, 19890, 22892, 14310, 16854, 19007, 22944,
+    14210, 15897, 18891, 23154, 14633, 18059, 20132, 22899, 15246, 17781, 19780, 22640,
+    16396, 18904, 20912, 23035, 14618, 17401, 19510, 21672, 15473, 17497, 19813, 23439,
+    18851, 20736, 22323, 23864, 15055, 16804, 18530, 20916, 16490, 18196, 19990, 21939,
+    11711, 15223, 21154, 23312, 13294, 15546, 19393, 21472, 12956, 16060, 20610, 22417,
+    11628, 15843, 19617, 22501, 14106, 16872, 19839, 22689, 15655, 18192, 20161, 22452,
+    12953, 15244, 20619, 23549, 15322, 17193, 19926, 21762, 16873, 18676, 20444, 22359,
+    14874, 17871, 20083, 21959, 11534, 14486, 19194, 21857, 17766, 19617, 21338, 23178,
+    13404, 15284, 19080, 23136, 15392, 17527, 19470, 21953, 14462, 16153, 17985, 21192,
+    17734, 19750, 21903, 23783, 16973, 19096, 21675, 23815, 16597, 18936, 21257, 23461,
+    15966, 17865, 20602, 22920, 15416, 17456, 20301, 22972, 18335, 20093, 21732, 23497,
+    15548, 17217, 20679, 23594, 15208, 16995, 20816, 22870, 13890, 18015, 20531, 22468,
+    13211, 15377, 19951, 22388, 12852, 14635, 17978, 22680, 16002, 17732, 20373, 23544,
+    11373, 14134, 19534, 22707, 17329, 19151, 21241, 23462, 15612, 17296, 19362, 22850,
+    15422, 19104, 21285, 23164, 13792, 17111, 19349, 21370, 15352, 17876, 20776, 22667,
+    15253, 16961, 18921, 22123, 14108, 17264, 20294, 23246, 15785, 17897, 20010, 21822,
+    17399, 19147, 20915, 22753, 13010, 15659, 18127, 20840, 16826, 19422, 22218, 24084,
+    18108, 20641, 22695, 24237, 18018, 20273, 22268, 23920, 16057, 17821, 21365, 23665,
+    16005, 17901, 19892, 23016, 13232, 16683, 21107, 23221, 13280, 16615, 19915, 21829,
+    14950, 18575, 20599, 22511, 16337, 18261, 20277, 23216, 14306, 16477, 21203, 23158,
+    12803, 17498, 20248, 22014, 14327, 17068, 20160, 22006, 14402, 17461, 21599, 23688,
+    16968, 18834, 20896, 23055, 15070, 17157, 20451, 22315, 15419, 17107, 21601, 23946,
+    16039, 17639, 19533, 21424, 16326, 19261, 21745, 23673, 16489, 18534, 21658, 23782,
+    16594, 18471, 20549, 22807, 18973, 21212, 22890, 24278, 14264, 18674, 21123, 23071,
+    15117, 16841, 19239, 23118, 13762, 15782, 20478, 23230, 14111, 15949, 20058, 22354,
+    14990, 16738, 21139, 23492, 13735, 16971, 19026, 22158, 14676, 17314, 20232, 22807,
+    16196, 18146, 20459, 22339, 14747, 17258, 19315, 22437, 14973, 17778, 20692, 23367,
+    15715, 17472, 20385, 22349, 15702, 18228, 20829, 23410, 14428, 16188, 20541, 23630,
+    16824, 19394, 21365, 23246, 13069, 16392, 18900, 21121, 12047, 16640, 19463, 21689,
+    14757, 17433, 19659, 23125, 15185, 16930, 19900, 22540, 16026, 17725, 19618, 22399,
+    16086, 18643, 21179, 23472, 15462, 17248, 19102, 21196, 17368, 20016, 22396, 24096,
+    12340, 14475, 19665, 23362, 13636, 16229, 19462, 22728, 14096, 16211, 19591, 21635,
+    12152, 14867, 19943, 22301, 14492, 17503, 21002, 22728, 14834, 16788, 19447, 21411,
+    14650, 16433, 19326, 22308, 14624, 16328, 19659, 23204, 13888, 16572, 20665, 22488,
+    12977, 16102, 18841, 22246, 15523, 18431, 21757, 23738, 14095, 16349, 18837, 20947,
+    13266, 17809, 21088, 22839, 15427, 18190, 20270, 23143, 11859, 16753, 20935, 22486,
+    12310, 17667, 21736, 23319, 14021, 15926, 18702, 22002, 12286, 15299, 19178, 21126,
+    15703, 17491, 21039, 23151, 12272, 14018, 18213, 22570, 14817, 16364, 18485, 22598,
+    17109, 19683, 21851, 23677, 12657, 14903, 19039, 22061, 14713, 16487, 20527, 22814,
+    14635, 16726, 18763, 21715, 15878, 18550, 20718, 22906
+};
+
+static const int16_t gain3[9]={
+    -16384, -10813, -5407, 0, 4096, 8192, 12288, 16384, 32767
+};
+
+static const int16_t gain4[17]={
+    -17203, -14746, -12288, -9830, -7373, -4915, -2458, 0, 2458, 4915, 7373, 9830,
+    12288, 14746, 17203, 19661, 32767
+};
+
+static const int16_t gain5[33]={
+    614,   1229,  1843,  2458,  3072,  3686,
+    4301,  4915,  5530,  6144,  6758,  7373,
+    7987,  8602,  9216,  9830,  10445, 11059,
+    11674, 12288, 12902, 13517, 14131, 14746,
+    15360, 15974, 16589, 17203, 17818, 18432,
+    19046, 19661, 32767
+};
+
+static const int16_t *const ilbc_gain[] = {
+    gain5, gain4, gain3,
+};
+
+static const int16_t ilbc_state[8] = {
+   -30473, -17838, -9257, -2537, 3639, 10893, 19958, 32636
+};
+
+static const int16_t frg_quant_mod[64] = {
+    /* First 37 values in Q8 */
+    569, 671, 786, 916, 1077, 1278,
+    1529, 1802, 2109, 2481, 2898, 3440,
+    3943, 4535, 5149, 5778, 6464, 7208,
+    7904, 8682, 9397, 10285, 11240, 12246,
+    13313, 14382, 15492, 16735, 18131, 19693,
+    21280, 22912, 24624, 26544, 28432, 30488,
+    32720,
+    /* 22 values in Q5 */
+    4383, 4684, 5012, 5363, 5739, 6146,
+    6603, 7113, 7679, 8285, 9040, 9850,
+    10838, 11882, 13103, 14467, 15950, 17669,
+    19712, 22016, 24800, 28576,
+    /* 5 values in Q3 */
+    8240, 9792, 12040, 15440, 22472
+};
+
+#endif /* AVCODEC_ILBCDATA_H */
diff --git a/libavcodec/ilbcdec.c b/libavcodec/ilbcdec.c
new file mode 100644
index 0000000000..638f692e5c
--- /dev/null
+++ b/libavcodec/ilbcdec.c
@@ -0,0 +1,1507 @@ 
+/*
+ * Copyright (c) 2013, The WebRTC project authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *
+ *   * Neither the name of Google nor the names of its contributors may
+ *     be used to endorse or promote products derived from this software
+ *     without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * iLBC decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "get_bits.h"
+#include "ilbcdata.h"
+
+#define LPC_N_20MS            1
+#define LPC_N_30MS            2
+#define LPC_N_MAX             2
+#define LSF_NSPLIT            3
+#define NASUB_MAX             4
+#define LPC_FILTERORDER       10
+#define NSUB_MAX              6
+#define SUBL                  40
+
+#define ST_MEM_L_TBL          85
+#define MEM_LF_TBL            147
+#define STATE_SHORT_LEN_20MS  57
+#define STATE_SHORT_LEN_30MS  58
+
+#define BLOCKL_MAX            240
+#define CB_MEML               147
+#define CB_NSTAGES            3
+#define CB_HALFFILTERLEN      4
+#define CB_FILTERLEN          8
+
+#define ENH_NBLOCKS_TOT       8
+#define ENH_BLOCKL            80
+#define ENH_BUFL              ((ENH_NBLOCKS_TOT) * (ENH_BLOCKL))
+#define ENH_BUFL_FILTEROVERHEAD  3
+#define NSUB_20MS             4
+#define NSUB_30MS             6
+#define NASUB_20MS            2
+#define NASUB_30MS            4
+#define STATE_LEN             80
+
+#define SPL_MUL_16_16(a, b) ((int32_t) (((int16_t)(a)) * ((int16_t)(b))))
+#define SPL_MUL_16_16_RSFT(a, b, c) (SPL_MUL_16_16(a, b) >> (c))
+
+typedef struct ILBCFrame {
+    int16_t  lsf[LSF_NSPLIT*LPC_N_MAX];
+    int16_t  cb_index[CB_NSTAGES*(NASUB_MAX+1)];
+    int16_t  gain_index[CB_NSTAGES*(NASUB_MAX+1)];
+    int16_t  ifm;
+    int16_t  state_first;
+    int16_t  idx[STATE_SHORT_LEN_30MS];
+    int16_t  firstbits;
+    int16_t  start;
+} ILBCFrame;
+
+typedef struct ILBCContext {
+    AVClass         *class;
+    int              enhancer;
+
+    int              mode;
+    GetBitContext    gb;
+    ILBCFrame        frame;
+
+    int              prev_enh_pl;
+    int              consPLICount;
+    int              last_lag;
+    int              state_short_len;
+    int              lpc_n;
+    int16_t          nasub;
+    int16_t          nsub;
+    int              block_samples;
+    int16_t          no_of_words;
+    int16_t          no_of_bytes;
+    int16_t          lsfdeq[LPC_FILTERORDER*LPC_N_MAX];
+    int16_t          lsfold[LPC_FILTERORDER];
+    int16_t          syntMem[LPC_FILTERORDER];
+    int16_t          lsfdeqold[LPC_FILTERORDER];
+    int16_t          weightdenum[(LPC_FILTERORDER + 1) * NSUB_MAX];
+    int16_t          syntdenum[NSUB_MAX * (LPC_FILTERORDER + 1)];
+    int16_t          old_syntdenum[NSUB_MAX * (LPC_FILTERORDER + 1)];
+    int16_t          enh_buf[ENH_BUFL+ENH_BUFL_FILTEROVERHEAD];
+    int16_t          enh_period[ENH_NBLOCKS_TOT];
+    int16_t          prevResidual[NSUB_MAX*SUBL];
+    int16_t          decresidual[BLOCKL_MAX];
+    int16_t          PLCresidual[BLOCKL_MAX + LPC_FILTERORDER];
+    int16_t          seed;
+    int16_t          prevPLI;
+    int16_t          prevScale;
+    int16_t          prevLag;
+    int16_t          perSquare;
+    int16_t          prevLpc[LPC_FILTERORDER+1];
+    int16_t          PLClpc[LPC_FILTERORDER + 1];
+    int16_t          hpimemx[2];
+    int16_t          hpimemy[4];
+} ILBCContext;
+
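+/*
+ * Parse the parameters of one 20 ms or 30 ms iLBC frame. The bits of most
+ * fields are spread over several read passes (most significant bits first),
+ * so the fields are assembled incrementally. Returns the value of the final
+ * bit of the frame.
+ */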
+static int unpack_frame(ILBCContext *s)
+{
+    ILBCFrame *frame = &s->frame;
+    GetBitContext *gb = &s->gb;
+    int j;
+
+    frame->lsf[0] = get_bits(gb, 6);
+    frame->lsf[1] = get_bits(gb, 7);
+    frame->lsf[2] = get_bits(gb, 7);
+
+    if (s->mode == 20) {
+        frame->start          = get_bits(gb, 2);
+        frame->state_first    = get_bits1(gb);
+        frame->ifm            = get_bits(gb, 6);
+        frame->cb_index[0]    = get_bits(gb, 6) << 1;
+        frame->gain_index[0]  = get_bits(gb, 2) << 3;
+        frame->gain_index[1]  = get_bits1(gb) << 3;
+        frame->cb_index[3]    = get_bits(gb, 7) << 1;
+        frame->gain_index[3]  = get_bits1(gb) << 4;
+        frame->gain_index[4]  = get_bits1(gb) << 3;
+        frame->gain_index[6]  = get_bits1(gb) << 4;
+    } else {
+        frame->lsf[3]         = get_bits(gb, 6);
+        frame->lsf[4]         = get_bits(gb, 7);
+        frame->lsf[5]         = get_bits(gb, 7);
+        frame->start          = get_bits(gb, 3);
+        frame->state_first    = get_bits1(gb);
+        frame->ifm            = get_bits(gb, 6);
+        frame->cb_index[0]    = get_bits(gb, 4) << 3;
+        frame->gain_index[0]  = get_bits1(gb) << 4;
+        frame->gain_index[1]  = get_bits1(gb) << 3;
+        frame->cb_index[3]    = get_bits(gb, 6) << 2;
+        frame->gain_index[3]  = get_bits1(gb) << 4;
+        frame->gain_index[4]  = get_bits1(gb) << 3;
+    }
+
+    for (j = 0; j < 48; j++)
+        frame->idx[j] = get_bits1(gb) << 2;
+
+    if (s->mode == 20) {
+        for (; j < 57; j++)
+            frame->idx[j] = get_bits1(gb) << 2;
+
+        frame->gain_index[1] |= get_bits1(gb) << 2;
+        frame->gain_index[3] |= get_bits(gb, 2) << 2;
+        frame->gain_index[4] |= get_bits1(gb) << 2;
+        frame->gain_index[6] |= get_bits1(gb) << 3;
+        frame->gain_index[7]  = get_bits(gb, 2) << 2;
+    } else {
+        for (; j < 58; j++)
+            frame->idx[j] = get_bits1(gb) << 2;
+
+        frame->cb_index[0]    |= get_bits(gb, 2) << 1;
+        frame->gain_index[0]  |= get_bits1(gb) << 3;
+        frame->gain_index[1]  |= get_bits1(gb) << 2;
+        frame->cb_index[3]    |= get_bits1(gb) << 1;
+        frame->cb_index[6]     = get_bits1(gb) << 7;
+        frame->cb_index[6]    |= get_bits(gb, 6) << 1;
+        frame->cb_index[9]     = get_bits(gb, 7) << 1;
+        frame->cb_index[12]    = get_bits(gb, 3) << 5;
+        frame->cb_index[12]   |= get_bits(gb, 4) << 1;
+        frame->gain_index[3]  |= get_bits(gb, 2) << 2;
+        frame->gain_index[4]  |= get_bits(gb, 2) << 1;
+        frame->gain_index[6]   = get_bits(gb, 2) << 3;
+        frame->gain_index[7]   = get_bits(gb, 2) << 2;
+        frame->gain_index[9]   = get_bits1(gb) << 4;
+        frame->gain_index[10]  = get_bits1(gb) << 3;
+        frame->gain_index[12]  = get_bits1(gb) << 4;
+        frame->gain_index[13]  = get_bits1(gb) << 3;
+    }
+
+    for (j = 0; j < 56; j++)
+        frame->idx[j] |= get_bits(gb, 2);
+
+    if (s->mode == 20) {
+        frame->idx[56]        |= get_bits(gb, 2);
+        frame->cb_index[0]    |= get_bits1(gb);
+        frame->cb_index[1]     = get_bits(gb, 7);
+        frame->cb_index[2]     = get_bits(gb, 6) << 1;
+        frame->cb_index[2]    |= get_bits1(gb);
+        frame->gain_index[0]  |= get_bits(gb, 3);
+        frame->gain_index[1]  |= get_bits(gb, 2);
+        frame->gain_index[2]   = get_bits(gb, 3);
+        frame->cb_index[3]    |= get_bits1(gb);
+        frame->cb_index[4]     = get_bits(gb, 6) << 1;
+        frame->cb_index[4]    |= get_bits1(gb);
+        frame->cb_index[5]     = get_bits(gb, 7);
+        frame->cb_index[6]     = get_bits(gb, 8);
+        frame->cb_index[7]     = get_bits(gb, 8);
+        frame->cb_index[8]     = get_bits(gb, 8);
+        frame->gain_index[3]  |= get_bits(gb, 2);
+        frame->gain_index[4]  |= get_bits(gb, 2);
+        frame->gain_index[5]   = get_bits(gb, 3);
+        frame->gain_index[6]  |= get_bits(gb, 3);
+        frame->gain_index[7]  |= get_bits(gb, 2);
+        frame->gain_index[8]   = get_bits(gb, 3);
+    } else {
+        frame->idx[56]        |= get_bits(gb, 2);
+        frame->idx[57]        |= get_bits(gb, 2);
+        frame->cb_index[0]    |= get_bits1(gb);
+        frame->cb_index[1]     = get_bits(gb, 7);
+        frame->cb_index[2]     = get_bits(gb, 4) << 3;
+        frame->cb_index[2]    |= get_bits(gb, 3);
+        frame->gain_index[0]  |= get_bits(gb, 3);
+        frame->gain_index[1]  |= get_bits(gb, 2);
+        frame->gain_index[2]   = get_bits(gb, 3);
+        frame->cb_index[3]    |= get_bits1(gb);
+        frame->cb_index[4]     = get_bits(gb, 4) << 3;
+        frame->cb_index[4]    |= get_bits(gb, 3);
+        frame->cb_index[5]     = get_bits(gb, 7);
+        frame->cb_index[6]    |= get_bits1(gb);
+        frame->cb_index[7]     = get_bits(gb, 5) << 3;
+        frame->cb_index[7]    |= get_bits(gb, 3);
+        frame->cb_index[8]     = get_bits(gb, 8);
+        frame->cb_index[9]    |= get_bits1(gb);
+        frame->cb_index[10]    = get_bits(gb, 4) << 4;
+        frame->cb_index[10]   |= get_bits(gb, 4);
+        frame->cb_index[11]    = get_bits(gb, 8);
+        frame->cb_index[12]   |= get_bits1(gb);
+        frame->cb_index[13]    = get_bits(gb, 3) << 5;
+        frame->cb_index[13]   |= get_bits(gb, 5);
+        frame->cb_index[14]    = get_bits(gb, 8);
+        frame->gain_index[3]  |= get_bits(gb, 2);
+        frame->gain_index[4]  |= get_bits1(gb);
+        frame->gain_index[5]   = get_bits(gb, 3);
+        frame->gain_index[6]  |= get_bits(gb, 3);
+        frame->gain_index[7]  |= get_bits(gb, 2);
+        frame->gain_index[8]   = get_bits(gb, 3);
+        frame->gain_index[9]  |= get_bits(gb, 4);
+        frame->gain_index[10] |= get_bits1(gb) << 2;
+        frame->gain_index[10] |= get_bits(gb, 2);
+        frame->gain_index[11]  = get_bits(gb, 3);
+        frame->gain_index[12] |= get_bits(gb, 4);
+        frame->gain_index[13] |= get_bits(gb, 3);
+        frame->gain_index[14]  = get_bits(gb, 3);
+    }
+
+    return get_bits1(gb);
+}
+
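+/* Remap codebook indices 4 and 5 to the full range used by get_codebook():
+   values in [44, 108) are shifted up by 64, values in [108, 128) by 128. */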
+static void index_conv(int16_t *index)
+{
+    int k;
+
+    for (k = 4; k < 6; k++) {
+        if (index[k] >= 44 && index[k] < 108) {
+            index[k] += 64;
+        } else if (index[k] >= 108 && index[k] < 128) {
+            index[k] += 128;
+        }
+    }
+}
+
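+/* Look up the quantized LSF vector from the 3-way split VQ codebook;
+   a second LSF set is decoded when lpc_n > 1 (30 ms mode). */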
+static void lsf_dequantization(int16_t *lsfdeq, int16_t *index, int16_t lpc_n)
+{
+    int i, j, pos = 0, cb_pos = 0;
+
+    for (i = 0; i < LSF_NSPLIT; i++) {
+        for (j = 0; j < lsf_dim_codebook[i]; j++) {
+            lsfdeq[pos + j] = lsf_codebook[cb_pos + index[i] * lsf_dim_codebook[i] + j];
+        }
+
+        pos    += lsf_dim_codebook[i];
+        cb_pos += lsf_size_codebook[i] * lsf_dim_codebook[i];
+    }
+
+    if (lpc_n > 1) {
+        pos = 0;
+        cb_pos = 0;
+        for (i = 0; i < LSF_NSPLIT; i++) {
+            for (j = 0; j < lsf_dim_codebook[i]; j++) {
+                lsfdeq[LPC_FILTERORDER + pos + j] = lsf_codebook[cb_pos +
+                    index[LSF_NSPLIT + i] * lsf_dim_codebook[i] + j];
+            }
+
+            pos    += lsf_dim_codebook[i];
+            cb_pos += lsf_size_codebook[i] * lsf_dim_codebook[i];
+        }
+    }
+}
+
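+/* Nudge neighbouring LSFs apart when they are too close and clamp them to a
+   valid range (two passes), so that the derived synthesis filter stays stable. */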
+static void lsf_check_stability(int16_t *lsf, int dim, int nb_vectors)
+{
+    for (int n = 0; n < 2; n++) {
+        for (int m = 0; m < nb_vectors; m++) {
+            for (int k = 0; k < dim - 1; k++) {
+                int i = m * dim + k;
+
+                if ((lsf[i + 1] - lsf[i]) < 319) {
+                    if (lsf[i + 1] < lsf[i]) {
+                        lsf[i + 1] = lsf[i] + 160;
+                        lsf[i]     = lsf[i + 1] - 160;
+                    } else {
+                        lsf[i]     -= 160;
+                        lsf[i + 1] += 160;
+                    }
+                }
+
+                lsf[i] = av_clip(lsf[i], 82, 25723);
+            }
+        }
+    }
+}
+
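+/* Weighted average of two LSF vectors; coef is in Q14, result is rounded. */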
+static void lsf_interpolate(int16_t *out, int16_t *in1,
+                            int16_t *in2, int16_t coef,
+                            int size)
+{
+    int invcoef = 16384 - coef, i;
+
+    for (i = 0; i < size; i++)
+        out[i] = (coef * in1[i] + invcoef * in2[i] + 8192) >> 14;
+}
+
+static void lsf2lsp(int16_t *lsf, int16_t *lsp, int order)
+{
+    int16_t diff, freq;
+    int32_t tmp;
+    int i, k;
+
+    for (i = 0; i < order; i++) {
+        freq = (lsf[i] * 20861) >> 15;
+        /* 20861: 1.0/(2.0*PI) in Q17 */
+        /*
+           Upper 8 bits give the index k and
+           Lower 8 bits give the difference, which needs
+           to be approximated linearly
+         */
+        k = FFMIN(freq >> 8, 63);
+        diff = freq & 0xFF;
+
+        /* Calculate linear approximation */
+        tmp = cos_derivative_tbl[k] * diff;
+        lsp[i] = cos_tbl[k] + (tmp >> 12);
+    }
+}
+
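+/* Build one of the two symmetric polynomials of the LSP-to-LPC conversion
+   from every second LSP coefficient; the coefficients are accumulated in Q24. */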
+static void get_lsp_poly(int16_t *lsp, int32_t *f)
+{
+    int16_t high, low;
+    int i, j, k, l;
+    int32_t tmp;
+
+    f[0] = 16777216;
+    f[1] = lsp[0] * -1024;
+
+    for (i = 2, k = 2, l = 2; i <= 5; i++, k += 2) {
+        f[l] = f[l - 2];
+
+        for (j = i; j > 1; j--, l--) {
+            high = f[l - 1] >> 16;
+            low = (f[l - 1] - (high << 16)) >> 1;
+
+            tmp = ((high * lsp[k]) << 2) + (((low * lsp[k]) >> 15) << 2);
+
+            f[l] += f[l - 2];
+            f[l] -= tmp;
+        }
+
+        f[l] -= lsp[k] << 10;
+        l += i;
+    }
+}
+
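+/* Convert an LSF vector to LPC coefficients (a[] in Q12) via the LSP domain. */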
+static void lsf2poly(int16_t *a, int16_t *lsf)
+{
+    int32_t f[2][6];
+    int16_t lsp[10];
+    int32_t tmp;
+    int i;
+
+    lsf2lsp(lsf, lsp, LPC_FILTERORDER);
+
+    get_lsp_poly(&lsp[0], f[0]);
+    get_lsp_poly(&lsp[1], f[1]);
+
+    for (i = 5; i > 0; i--) {
+        f[0][i] += f[0][i - 1];
+        f[1][i] -= f[1][i - 1];
+    }
+
+    a[0] = 4096;
+    for (i = 5; i > 0; i--) {
+        tmp = f[0][6 - i] + f[1][6 - i];
+        a[6 - i] = (tmp + 4096) >> 13;
+
+        tmp = f[0][6 - i] - f[1][6 - i];
+        a[5 + i] = (tmp + 4096) >> 13;
+    }
+}
+
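+/* Interpolate between two LSF vectors (coef in Q14) and convert the result
+   to LPC coefficients. */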
+static void lsp_interpolate2polydec(int16_t *a, int16_t *lsf1,
+                                   int16_t *lsf2, int coef, int length)
+{
+    int16_t lsftmp[LPC_FILTERORDER];
+
+    lsf_interpolate(lsftmp, lsf1, lsf2, coef, length);
+    lsf2poly(a, lsftmp);
+}
+
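+/* Bandwidth expansion: multiply each LPC coefficient by the corresponding
+   chirp factor from coef[] (Q15, rounded); the leading coefficient is copied. */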
+static void bw_expand(int16_t *out, const int16_t *in, const int16_t *coef, int length)
+{
+    int i;
+
+    out[0] = in[0];
+    for (i = 1; i < length; i++)
+        out[i] = (coef[i] * in[i] + 16384) >> 15;
+}
+
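+/* Interpolate the LSFs for every subframe between the previous and the current
+   frame, producing the synthesis filter (syntdenum) and the bandwidth-expanded
+   weighting filter (weightdenum) coefficients, and update the LSF memory. */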
+static void lsp_interpolate(int16_t *syntdenum, int16_t *weightdenum,
+                            int16_t *lsfdeq, int16_t length,
+                            ILBCContext *s)
+{
+    int16_t lp[LPC_FILTERORDER + 1], *lsfdeq2;
+    int i, pos, lp_length;
+
+    lsfdeq2 = lsfdeq + length;
+    lp_length = length + 1;
+
+    if (s->mode == 30) {
+        lsp_interpolate2polydec(lp, s->lsfdeqold, lsfdeq, lsf_weight_30ms[0], length);
+        memcpy(syntdenum, lp, lp_length * 2);
+        bw_expand(weightdenum, lp, kLpcChirpSyntDenum, lp_length);
+
+        pos = lp_length;
+        for (i = 1; i < 6; i++) {
+            lsp_interpolate2polydec(lp, lsfdeq, lsfdeq2,
+                                    lsf_weight_30ms[i], length);
+            memcpy(syntdenum + pos, lp, lp_length * 2);
+            bw_expand(weightdenum + pos, lp, kLpcChirpSyntDenum, lp_length);
+            pos += lp_length;
+        }
+    } else {
+        pos = 0;
+        for (i = 0; i < s->nsub; i++) {
+            lsp_interpolate2polydec(lp, s->lsfdeqold, lsfdeq,
+                                    lsf_weight_20ms[i], length);
+            memcpy(syntdenum + pos, lp, lp_length * 2);
+            bw_expand(weightdenum + pos, lp, kLpcChirpSyntDenum, lp_length);
+            pos += lp_length;
+        }
+    }
+
+    if (s->mode == 30) {
+        memcpy(s->lsfdeqold, lsfdeq2, length * 2);
+    } else {
+        memcpy(s->lsfdeqold, lsfdeq, length * 2);
+    }
+}
+
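+/* All-zero (FIR) filter with Q12 coefficients; reads B_length - 1 samples of
+   history before in_ptr, clips the accumulator and rounds the result back from Q12. */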
+static void filter_mafq12(int16_t *in_ptr, int16_t *out_ptr,
+                          int16_t *B, int16_t B_length,
+                          int16_t length)
+{
+    int o, i, j;
+
+    for (i = 0; i < length; i++) {
+        const int16_t *b_ptr = &B[0];
+        const int16_t *x_ptr = &in_ptr[i];
+
+        o = 0;
+        for (j = 0; j < B_length; j++)
+            o += b_ptr[j] * *x_ptr--;
+
+        o = av_clip(o, -134217728, 134215679);
+
+        out_ptr[i] = ((o + 2048) >> 12);
+    }
+}
+
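+/* All-pole (IIR) filter with Q12 coefficients; data_out[] must provide
+   coefficients_length - 1 samples of filter history before index 0. */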
+static void filter_arfq12(const int16_t *data_in,
+                          int16_t *data_out,
+                          const int16_t *coefficients,
+                          int coefficients_length,
+                          int data_length)
+{
+    int i, j;
+
+    for (i = 0; i < data_length; i++) {
+        int output = 0, sum = 0;
+
+        for (j = coefficients_length - 1; j > 0; j--) {
+            sum += coefficients[j] * data_out[i - j];
+        }
+
+        output = coefficients[0] * data_in[i] - sum;
+        output = av_clip(output, -134217728, 134215679);
+
+        data_out[i] = (output + 2048) >> 12;
+    }
+}
+
+static void state_construct(int16_t ifm, int16_t *idx,
+                           int16_t *syntDenum, int16_t *Out_fix,
+                           int16_t len)
+{
+    int k;
+    int16_t maxVal;
+    int16_t *tmp1, *tmp2, *tmp3;
+    /* Stack based */
+    int16_t numerator[1 + LPC_FILTERORDER];
+    int16_t sampleValVec[2 * STATE_SHORT_LEN_30MS + LPC_FILTERORDER];
+    int16_t sampleMaVec[2 * STATE_SHORT_LEN_30MS + LPC_FILTERORDER];
+    int16_t *sampleVal = &sampleValVec[LPC_FILTERORDER];
+    int16_t *sampleMa = &sampleMaVec[LPC_FILTERORDER];
+    int16_t *sampleAr = &sampleValVec[LPC_FILTERORDER];
+
+    /* initialization of coefficients */
+
+    for (k = 0; k < LPC_FILTERORDER + 1; k++) {
+        numerator[k] = syntDenum[LPC_FILTERORDER - k];
+    }
+
+    /* decoding of the maximum value */
+
+    maxVal = frg_quant_mod[ifm];
+
+    /* decoding of the sample values */
+    tmp1 = sampleVal;
+    tmp2 = &idx[len - 1];
+
+    if (ifm < 37) {
+        for (k = 0; k < len; k++) {
+            /* The shift compensates for the Q13 values in ilbc_state[], plus rounding of 2097152 (= 0.5 << 22);
+               maxVal is in Q8 and the result is in Q(-1) */
+            (*tmp1) = (int16_t) ((SPL_MUL_16_16(maxVal, ilbc_state[(*tmp2)]) + 2097152) >> 22);
+            tmp1++;
+            tmp2--;
+        }
+    } else if (ifm < 59) {
+        for (k = 0; k < len; k++) {
+            /* The shift compensates for the Q13 values in ilbc_state[], plus rounding of 262144 (= 0.5 << 19);
+               maxVal is in Q5 and the result is in Q(-1) */
+            (*tmp1) = (int16_t) ((SPL_MUL_16_16(maxVal, ilbc_state[(*tmp2)]) + 262144) >> 19);
+            tmp1++;
+            tmp2--;
+        }
+    } else {
+        for (k = 0; k < len; k++) {
+            /* The shift compensates for the Q13 values in ilbc_state[], plus rounding of 65536 (= 0.5 << 17);
+               maxVal is in Q3 and the result is in Q(-1) */
+            (*tmp1) = (int16_t) ((SPL_MUL_16_16(maxVal, ilbc_state[(*tmp2)]) + 65536) >> 17);
+            tmp1++;
+            tmp2--;
+        }
+    }
+
+    /* Set the rest of the data to zero */
+    memset(&sampleVal[len], 0, len * 2);
+
+    /* circular convolution with all-pass filter */
+
+    /* Set the state to zero */
+    memset(sampleValVec, 0, LPC_FILTERORDER * 2);
+
+    /* Run MA filter + AR filter */
+    filter_mafq12(sampleVal, sampleMa, numerator, LPC_FILTERORDER + 1, len + LPC_FILTERORDER);
+    memset(&sampleMa[len + LPC_FILTERORDER], 0, (len - LPC_FILTERORDER) * 2);
+    filter_arfq12(sampleMa, sampleAr, syntDenum, LPC_FILTERORDER + 1, 2 * len);
+
+    tmp1 = &sampleAr[len - 1];
+    tmp2 = &sampleAr[2 * len - 1];
+    tmp3 = Out_fix;
+    for (k = 0; k < len; k++) {
+        (*tmp3) = (*tmp1) + (*tmp2);
+        tmp1--;
+        tmp2--;
+        tmp3++;
+    }
+}
+
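+/* Dequantize a codebook gain for the given stage; the Q14 table value is scaled
+   by the magnitude of the reference gain, clamped to at least 1638 (0.1 in Q14). */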
+static int16_t gain_dequantization(int index, int max_in, int stage)
+{
+    int16_t scale = FFMAX(1638, FFABS(max_in));
+
+    return ((scale * ilbc_gain[stage][index]) + 8192) >> 14;
+}
+
+static void vector_rmultiplication(int16_t *out, const int16_t *in,
+                                   const int16_t *win,
+                                   int length, int shift)
+{
+    for (int i = 0; i < length; i++)
+        out[i] = (in[i] * win[-i]) >> shift;
+}
+
+static void vector_multiplication(int16_t *out, const int16_t *in,
+                                  const int16_t *win, int length,
+                                  int shift)
+{
+    for (int i = 0; i < length; i++)
+        out[i] = (in[i] * win[i]) >> shift;
+}
+
+static void add_vector_and_shift(int16_t *out, const int16_t *in1,
+                                 const int16_t *in2, int length,
+                                 int shift)
+{
+    for (int i = 0; i < length; i++)
+        out[i] = (in1[i] + in2[i]) >> shift;
+}
+
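+/* Build a 40-sample (SUBL) codebook vector from a pitch lag shorter than the
+   subframe by repeating the last 'index' samples of the buffer, with a 4-sample
+   interpolation (alpha window) at the wrap-around point. */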
+static void create_augmented_vector(int index, int16_t *buffer, int16_t *cbVec)
+{
+    int16_t cbVecTmp[4];
+    int16_t ilow = index - 4;
+
+    memcpy(cbVec, buffer - index, index * 2);
+
+    vector_multiplication(&cbVec[ilow], buffer - index - 4, alpha, 4, 15);
+    vector_rmultiplication(cbVecTmp, buffer - 4, &alpha[3], 4, 15);
+    add_vector_and_shift(&cbVec[ilow], &cbVec[ilow], cbVecTmp, 4, 0);
+
+    memcpy(cbVec + index, buffer - index, (SUBL - index) * sizeof(*cbVec));
+}
+
+static void get_codebook(int16_t * cbvec,   /* (o) Constructed codebook vector */
+                     int16_t * mem,     /* (i) Codebook buffer */
+                     int16_t index,     /* (i) Codebook index */
+                     int16_t lMem,      /* (i) Length of codebook buffer */
+                     int16_t cbveclen   /* (i) Codebook vector length */
+)
+{
+    int16_t k, base_size;
+    int16_t lag;
+    /* Stack based */
+    int16_t tempbuff2[SUBL + 5];
+
+    /* Determine size of codebook sections */
+    base_size = lMem - cbveclen + 1;
+
+    if (cbveclen == SUBL) {
+        base_size += cbveclen / 2;
+    }
+
+    /* No filter -> First codebook section */
+    if (index < lMem - cbveclen + 1) {
+        /* first non-interpolated vectors */
+
+        k = index + cbveclen;
+        /* get vector */
+        memcpy(cbvec, mem + lMem - k, cbveclen * 2);
+    } else if (index < base_size) {
+
+        /* Calculate lag */
+
+        k = (int16_t) SPL_MUL_16_16(2, (index - (lMem - cbveclen + 1))) + cbveclen;
+
+        lag = k / 2;
+
+        create_augmented_vector(lag, mem + lMem, cbvec);
+    } else {
+        int16_t memIndTest;
+
+        /* first non-interpolated vectors */
+
+        if (index - base_size < lMem - cbveclen + 1) {
+
+            /* Set up filter memory, stuff zeros outside memory buffer */
+
+            memIndTest = lMem - (index - base_size + cbveclen);
+
+            memset(mem - CB_HALFFILTERLEN, 0, CB_HALFFILTERLEN * 2);
+            memset(mem + lMem, 0, CB_HALFFILTERLEN * 2);
+
+            /* do filtering to get the codebook vector */
+
+            filter_mafq12(&mem[memIndTest + 4], cbvec, (int16_t *) kCbFiltersRev, CB_FILTERLEN, cbveclen);
+        } else {
+            /* interpolated vectors */
+            /* Stuff zeros outside memory buffer  */
+            memIndTest = lMem - cbveclen - CB_FILTERLEN;
+            memset(mem + lMem, 0, CB_HALFFILTERLEN * 2);
+
+            /* do filtering */
+            filter_mafq12(&mem[memIndTest + 7], tempbuff2, (int16_t *) kCbFiltersRev, CB_FILTERLEN, (int16_t) (cbveclen + 5));
+
+            /* Calculate lag index */
+            lag = (cbveclen << 1) - 20 + index - base_size - lMem - 1;
+
+            create_augmented_vector(lag, tempbuff2 + SUBL + 5, cbvec);
+        }
+    }
+}
+
+static void construct_vector (
+    int16_t *decvector,   /* (o) Decoded vector */
+    int16_t *index,       /* (i) Codebook indices */
+    int16_t *gain_index,  /* (i) Gain quantization indices */
+    int16_t *mem,         /* (i) Buffer for codevector construction */
+    int16_t lMem,         /* (i) Length of buffer */
+    int16_t veclen)
+{
+    int16_t gain[CB_NSTAGES];
+    int16_t cbvec0[SUBL];
+    int16_t cbvec1[SUBL];
+    int16_t cbvec2[SUBL];
+    int32_t a32;
+    int16_t *gainPtr;
+    int j;
+
+    /* gain de-quantization */
+
+    gain[0] = gain_dequantization(gain_index[0], 16384, 0);
+    gain[1] = gain_dequantization(gain_index[1], gain[0], 1);
+    gain[2] = gain_dequantization(gain_index[2], gain[1], 2);
+
+    /* codebook vector construction and construction of total vector */
+
+    /* Stack based */
+    get_codebook(cbvec0, mem, index[0], lMem, veclen);
+    get_codebook(cbvec1, mem, index[1], lMem, veclen);
+    get_codebook(cbvec2, mem, index[2], lMem, veclen);
+
+    gainPtr = &gain[0];
+    for (j = 0; j < veclen; j++) {
+        a32 = SPL_MUL_16_16(*gainPtr++, cbvec0[j]);
+        a32 += SPL_MUL_16_16(*gainPtr++, cbvec1[j]);
+        a32 += SPL_MUL_16_16(*gainPtr, cbvec2[j]);
+        gainPtr -= 2;
+        decvector[j] = (a32 + 8192) >> 14;
+    }
+}
+
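+/* Copy 'length' samples while writing the destination backwards. */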
+static void reverse_memcpy(int16_t *dest, int16_t *source, int length)
+{
+    int16_t* destPtr = dest;
+    int16_t* sourcePtr = source;
+    int j;
+
+    for (j = 0; j < length; j++)
+        *destPtr-- = *sourcePtr++;
+}
+
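+/* Reconstruct the decoded residual for the whole frame: build the start state,
+   then construct the remaining subframes forwards in time and, for the
+   subframes before the start state, backwards in time. */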
+static void decode_residual(ILBCContext *s,
+                            ILBCFrame *encbits,
+                            int16_t *decresidual,
+                            int16_t *syntdenum)
+{
+    int16_t meml_gotten, Nfor, Nback, diff, start_pos;
+    int16_t subcount, subframe;
+    int16_t *reverseDecresidual = s->enh_buf;        /* Reversed decoded data, used for decoding backwards in time (reuse memory in state) */
+    int16_t *memVec = s->prevResidual;
+    int16_t *mem = &memVec[CB_HALFFILTERLEN];   /* Memory for codebook */
+
+    diff = STATE_LEN - s->state_short_len;
+
+    if (encbits->state_first == 1) {
+        start_pos = (encbits->start - 1) * SUBL;
+    } else {
+        start_pos = (encbits->start - 1) * SUBL + diff;
+    }
+
+    /* decode scalar part of start state */
+
+    state_construct(encbits->ifm, encbits->idx, &syntdenum[(encbits->start - 1) * (LPC_FILTERORDER + 1)], &decresidual[start_pos], s->state_short_len);
+
+    if (encbits->state_first) { /* put adaptive part in the end */
+        /* setup memory */
+        memset(mem, 0, (int16_t) (CB_MEML - s->state_short_len) * 2);
+        memcpy(mem + CB_MEML - s->state_short_len, decresidual + start_pos, s->state_short_len * 2);
+
+        /* construct decoded vector */
+
+        construct_vector(&decresidual[start_pos + s->state_short_len], encbits->cb_index, encbits->gain_index, mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL, (int16_t) diff);
+
+    } else { /* put adaptive part in the beginning */
+        /* setup memory */
+        meml_gotten = s->state_short_len;
+        reverse_memcpy(mem + CB_MEML - 1, decresidual + start_pos, meml_gotten);
+        memset(mem, 0, (int16_t) (CB_MEML - meml_gotten) * 2);
+
+        /* construct decoded vector */
+        construct_vector(reverseDecresidual, encbits->cb_index, encbits->gain_index, mem + CB_MEML - ST_MEM_L_TBL, ST_MEM_L_TBL, diff);
+
+        /* get decoded residual from reversed vector */
+        reverse_memcpy(&decresidual[start_pos - 1], reverseDecresidual, diff);
+    }
+
+    /* counter for predicted subframes */
+    subcount = 1;
+
+    /* forward prediction of subframes */
+    Nfor = s->nsub - encbits->start - 1;
+
+    if (Nfor > 0) {
+        /* setup memory */
+        memset(mem, 0, (CB_MEML - STATE_LEN) * 2);
+        memcpy(mem + CB_MEML - STATE_LEN, decresidual + (encbits->start - 1) * SUBL, STATE_LEN * 2);
+
+        /* loop over subframes to encode */
+        for (subframe = 0; subframe < Nfor; subframe++) {
+            /* construct decoded vector */
+            construct_vector(&decresidual[(encbits->start + 1 + subframe) * SUBL], encbits->cb_index + subcount * CB_NSTAGES, encbits->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL, SUBL);
+
+            /* update memory */
+            memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+            memcpy(mem + CB_MEML - SUBL, &decresidual[(encbits->start + 1 + subframe) * SUBL], SUBL * 2);
+
+            subcount++;
+        }
+
+    }
+
+    /* backward prediction of subframes */
+    Nback = encbits->start - 1;
+
+    if (Nback > 0) {
+        /* setup memory */
+        meml_gotten = SUBL * (s->nsub + 1 - encbits->start);
+        if (meml_gotten > CB_MEML) {
+            meml_gotten = CB_MEML;
+        }
+
+        reverse_memcpy(mem + CB_MEML - 1, decresidual + (encbits->start - 1) * SUBL, meml_gotten);
+        memset(mem, 0, (int16_t) (CB_MEML - meml_gotten) * 2);
+
+        /* loop over subframes to decode */
+        for (subframe = 0; subframe < Nback; subframe++) {
+            /* construct decoded vector */
+            construct_vector(&reverseDecresidual[subframe * SUBL], encbits->cb_index + subcount * CB_NSTAGES,
+                        encbits->gain_index + subcount * CB_NSTAGES, mem, MEM_LF_TBL, SUBL);
+
+            /* update memory */
+            memmove(mem, mem + SUBL, (CB_MEML - SUBL) * sizeof(*mem));
+            memcpy(mem + CB_MEML - SUBL, &reverseDecresidual[subframe * SUBL], SUBL * 2);
+
+            subcount++;
+        }
+
+        /* get decoded residual from reversed vector */
+        reverse_memcpy(decresidual + SUBL * Nback - 1, reverseDecresidual, SUBL * Nback);
+    }
+}
+
+static int16_t max_abs_value_w16(const int16_t* vector, int length)
+{
+    int i = 0, absolute = 0, maximum = 0;
+
+    if (vector == NULL || length <= 0) {
+        return -1;
+    }
+
+    for (i = 0; i < length; i++) {
+        absolute = FFABS(vector[i]);
+        if (absolute > maximum)
+            maximum = absolute;
+    }
+
+    // Guard the case for abs(-32768).
+    return FFMIN(maximum, INT16_MAX);
+}
+
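+/* Number of bits needed to represent n (0 for n == 0). */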
+static int16_t get_size_in_bits(uint32_t n)
+{
+    int16_t bits;
+
+    if (0xFFFF0000 & n) {
+        bits = 16;
+    } else {
+        bits = 0;
+    }
+
+    if (0x0000FF00 & (n >> bits)) bits += 8;
+    if (0x000000F0 & (n >> bits)) bits += 4;
+    if (0x0000000C & (n >> bits)) bits += 2;
+    if (0x00000002 & (n >> bits)) bits += 1;
+    if (0x00000001 & (n >> bits)) bits += 1;
+
+    return bits;
+}
+
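+/* Dot product with every partial product right-shifted by 'scaling' to avoid overflow. */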
+static int32_t scale_dot_product(const int16_t *v1, const int16_t *v2, int length, int scaling)
+{
+    int32_t sum = 0;
+
+    for (int i = 0; i < length; i++)
+        sum += (v1[i] * v2[i]) >> scaling;
+
+    return sum;
+}
+
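+/* Correlation between the last 'srange' samples of the buffer and the same
+   segment shifted back by 'lag' samples, plus the energy of the lagged
+   segment (forced to 1, with corr set to 0, when it is zero). */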
+static void correlation(int32_t *corr, int32_t *ener, int16_t *buffer,
+                        int16_t lag, int16_t blen, int16_t srange, int16_t scale)
+{
+    int16_t *w16ptr;
+
+    w16ptr = &buffer[blen - srange - lag];
+
+    *corr = scale_dot_product(&buffer[blen - srange], w16ptr, srange, scale);
+    *ener = scale_dot_product(w16ptr, w16ptr, srange, scale);
+
+    if (*ener == 0) {
+        *corr = 0;
+        *ener = 1;
+    }
+}
+
+#define SPL_SHIFT_W32(x, c) (((c) >= 0) ? ((x) << (c)) : ((x) >> (-(c))))
+
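+/* Leading zero bits of a (of ~a when a is negative); 0 when a == 0.
+   Used as a normalization shift count. */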
+static int16_t norm_w32(int32_t a)
+{
+    if (a == 0) {
+        return 0;
+    } else if (a < 0) {
+        a = ~a;
+    }
+
+    return ff_clz(a);
+}
+
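+/* 32/16-bit division, saturating to INT32_MAX when the denominator is zero. */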
+static int32_t div_w32_w16(int32_t num, int16_t den)
+{
+    if (den != 0)
+        return num / den;
+    else
+        return 0x7FFFFFFF;
+}
+
+static void do_plc(int16_t *PLCresidual,       /* (o) concealed residual */
+                   int16_t *PLClpc,            /* (o) concealed LP parameters */
+                   int16_t PLI,                /* (i) packet loss indicator
+                                                      0 - no PL, 1 = PL */
+                   int16_t *decresidual,       /* (i) decoded residual */
+                   int16_t *lpc,               /* (i) decoded LPC (only used for no PL) */
+                   int16_t inlag,              /* (i) pitch lag */
+                   ILBCContext *s)             /* (i/o) decoder instance */
+{
+    int16_t i, pick;
+    int32_t cross, ener, cross_comp, ener_comp = 0;
+    int32_t measure, max_measure, energy;
+    int16_t max, cross_square_max, cross_square;
+    int16_t j, lag, tmp1, tmp2, randlag;
+    int16_t shift1, shift2, shift3, shift_max;
+    int16_t scale3;
+    int16_t corrLen;
+    int32_t tmpW32, tmp2W32;
+    int16_t use_gain;
+    int16_t tot_gain;
+    int16_t max_perSquare;
+    int16_t scale1, scale2;
+    int16_t totscale;
+    int32_t nom;
+    int16_t denom;
+    int16_t pitchfact;
+    int16_t use_lag;
+    int ind;
+    int16_t randvec[BLOCKL_MAX];
+
+    /* Packet Loss */
+    if (PLI == 1) {
+
+        s->consPLICount += 1;
+
+        /* if previous frame not lost,
+           determine pitch pred. gain */
+
+        if (s->prevPLI != 1) {
+
+            /* Maximum 60 samples are correlated, preserve as high accuracy
+               as possible without getting overflow */
+            max = max_abs_value_w16(s->prevResidual, s->block_samples);
+            scale3 = (get_size_in_bits(max) << 1) - 25;
+            if (scale3 < 0) {
+                scale3 = 0;
+            }
+
+            /* Store scale for use when interpolating between the
+             * concealment and the received packet */
+            s->prevScale = scale3;
+
+            /* Search around the previous lag +/-3 to find the
+               best pitch period */
+            lag = inlag - 3;
+
+            /* Guard against getting outside the frame */
+            corrLen = FFMIN(60, s->block_samples - (inlag + 3));
+
+            correlation(&cross, &ener, s->prevResidual, lag, s->block_samples, corrLen, scale3);
+
+            /* Normalize and store cross^2 and the number of shifts */
+            shift_max = get_size_in_bits(FFABS(cross)) - 15;
+            cross_square_max = (int16_t) SPL_MUL_16_16_RSFT(SPL_SHIFT_W32(cross, -shift_max), SPL_SHIFT_W32(cross, -shift_max), 15);
+
+            for (j = inlag - 2; j <= inlag + 3; j++) {
+                correlation(&cross_comp, &ener_comp, s->prevResidual, j, s->block_samples, corrLen, scale3);
+
+                /* Use the criteria (corr*corr)/energy to compare if
+                   this lag is better or not. To avoid the division,
+                   do a cross multiplication */
+                shift1 = get_size_in_bits(FFABS(cross_comp)) - 15;
+                cross_square = (int16_t) SPL_MUL_16_16_RSFT(SPL_SHIFT_W32(cross_comp, -shift1), SPL_SHIFT_W32(cross_comp, -shift1), 15);
+
+                shift2 = get_size_in_bits(ener) - 15;
+                measure = SPL_MUL_16_16(SPL_SHIFT_W32(ener, -shift2), cross_square);
+
+                shift3 = get_size_in_bits(ener_comp) - 15;
+                max_measure = SPL_MUL_16_16(SPL_SHIFT_W32(ener_comp, -shift3), cross_square_max);
+
+                /* Calculate shift value, so that the two measures can
+                   be put in the same Q domain */
+                if (((shift_max << 1) + shift3) > ((shift1 << 1) + shift2)) {
+                    tmp1 = FFMIN(31, (shift_max << 1) + shift3 - (shift1 << 1) - shift2);
+                    tmp2 = 0;
+                } else {
+                    tmp1 = 0;
+                    tmp2 = FFMIN(31, (shift1 << 1) + shift2 - (shift_max << 1) - shift3);
+                }
+
+                if ((measure >> tmp1) > (max_measure >> tmp2)) {
+                    /* New lag is better => record lag, measure and domain */
+                    lag = j;
+                    cross_square_max = cross_square;
+                    cross = cross_comp;
+                    shift_max = shift1;
+                    ener = ener_comp;
+                }
+            }
+
+            /* Calculate the periodicity for the lag with the maximum correlation.
+
+               Definition of the periodicity:
+               abs(corr(vec1, vec2))/(sqrt(energy(vec1))*sqrt(energy(vec2)))
+
+               Work in the Square domain to simplify the calculations
+               max_perSquare is less than 1 (in Q15)
+             */
+            tmp2W32 = scale_dot_product(&s->prevResidual[s->block_samples - corrLen], &s->prevResidual[s->block_samples - corrLen], corrLen, scale3);
+
+            if ((tmp2W32 > 0) && (ener_comp > 0)) {
+                /* norm energies to int16_t, compute the product of the energies and
+                   use the upper int16_t as the denominator */
+
+                scale1 = norm_w32(tmp2W32) - 16;
+                tmp1 = SPL_SHIFT_W32(tmp2W32, scale1);
+
+                scale2 = norm_w32(ener) - 16;
+                tmp2 = SPL_SHIFT_W32(ener, scale2);
+                denom = SPL_MUL_16_16_RSFT(tmp1, tmp2, 16);    /* denom in Q(scale1+scale2-16) */
+
+                /* Square the cross correlation and norm it such that max_perSquare
+                   will be in Q15 after the division */
+
+                totscale = scale1 + scale2 - 1;
+                tmp1 = SPL_SHIFT_W32(cross, (totscale >> 1));
+                tmp2 = SPL_SHIFT_W32(cross, totscale - (totscale >> 1));
+
+                nom = SPL_MUL_16_16(tmp1, tmp2);
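+                /* nom holds cross^2 in Q(scale1+scale2-1) while denom is in
+                   Q(scale1+scale2-16), so the quotient below comes out in
+                   Q15, the intended domain of max_perSquare. */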
+                max_perSquare = div_w32_w16(nom, denom);
+            } else {
+                max_perSquare = 0;
+            }
+        } else {
+            /* previous frame lost, use recorded lag and gain */
+            lag = s->prevLag;
+            max_perSquare = s->perSquare;
+        }
+
+        /* Attenuate signal and scale down pitch pred gain if
+           several frames lost consecutively */
+
+        use_gain = 32767;       /* 1.0 in Q15 */
+
+        if (s->consPLICount * s->block_samples > 320) {
+            use_gain = 29491;   /* 0.9 in Q15 */
+        } else if (s->consPLICount * s->block_samples > 640) {
+            use_gain = 22938;   /* 0.7 in Q15 */
+        } else if (s->consPLICount * s->block_samples > 960) {
+            use_gain = 16384;   /* 0.5 in Q15 */
+        } else if (s->consPLICount * s->block_samples > 1280) {
+            use_gain = 0;       /* 0.0 in Q15 */
+        }
+
+        /* Compute the mixing factor of pitch repetition and noise:
+           max_per > 0.7:       set periodicity to 1.0
+           0.4 < max_per < 0.7: set periodicity to (max_per - 0.4) / (0.7 - 0.4)
+           max_per < 0.4:       set periodicity to 0.0
+         */
+
+        if (max_perSquare > 7868) {     /* periodicity > 0.7  (0.7^4=0.2401 in Q15) */
+            pitchfact = 32767;
+        } else if (max_perSquare > 839) {       /* 0.4 < periodicity < 0.7 (0.4^4=0.0256 in Q15) */
+            /* find best index and interpolate from that */
+            ind = 5;
+            while ((max_perSquare < kPlcPerSqr[ind]) && (ind > 0)) {
+                ind--;
+            }
+            /* pitch fact is approximated by first order */
+            tmpW32 = kPlcPitchFact[ind] + SPL_MUL_16_16_RSFT(kPlcPfSlope[ind], (max_perSquare - kPlcPerSqr[ind]), 11);
+
+            pitchfact = FFMIN(tmpW32, 32767); /* guard against overflow */
+
+        } else {                /* periodicity < 0.4 */
+            pitchfact = 0;
+        }
+
+        /* avoid repetition of the same pitch cycle (buzziness) */
+        use_lag = lag;
+        if (lag < 80) {
+            use_lag = 2 * lag;
+        }
+
+        /* compute concealed residual */
+        energy = 0;
+
+        for (i = 0; i < s->block_samples; i++) {
+            /* noise component - 52 < randlag < 117 */
+            s->seed = SPL_MUL_16_16(s->seed, 31821) + 13849;
+            randlag = 53 + (s->seed & 63);
+
+            pick = i - randlag;
+
+            if (pick < 0) {
+                randvec[i] = s->prevResidual[s->block_samples + pick];
+            } else {
+                randvec[i] = s->prevResidual[pick];
+            }
+
+            /* pitch repetition component */
+            pick = i - use_lag;
+
+            if (pick < 0) {
+                PLCresidual[i] = s->prevResidual[s->block_samples + pick];
+            } else {
+                PLCresidual[i] = PLCresidual[pick];
+            }
+
+            /* Attenuate the total gain for each 10 ms */
+            if (i < 80) {
+                tot_gain = use_gain;
+            } else if (i < 160) {
+                tot_gain = SPL_MUL_16_16_RSFT(31130, use_gain, 15);    /* 0.95*use_gain */
+            } else {
+                tot_gain = SPL_MUL_16_16_RSFT(29491, use_gain, 15);    /* 0.9*use_gain */
+            }
+
+            /* mix noise and pitch repetition */
+            PLCresidual[i] = SPL_MUL_16_16_RSFT(tot_gain, (pitchfact * PLCresidual[i] + (32767 - pitchfact) * randvec[i] + 16384) >> 15, 15);
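+            /* i.e. approximately tot_gain/2^15 * (alpha * pitch + (1 - alpha) * noise)
+               with alpha = pitchfact/2^15; 16384 is added for rounding before
+               the shift back down from Q15. */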
+
+            /* Shifting down the result one step extra to ensure that no overflow
+               will occur */
+            energy += SPL_MUL_16_16_RSFT(PLCresidual[i], PLCresidual[i], (s->prevScale + 1));
+
+        }
+
+        /* if the average squared amplitude of the concealed residual is
+           below 900 (roughly 30 dB), use only noise */
+        if (energy < SPL_SHIFT_W32(s->block_samples * 900, -s->prevScale - 1)) {
+            energy = 0;
+            for (i = 0; i < s->block_samples; i++) {
+                PLCresidual[i] = randvec[i];
+            }
+        }
+
+        /* use the old LPC */
+        memcpy(PLClpc, s->prevLpc, (LPC_FILTERORDER + 1) * 2);
+
+        /* Update state in case there are multiple frame losses */
+        s->prevLag = lag;
+        s->perSquare = max_perSquare;
+    } else { /* no packet loss, copy input */
+        memcpy(PLCresidual, decresidual, s->block_samples * 2);
+        memcpy(PLClpc, lpc, (LPC_FILTERORDER + 1) * 2);
+        s->consPLICount = 0;
+    }
+
+    /* update state */
+    s->prevPLI = PLI;
+    memcpy(s->prevLpc, PLClpc, (LPC_FILTERORDER + 1) * 2);
+    memcpy(s->prevResidual, PLCresidual, s->block_samples * 2);
+}
+
+static int XcorrCoef(int16_t *target, int16_t *regressor,
+                     int16_t subl, int16_t searchLen,
+                     int16_t offset, int16_t step)
+{
+    int16_t maxlag;
+    int16_t pos;
+    int16_t max;
+    int16_t cross_corr_scale, energy_scale;
+    int16_t cross_corr_sg_mod, cross_corr_sg_mod_max;
+    int32_t cross_corr, energy;
+    int16_t cross_corr_mod, energy_mod, energy_mod_max;
+    int16_t *tp, *rp;
+    int16_t *rp_beg, *rp_end;
+    int16_t totscale, totscale_max;
+    int16_t scalediff;
+    int32_t new_crit, max_crit;
+    int shifts;
+    int k;
+
+    /* Initializations, to make sure that the first one is selected */
+    cross_corr_sg_mod_max = 0;
+    energy_mod_max = INT16_MAX;
+    totscale_max = -500;
+    maxlag = 0;
+    pos = 0;
+
+    /* Find scale value and start position */
+    if (step == 1) {
+        max = max_abs_value_w16(regressor, (int16_t) (subl + searchLen - 1));
+        rp_beg = regressor;
+        rp_end = &regressor[subl];
+    } else {                    /* step == -1 */
+        max = max_abs_value_w16(&regressor[-searchLen], (int16_t) (subl + searchLen - 1));
+        rp_beg = &regressor[-1];
+        rp_end = &regressor[subl - 1];
+    }
+
+    /* Introduce a scale factor on the energy in int32_t in
+       order to make sure that the calculation does not
+       overflow */
+
+    if (max > 5000) {
+        shifts = 2;
+    } else {
+        shifts = 0;
+    }
+
+    /* Calculate the first energy, then do a +/- to get the other energies */
+    energy = scale_dot_product(regressor, regressor, subl, shifts);
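+    /* The energies of the remaining candidate windows are then derived
+       recursively inside the loop: at each step the squared sample leaving
+       the window is removed and the squared sample entering it is added,
+       both pre-scaled by 'shifts'. */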
+
+    for (k = 0; k < searchLen; k++) {
+        tp = target;
+        rp = &regressor[pos];
+
+        cross_corr = scale_dot_product(tp, rp, subl, shifts);
+
+        if ((energy > 0) && (cross_corr > 0)) {
+            /* Put the cross correlation and energy into 16-bit words */
+            cross_corr_scale = norm_w32(cross_corr) - 16;
+            cross_corr_mod = (int16_t) SPL_SHIFT_W32(cross_corr, cross_corr_scale);
+            energy_scale = norm_w32(energy) - 16;
+            energy_mod = (int16_t) SPL_SHIFT_W32(energy, energy_scale);
+
+            /* Square cross correlation and store upper int16_t */
+            cross_corr_sg_mod = (int16_t) SPL_MUL_16_16_RSFT(cross_corr_mod, cross_corr_mod, 16);
+
+            /* Calculate the total number of (dynamic) right shifts that have
+               been performed on (cross_corr*cross_corr)/energy
+             */
+            totscale = energy_scale - (cross_corr_scale << 1);
+
+            /* Calculate the shift difference in order to be able to compare the two
+               (cross_corr*cross_corr)/energy in the same domain
+             */
+            scalediff = totscale - totscale_max;
+            scalediff = FFMIN(scalediff, 31);
+            scalediff = FFMAX(scalediff, -31);
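+            /* shifting an int32_t by 32 or more is undefined, so the scale
+               difference is clamped to +/-31 before being used as a shift
+               amount below */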
+
+            /* Compute the cross multiplication between the old best criteria
+               and the new one to be able to compare them without using a
+               division */
+
+            if (scalediff < 0) {
+                new_crit = ((int32_t) cross_corr_sg_mod * energy_mod_max) >> (-scalediff);
+                max_crit = ((int32_t) cross_corr_sg_mod_max * energy_mod);
+            } else {
+                new_crit = ((int32_t) cross_corr_sg_mod * energy_mod_max);
+                max_crit = ((int32_t) cross_corr_sg_mod_max * energy_mod) >> scalediff;
+            }
+
+            /* Store the new lag value if the new criterion is larger
+               than the previous largest criterion */
+
+            if (new_crit > max_crit) {
+                cross_corr_sg_mod_max = cross_corr_sg_mod;
+                energy_mod_max = energy_mod;
+                totscale_max = totscale;
+                maxlag = k;
+            }
+        }
+        pos += step;
+
+        /* Do a +/- to get the next energy */
+        energy += step * ((*rp_end * *rp_end - *rp_beg * *rp_beg) >> shifts);
+        rp_beg += step;
+        rp_end += step;
+    }
+
+    return maxlag + offset;
+}
+
+static void hp_output(int16_t *signal, const int16_t *ba, int16_t *y,
+                      int16_t *x, int16_t len)
+{
+    int32_t tmp;
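+
+    /* The filter history y[] is stored in double precision: y[0]/y[2] hold
+       the high words and y[1]/y[3] the halved low words of y[i-1] and y[i-2],
+       hence the low-part products are shifted right by 15 and the high-part
+       sum is doubled below. */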
+
+    for (int i = 0; i < len; i++) {
+        tmp = SPL_MUL_16_16(y[1], ba[3]);     /* (-a[1])*y[i-1] (low part) */
+        tmp += SPL_MUL_16_16(y[3], ba[4]);    /* (-a[2])*y[i-2] (low part) */
+        tmp = (tmp >> 15);
+        tmp += SPL_MUL_16_16(y[0], ba[3]);    /* (-a[1])*y[i-1] (high part) */
+        tmp += SPL_MUL_16_16(y[2], ba[4]);    /* (-a[2])*y[i-2] (high part) */
+        tmp = (tmp << 1);
+
+        tmp += SPL_MUL_16_16(signal[i], ba[0]);       /* b[0]*x[0] */
+        tmp += SPL_MUL_16_16(x[0], ba[1]);    /* b[1]*x[i-1] */
+        tmp += SPL_MUL_16_16(x[1], ba[2]);    /* b[2]*x[i-2] */
+
+        /* Update state (input part) */
+        x[1] = x[0];
+        x[0] = signal[i];
+
+        /* Convert back to Q0 and multiply by 2 */
+        signal[i] = av_clip(tmp + 1024, -67108864, 67108863) >> 11;
+
+        /* Update state (filtered part) */
+        y[2] = y[0];
+        y[3] = y[1];
+
+        /* upshift tmp by 3 with saturation */
+        if (tmp > 268435455) {
+            tmp = INT32_MAX;
+        } else if (tmp < -268435456) {
+            tmp = INT32_MIN;
+        } else {
+            tmp = tmp << 3;
+        }
+
+        y[0] = tmp >> 16;
+        y[1] = (tmp - (y[0] << 16)) >> 1;
+    }
+}
+
+static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
+                             int *got_frame_ptr, AVPacket *avpkt)
+{
+    const uint8_t *buf = avpkt->data;
+    AVFrame *frame     = data;
+    ILBCContext *s     = avctx->priv_data;
+    int mode = s->mode, ret;
+    int16_t *plc_data = &s->PLCresidual[LPC_FILTERORDER];
+
+    if ((ret = init_get_bits8(&s->gb, buf, avpkt->size)) < 0)
+        return ret;
+    memset(&s->frame, 0, sizeof(ILBCFrame));
+
+    frame->nb_samples = s->block_samples;
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+        return ret;
+
+    if (unpack_frame(s))
+        mode = 0;
+    if (s->frame.start < 1)
+        mode = 0;
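+    /* A payload that fails to unpack or carries an invalid start state is
+       treated as a bad frame; the decoding block below is then skipped. */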
+
+    if (mode) {
+        index_conv(s->frame.cb_index);
+
+        lsf_dequantization(s->lsfdeq, s->frame.lsf, s->lpc_n);
+        lsf_check_stability(s->lsfdeq, LPC_FILTERORDER, s->lpc_n);
+        lsp_interpolate(s->syntdenum, s->weightdenum,
+                        s->lsfdeq, LPC_FILTERORDER, s);
+        decode_residual(s, &s->frame, s->decresidual, s->syntdenum);
+
+        do_plc(s->PLCresidual, s->PLClpc, 0,
+               s->decresidual, s->syntdenum + (LPC_FILTERORDER + 1) * (s->nsub - 1),
+               s->last_lag, s);
+
+        memcpy(s->decresidual, s->PLCresidual, s->block_samples * 2);
+    }
+
+    if (s->enhancer) {
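+        /* enhancer not implemented */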
+    } else {
+        int16_t lag, i;
+
+        /* Find last lag (since the enhancer is not called to give this info) */
+        if (s->mode == 20) {
+            lag = XcorrCoef(&s->decresidual[s->block_samples-60], &s->decresidual[s->block_samples-80],
+                            60, 80, 20, -1);
+        } else {
+            lag = XcorrCoef(&s->decresidual[s->block_samples-ENH_BLOCKL],
+                            &s->decresidual[s->block_samples-ENH_BLOCKL-20],
+                            ENH_BLOCKL,
+                            100, 20, -1);
+        }
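+        /* With an offset of 20 and search lengths of 80 resp. 100, the
+           candidate lags cover 20..99 samples (20 ms mode) and 20..119
+           samples (30 ms mode). */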
+
+        /* Store lag (it is needed if next packet is lost) */
+        s->last_lag = lag;
+
+        /* copy data and run synthesis filter */
+        memcpy(plc_data, s->decresidual, s->block_samples * 2);
+
+        /* Set up the filter state */
+        memcpy(&plc_data[-LPC_FILTERORDER], s->syntMem, LPC_FILTERORDER * 2);
+
+        for (i = 0; i < s->nsub; i++) {
+            filter_arfq12(plc_data + i * SUBL, plc_data + i * SUBL,
+                          s->syntdenum + i * (LPC_FILTERORDER + 1),
+                          LPC_FILTERORDER + 1, SUBL);
+        }
+
+        /* Save the filter state */
+        memcpy(s->syntMem, &plc_data[s->block_samples-LPC_FILTERORDER], LPC_FILTERORDER * 2);
+    }
+
+    memcpy(frame->data[0], plc_data, s->block_samples * 2);
+
+    hp_output((int16_t *)frame->data[0], hp_out_coeffs,
+              s->hpimemy, s->hpimemx, s->block_samples);
+
+    memcpy(s->old_syntdenum, s->syntdenum, s->nsub*(LPC_FILTERORDER+1) * 2);
+
+    s->prev_enh_pl = 0;
+    if (mode == 0)
+        s->prev_enh_pl = 1;
+
+    *got_frame_ptr = 1;
+
+    return avpkt->size;
+}
+
+static av_cold int ilbc_decode_init(AVCodecContext *avctx)
+{
+    ILBCContext *s  = avctx->priv_data;
+
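+    /* A 20 ms iLBC frame is packed into 38 bytes (15.2 kbit/s) and a 30 ms
+       frame into 50 bytes (13.33 kbit/s), cf. RFC 3951. */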
+    if (avctx->block_align == 38)
+        s->mode = 20;
+    else if (avctx->block_align == 50)
+        s->mode = 30;
+    else if (avctx->bit_rate > 0)
+        s->mode = avctx->bit_rate <= 14000 ? 30 : 20;
+    else
+        return AVERROR_INVALIDDATA;
+
+    avctx->channels       = 1;
+    avctx->channel_layout = AV_CH_LAYOUT_MONO;
+    avctx->sample_rate    = 8000;
+    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
+
+    if (s->mode == 30) {
+        s->block_samples = 240;
+        s->nsub = NSUB_30MS;
+        s->nasub = NASUB_30MS;
+        s->lpc_n = LPC_N_30MS;
+        s->state_short_len = STATE_SHORT_LEN_30MS;
+    } else {
+        s->block_samples = 160;
+        s->nsub = NSUB_20MS;
+        s->nasub = NASUB_20MS;
+        s->lpc_n = LPC_N_20MS;
+        s->state_short_len = STATE_SHORT_LEN_20MS;
+    }
+
+    return 0;
+}
+
+AVCodec ff_ilbc_decoder = {
+    .name           = "ilbc",
+    .long_name      = NULL_IF_CONFIG_SMALL("iLBC (Internet Low Bitrate Codec)"),
+    .type           = AVMEDIA_TYPE_AUDIO,
+    .id             = AV_CODEC_ID_ILBC,
+    .init           = ilbc_decode_init,
+    .decode         = ilbc_decode_frame,
+    .capabilities   = AV_CODEC_CAP_DR1,
+    .priv_data_size = sizeof(ILBCContext),
+};