LCOV - code coverage report
Current view: top level - ASM_AVX2 - highbd_convolve_avx2.c (source / functions)
Test:         coverage.info
Date:         2019-11-25 17:38:06
Coverage:     Lines:     0 / 117 (0.0 %)
              Functions: 0 / 2   (0.0 %)

Source code:

/*
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
#include <assert.h> /* for the assert() checks below */
#include <immintrin.h>

#include "EbDefinitions.h"
#include "aom_dsp_rtcd.h"

#include "convolve.h"
#include "convolve_avx2.h"
#include "synonyms.h"

// -----------------------------------------------------------------------------
// Copy and average

void eb_av1_highbd_convolve_y_sr_avx2(const uint16_t *src, int32_t src_stride,
    uint16_t *dst, int32_t dst_stride, int32_t w, int32_t h,
    const InterpFilterParams *filter_params_x,
    const InterpFilterParams *filter_params_y,
    const int32_t subpel_x_q4, const int32_t subpel_y_q4,
    ConvolveParams *conv_params, int32_t bd) {
    int32_t i, j;
    const int32_t fo_vert = filter_params_y->taps / 2 - 1;
    const uint16_t *const src_ptr = src - fo_vert * src_stride;
    (void)filter_params_x;
    (void)subpel_x_q4;
    (void)conv_params;

    assert(conv_params->round_0 <= FILTER_BITS);
    assert(((conv_params->round_0 + conv_params->round_1) <= (FILTER_BITS + 1)) ||
        ((conv_params->round_0 + conv_params->round_1) == (2 * FILTER_BITS)));

    __m256i s[8], coeffs_y[4];

    const int32_t bits = FILTER_BITS;

    const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
    const __m256i round_const_bits = _mm256_set1_epi32((1 << bits) >> 1);
    const __m256i clip_pixel =
        _mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
    const __m256i zero = _mm256_setzero_si256();

    prepare_coeffs_8tap_avx2(filter_params_y, subpel_y_q4, coeffs_y);

    for (j = 0; j < w; j += 8) {
        const uint16_t *data = &src_ptr[j];
        /* Vertical filter */
        {
            __m256i src6;
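            // Each sNM register below holds one 8-pixel row per 128-bit lane
            // (row N in the low lane, row M in the high lane), so each pass of
            // the inner loop filters two output rows at once.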
            __m256i s01 = _mm256_permute2x128_si256(
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 0 * src_stride))),
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
                0x20);
            __m256i s12 = _mm256_permute2x128_si256(
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 1 * src_stride))),
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
                0x20);
            __m256i s23 = _mm256_permute2x128_si256(
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 2 * src_stride))),
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
                0x20);
            __m256i s34 = _mm256_permute2x128_si256(
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 3 * src_stride))),
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
                0x20);
            __m256i s45 = _mm256_permute2x128_si256(
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 4 * src_stride))),
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
                0x20);
            src6 = _mm256_castsi128_si256(
                _mm_loadu_si128((__m128i *)(data + 6 * src_stride)));
            __m256i s56 = _mm256_permute2x128_si256(
                _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 5 * src_stride))),
                src6, 0x20);

            s[0] = _mm256_unpacklo_epi16(s01, s12);
            s[1] = _mm256_unpacklo_epi16(s23, s34);
            s[2] = _mm256_unpacklo_epi16(s45, s56);

            s[4] = _mm256_unpackhi_epi16(s01, s12);
            s[5] = _mm256_unpackhi_epi16(s23, s34);
            s[6] = _mm256_unpackhi_epi16(s45, s56);

            for (i = 0; i < h; i += 2) {
                data = &src_ptr[i * src_stride + j];

                const __m256i s67 = _mm256_permute2x128_si256(
                    src6,
                    _mm256_castsi128_si256(
                        _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
                    0x20);

                src6 = _mm256_castsi128_si256(
                    _mm_loadu_si128((__m128i *)(data + 8 * src_stride)));

                const __m256i s78 = _mm256_permute2x128_si256(
                    _mm256_castsi128_si256(
                        _mm_loadu_si128((__m128i *)(data + 7 * src_stride))),
                    src6, 0x20);

                s[3] = _mm256_unpacklo_epi16(s67, s78);
                s[7] = _mm256_unpackhi_epi16(s67, s78);

                const __m256i res_a = convolve16_8tap_avx2(s, coeffs_y);

                __m256i res_a_round = _mm256_sra_epi32(
                    _mm256_add_epi32(res_a, round_const_bits), round_shift_bits);

                if (w - j > 4) {
                    const __m256i res_b = convolve16_8tap_avx2(s + 4, coeffs_y);
                    __m256i res_b_round = _mm256_sra_epi32(
                        _mm256_add_epi32(res_b, round_const_bits), round_shift_bits);

                    __m256i res_16bit = _mm256_packs_epi32(res_a_round, res_b_round);
                    res_16bit = _mm256_min_epi16(res_16bit, clip_pixel);
                    res_16bit = _mm256_max_epi16(res_16bit, zero);

                    _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j],
                        _mm256_castsi256_si128(res_16bit));
                    _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
                        _mm256_extracti128_si256(res_16bit, 1));
                }
                else if (w == 4) {
                    res_a_round = _mm256_packs_epi32(res_a_round, res_a_round);
                    res_a_round = _mm256_min_epi16(res_a_round, clip_pixel);
                    res_a_round = _mm256_max_epi16(res_a_round, zero);

                    _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j],
                        _mm256_castsi256_si128(res_a_round));
                    _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
                        _mm256_extracti128_si256(res_a_round, 1));
                }
                else {
                    res_a_round = _mm256_packs_epi32(res_a_round, res_a_round);
                    res_a_round = _mm256_min_epi16(res_a_round, clip_pixel);
                    res_a_round = _mm256_max_epi16(res_a_round, zero);

                    xx_storel_32((__m128i *)&dst[i * dst_stride + j],
                        _mm256_castsi256_si128(res_a_round));
                    xx_storel_32((__m128i *)&dst[i * dst_stride + j + dst_stride],
                        _mm256_extracti128_si256(res_a_round, 1));
                }

                s[0] = s[1];
                s[1] = s[2];
                s[2] = s[3];

                s[4] = s[5];
                s[5] = s[6];
                s[6] = s[7];
            }
        }
    }
}
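
/*
 * Illustrative scalar sketch (not part of the original file): what the AVX2
 * kernel above computes per output pixel, i.e. an 8-tap vertical filter,
 * rounded by FILTER_BITS and clipped to the bit depth. The "filter" array
 * stands in for the taps that prepare_coeffs_8tap_avx2() derives from
 * filter_params_y and subpel_y_q4; the function name is hypothetical.
 */
static void highbd_convolve_y_scalar_sketch(const uint16_t *src, int32_t src_stride,
    uint16_t *dst, int32_t dst_stride, int32_t w, int32_t h,
    const int16_t *filter /* 8 taps */, int32_t bd) {
    const int32_t fo_vert = 3; /* taps / 2 - 1 for an 8-tap filter */
    const uint16_t *const src_ptr = src - fo_vert * src_stride;
    const int32_t max_pixel = (1 << bd) - 1;
    for (int32_t y = 0; y < h; ++y) {
        for (int32_t x = 0; x < w; ++x) {
            int32_t sum = 0;
            for (int32_t k = 0; k < 8; ++k)
                sum += filter[k] * src_ptr[(y + k) * src_stride + x];
            /* Round by FILTER_BITS, then clip to [0, (1 << bd) - 1]. */
            sum = (sum + ((1 << FILTER_BITS) >> 1)) >> FILTER_BITS;
            if (sum < 0) sum = 0;
            if (sum > max_pixel) sum = max_pixel;
            dst[y * dst_stride + x] = (uint16_t)sum;
        }
    }
}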

void eb_av1_highbd_convolve_x_sr_avx2(const uint16_t *src, int32_t src_stride,
    uint16_t *dst, int32_t dst_stride, int32_t w, int32_t h,
    const InterpFilterParams *filter_params_x,
    const InterpFilterParams *filter_params_y,
    const int32_t subpel_x_q4, const int32_t subpel_y_q4,
    ConvolveParams *conv_params, int32_t bd) {
    int32_t i, j;
    const int32_t fo_horiz = filter_params_x->taps / 2 - 1;
    const uint16_t *const src_ptr = src - fo_horiz;
    (void)subpel_y_q4;
    (void)filter_params_y;

    // Check that, even with 12-bit input, the intermediate values will fit
    // into an unsigned 16-bit intermediate array.
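    // (For example, with bd == 12 and FILTER_BITS == 7, the check below
    // requires conv_params->round_0 >= 5.)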
    assert(bd + FILTER_BITS + 2 - conv_params->round_0 <= 16);

    __m256i s[4], coeffs_x[4];

    const __m256i round_const_x =
        _mm256_set1_epi32(((1 << conv_params->round_0) >> 1));
    const __m128i round_shift_x = _mm_cvtsi32_si128(conv_params->round_0);

    const int32_t bits = FILTER_BITS - conv_params->round_0;
    const __m128i round_shift_bits = _mm_cvtsi32_si128(bits);
    const __m256i round_const_bits = _mm256_set1_epi32((1 << bits) >> 1);
    const __m256i clip_pixel =
        _mm256_set1_epi16(bd == 10 ? 1023 : (bd == 12 ? 4095 : 255));
    const __m256i zero = _mm256_setzero_si256();

    assert(bits >= 0);
    assert((FILTER_BITS - conv_params->round_1) >= 0 ||
        ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS));

    prepare_coeffs_8tap_avx2(filter_params_x, subpel_x_q4, coeffs_x);

    for (j = 0; j < w; j += 8) {
        /* Horizontal filter */
        for (i = 0; i < h; i += 2) {
            const __m256i row0 =
                _mm256_loadu_si256((__m256i *)&src_ptr[i * src_stride + j]);
            __m256i row1 =
                _mm256_loadu_si256((__m256i *)&src_ptr[(i + 1) * src_stride + j]);

            const __m256i r0 = _mm256_permute2x128_si256(row0, row1, 0x20);
            const __m256i r1 = _mm256_permute2x128_si256(row0, row1, 0x31);
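            // r0 packs pixels 0..7 of the two rows (one row per 128-bit lane)
            // and r1 packs pixels 8..15; the alignr offsets below slide 8-pixel
            // windows across them, producing the even and odd output pixels.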

            // even pixels
            s[0] = _mm256_alignr_epi8(r1, r0, 0);
            s[1] = _mm256_alignr_epi8(r1, r0, 4);
            s[2] = _mm256_alignr_epi8(r1, r0, 8);
            s[3] = _mm256_alignr_epi8(r1, r0, 12);

            __m256i res_even = convolve16_8tap_avx2(s, coeffs_x);
            res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_x),
                round_shift_x);

            // odd pixels
            s[0] = _mm256_alignr_epi8(r1, r0, 2);
            s[1] = _mm256_alignr_epi8(r1, r0, 6);
            s[2] = _mm256_alignr_epi8(r1, r0, 10);
            s[3] = _mm256_alignr_epi8(r1, r0, 14);

            __m256i res_odd = convolve16_8tap_avx2(s, coeffs_x);
            res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_x),
                round_shift_x);

            res_even = _mm256_sra_epi32(_mm256_add_epi32(res_even, round_const_bits),
                round_shift_bits);
            res_odd = _mm256_sra_epi32(_mm256_add_epi32(res_odd, round_const_bits),
                round_shift_bits);

            __m256i res_even1 = _mm256_packs_epi32(res_even, res_even);
            __m256i res_odd1 = _mm256_packs_epi32(res_odd, res_odd);

            __m256i res = _mm256_unpacklo_epi16(res_even1, res_odd1);
            res = _mm256_min_epi16(res, clip_pixel);
            res = _mm256_max_epi16(res, zero);

            if (w - j > 4) {
                _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j],
                    _mm256_castsi256_si128(res));
                _mm_storeu_si128((__m128i *)&dst[i * dst_stride + j + dst_stride],
                    _mm256_extracti128_si256(res, 1));
            }
            else if (w == 4) {
                _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j],
                    _mm256_castsi256_si128(res));
                _mm_storel_epi64((__m128i *)&dst[i * dst_stride + j + dst_stride],
                    _mm256_extracti128_si256(res, 1));
            }
            else {
                xx_storel_32((__m128i *)&dst[i * dst_stride + j],
                    _mm256_castsi256_si128(res));
                xx_storel_32((__m128i *)&dst[i * dst_stride + j + dst_stride],
                    _mm256_extracti128_si256(res, 1));
            }
        }
    }
}
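
/*
 * Illustrative scalar sketch (not part of the original file): the arithmetic
 * performed by the AVX2 kernel above, i.e. an 8-tap horizontal filter rounded
 * in two stages (first by conv_params->round_0, then by FILTER_BITS - round_0)
 * and clipped to the bit depth. The "filter" array stands in for the taps that
 * prepare_coeffs_8tap_avx2() derives from filter_params_x and subpel_x_q4; the
 * function name is hypothetical.
 */
static void highbd_convolve_x_scalar_sketch(const uint16_t *src, int32_t src_stride,
    uint16_t *dst, int32_t dst_stride, int32_t w, int32_t h,
    const int16_t *filter /* 8 taps */, int32_t round_0, int32_t bd) {
    const int32_t fo_horiz = 3; /* taps / 2 - 1 for an 8-tap filter */
    const uint16_t *const src_ptr = src - fo_horiz;
    const int32_t bits = FILTER_BITS - round_0;
    const int32_t max_pixel = (1 << bd) - 1;
    for (int32_t y = 0; y < h; ++y) {
        for (int32_t x = 0; x < w; ++x) {
            int32_t sum = 0;
            for (int32_t k = 0; k < 8; ++k)
                sum += filter[k] * src_ptr[y * src_stride + x + k];
            sum = (sum + ((1 << round_0) >> 1)) >> round_0; /* first rounding stage */
            sum = (sum + ((1 << bits) >> 1)) >> bits;       /* second rounding stage */
            if (sum < 0) sum = 0;                           /* clip to [0, (1 << bd) - 1] */
            if (sum > max_pixel) sum = max_pixel;
            dst[y * dst_stride + x] = (uint16_t)sum;
        }
    }
}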

// -----------------------------------------------------------------------------
// Horizontal Filtering

//HIGH_FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2);
//HIGH_FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2);

#undef HIGHBD_FUNC

Generated by: LCOV version 1.14