Hash: 5e27ca23
Date: 2025-09-19T14:21:49
x86: Reformat NASM code to improve readability (and simplify the checkstyle script)
;
; Sample data conversion and quantization (32-bit AVX2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2016, 2018, 2024-2025, D. R. Commander.
; Copyright (C) 2016, Matthieu Darbois.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) or Yasm.

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------
    SECTION     SEG_TEXT
    BITS        32
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_avx2(JSAMPARRAY sample_data, JDIMENSION start_col,
;                     DCTELEM *workspace)
;
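; A rough scalar equivalent of what this routine computes (a sketch for
; reference only, assuming 8-bit samples, for which CENTERJSAMPLE is 128 and
; DCTSIZE is 8):
;
;   for (row = 0; row < DCTSIZE; row++)
;     for (col = 0; col < DCTSIZE; col++)
;       workspace[row * DCTSIZE + col] =
;         (DCTELEM)sample_data[row][start_col + col] - CENTERJSAMPLE;
;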

%define sample_data  ebp + 8            ; JSAMPARRAY sample_data
%define start_col    ebp + 12           ; JDIMENSION start_col
%define workspace    ebp + 16           ; DCTELEM *workspace

    align       32
    GLOBAL_FUNCTION(jsimd_convsamp_avx2)

EXTN(jsimd_convsamp_avx2):
    push        ebp
    mov         ebp, esp
    push        ebx
;   push        ecx                     ; need not be preserved
;   push        edx                     ; need not be preserved
    push        esi
    push        edi

    mov         esi, JSAMPARRAY [sample_data]  ; (JSAMPROW *)
    mov         eax, JDIMENSION [start_col]
    mov         edi, POINTER [workspace]       ; (DCTELEM *)

    mov         ebx, JSAMPROW [esi + 0 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov         edx, JSAMPROW [esi + 1 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    movq        xmm0, XMM_MMWORD [ebx + eax * SIZEOF_JSAMPLE]
    movq        xmm1, XMM_MMWORD [edx + eax * SIZEOF_JSAMPLE]

    mov         ebx, JSAMPROW [esi + 2 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov         edx, JSAMPROW [esi + 3 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    movq        xmm2, XMM_MMWORD [ebx + eax * SIZEOF_JSAMPLE]
    movq        xmm3, XMM_MMWORD [edx + eax * SIZEOF_JSAMPLE]

    mov         ebx, JSAMPROW [esi + 4 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov         edx, JSAMPROW [esi + 5 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    movq        xmm4, XMM_MMWORD [ebx + eax * SIZEOF_JSAMPLE]
    movq        xmm5, XMM_MMWORD [edx + eax * SIZEOF_JSAMPLE]

    mov         ebx, JSAMPROW [esi + 6 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    mov         edx, JSAMPROW [esi + 7 * SIZEOF_JSAMPROW]  ; (JSAMPLE *)
    movq        xmm6, XMM_MMWORD [ebx + eax * SIZEOF_JSAMPLE]
    movq        xmm7, XMM_MMWORD [edx + eax * SIZEOF_JSAMPLE]

    vinserti128 ymm0, ymm0, xmm1, 1
    vinserti128 ymm2, ymm2, xmm3, 1
    vinserti128 ymm4, ymm4, xmm5, 1
    vinserti128 ymm6, ymm6, xmm7, 1

    vpxor       ymm1, ymm1, ymm1        ; ymm1 = (all 0's)
    vpunpcklbw  ymm0, ymm0, ymm1
    vpunpcklbw  ymm2, ymm2, ymm1
    vpunpcklbw  ymm4, ymm4, ymm1
    vpunpcklbw  ymm6, ymm6, ymm1

    vpcmpeqw    ymm7, ymm7, ymm7
    vpsllw      ymm7, ymm7, 7           ; ymm7 = { 0xFF80 0xFF80 0xFF80 0xFF80 .. }
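
    ; Adding 0xFF80 (-128) to each word converts the zero-extended unsigned
    ; samples to signed values (i.e. subtracts CENTERJSAMPLE).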
    vpaddw      ymm0, ymm0, ymm7
    vpaddw      ymm2, ymm2, ymm7
    vpaddw      ymm4, ymm4, ymm7
    vpaddw      ymm6, ymm6, ymm7

    vmovdqu     YMMWORD [YMMBLOCK(0, 0, edi, SIZEOF_DCTELEM)], ymm0
    vmovdqu     YMMWORD [YMMBLOCK(2, 0, edi, SIZEOF_DCTELEM)], ymm2
    vmovdqu     YMMWORD [YMMBLOCK(4, 0, edi, SIZEOF_DCTELEM)], ymm4
    vmovdqu     YMMWORD [YMMBLOCK(6, 0, edi, SIZEOF_DCTELEM)], ymm6
    vzeroupper

    pop         edi
    pop         esi
;   pop         edx                     ; need not be preserved
;   pop         ecx                     ; need not be preserved
    pop         ebx
    pop         ebp
    ret
; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; This implementation is based on an algorithm described in
; "Optimizing subroutines in assembly language:
; An optimization guide for x86 platforms" (https://agner.org/optimize).
;
; GLOBAL(void)
; jsimd_quantize_avx2(JCOEFPTR coef_block, DCTELEM *divisors,
;                     DCTELEM *workspace)
;
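; Per-coefficient arithmetic in rough C-like pseudocode (a sketch inferred
; from the instruction sequence below, not code taken from the library's
; headers; recip[], corr[], and scale[] stand for the three sub-tables of
; divisors):
;
;   temp = abs(workspace[i]) + corr[i];              /* correction + roundfactor */
;   temp = ((unsigned)temp * recip[i]) >> 16;        /* high word of the product */
;   temp = ((unsigned)temp * scale[i]) >> 16;        /* high word of the product */
;   coef_block[i] = (workspace[i] < 0) ? -temp :     /* restore the sign; */
;                   (workspace[i] == 0) ? 0 : temp;  /* vpsignw zeroes 0 inputs */
;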
%define RECIPROCAL(m, n, b) \
  YMMBLOCK(DCTSIZE * 0 + (m), (n), (b), SIZEOF_DCTELEM)
%define CORRECTION(m, n, b) \
  YMMBLOCK(DCTSIZE * 1 + (m), (n), (b), SIZEOF_DCTELEM)
%define SCALE(m, n, b) \
  YMMBLOCK(DCTSIZE * 2 + (m), (n), (b), SIZEOF_DCTELEM)
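
; The macros above assume that divisors points to three consecutive
; DCTSIZE2-element DCTELEM tables: reciprocals, then corrections, then scale
; factors, in that order.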

%define coef_block  ebp + 8             ; JCOEFPTR coef_block
%define divisors    ebp + 12            ; DCTELEM *divisors
%define workspace   ebp + 16            ; DCTELEM *workspace

    align       32
    GLOBAL_FUNCTION(jsimd_quantize_avx2)

EXTN(jsimd_quantize_avx2):
    push        ebp
    mov         ebp, esp
;   push        ebx                     ; unused
;   push        ecx                     ; unused
;   push        edx                     ; need not be preserved
    push        esi
    push        edi

    mov         esi, POINTER [workspace]
    mov         edx, POINTER [divisors]
    mov         edi, JCOEFPTR [coef_block]

    vmovdqu     ymm4, [YMMBLOCK(0, 0, esi, SIZEOF_DCTELEM)]
    vmovdqu     ymm5, [YMMBLOCK(2, 0, esi, SIZEOF_DCTELEM)]
    vmovdqu     ymm6, [YMMBLOCK(4, 0, esi, SIZEOF_DCTELEM)]
    vmovdqu     ymm7, [YMMBLOCK(6, 0, esi, SIZEOF_DCTELEM)]
    vpabsw      ymm0, ymm4
    vpabsw      ymm1, ymm5
    vpabsw      ymm2, ymm6
    vpabsw      ymm3, ymm7

    vpaddw      ymm0, YMMWORD [CORRECTION(0, 0, edx)]  ; correction + roundfactor
    vpaddw      ymm1, YMMWORD [CORRECTION(2, 0, edx)]
    vpaddw      ymm2, YMMWORD [CORRECTION(4, 0, edx)]
    vpaddw      ymm3, YMMWORD [CORRECTION(6, 0, edx)]
    vpmulhuw    ymm0, YMMWORD [RECIPROCAL(0, 0, edx)]  ; reciprocal
    vpmulhuw    ymm1, YMMWORD [RECIPROCAL(2, 0, edx)]
    vpmulhuw    ymm2, YMMWORD [RECIPROCAL(4, 0, edx)]
    vpmulhuw    ymm3, YMMWORD [RECIPROCAL(6, 0, edx)]
    vpmulhuw    ymm0, YMMWORD [SCALE(0, 0, edx)]       ; scale
    vpmulhuw    ymm1, YMMWORD [SCALE(2, 0, edx)]
    vpmulhuw    ymm2, YMMWORD [SCALE(4, 0, edx)]
    vpmulhuw    ymm3, YMMWORD [SCALE(6, 0, edx)]
    vpsignw     ymm0, ymm0, ymm4
    vpsignw     ymm1, ymm1, ymm5
    vpsignw     ymm2, ymm2, ymm6
    vpsignw     ymm3, ymm3, ymm7

    vmovdqu     [YMMBLOCK(0, 0, edi, SIZEOF_DCTELEM)], ymm0
    vmovdqu     [YMMBLOCK(2, 0, edi, SIZEOF_DCTELEM)], ymm1
    vmovdqu     [YMMBLOCK(4, 0, edi, SIZEOF_DCTELEM)], ymm2
    vmovdqu     [YMMBLOCK(6, 0, edi, SIZEOF_DCTELEM)], ymm3
    vzeroupper

    pop         edi
    pop         esi
;   pop         edx                     ; need not be preserved
;   pop         ecx                     ; unused
;   pop         ebx                     ; unused
    pop         ebp
    ret
; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
    align       32