Hash :
19c791cd
Author :
Date :
2018-03-08T10:55:20
Improve code formatting consistency
With rare exceptions ...
- Always separate line continuation characters by one space from
preceding code.
- Always use two-space indentation. Never use tabs.
- Always use K&R-style conditional blocks.
- Always surround operators with spaces, except in raw assembly code.
- Always put a space after, but not before, a comma.
- Never put a space between type casts and variables/function calls.
- Never put a space between the function name and the argument list in
function declarations and prototypes.
- Always surround braces ('{' and '}') with spaces.
- Always surround statements (if, for, else, catch, while, do, switch)
with spaces.
- Always attach pointer symbols ('*' and '**') to the variable or
function name.
- Always precede pointer symbols ('*' and '**') by a space in type
casts.
- Use the MIN() macro from jpegint.h within the libjpeg and TurboJPEG
API libraries (using min() from tjutil.h is still necessary for
TJBench.)
- Where it makes sense (particularly in the TurboJPEG code), put a blank
line after variable declaration blocks.
- Always separate statements in one-liners by two spaces.
The purpose of this was to ease maintenance on my part and also to make
it easier for contributors to figure out how to format patch
submissions. This was admittedly confusing (even to me sometimes) when
we had 3 or 4 different style conventions in the same source tree. The
new convention is more consistent with the formatting of other OSS code
bases.
This commit corrects deviations from the chosen formatting style in the
libjpeg API code and reformats the TurboJPEG API code such that it
conforms to the same standard.
NOTES:
- Although it is no longer necessary for the function name in function
declarations to begin in Column 1 (this was historically necessary
because of the ansi2knr utility, which allowed libjpeg to be built
with non-ANSI compilers), we retain that formatting for the libjpeg
code because it improves readability when using libjpeg's function
attribute macros (GLOBAL(), etc.)
- This reformatting project was accomplished with the help of AStyle and
Uncrustify, although neither was completely up to the task, and thus
a great deal of manual tweaking was required. Note to developers of
code formatting utilities: the libjpeg-turbo code base is an
excellent test bed, because AFAICT, it breaks every single one of the
utilities that are currently available.
- The legacy (MMX, SSE, 3DNow!) assembly code for i386 has been
formatted to match the SSE2 code (refer to
ff5685d5344273df321eb63a005eaae19d2496e3.) I hadn't intended to
bother with this, but the Loongson MMI implementation demonstrated
that there is still academic value to the MMX implementation, as an
algorithmic model for other 64-bit vector implementations. Thus, it
is desirable to improve its readability in the same manner as that of
the SSE2 implementation.
;
; jquanti.asm - sample data conversion and quantization (AVX2)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
; Copyright (C) 2016, 2018, D. R. Commander.
; Copyright (C) 2016, Matthieu Darbois.
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]
%include "jsimdext.inc"
%include "jdct.inc"
; --------------------------------------------------------------------------
SECTION SEG_TEXT
BITS 32
;
; Load data into workspace, applying unsigned->signed conversion
;
; Reads one 8x8 block of 8-bit samples from sample_data (8 bytes per row,
; starting at column start_col of each of the first 8 rows), zero-extends
; the bytes to 16-bit words, and level-shifts them from unsigned to signed
; representation by adding 0xFF80 (-128) to each word, storing the result
; as 16-bit DCTELEMs in workspace (row-major, 64 elements).
;
; GLOBAL(void)
; jsimd_convsamp_avx2(JSAMPARRAY sample_data, JDIMENSION start_col,
;                     DCTELEM *workspace);
;
; i386 cdecl: all arguments are on the stack (see %defines below.)
; Preserves ebx/esi/edi/ebp; eax/ecx/edx are caller-saved and clobbered.
; NOTE(review): the 0xFF80 level shift assumes 8-bit samples
; (CENTERJSAMPLE == 128) -- confirm against jsimdext.inc.
;
%define sample_data ebp + 8 ; JSAMPARRAY sample_data
%define start_col ebp + 12 ; JDIMENSION start_col
%define workspace ebp + 16 ; DCTELEM *workspace
align 32
GLOBAL_FUNCTION(jsimd_convsamp_avx2)
EXTN(jsimd_convsamp_avx2):
push ebp
mov ebp, esp
push ebx ; callee-saved; used for row pointers below
; push ecx ; need not be preserved
; push edx ; need not be preserved
push esi
push edi
mov esi, JSAMPARRAY [sample_data] ; esi = array of row pointers (JSAMPROW *)
mov eax, JDIMENSION [start_col] ; eax = column offset into each row
mov edi, POINTER [workspace] ; edi = output buffer (DCTELEM *)
; Load 8 bytes (one block row) from each of the 8 sample rows, two rows
; per iteration (row pointers in ebx/edx).
mov ebx, JSAMPROW [esi+0*SIZEOF_JSAMPROW] ; (JSAMPLE *)
mov edx, JSAMPROW [esi+1*SIZEOF_JSAMPROW] ; (JSAMPLE *)
movq xmm0, XMM_MMWORD [ebx+eax*SIZEOF_JSAMPLE] ; xmm0 = row 0
movq xmm1, XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE] ; xmm1 = row 1
mov ebx, JSAMPROW [esi+2*SIZEOF_JSAMPROW] ; (JSAMPLE *)
mov edx, JSAMPROW [esi+3*SIZEOF_JSAMPROW] ; (JSAMPLE *)
movq xmm2, XMM_MMWORD [ebx+eax*SIZEOF_JSAMPLE] ; xmm2 = row 2
movq xmm3, XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE] ; xmm3 = row 3
mov ebx, JSAMPROW [esi+4*SIZEOF_JSAMPROW] ; (JSAMPLE *)
mov edx, JSAMPROW [esi+5*SIZEOF_JSAMPROW] ; (JSAMPLE *)
movq xmm4, XMM_MMWORD [ebx+eax*SIZEOF_JSAMPLE] ; xmm4 = row 4
movq xmm5, XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE] ; xmm5 = row 5
mov ebx, JSAMPROW [esi+6*SIZEOF_JSAMPROW] ; (JSAMPLE *)
mov edx, JSAMPROW [esi+7*SIZEOF_JSAMPROW] ; (JSAMPLE *)
movq xmm6, XMM_MMWORD [ebx+eax*SIZEOF_JSAMPLE] ; xmm6 = row 6
movq xmm7, XMM_MMWORD [edx+eax*SIZEOF_JSAMPLE] ; xmm7 = row 7
; Pair consecutive rows: low 128-bit lane = even row, high lane = odd row.
vinserti128 ymm0, ymm0, xmm1, 1 ; ymm0 = rows 0 | 1
vinserti128 ymm2, ymm2, xmm3, 1 ; ymm2 = rows 2 | 3
vinserti128 ymm4, ymm4, xmm5, 1 ; ymm4 = rows 4 | 5
vinserti128 ymm6, ymm6, xmm7, 1 ; ymm6 = rows 6 | 7
vpxor ymm1, ymm1, ymm1 ; ymm1=(all 0's)
; Interleave each lane's low 8 bytes with zeros: zero-extend the 16
; samples in each register from bytes to words.
vpunpcklbw ymm0, ymm0, ymm1
vpunpcklbw ymm2, ymm2, ymm1
vpunpcklbw ymm4, ymm4, ymm1
vpunpcklbw ymm6, ymm6, ymm1
vpcmpeqw ymm7, ymm7, ymm7 ; ymm7 = all 1's (0xFFFF in every word)
vpsllw ymm7, ymm7, 7 ; ymm7={0xFF80 0xFF80 0xFF80 0xFF80 ..} (= -128 per word)
; Level shift: add -128 to every word (unsigned 0..255 -> signed -128..127).
vpaddw ymm0, ymm0, ymm7
vpaddw ymm2, ymm2, ymm7
vpaddw ymm4, ymm4, ymm7
vpaddw ymm6, ymm6, ymm7
; Store the 64 words (two 8-word rows per 256-bit store.)
vmovdqu YMMWORD [YMMBLOCK(0,0,edi,SIZEOF_DCTELEM)], ymm0
vmovdqu YMMWORD [YMMBLOCK(2,0,edi,SIZEOF_DCTELEM)], ymm2
vmovdqu YMMWORD [YMMBLOCK(4,0,edi,SIZEOF_DCTELEM)], ymm4
vmovdqu YMMWORD [YMMBLOCK(6,0,edi,SIZEOF_DCTELEM)], ymm6
vzeroupper ; clear upper YMM state before returning to (possibly SSE) caller
pop edi
pop esi
; pop edx ; need not be preserved
; pop ecx ; need not be preserved
pop ebx
pop ebp
ret
; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; This implementation is based on an algorithm described in
; "How to optimize for the Pentium family of microprocessors"
; (http://www.agner.org/assem/).
;
; For each of the 64 coefficients x in workspace, this computes
; (branch-free, division replaced by reciprocal multiplication):
;
;   coef = sign(x) * (((abs(x) + correction) *u reciprocal) *u scale)
;
; where *u denotes vpmulhuw, i.e. the high 16 bits of the unsigned
; 16x16-bit product (an implicit >> 16 after each multiply.)
;
; The divisors pointer references three consecutive 64-element DCTELEM
; tables -- reciprocal, correction, and scale -- addressed via the
; RECIPROCAL/CORRECTION/SCALE macros below (offsets DCTSIZE*0, DCTSIZE*1,
; and DCTSIZE*2 rows, respectively.)
;
; GLOBAL(void)
; jsimd_quantize_avx2(JCOEFPTR coef_block, DCTELEM *divisors,
;                     DCTELEM *workspace);
;
; i386 cdecl: all arguments are on the stack (see %defines below.)
; Preserves ebx/esi/edi/ebp; eax/ecx/edx are caller-saved.
;
%define RECIPROCAL(m, n, b) \
YMMBLOCK(DCTSIZE * 0 + (m), (n), (b), SIZEOF_DCTELEM)
%define CORRECTION(m, n, b) \
YMMBLOCK(DCTSIZE * 1 + (m), (n), (b), SIZEOF_DCTELEM)
%define SCALE(m, n, b) \
YMMBLOCK(DCTSIZE * 2 + (m), (n), (b), SIZEOF_DCTELEM)
%define coef_block ebp + 8 ; JCOEFPTR coef_block
%define divisors ebp + 12 ; DCTELEM *divisors
%define workspace ebp + 16 ; DCTELEM *workspace
align 32
GLOBAL_FUNCTION(jsimd_quantize_avx2)
EXTN(jsimd_quantize_avx2):
push ebp
mov ebp, esp
; push ebx ; unused
; push ecx ; unused
; push edx ; need not be preserved
push esi
push edi
mov esi, POINTER [workspace] ; esi = input coefficients (DCTELEM *)
mov edx, POINTER [divisors] ; edx = base of the three divisor tables
mov edi, JCOEFPTR [coef_block] ; edi = output coefficients
; Load all 64 input coefficients (16 words per YMM register); the
; originals are kept in ymm4-ymm7 so their signs can be restored below.
vmovdqu ymm4, [YMMBLOCK(0,0,esi,SIZEOF_DCTELEM)]
vmovdqu ymm5, [YMMBLOCK(2,0,esi,SIZEOF_DCTELEM)]
vmovdqu ymm6, [YMMBLOCK(4,0,esi,SIZEOF_DCTELEM)]
vmovdqu ymm7, [YMMBLOCK(6,0,esi,SIZEOF_DCTELEM)]
; Work on magnitudes so the multiplies can be unsigned.
vpabsw ymm0, ymm4
vpabsw ymm1, ymm5
vpabsw ymm2, ymm6
vpabsw ymm3, ymm7
vpaddw ymm0, YMMWORD [CORRECTION(0,0,edx)] ; correction + roundfactor
vpaddw ymm1, YMMWORD [CORRECTION(2,0,edx)]
vpaddw ymm2, YMMWORD [CORRECTION(4,0,edx)]
vpaddw ymm3, YMMWORD [CORRECTION(6,0,edx)]
vpmulhuw ymm0, YMMWORD [RECIPROCAL(0,0,edx)] ; reciprocal (high word of product)
vpmulhuw ymm1, YMMWORD [RECIPROCAL(2,0,edx)]
vpmulhuw ymm2, YMMWORD [RECIPROCAL(4,0,edx)]
vpmulhuw ymm3, YMMWORD [RECIPROCAL(6,0,edx)]
vpmulhuw ymm0, YMMWORD [SCALE(0,0,edx)] ; scale (high word of product)
vpmulhuw ymm1, YMMWORD [SCALE(2,0,edx)]
vpmulhuw ymm2, YMMWORD [SCALE(4,0,edx)]
vpmulhuw ymm3, YMMWORD [SCALE(6,0,edx)]
; Reapply the sign of the original coefficient (vpsignw negates/zeroes
; each word of the result according to the corresponding input word.)
vpsignw ymm0, ymm0, ymm4
vpsignw ymm1, ymm1, ymm5
vpsignw ymm2, ymm2, ymm6
vpsignw ymm3, ymm3, ymm7
vmovdqu [YMMBLOCK(0,0,edi,SIZEOF_DCTELEM)], ymm0
vmovdqu [YMMBLOCK(2,0,edi,SIZEOF_DCTELEM)], ymm1
vmovdqu [YMMBLOCK(4,0,edi,SIZEOF_DCTELEM)], ymm2
vmovdqu [YMMBLOCK(6,0,edi,SIZEOF_DCTELEM)], ymm3
vzeroupper ; clear upper YMM state before returning to (possibly SSE) caller
pop edi
pop esi
; pop edx ; need not be preserved
; pop ecx ; unused
; pop ebx ; unused
pop ebp
ret
; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
align 32