whitespace fixes

Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Denys Vlasenko 2022-02-11 23:03:27 +01:00
parent dda77e8376
commit 1f272c06d0
2 changed files with 11 additions and 11 deletions

(changed file 1 of 2: the x86-64 SHA1 assembly)

@@ -71,8 +71,8 @@ sha1_process_block64:
movq 4*10(%rdi), %r12
bswapq %r11
bswapq %r12
rolq $32, %r11 # r11 = W[9]:W[8]
rolq $32, %r12 # r12 = W[11]:W[10]
movq %r11, %xmm2
movq %r12, %xmm4
punpcklqdq %xmm4, %xmm2 # xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
@@ -81,8 +81,8 @@ sha1_process_block64:
movq 4*14(%rdi), %r14
bswapq %r13
bswapq %r14
rolq $32, %r13 # r13 = W[13]:W[12]
rolq $32, %r14 # r14 = W[15]:W[14]
movq %r13, %xmm3
movq %r14, %xmm4
punpcklqdq %xmm4, %xmm3 # xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])
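Both hunks above are whitespace-only, but the code they touch is the W[8..15] load: pairs of big-endian 32-bit message words are fetched with 64-bit loads, byte-swapped, rotated so each register holds W[odd]:W[even], then merged into an XMM register four words at a time. Purely as an illustration (not part of this commit; the helper name and standalone layout are my own, assuming GCC/Clang builtins), here is a C sketch with SSE2 intrinsics of the same packing for W[8..11]:

#include <string.h>
#include <stdint.h>
#include <emmintrin.h>                  /* SSE2 intrinsics */

/* Illustrative equivalent of the movq/bswapq/rolq/punpcklqdq sequence:
 * pack the big-endian words W[8..11] of the 64-byte block into one XMM
 * register with dword lanes (W[8], W[9], W[10], W[11]).  W[12..15] go
 * into the next register the same way. */
static __m128i pack_w8_11(const unsigned char *block)
{
    uint64_t r11, r12;

    memcpy(&r11, block + 4*8,  8);      /* 64-bit load of W[8],W[9] bytes   */
    memcpy(&r12, block + 4*10, 8);      /* 64-bit load of W[10],W[11] bytes */
    r11 = __builtin_bswap64(r11);       /* bswapq: r11 = W[8]:W[9]   */
    r12 = __builtin_bswap64(r12);       /* bswapq: r12 = W[10]:W[11] */
    r11 = (r11 << 32) | (r11 >> 32);    /* rolq $32: r11 = W[9]:W[8]   */
    r12 = (r12 << 32) | (r12 >> 32);    /* rolq $32: r12 = W[11]:W[10] */

    __m128i lo = _mm_cvtsi64_si128((long long)r11);  /* movq %r11, %xmm2 */
    __m128i hi = _mm_cvtsi64_si128((long long)r12);  /* movq %r12, %xmm4 */
    return _mm_unpacklo_epi64(lo, hi);  /* punpcklqdq %xmm4, %xmm2 */
}

The second file below emits the same sequence, with the $xmmT1 scratch variable standing in for the fixed %xmm4 register.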

(changed file 2 of 2: the shell script that generates that assembly)

@@ -99,7 +99,7 @@ INTERLEAVE() {
)
}
# movaps bswap32_mask(%rip), $xmmT1
# Load W[] to xmm0..3, byteswapping on the fly.
# For iterations 0..15, we pass RCONST+W[] in rsi,r8..r14
# for use in RD1As instead of spilling them to stack.
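The comment block in this hunk describes the first-phase trick: for iterations 0..15 the registers rsi, r8..r14 hold RCONST+W[i] already summed, so each round adds one value instead of two and nothing has to be spilled to the stack. As a hedged sketch of what one such round does with that precomputed sum (plain C, invented names, not the generator's actual macro):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, unsigned n)
{
    return (x << n) | (x >> (32 - n));
}

/* One round of SHA1 iterations 0..15: rconst_plus_w is RCONST + W[i],
 * precomputed by the caller (the assembly keeps these sums in rsi,
 * r8..r14).  Rotating the variable roles a,b,c,d,e between rounds is
 * the caller's job, as it is in the unrolled assembly. */
static void sha1_rd1(uint32_t *a, uint32_t *b, uint32_t *c,
                     uint32_t *d, uint32_t *e, uint32_t rconst_plus_w)
{
    *e += rotl32(*a, 5)                 /* a <<< 5                     */
        + ((*b & *c) | (~*b & *d))      /* Ch(b,c,d), the round-1 func */
        + rconst_plus_w;                /* RCONST + W[i] in one add    */
    *b = rotl32(*b, 30);                /* b <<< 30 for the next round */
}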
@@ -110,8 +110,8 @@ INTERLEAVE() {
# movaps %xmm0, $xmmT2
# paddd $xmmRCONST, $xmmT2
# movq $xmmT2, %rsi
# #pextrq \$1, $xmmT2, %r8 #SSE4.1 insn
# #movhpd $xmmT2, %r8 #can only move to mem, not to reg
# shufps \$0x0e, $xmmT2, $xmmT2 # have to use two-insn sequence
# movq $xmmT2, %r8 # instead
# ...
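These commented-out lines record why the upper 64 bits of an XMM register are awkward to get into a general-purpose register: pextrq needs SSE4.1, and movhpd can store only to memory, so plain SSE2 falls back to a shuffle followed by movq. A small intrinsics sketch of the same trade-off (illustrative helper, assuming GCC/Clang; the integer pshufd stands in for the shufps used above, the byte movement is identical):

#include <stdint.h>
#include <emmintrin.h>                  /* SSE2 */
#ifdef __SSE4_1__
#include <smmintrin.h>                  /* SSE4.1: pextrq */
#endif

/* Return the upper 64 bits of an XMM register in a general-purpose
 * register. */
static uint64_t xmm_high_qword(__m128i v)
{
#ifdef __SSE4_1__
    return (uint64_t)_mm_extract_epi64(v, 1);    /* pextrq $1: one insn */
#else
    /* The "two-insn sequence": move the high qword down, then movq. */
    __m128i hi = _mm_shuffle_epi32(v, 0x0e);     /* dwords 2,3 -> 0,1 */
    return (uint64_t)_mm_cvtsi128_si64(hi);      /* movq xmm -> GPR   */
#endif
}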
@@ -197,8 +197,8 @@ sha1_process_block64:
movq 4*10(%rdi), %r12
bswapq %r11
bswapq %r12
rolq \$32, %r11 # r11 = W[9]:W[8]
rolq \$32, %r12 # r12 = W[11]:W[10]
movq %r11, %xmm2
movq %r12, $xmmT1
punpcklqdq $xmmT1, %xmm2 # xmm2 = r12:r11 = (W[8],W[9],W[10],W[11])
@@ -207,8 +207,8 @@ sha1_process_block64:
movq 4*14(%rdi), %r14
bswapq %r13
bswapq %r14
rolq \$32, %r13 # r13 = W[13]:W[12]
rolq \$32, %r14 # r14 = W[15]:W[14]
movq %r13, %xmm3
movq %r14, $xmmT1
punpcklqdq $xmmT1, %xmm3 # xmm3 = r14:r13 = (W[12],W[13],W[14],W[15])