Browse Source

setjmp: optimize longjmp prologues

Use a branchless sequence that is one byte shorter on 64-bit, same size
on 32-bit. Thanks to Pete Cawley for suggesting this variant.
master
Alexander Monakov 6 years ago
committed by Rich Felker
parent
commit
4554f155dd
Changed files:
  1. src/setjmp/i386/longjmp.s   (6 lines changed)
  2. src/setjmp/x32/longjmp.s    (8 lines changed)
  3. src/setjmp/x86_64/longjmp.s (8 lines changed)

src/setjmp/i386/longjmp.s (6 lines changed)

--- a/src/setjmp/i386/longjmp.s
+++ b/src/setjmp/i386/longjmp.s
@@ -6,10 +6,8 @@ _longjmp:
 longjmp:
 	mov 4(%esp),%edx
 	mov 8(%esp),%eax
-	test %eax,%eax
-	jnz 1f
-	inc %eax
-1:
+	cmp $1,%eax
+	adc $0, %al
 	mov (%edx),%ebx
 	mov 4(%edx),%esi
 	mov 8(%edx),%edi

src/setjmp/x32/longjmp.s (8 lines changed)

--- a/src/setjmp/x32/longjmp.s
+++ b/src/setjmp/x32/longjmp.s
@@ -5,11 +5,9 @@
 .type longjmp,@function
 _longjmp:
 longjmp:
-	mov %esi,%eax /* val will be longjmp return */
-	test %esi,%esi
-	jnz 1f
-	inc %eax /* if val==0, val=1 per longjmp semantics */
-1:
+	xor %eax,%eax
+	cmp $1,%esi      /* CF = val ? 0 : 1 */
+	adc %esi,%eax    /* eax = val + !val */
 	mov (%rdi),%rbx /* rdi is the jmp_buf, restore regs from it */
 	mov 8(%rdi),%rbp
 	mov 16(%rdi),%r12

src/setjmp/x86_64/longjmp.s (8 lines changed)

--- a/src/setjmp/x86_64/longjmp.s
+++ b/src/setjmp/x86_64/longjmp.s
@@ -5,11 +5,9 @@
 .type longjmp,@function
 _longjmp:
 longjmp:
-	mov %esi,%eax /* val will be longjmp return */
-	test %esi,%esi
-	jnz 1f
-	inc %eax /* if val==0, val=1 per longjmp semantics */
-1:
+	xor %eax,%eax
+	cmp $1,%esi      /* CF = val ? 0 : 1 */
+	adc %esi,%eax    /* eax = val + !val */
 	mov (%rdi),%rbx /* rdi is the jmp_buf, restore regs from it */
 	mov 8(%rdi),%rbp
 	mov 16(%rdi),%r12

Loading…
Cancel
Save