From: Juho S. <js...@us...> - 2006-01-08 00:33:22
Update of /cvsroot/sbcl/sbcl/src/runtime
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv24342/src/runtime

Modified Files:
	x86-assem.S x86-64-assem.S gencgc.c
Log Message:
0.9.8.20: Final batch from the sbcl-devel thread "Changes to GENCGC
memory zeroing" (2005-12).

Use hand-coded assembly for zeroing memory in GENCGC instead of the
platform memset/bzero.

  * Use MOVNTDQ on x86-64.
  * Use MOVNTDQ on x86 processors that support SSE2 (basically Pentium 4
    and newer).
  * Difference from the version posted on sbcl-devel: do *not* use the
    MMX MOVNTQ on x86 processors that support MMX but not SSE2. MOVNTQ
    apparently performed very badly on K7 Athlons/Durons.
  * Use REP STOSL on the remaining x86 processors.

Index: x86-assem.S
===================================================================
RCS file: /cvsroot/sbcl/sbcl/src/runtime/x86-assem.S,v
retrieving revision 1.25
retrieving revision 1.26
diff -u -d -r1.25 -r1.26
--- x86-assem.S	3 Jan 2006 09:52:38 -0000	1.25
+++ x86-assem.S	8 Jan 2006 00:33:13 -0000	1.26
@@ -841,7 +841,7 @@
 	int3
 	.byte	trap_ContextRestore
 	hlt			# We should never return here.
-	
+
 /*
  * This is part of the funky magic for exception handling on win32.
  * see handle_exception() in win32-os.c for details.
@@ -858,5 +858,140 @@
 	.byte	trap_ContextRestore
 	hlt			# We should never return here.
 #endif
-	
+
+	/* fast_bzero implementations and code to detect which implementation
+	 * to use.
+	 */
+
+	.global GNAME(fast_bzero_pointer)
+	.data
+	.align	4
+GNAME(fast_bzero_pointer):
+	/* Variable containing a pointer to the bzero function to use.
+	 * Initially points to a function that detects which implementation
+	 * should be used, and then updates the variable. */
+	.long	fast_bzero_detect
+
+	.text
+	.align	align_8byte,0x90
+	.global	GNAME(fast_bzero)
+	TYPE(GNAME(fast_bzero))
+GNAME(fast_bzero):
+	/* Indirect function call */
+	jmp	*fast_bzero_pointer
+	SIZE(GNAME(fast_bzero))
+
+
+	.text
+	.align	align_8byte,0x90
+	.global	GNAME(fast_bzero_detect)
+	TYPE(GNAME(fast_bzero_detect))
+GNAME(fast_bzero_detect):
+	/* Decide whether to use SSE, MMX or REP version */
+	push	%eax		/* CPUID uses EAX-EDX */
+	push	%ebx
+	push	%ecx
+	push	%edx
+	mov	$1, %eax
+	cpuid
+	test	$0x04000000, %edx /* SSE2 needed for MOVNTDQ */
+	jnz	Lsse2
+	/* Originally there was another case here for using the
+	 * MOVNTQ instruction for processors that supported MMX but
+	 * not SSE2. This turned out to be a loss especially on
+	 * Athlons (where this instruction is apparently microcoded
+	 * somewhat slowly). So for simplicity revert to REP STOSL
+	 * for all non-SSE2 processors.
+	 */
+Lbase:
+	movl	$fast_bzero_base, fast_bzero_pointer
+	jmp	Lrestore
+Lsse2:
+	movl	$fast_bzero_sse, fast_bzero_pointer
+	jmp	Lrestore
+
+Lrestore:
+	pop	%edx
+	pop	%ecx
+	pop	%ebx
+	pop	%eax
+	jmp	*fast_bzero_pointer
+
+	SIZE(GNAME(fast_bzero_detect))
+
+
+	.text
+	.align	align_8byte,0x90
+	.global	GNAME(fast_bzero_sse)
+	TYPE(GNAME(fast_bzero_sse))
+
+GNAME(fast_bzero_sse):
+	/* A fast routine for zero-filling blocks of memory that are
+	 * guaranteed to start and end at a 4096-byte aligned address.
+	 */
+	push	%esi		/* Save temporary registers */
+	push	%edi
+	mov	16(%esp), %esi	/* Parameter: amount of bytes to fill */
+	mov	12(%esp), %edi	/* Parameter: start address */
+	shr	$6, %esi	/* Amount of 64-byte blocks to copy */
+	jz	Lend_sse	/* If none, stop */
+	movups	%xmm7, -16(%esp) /* Save XMM register */
+	xorps	%xmm7, %xmm7	/* Zero the XMM register */
+	jmp	Lloop_sse
+	.align	16
+Lloop_sse:
+
+	/* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
+	 * non-caching double-quadword moving variant, i.e. the memory areas
+	 * we're touching are not fetched into the L1 cache, since we're just
+	 * going to overwrite the memory soon anyway.
+	 */
+	movntdq	%xmm7, 0(%edi)
+	movntdq	%xmm7, 16(%edi)
+	movntdq	%xmm7, 32(%edi)
+	movntdq	%xmm7, 48(%edi)
+
+	add	$64, %edi	/* Advance pointer */
+	dec	%esi		/* Decrement 64-byte block count */
+	jnz	Lloop_sse
+	movups	-16(%esp), %xmm7 /* Restore the XMM register */
+	sfence			/* Ensure that weakly ordered writes are flushed. */
+Lend_sse:
+	mov	12(%esp), %esi	/* Parameter: start address */
+	prefetcht0 0(%esi)	/* Prefetch the start of the block into cache,
+				 * since it's likely to be used immediately. */
+	pop	%edi		/* Restore temp registers */
+	pop	%esi
+	ret
+	SIZE(GNAME(fast_bzero_sse))
+
+
+	.text
+	.align	align_8byte,0x90
+	.global	GNAME(fast_bzero_base)
+	TYPE(GNAME(fast_bzero_base))
+
+GNAME(fast_bzero_base):
+	/* A fast routine for zero-filling blocks of memory that are
+	 * guaranteed to start and end at a 4096-byte aligned address.
+	 */
+	push	%eax		/* Save temporary registers */
+	push	%ecx
+	push	%edi
+	mov	20(%esp), %ecx	/* Parameter: amount of bytes to fill */
+	mov	16(%esp), %edi	/* Parameter: start address */
+	xor	%eax, %eax	/* Zero EAX */
+	shr	$2, %ecx	/* Amount of 4-byte blocks to copy */
+	jz	Lend_base
+	cld			/* Set direction of STOSL to increment */
+	rep stosl		/* Store EAX to *EDI, ECX times, incrementing
+				 * EDI by 4 after each store */
+Lend_base:
+	pop	%edi		/* Restore temp registers */
+	pop	%ecx
+	pop	%eax
+	ret
+	SIZE(GNAME(fast_bzero_base))
+
+
 	.end

Index: x86-64-assem.S
===================================================================
RCS file: /cvsroot/sbcl/sbcl/src/runtime/x86-64-assem.S,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -d -r1.8 -r1.9
--- x86-64-assem.S	1 Jul 2005 11:00:32 -0000	1.8
+++ x86-64-assem.S	8 Jan 2006 00:33:13 -0000	1.9
@@ -344,4 +344,45 @@
 	ret
 	.size GNAME(post_signal_tramp),.-GNAME(post_signal_tramp)
 
-	.end
+	.text
+	.align	align_8byte,0x90
+	.global	GNAME(fast_bzero)
+	.type	GNAME(fast_bzero),@function
+
+GNAME(fast_bzero):
+	/* A fast routine for zero-filling blocks of memory that are
+	 * guaranteed to start and end at a 4096-byte aligned address.
+	 */
+	shr	$6, %rsi	/* Amount of 64-byte blocks to copy */
+	jz	Lend		/* If none, stop */
+	mov	%rsi, %rcx	/* Save start address */
+	movups	%xmm7, -16(%rsp) /* Save XMM register */
+	xorps	%xmm7, %xmm7	/* Zero the XMM register */
+	jmp	Lloop
+	.align	16
+Lloop:
+
+	/* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
+	 * non-caching double-quadword moving variant, i.e. the memory areas
+	 * we're touching are not fetched into the L1 cache, since we're just
+	 * going to overwrite the memory soon anyway.
+	 */
+	movntdq	%xmm7, 0(%rdi)
+	movntdq	%xmm7, 16(%rdi)
+	movntdq	%xmm7, 32(%rdi)
+	movntdq	%xmm7, 48(%rdi)
+
+	add	$64, %rdi	/* Advance pointer */
+	dec	%rsi		/* Decrement 64-byte block count */
+	jnz	Lloop
+	mfence			/* Ensure that the writes are globally visible, since
+				 * MOVNTDQ is weakly ordered */
+	movups	-16(%rsp), %xmm7 /* Restore the XMM register */
+	prefetcht0 0(%rcx)	/* Prefetch the start of the block into cache,
+				 * since it's likely to be used immediately. */
+Lend:
+	ret
+	.size GNAME(fast_bzero), .-GNAME(fast_bzero)
+
+
+	.end

Index: gencgc.c
===================================================================
RCS file: /cvsroot/sbcl/sbcl/src/runtime/gencgc.c,v
retrieving revision 1.92
retrieving revision 1.93
diff -u -d -r1.92 -r1.93
--- gencgc.c	7 Jan 2006 18:53:40 -0000	1.92
+++ gencgc.c	8 Jan 2006 00:33:13 -0000	1.93
@@ -420,6 +420,9 @@
     fpu_restore(fpu_state);
 }
 
+
+void fast_bzero(void*, size_t); /* in <arch>-assem.S */
+
 /* Zero the pages from START to END (inclusive), but use mmap/munmap instead
  * if zeroing it ourselves, i.e. in practice give the memory back to the
  * OS. Generally done after a large GC.
@@ -451,7 +454,7 @@
     if (start > end)
         return;
 
-    memset(page_address(start), 0, PAGE_BYTES*(1+end-start));
+    fast_bzero(page_address(start), PAGE_BYTES*(1+end-start));
 }
 
 /* Zero the pages from START to END (inclusive), except for those
@@ -473,6 +476,7 @@
             page_table[i].need_to_zero = 1;
         }
     }
+
 /*
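
For readers who would rather see the dispatch scheme in C than in assembly, here is a rough sketch (not part of the commit, and not the code SBCL uses) of the same idea: a function pointer that initially points at a one-shot detector, which installs a non-temporal-store implementation when SSE2 is available and a plain memset fallback otherwise. The names (zero_pages_fast and friends) are invented for the example; it leans on GCC's __builtin_cpu_supports and the SSE2 intrinsics instead of raw CPUID, and the SSE2 path assumes 16-byte-aligned regions whose size is a multiple of 64 bytes and a translation unit built with -msse2.

#include <stddef.h>
#include <string.h>
#include <emmintrin.h>  /* SSE2 intrinsics: _mm_setzero_si128, _mm_stream_si128 */

static void zero_pages_detect(void *start, size_t nbytes);

/* Dispatch variable: starts out pointing at the detector, which replaces it
 * with the real implementation on the first call (like fast_bzero_pointer). */
static void (*zero_pages_impl)(void *, size_t) = zero_pages_detect;

/* Portable fallback, roughly what the REP STOSL path accomplishes. */
static void
zero_pages_base(void *start, size_t nbytes)
{
    memset(start, 0, nbytes);
}

/* SSE2 path: 16-byte non-temporal stores, the intrinsic form of MOVNTDQ.
 * Assumes start is 16-byte aligned and nbytes is a multiple of 64, which
 * holds for page-aligned, page-sized regions. */
static void
zero_pages_sse2(void *start, size_t nbytes)
{
    __m128i zero = _mm_setzero_si128();
    char *p = start;
    size_t i;

    for (i = 0; i < nbytes; i += 64) {
        _mm_stream_si128((__m128i *)(p + i),      zero);
        _mm_stream_si128((__m128i *)(p + i + 16), zero);
        _mm_stream_si128((__m128i *)(p + i + 32), zero);
        _mm_stream_si128((__m128i *)(p + i + 48), zero);
    }
    _mm_sfence();  /* non-temporal stores are weakly ordered; flush them */
}

/* Runs once: picks an implementation, updates the pointer, and calls the
 * chosen routine (the job fast_bzero_detect does above with CPUID). */
static void
zero_pages_detect(void *start, size_t nbytes)
{
    zero_pages_impl = __builtin_cpu_supports("sse2") ? zero_pages_sse2
                                                     : zero_pages_base;
    zero_pages_impl(start, nbytes);
}

/* Public entry point; every call goes through the pointer, like the
 * "jmp *fast_bzero_pointer" thunk in the assembly above. */
void
zero_pages_fast(void *start, size_t nbytes)
{
    zero_pages_impl(start, nbytes);
}

The indirect call adds one jump per request, which is noise next to zeroing whole pages, and it keeps the CPU detection out of the hot path; that is the same trade-off the fast_bzero_pointer/fast_bzero_detect pair above makes.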