This is the mail archive of the gcc-bugs@gcc.gnu.org mailing list for the GCC project.



[Bug rtl-optimization/17236] inefficient code for long long multiply on x86



------- Comment #5 from roger at eyesopen dot com  2007-02-02 00:17 -------
It looks like Ian's recent subreg lowering pass patch has improved code
generation on this testcase.  Previously, we'd spill three integer registers to
the stack for "LLM"; we're now down to two.  [A significant improvement over
the five we spilled when this bug was reported.]
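
For context, here is a minimal sketch of the kind of testcase being discussed.
The exact source attached to the bug report may differ (e.g. in signedness),
but any 64x64 -> 64-bit multiply compiled for i386 exercises the same code
path and produces the same imull/imull/mull pattern:

    /* Hypothetical reconstruction of the testcase, for illustration only.  */
    long long
    LLM (long long a, long long b)
    {
      return a * b;
    }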

Before:

LLM:    subl    $12, %esp
        movl    %ebx, (%esp)
        movl    28(%esp), %edx
        movl    20(%esp), %ebx
        movl    16(%esp), %ecx
        movl    24(%esp), %eax
        movl    %esi, 4(%esp)
        movl    %edx, %esi
        movl    %edi, 8(%esp)
        movl    %ebx, %edi
        movl    (%esp), %ebx
        imull   %ecx, %esi
        imull   %eax, %edi
        mull    %ecx
        addl    %edi, %esi
        movl    8(%esp), %edi
        leal    (%esi,%edx), %edx
        movl    4(%esp), %esi
        addl    $12, %esp
        ret

After:

LLM:    subl    $8, %esp
        movl    %ebx, (%esp)
        movl    20(%esp), %eax
        movl    %esi, 4(%esp)
        movl    24(%esp), %ecx
        movl    12(%esp), %esi
        movl    16(%esp), %ebx
        imull   %esi, %ecx
        imull   %eax, %ebx
        mull    %esi
        movl    4(%esp), %esi
        addl    %ebx, %ecx
        movl    (%esp), %ebx
        addl    $8, %esp
        leal    (%ecx,%edx), %edx
        ret
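
For readers unfamiliar with the sequence: both versions implement the standard
decomposition of a 64x64 -> 64-bit multiply into 32-bit pieces -- one widening
mull of the low halves plus two imull cross products, whose sum is folded into
the high word by the final leal.  A rough C equivalent (illustrative only, not
taken from GCC's sources; the names mul64, a_lo, etc. are made up for this
sketch):

    #include <stdint.h>

    uint64_t
    mul64 (uint64_t a, uint64_t b)
    {
      uint32_t a_lo = (uint32_t) a, a_hi = (uint32_t) (a >> 32);
      uint32_t b_lo = (uint32_t) b, b_hi = (uint32_t) (b >> 32);

      /* Widening 32x32 -> 64 product of the low halves (the "mull").  */
      uint64_t low = (uint64_t) a_lo * b_lo;

      /* The cross products only matter in their low 32 bits (the two
         "imull"s); a_hi * b_hi would land entirely above bit 63 and is
         dropped.  */
      uint32_t cross = a_lo * b_hi + a_hi * b_lo;

      return low + ((uint64_t) cross << 32);
    }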


-- 


http://gcc.gnu.org/bugzilla/show_bug.cgi?id=17236

