  Jan 15, 2011
    • fix rdar://8785296 - -fcatch-undefined-behavior generates inefficient code · 8df83c4a
      Chris Lattner authored
      The basic issue is that isel (very reasonably!) expects conditional branches
      to be folded, so CGP leaving around a bunch of dead computation feeding
      conditional branches isn't such a good idea.  Just fold branches on constants
      into unconditional branches.
      
      llvm-svn: 123526
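      A minimal sketch of that fold, written against the LLVM C++ API (illustrative
      only; the helper name and structure are assumptions, not the actual
      CodeGenPrepare code from this commit):
      
      #include "llvm/IR/BasicBlock.h"
      #include "llvm/IR/Constants.h"
      #include "llvm/IR/Instructions.h"
      using namespace llvm;
      
      // Fold a conditional branch whose condition is a constant into an
      // unconditional branch to the taken successor.
      static bool foldConstantCondBranch(BranchInst *BI) {
        if (!BI->isConditional())
          return false;
        ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
        if (!Cond)
          return false;
      
        // Keep the taken successor, drop the edge to the dead one.
        BasicBlock *Taken = BI->getSuccessor(Cond->isZero() ? 1 : 0);
        BasicBlock *Dead  = BI->getSuccessor(Cond->isZero() ? 0 : 1);
        Dead->removePredecessor(BI->getParent());
      
        // Replace the conditional branch with an unconditional one; the
        // now-unused condition (and whatever fed it) becomes trivially dead
        // and can be cleaned up before isel sees it.
        BranchInst::Create(Taken, BI);
        BI->eraseFromParent();
        return true;
      }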
    • Now that instruction optzns can update the iterator as they go, we can · 1b93be50
      Chris Lattner authored
      have objectsize folding recursively simplify away its result when it
      folds.  It is important to catch this here, because otherwise we won't
      eliminate the cross-block values at isel and other times.
      
      llvm-svn: 123524
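      A hedged sketch of the pattern this enables, against the LLVM C++ API
      (illustrative only; the names, signature, and cleanup strategy are
      assumptions, not the code from this commit): the objectsize call is
      replaced by a constant while the caller's iterator is kept valid, so
      follow-on simplification can run immediately.
      
      #include "llvm/IR/BasicBlock.h"
      #include "llvm/IR/Constants.h"
      #include "llvm/IR/IntrinsicInst.h"
      using namespace llvm;
      
      // Fold an llvm.objectsize call whose result is known to be 'KnownSize'.
      // 'CurInstIterator' is the caller's iteration point; it is advanced past
      // the call before erasing so the caller can keep scanning safely.
      static bool foldObjectSizeCall(IntrinsicInst *ObjSize, uint64_t KnownSize,
                                     BasicBlock::iterator &CurInstIterator) {
        Constant *Size = ConstantInt::get(ObjSize->getType(), KnownSize);
      
        if (&*CurInstIterator == ObjSize)
          ++CurInstIterator;
      
        ObjSize->replaceAllUsesWith(Size);
        ObjSize->eraseFromParent();
      
        // Users of the new constant (e.g. a compare feeding a branch) can now
        // be revisited and folded in turn, instead of surviving to isel as
        // cross-block values.
        return true;
      }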
    • implement an instcombine xform that canonicalizes casts outside of and-with-constant operations. · 9c10d587
      Chris Lattner authored
      This fixes rdar://8808586, which observed that we used to compile:
      
      
      union xy {
              struct x { _Bool b[15]; } x;
              __attribute__((packed))
              struct y {
                      __attribute__((packed)) unsigned long b0to7;
                      __attribute__((packed)) unsigned int b8to11;
                      __attribute__((packed)) unsigned short b12to13;
                      __attribute__((packed)) unsigned char b14;
              } y;
      };
      
      struct x
      foo(union xy *xy)
      {
              return xy->x;
      }
      
      into:
      
      _foo:                                   ## @foo
      	movq	(%rdi), %rax
      	movabsq	$1095216660480, %rcx    ## imm = 0xFF00000000
      	andq	%rax, %rcx
      	movabsq	$-72057594037927936, %rdx ## imm = 0xFF00000000000000
      	andq	%rax, %rdx
      	movzbl	%al, %esi
      	orq	%rdx, %rsi
      	movq	%rax, %rdx
      	andq	$65280, %rdx            ## imm = 0xFF00
      	orq	%rsi, %rdx
      	movq	%rax, %rsi
      	andq	$16711680, %rsi         ## imm = 0xFF0000
      	orq	%rdx, %rsi
      	movl	%eax, %edx
      	andl	$-16777216, %edx        ## imm = 0xFFFFFFFFFF000000
      	orq	%rsi, %rdx
      	orq	%rcx, %rdx
      	movabsq	$280375465082880, %rcx  ## imm = 0xFF0000000000
      	movq	%rax, %rsi
      	andq	%rcx, %rsi
      	orq	%rdx, %rsi
      	movabsq	$71776119061217280, %r8 ## imm = 0xFF000000000000
      	andq	%r8, %rax
      	orq	%rsi, %rax
      	movzwl	12(%rdi), %edx
      	movzbl	14(%rdi), %esi
      	shlq	$16, %rsi
      	orl	%edx, %esi
      	movq	%rsi, %r9
      	shlq	$32, %r9
      	movl	8(%rdi), %edx
      	orq	%r9, %rdx
      	andq	%rdx, %rcx
      	movzbl	%sil, %esi
      	shlq	$32, %rsi
      	orq	%rcx, %rsi
      	movl	%edx, %ecx
      	andl	$-16777216, %ecx        ## imm = 0xFFFFFFFFFF000000
      	orq	%rsi, %rcx
      	movq	%rdx, %rsi
      	andq	$16711680, %rsi         ## imm = 0xFF0000
      	orq	%rcx, %rsi
      	movq	%rdx, %rcx
      	andq	$65280, %rcx            ## imm = 0xFF00
      	orq	%rsi, %rcx
      	movzbl	%dl, %esi
      	orq	%rcx, %rsi
      	andq	%r8, %rdx
      	orq	%rsi, %rdx
      	ret
      
      We now compile this into:
      
      _foo:                                   ## @foo
      ## BB#0:                                ## %entry
      	movzwl	12(%rdi), %eax
      	movzbl	14(%rdi), %ecx
      	shlq	$16, %rcx
      	orl	%eax, %ecx
      	shlq	$32, %rcx
      	movl	8(%rdi), %edx
      	orq	%rcx, %rdx
      	movq	(%rdi), %rax
      	ret
      
      A small improvement :-)
      
      llvm-svn: 123520
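      As a rough illustration of this kind of canonicalization, here is a sketch
      of the zext-of-and case using the LLVM IRBuilder API (an assumption-laden
      example, not the actual InstCombine code from this commit): rewriting
      "and (zext X), C" as "zext (and X, C')" puts the cast outside the mask,
      so the narrow 'and' can fold with the narrow loads and shifts feeding it,
      which is what collapses the byte-by-byte masking shown above.
      
      #include "llvm/IR/Constants.h"
      #include "llvm/IR/IRBuilder.h"
      #include "llvm/IR/Instructions.h"
      using namespace llvm;
      
      // If 'And' is 'and (zext X), C' and C's set bits fit in X's type, build
      // the canonical form 'zext (and X, trunc(C))'; return null otherwise.
      static Value *canonicalizeAndOfZExt(BinaryOperator &And, IRBuilder<> &B) {
        ZExtInst *ZExt = dyn_cast<ZExtInst>(And.getOperand(0));
        ConstantInt *C = dyn_cast<ConstantInt>(And.getOperand(1));
        if (!ZExt || !C)
          return nullptr;
      
        Value *Src = ZExt->getOperand(0);
        unsigned SrcBits = Src->getType()->getIntegerBitWidth();
      
        // Only legal when every set bit of the mask is representable in the
        // narrow type; then zext(X) & C == zext(X & trunc(C)).
        if (!C->getValue().isIntN(SrcBits))
          return nullptr;
      
        Value *NarrowAnd = B.CreateAnd(Src, B.getInt(C->getValue().trunc(SrcBits)));
        return B.CreateZExt(NarrowAnd, And.getType());
      }
      
      In InstCombine proper the result would replace the original 'and'; the point
      is that the mask now lives in the narrow type, next to the movzbl/movzwl-style
      loads in the example above.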