pixman: add some new versions, update git recipe
author    Koen Kooi <koen@openembedded.org>
          Tue, 20 Oct 2009 09:31:32 +0000 (11:31 +0200)
committer Koen Kooi <koen@openembedded.org>
          Tue, 20 Oct 2009 09:31:32 +0000 (11:31 +0200)
conf/checksums.ini
recipes/xorg-lib/pixman/0001-ARM-Removal-of-unused-broken-NEON-code.patch [new file with mode: 0644]
recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch [new file with mode: 0644]
recipes/xorg-lib/pixman/0003-ARM-Added-pixman_composite_src_8888_0565_asm_neon-f.patch [new file with mode: 0644]
recipes/xorg-lib/pixman/0004-ARM-Added-pixman_composite_add_8000_8000_asm_neon-f.patch [new file with mode: 0644]
recipes/xorg-lib/pixman/0005-ARM-Added-pixman_composite_over_8888_8888_asm_neon.patch [new file with mode: 0644]
recipes/xorg-lib/pixman/0006-ARM-Added-a-set-of-NEON-functions-not-fully-optimi.patch [new file with mode: 0644]
recipes/xorg-lib/pixman/0007-ARM-Enabled-new-NEON-optimizations.patch [new file with mode: 0644]
recipes/xorg-lib/pixman_0.15.18.bb [new file with mode: 0644]
recipes/xorg-lib/pixman_0.16.2.bb [new file with mode: 0644]
recipes/xorg-lib/pixman_git.bb

diff --git a/conf/checksums.ini b/conf/checksums.ini
index aa9515d..7cfe8a3 100644
@@ -20502,6 +20502,10 @@ sha256=45d491879791140dc1f20287e6489f32dc59ae37628038d991d9511abede3fc2
 md5=fe922698df46b21d7e19f28ded4ca100
 sha256=3438437c131c9847b34106225a728c11e522776ac454bb8740a9bc7aea409f22
 
+[http://cairographics.org/releases/pixman-0.16.2.tar.gz]
+md5=02bd3669f53c404c19980d5efb6f86fb
+sha256=05f78c2fd3a5f054c0d716e8ba1b67a0c04a7a7e642d6946828ec383b389d185
+
 [http://xorg.freedesktop.org/releases/individual/lib/pixman-0.9.4.tar.bz2]
 md5=c354ab5b0da10227226d3ef604254875
 sha256=261d239e9e3070a0a81e5b3eab92e6236c6295979bdae0a1ded05cbef93f67aa
diff --git a/recipes/xorg-lib/pixman/0001-ARM-Removal-of-unused-broken-NEON-code.patch b/recipes/xorg-lib/pixman/0001-ARM-Removal-of-unused-broken-NEON-code.patch
new file mode 100644
index 0000000..227b95e
--- /dev/null
@@ -0,0 +1,830 @@
+From 2761591638f8c56732398b1fc6cf4bc7ca5005fd Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 27 Jul 2009 01:21:26 +0300
+Subject: [PATCH 1/7] ARM: Removal of unused/broken NEON code
+
+---
+ pixman/pixman-arm-neon.c |  786 ----------------------------------------------
+ 1 files changed, 0 insertions(+), 786 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index 0a29e50..9caef61 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -1901,710 +1901,6 @@ pixman_fill_neon (uint32_t *bits,
+ #endif
+ }
+-/* TODO: is there a more generic way of doing this being introduced? */
+-#define NEON_SCANLINE_BUFFER_PIXELS (1024)
+-
+-static inline void
+-neon_quadword_copy (void *   dst,
+-                  void *   src,
+-                  uint32_t count,         /* of quadwords */
+-                  uint32_t trailer_count  /* of bytes */)
+-{
+-    uint8_t *t_dst = dst, *t_src = src;
+-
+-    /* Uses aligned multi-register loads to maximise read bandwidth
+-     * on uncached memory such as framebuffers
+-     * The accesses do not have the aligned qualifiers, so that the copy
+-     * may convert between aligned-uncached and unaligned-cached memory.
+-     * It is assumed that the CPU can infer alignedness from the address.
+-     */
+-
+-#ifdef USE_GCC_INLINE_ASM
+-
+-    asm volatile (
+-        "     cmp       %[count], #8                          \n"
+-        "     blt 1f    @ skip oversized fragments            \n"
+-        "0: @ start with eight quadwords at a time            \n"
+-        "     sub       %[count], %[count], #8                \n"
+-        "     vld1.8    {d16, d17, d18, d19}, [%[src]]!               \n"
+-        "     vld1.8    {d20, d21, d22, d23}, [%[src]]!               \n"
+-        "     vld1.8    {d24, d25, d26, d27}, [%[src]]!               \n"
+-        "     vld1.8    {d28, d29, d30, d31}, [%[src]]!               \n"
+-        "     cmp       %[count], #8                          \n"
+-        "     vst1.8    {d16, d17, d18, d19}, [%[dst]]!               \n"
+-        "     vst1.8    {d20, d21, d22, d23}, [%[dst]]!               \n"
+-        "     vst1.8    {d24, d25, d26, d27}, [%[dst]]!               \n"
+-        "     vst1.8    {d28, d29, d30, d31}, [%[dst]]!               \n"
+-        "     bge 0b                                          \n"
+-        "1: @ four quadwords                                  \n"
+-        "     tst       %[count], #4                          \n"
+-        "     beq 2f    @ skip oversized fragment             \n"
+-        "     vld1.8    {d16, d17, d18, d19}, [%[src]]!               \n"
+-        "     vld1.8    {d20, d21, d22, d23}, [%[src]]!               \n"
+-        "     vst1.8    {d16, d17, d18, d19}, [%[dst]]!               \n"
+-        "     vst1.8    {d20, d21, d22, d23}, [%[dst]]!               \n"
+-        "2: @ two quadwords                                   \n"
+-        "     tst       %[count], #2                          \n"
+-        "     beq 3f    @ skip oversized fragment             \n"
+-        "     vld1.8    {d16, d17, d18, d19}, [%[src]]!               \n"
+-        "     vst1.8    {d16, d17, d18, d19}, [%[dst]]!               \n"
+-        "3: @ one quadword                                    \n"
+-        "     tst       %[count], #1                          \n"
+-        "     beq 4f    @ skip oversized fragment             \n"
+-        "     vld1.8    {d16, d17}, [%[src]]!                 \n"
+-        "     vst1.8    {d16, d17}, [%[dst]]!                 \n"
+-        "4: @ end                                             \n"
+-
+-        /* Clobbered input registers marked as input/outputs */
+-      : [dst] "+r" (t_dst), [src] "+r" (t_src), [count] "+r" (count)
+-
+-        /* No unclobbered inputs */
+-      :
+-
+-        /* Clobbered vector registers */
+-      : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25",
+-        "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory");
+-
+-#else
+-
+-    while (count >= 8)
+-    {
+-      uint8x16x4_t t1 = vld4q_u8 (t_src);
+-      uint8x16x4_t t2 = vld4q_u8 (t_src + sizeof(uint8x16x4_t));
+-      
+-      t_src += sizeof(uint8x16x4_t) * 2;
+-      vst4q_u8 (t_dst, t1);
+-      vst4q_u8 (t_dst + sizeof(uint8x16x4_t), t2);
+-      t_dst += sizeof(uint8x16x4_t) * 2;
+-      count -= 8;
+-    }
+-
+-    if (count & 4)
+-    {
+-      uint8x16x4_t t1 = vld4q_u8 (t_src);
+-      
+-      t_src += sizeof(uint8x16x4_t);
+-      vst4q_u8 (t_dst, t1);
+-      t_dst += sizeof(uint8x16x4_t);
+-    }
+-
+-    if (count & 2)
+-    {
+-      uint8x8x4_t t1 = vld4_u8 (t_src);
+-      
+-      t_src += sizeof(uint8x8x4_t);
+-      vst4_u8 (t_dst, t1);
+-      t_dst += sizeof(uint8x8x4_t);
+-    }
+-
+-    if (count & 1)
+-    {
+-      uint8x16_t t1 = vld1q_u8 (t_src);
+-      
+-      t_src += sizeof(uint8x16_t);
+-      vst1q_u8 (t_dst, t1);
+-      t_dst += sizeof(uint8x16_t);
+-    }
+-
+-#endif  /* !USE_GCC_INLINE_ASM */
+-
+-    if (trailer_count)
+-    {
+-      if (trailer_count & 8)
+-      {
+-          uint8x8_t t1 = vld1_u8 (t_src);
+-          
+-          t_src += sizeof(uint8x8_t);
+-          vst1_u8 (t_dst, t1);
+-          t_dst += sizeof(uint8x8_t);
+-      }
+-
+-      if (trailer_count & 4)
+-      {
+-          *((uint32_t*) t_dst) = *((uint32_t*) t_src);
+-          
+-          t_dst += 4;
+-          t_src += 4;
+-      }
+-
+-      if (trailer_count & 2)
+-      {
+-          *((uint16_t*) t_dst) = *((uint16_t*) t_src);
+-          
+-          t_dst += 2;
+-          t_src += 2;
+-      }
+-
+-      if (trailer_count & 1)
+-      {
+-          *t_dst++ = *t_src++;
+-      }
+-    }
+-}
+-
+-static inline void
+-solid_over_565_8_pix_neon (uint32_t  glyph_colour,
+-                           uint16_t *dest,
+-                           uint8_t * in_mask,
+-                           uint32_t  dest_stride,    /* bytes, not elements */
+-                           uint32_t  mask_stride,
+-                           uint32_t  count           /* 8-pixel groups */)
+-{
+-    /* Inner loop of glyph blitter (solid colour, alpha mask) */
+-
+-#ifdef USE_GCC_INLINE_ASM
+-
+-    asm volatile (
+-        "     vld4.8 {d20[], d21[], d22[], d23[]}, [%[glyph_colour]]  @ splat solid colour components \n"
+-        "0:   @ loop                                                                                                                                                          \n"
+-        "     vld1.16   {d0, d1}, [%[dest]]         @ load first pixels from framebuffer                      \n"
+-        "     vld1.8    {d17}, [%[in_mask]]         @ load alpha mask of glyph                                                \n"
+-        "     vmull.u8  q9, d17, d23               @ apply glyph colour alpha to mask                         \n"
+-        "     vshrn.u16 d17, q9, #8                @ reformat it to match original mask                       \n"
+-        "     vmvn      d18, d17                   @ we need the inverse mask for the background      \n"
+-        "     vsli.u16  q3, q0, #5                 @ duplicate framebuffer blue bits                          \n"
+-        "     vshrn.u16 d2, q0, #8                 @ unpack red from framebuffer pixels                       \n"
+-        "     vshrn.u16 d4, q0, #3                 @ unpack green                                                                     \n"
+-        "     vsri.u8   d2, d2, #5                 @ duplicate red bits (extend 5 to 8)                       \n"
+-        "     vshrn.u16 d6, q3, #2                 @ unpack extended blue (truncate 10 to 8)          \n"
+-        "     vsri.u8   d4, d4, #6                 @ duplicate green bits (extend 6 to 8)                     \n"
+-        "     vmull.u8  q1, d2, d18                @ apply inverse mask to background red...          \n"
+-        "     vmull.u8  q2, d4, d18                @ ...green...                                                                      \n"
+-        "     vmull.u8  q3, d6, d18                @ ...blue                                                                          \n"
+-        "     subs      %[count], %[count], #1     @ decrement/test loop counter                                      \n"
+-        "     vmlal.u8  q1, d17, d22               @ add masked foreground red...                                     \n"
+-        "     vmlal.u8  q2, d17, d21               @ ...green...                                                                      \n"
+-        "     vmlal.u8  q3, d17, d20               @ ...blue                                                                          \n"
+-        "     add %[in_mask], %[in_mask], %[mask_stride] @ advance mask pointer, while we wait                \n"
+-        "     vsri.16   q1, q2, #5                 @ pack green behind red                                            \n"
+-        "     vsri.16   q1, q3, #11                @ pack blue into pixels                                            \n"
+-        "     vst1.16   {d2, d3}, [%[dest]]         @ store composited pixels                                         \n"
+-        "     add %[dest], %[dest], %[dest_stride]  @ advance framebuffer pointer                                     \n"
+-        "     bne 0b                               @ next please                                                                      \n"
+-
+-      /* Clobbered registers marked as input/outputs */
+-      : [dest] "+r" (dest), [in_mask] "+r" (in_mask), [count] "+r" (count)
+-        
+-        /* Inputs */
+-      : [dest_stride] "r" (dest_stride), [mask_stride] "r" (mask_stride), [glyph_colour] "r" (&glyph_colour)
+-
+-        /* Clobbers, including the inputs we modify, and potentially lots of memory */
+-      : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d19",
+-        "d20", "d21", "d22", "d23", "d24", "d25", "cc", "memory"
+-        );
+-
+-#else
+-
+-    uint8x8x4_t solid_colour = vld4_dup_u8 ((uint8_t*) &glyph_colour);
+-
+-    while (count--)
+-    {
+-      uint16x8_t pixels = vld1q_u16 (dest);
+-      uint8x8_t mask = vshrn_n_u16 (vmull_u8 (solid_colour.val[3], vld1_u8 (in_mask)), 8);
+-      uint8x8_t mask_image = vmvn_u8 (mask);
+-
+-      uint8x8_t t_red   = vshrn_n_u16 (pixels, 8);
+-      uint8x8_t t_green = vshrn_n_u16 (pixels, 3);
+-      uint8x8_t t_blue  = vshrn_n_u16 (vsli_n_u8 (pixels, pixels, 5), 2);
+-
+-      uint16x8_t s_red   = vmull_u8 (vsri_n_u8 (t_red, t_red, 5), mask_image);
+-      uint16x8_t s_green = vmull_u8 (vsri_n_u8 (t_green, t_green, 6), mask_image);
+-      uint16x8_t s_blue  = vmull_u8 (t_blue, mask_image);
+-
+-      s_red   = vmlal (s_red, mask, solid_colour.val[2]);
+-      s_green = vmlal (s_green, mask, solid_colour.val[1]);
+-      s_blue  = vmlal (s_blue, mask, solid_colour.val[0]);
+-
+-      pixels = vsri_n_u16 (s_red, s_green, 5);
+-      pixels = vsri_n_u16 (pixels, s_blue, 11);
+-      vst1q_u16 (dest, pixels);
+-
+-      dest += dest_stride;
+-      mask += mask_stride;
+-    }
+-
+-#endif
+-}
+-
+-#if 0 /* this is broken currently */
+-static void
+-neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
+-                              pixman_op_t               op,
+-                              pixman_image_t *          src_image,
+-                              pixman_image_t *          mask_image,
+-                              pixman_image_t *          dst_image,
+-                              int32_t                   src_x,
+-                              int32_t                   src_y,
+-                              int32_t                   mask_x,
+-                              int32_t                   mask_y,
+-                              int32_t                   dest_x,
+-                              int32_t                   dest_y,
+-                              int32_t                   width,
+-                              int32_t                   height)
+-{
+-    uint32_t  src, srca;
+-    uint16_t *dst_line, *aligned_line;
+-    uint8_t  *mask_line;
+-    uint32_t  dst_stride, mask_stride;
+-    uint32_t  kernel_count, copy_count, copy_tail;
+-    uint8_t   kernel_offset, copy_offset;
+-
+-    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+-
+-    /* bail out if fully transparent or degenerate */
+-    srca = src >> 24;
+-    if (src == 0)
+-      return;
+-
+-    if (width == 0 || height == 0)
+-      return;
+-
+-    if (width > NEON_SCANLINE_BUFFER_PIXELS)
+-    {
+-      /* split the blit, so we can use a fixed-size scanline buffer
+-       * TODO: there must be a more elegant way of doing this.
+-       */
+-      int x;
+-      for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+-      {
+-          neon_composite_over_n_8_0565 (
+-              impl, op,
+-              src_image, mask_image, dst_image,
+-              src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+-              (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+-      }
+-
+-      return;
+-    }
+-    
+-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+-
+-    /* keep within minimum number of aligned quadwords on width
+-     * while also keeping the minimum number of columns to process
+-     */
+-    {
+-      unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+-      unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+-      unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+-      /* the fast copy should be quadword aligned */
+-      copy_offset = dst_line - ((uint16_t*) aligned_left);
+-      aligned_line = dst_line - copy_offset;
+-      copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+-      copy_tail = 0;
+-
+-      if (aligned_right - aligned_left > ceiling_length)
+-      {
+-          /* unaligned routine is tightest */
+-          kernel_count = (uint32_t) (ceiling_length >> 4);
+-          kernel_offset = copy_offset;
+-      }
+-      else
+-      {
+-          /* aligned routine is equally tight, so it is safer to align */
+-          kernel_count = copy_count;
+-          kernel_offset = 0;
+-      }
+-
+-      /* We should avoid reading beyond scanline ends for safety */
+-      if (aligned_line < (dst_line - dest_x) ||
+-          (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+-      {
+-          /* switch to precise read */
+-          copy_offset = kernel_offset = 0;
+-          aligned_line = dst_line;
+-          kernel_count = (uint32_t) (ceiling_length >> 4);
+-          copy_count = (width * sizeof(*dst_line)) >> 4;
+-          copy_tail = (width * sizeof(*dst_line)) & 0xF;
+-      }
+-    }
+-
+-    {
+-      uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8];         /* deliberately not initialised */
+-      uint8_t glyph_line[NEON_SCANLINE_BUFFER_PIXELS + 8];
+-      int y = height;
+-
+-      /* row-major order */
+-      /* left edge, middle block, right edge */
+-      for ( ; y--; mask_line += mask_stride, aligned_line += dst_stride, dst_line += dst_stride)
+-      {
+-          /* We don't want to overrun the edges of the glyph,
+-           * so realign the edge data into known buffers
+-           */
+-          neon_quadword_copy (glyph_line + copy_offset, mask_line, width >> 4, width & 0xF);
+-
+-          /* Uncached framebuffer access is really, really slow
+-           * if we do it piecemeal. It should be much faster if we
+-           * grab it all at once. One scanline should easily fit in
+-           * L1 cache, so this should not waste RAM bandwidth.
+-           */
+-          neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+-          /* Apply the actual filter */
+-          solid_over_565_8_pix_neon (
+-              src, scan_line + kernel_offset,
+-              glyph_line + kernel_offset, 8 * sizeof(*dst_line),
+-              8, kernel_count);
+-
+-          /* Copy the modified scanline back */
+-          neon_quadword_copy (dst_line, scan_line + copy_offset,
+-                              width >> 3, (width & 7) * 2);
+-      }
+-    }
+-}
+-#endif
+-
+-#ifdef USE_GCC_INLINE_ASM
+-
+-static inline void
+-plain_over_565_8_pix_neon (uint32_t  colour,
+-                         uint16_t *dest,
+-                         uint32_t  dest_stride,     /* bytes, not elements */
+-                         uint32_t  count            /* 8-pixel groups */)
+-{
+-    /* Inner loop for plain translucent rects
+-     * (solid colour without alpha mask)
+-     */
+-    asm volatile (
+-        "     vld4.8   {d20[], d21[], d22[], d23[]}, [%[colour]]  @ solid colour load/splat \n"
+-        "     vmull.u8  q12, d23, d22              @ premultiply alpha red   \n"
+-        "     vmull.u8  q13, d23, d21              @ premultiply alpha green \n"
+-        "     vmull.u8  q14, d23, d20              @ premultiply alpha blue  \n"
+-        "     vmvn      d18, d23                   @ inverse alpha for background \n"
+-        "0:   @ loop\n"
+-        "     vld1.16   {d0, d1}, [%[dest]]         @ load first pixels from framebuffer      \n"
+-        "     vshrn.u16 d2, q0, #8                 @ unpack red from framebuffer pixels       \n"
+-        "     vshrn.u16 d4, q0, #3                 @ unpack green                             \n"
+-        "     vsli.u16  q3, q0, #5                 @ duplicate framebuffer blue bits          \n"
+-        "     vsri.u8   d2, d2, #5                 @ duplicate red bits (extend 5 to 8)       \n"
+-        "     vsri.u8   d4, d4, #6                 @ duplicate green bits (extend 6 to 8)     \n"
+-        "     vshrn.u16 d6, q3, #2                 @ unpack extended blue (truncate 10 to 8)  \n"
+-        "     vmov      q0, q12                    @ retrieve foreground red   \n"
+-        "     vmlal.u8  q0, d2, d18                @ blend red - my kingdom for a four-operand MLA \n"
+-        "     vmov      q1, q13                    @ retrieve foreground green \n"
+-        "     vmlal.u8  q1, d4, d18                @ blend green               \n"
+-        "     vmov      q2, q14                    @ retrieve foreground blue  \n"
+-        "     vmlal.u8  q2, d6, d18                @ blend blue                \n"
+-        "     subs      %[count], %[count], #1     @ decrement/test loop counter              \n"
+-        "     vsri.16   q0, q1, #5                 @ pack green behind red                    \n"
+-        "     vsri.16   q0, q2, #11                @ pack blue into pixels                    \n"
+-        "     vst1.16   {d0, d1}, [%[dest]]         @ store composited pixels                 \n"
+-        "     add %[dest], %[dest], %[dest_stride]  @ advance framebuffer pointer             \n"
+-        "     bne 0b                               @ next please                              \n"
+-
+-        /* Clobbered registers marked as input/outputs */
+-      : [dest] "+r" (dest), [count] "+r" (count)
+-
+-        /* Inputs */
+-      : [dest_stride] "r" (dest_stride), [colour] "r" (&colour)
+-
+-        /* Clobbers, including the inputs we modify, and
+-         * potentially lots of memory
+-         */
+-      : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d18", "d19",
+-        "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
+-        "cc", "memory"
+-        );
+-}
+-
+-static void
+-neon_composite_over_n_0565 (pixman_implementation_t * impl,
+-                            pixman_op_t               op,
+-                            pixman_image_t *          src_image,
+-                            pixman_image_t *          mask_image,
+-                            pixman_image_t *          dst_image,
+-                            int32_t                   src_x,
+-                            int32_t                   src_y,
+-                            int32_t                   mask_x,
+-                            int32_t                   mask_y,
+-                            int32_t                   dest_x,
+-                            int32_t                   dest_y,
+-                            int32_t                   width,
+-                            int32_t                   height)
+-{
+-    uint32_t src, srca;
+-    uint16_t    *dst_line, *aligned_line;
+-    uint32_t dst_stride;
+-    uint32_t kernel_count, copy_count, copy_tail;
+-    uint8_t kernel_offset, copy_offset;
+-
+-    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+-
+-    /* bail out if fully transparent */
+-    srca = src >> 24;
+-    if (src == 0)
+-      return;
+-    
+-    if (width == 0 || height == 0)
+-      return;
+-
+-    if (width > NEON_SCANLINE_BUFFER_PIXELS)
+-    {
+-      /* split the blit, so we can use a fixed-size scanline buffer *
+-       * TODO: there must be a more elegant way of doing this.
+-       */
+-      int x;
+-      
+-      for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+-      {
+-          neon_composite_over_n_0565 (
+-              impl, op,
+-              src_image, mask_image, dst_image,
+-              src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+-              (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+-      }
+-      return;
+-    }
+-
+-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+-
+-    /* keep within minimum number of aligned quadwords on width
+-     * while also keeping the minimum number of columns to process
+-     */
+-    {
+-      unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+-      unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+-      unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+-      /* the fast copy should be quadword aligned */
+-      copy_offset = dst_line - ((uint16_t*) aligned_left);
+-      aligned_line = dst_line - copy_offset;
+-      copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+-      copy_tail = 0;
+-
+-      if (aligned_right - aligned_left > ceiling_length)
+-      {
+-          /* unaligned routine is tightest */
+-          kernel_count = (uint32_t) (ceiling_length >> 4);
+-          kernel_offset = copy_offset;
+-      }
+-      else
+-      {
+-          /* aligned routine is equally tight, so it is safer to align */
+-          kernel_count = copy_count;
+-          kernel_offset = 0;
+-      }
+-
+-      /* We should avoid reading beyond scanline ends for safety */
+-      if (aligned_line < (dst_line - dest_x) ||
+-          (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+-      {
+-          /* switch to precise read */
+-          copy_offset = kernel_offset = 0;
+-          aligned_line = dst_line;
+-          kernel_count = (uint32_t) (ceiling_length >> 4);
+-          copy_count = (width * sizeof(*dst_line)) >> 4;
+-          copy_tail = (width * sizeof(*dst_line)) & 0xF;
+-      }
+-    }
+-
+-    {
+-      uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8];  /* deliberately not initialised */
+-
+-      /* row-major order */
+-      /* left edge, middle block, right edge */
+-      for ( ; height--; aligned_line += dst_stride, dst_line += dst_stride)
+-      {
+-          /* Uncached framebuffer access is really, really slow if we do it piecemeal.
+-           * It should be much faster if we grab it all at once.
+-           * One scanline should easily fit in L1 cache, so this should
+-           * not waste RAM bandwidth.
+-           */
+-          neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+-          /* Apply the actual filter */
+-          plain_over_565_8_pix_neon (
+-              src, scan_line + kernel_offset, 8 * sizeof(*dst_line), kernel_count);
+-
+-          /* Copy the modified scanline back */
+-          neon_quadword_copy (
+-              dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
+-      }
+-    }
+-}
+-
+-static inline void
+-ARGB8_over_565_8_pix_neon (uint32_t *src,
+-                           uint16_t *dest,
+-                           uint32_t  src_stride,     /* bytes, not elements */
+-                           uint32_t  count           /* 8-pixel groups */)
+-{
+-    asm volatile (
+-        "0:   @ loop\n"
+-        "     pld   [%[src], %[src_stride]]         @ preload from next scanline      \n"
+-        "     vld1.16   {d0, d1}, [%[dest]]         @ load pixels from framebuffer    \n"
+-        "     vld4.8   {d20, d21, d22, d23},[%[src]]! @ load source image pixels              \n"
+-        "     vsli.u16  q3, q0, #5                 @ duplicate framebuffer blue bits          \n"
+-        "     vshrn.u16 d2, q0, #8                 @ unpack red from framebuffer pixels       \n"
+-        "     vshrn.u16 d4, q0, #3                 @ unpack green                             \n"
+-        "     vmvn      d18, d23                   @ we need the inverse alpha for the background     \n"
+-        "     vsri.u8   d2, d2, #5                 @ duplicate red bits (extend 5 to 8)       \n"
+-        "     vshrn.u16 d6, q3, #2                 @ unpack extended blue (truncate 10 to 8)  \n"
+-        "     vsri.u8   d4, d4, #6                 @ duplicate green bits (extend 6 to 8)     \n"
+-        "     vmull.u8  q1, d2, d18                @ apply inverse alpha to background red... \n"
+-        "     vmull.u8  q2, d4, d18                @ ...green...                              \n"
+-        "     vmull.u8  q3, d6, d18                @ ...blue                                  \n"
+-        "     subs      %[count], %[count], #1     @ decrement/test loop counter              \n"
+-        "     vmlal.u8  q1, d23, d22               @ add blended foreground red...            \n"
+-        "     vmlal.u8  q2, d23, d21               @ ...green...                              \n"
+-        "     vmlal.u8  q3, d23, d20               @ ...blue                                  \n"
+-        "     vsri.16   q1, q2, #5                 @ pack green behind red                    \n"
+-        "     vsri.16   q1, q3, #11                @ pack blue into pixels                    \n"
+-        "     vst1.16   {d2, d3}, [%[dest]]!        @ store composited pixels                 \n"
+-        "     bne 0b                               @ next please                              \n"
+-
+-        /* Clobbered registers marked as input/outputs */
+-      : [dest] "+r" (dest), [src] "+r" (src), [count] "+r" (count)
+-
+-        /* Inputs */
+-      : [src_stride] "r" (src_stride)
+-
+-        /* Clobbers, including the inputs we modify, and potentially lots of memory */
+-      : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d17", "d18", "d20",
+-        "d21", "d22", "d23", "cc", "memory"
+-        );
+-}
+-
+-static void
+-neon_composite_over_8888_0565 (pixman_implementation_t * impl,
+-                               pixman_op_t               op,
+-                               pixman_image_t *          src_image,
+-                               pixman_image_t *          mask_image,
+-                               pixman_image_t *          dst_image,
+-                               int32_t                   src_x,
+-                               int32_t                   src_y,
+-                               int32_t                   mask_x,
+-                               int32_t                   mask_y,
+-                               int32_t                   dest_x,
+-                               int32_t                   dest_y,
+-                               int32_t                   width,
+-                               int32_t                   height)
+-{
+-    uint32_t    *src_line;
+-    uint16_t    *dst_line, *aligned_line;
+-    uint32_t dst_stride, src_stride;
+-    uint32_t kernel_count, copy_count, copy_tail;
+-    uint8_t kernel_offset, copy_offset;
+-
+-    /* we assume mask is opaque 
+-     * so the only alpha to deal with is embedded in src
+-     */
+-    if (width > NEON_SCANLINE_BUFFER_PIXELS)
+-    {
+-      /* split the blit, so we can use a fixed-size scanline buffer */
+-      int x;
+-      for (x = 0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS)
+-      {
+-          neon_composite_over_8888_0565 (
+-              impl, op,
+-              src_image, mask_image, dst_image,
+-              src_x + x, src_y, mask_x + x, mask_y, dest_x + x, dest_y,
+-              (x + NEON_SCANLINE_BUFFER_PIXELS > width) ? width - x : NEON_SCANLINE_BUFFER_PIXELS, height);
+-      }
+-      return;
+-    }
+-
+-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+-
+-    /* keep within minimum number of aligned quadwords on width
+-     * while also keeping the minimum number of columns to process
+-     */
+-    {
+-      unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+-      unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+-      unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
+-
+-      /* the fast copy should be quadword aligned */
+-      copy_offset = dst_line - ((uint16_t*) aligned_left);
+-      aligned_line = dst_line - copy_offset;
+-      copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+-      copy_tail = 0;
+-
+-      if (aligned_right - aligned_left > ceiling_length)
+-      {
+-          /* unaligned routine is tightest */
+-          kernel_count = (uint32_t) (ceiling_length >> 4);
+-          kernel_offset = copy_offset;
+-      }
+-      else
+-      {
+-          /* aligned routine is equally tight, so it is safer to align */
+-          kernel_count = copy_count;
+-          kernel_offset = 0;
+-      }
+-
+-      /* We should avoid reading beyond scanline ends for safety */
+-      if (aligned_line < (dst_line - dest_x) ||
+-          (aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - dest_x) + dst_image->bits.width))
+-      {
+-          /* switch to precise read */
+-          copy_offset = kernel_offset = 0;
+-          aligned_line = dst_line;
+-          kernel_count = (uint32_t) (ceiling_length >> 4);
+-          copy_count = (width * sizeof(*dst_line)) >> 4;
+-          copy_tail = (width * sizeof(*dst_line)) & 0xF;
+-      }
+-    }
+-
+-    /* Preload the first input scanline */
+-    {
+-      uint8_t *src_ptr = (uint8_t*) src_line;
+-      uint32_t count = (width + 15) / 16;
+-
+-#ifdef USE_GCC_INLINE_ASM
+-      asm volatile (
+-          "0: @ loop                                          \n"
+-          "   subs    %[count], %[count], #1                  \n"
+-          "   pld     [%[src]]                                \n"
+-          "   add     %[src], %[src], #64                     \n"
+-          "   bgt 0b                                          \n"
+-
+-          /* Clobbered input registers marked as input/outputs */
+-          : [src] "+r" (src_ptr), [count] "+r" (count)
+-          :     /* no unclobbered inputs */
+-          : "cc"
+-          );
+-#else
+-      do
+-      {
+-          __pld (src_ptr);
+-          src_ptr += 64;
+-      }
+-      while (--count);
+-#endif
+-    }
+-
+-    {
+-      uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; /* deliberately not initialised */
+-
+-      /* row-major order */
+-      /* left edge, middle block, right edge */
+-      for ( ; height--; src_line += src_stride, aligned_line += dst_stride)
+-      {
+-          /* Uncached framebuffer access is really, really slow if we do
+-           * it piecemeal. It should be much faster if we grab it all at
+-           * once. One scanline should easily fit in L1 cache, so this
+-           * should not waste RAM bandwidth.
+-           */
+-          neon_quadword_copy (scan_line, aligned_line, copy_count, copy_tail);
+-
+-          /* Apply the actual filter */
+-          ARGB8_over_565_8_pix_neon (
+-              src_line, scan_line + kernel_offset,
+-              src_stride * sizeof(*src_line), kernel_count);
+-
+-          /* Copy the modified scanline back */
+-          neon_quadword_copy (dst_line,
+-                              scan_line + copy_offset,
+-                              width >> 3, (width & 7) * 2);
+-      }
+-    }
+-}
+-
+-#endif  /* USE_GCC_INLINE_ASM */
+-
+ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ {
+     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
+@@ -2618,12 +1914,6 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ #ifdef USE_GCC_INLINE_ASM
+     { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_16_16,        0 },
+     { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_16_16,        0 },
+-#if 0 /* this code has some bugs */
+-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_n_0565,      0 },
+-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_n_0565,      0 },
+-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565,   0 },
+-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565,   0 },
+-#endif
+ #endif
+     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888,   0 },
+     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,   0 },
+@@ -2674,79 +1964,6 @@ arm_neon_composite (pixman_implementation_t *imp,
+ }
+ static pixman_bool_t
+-pixman_blt_neon (void *src_bits,
+-                 void *dst_bits,
+-                 int   src_stride,
+-                 int   dst_stride,
+-                 int   src_bpp,
+-                 int   dst_bpp,
+-                 int   src_x,
+-                 int   src_y,
+-                 int   dst_x,
+-                 int   dst_y,
+-                 int   width,
+-                 int   height)
+-{
+-    if (!width || !height)
+-      return TRUE;
+-
+-    /* accelerate only straight copies involving complete bytes */
+-    if (src_bpp != dst_bpp || (src_bpp & 7))
+-      return FALSE;
+-
+-    {
+-      uint32_t bytes_per_pixel = src_bpp >> 3;
+-      uint32_t byte_width = width * bytes_per_pixel;
+-      /* parameter is in words for some reason */
+-      int32_t src_stride_bytes = src_stride * 4;
+-      int32_t dst_stride_bytes = dst_stride * 4;
+-      uint8_t *src_bytes = ((uint8_t*) src_bits) +
+-          src_y * src_stride_bytes + src_x * bytes_per_pixel;
+-      uint8_t *dst_bytes = ((uint8_t*) dst_bits) +
+-          dst_y * dst_stride_bytes + dst_x * bytes_per_pixel;
+-      uint32_t quadword_count = byte_width / 16;
+-      uint32_t offset         = byte_width % 16;
+-
+-      while (height--)
+-      {
+-          neon_quadword_copy (dst_bytes, src_bytes, quadword_count, offset);
+-          src_bytes += src_stride_bytes;
+-          dst_bytes += dst_stride_bytes;
+-      }
+-    }
+-
+-    return TRUE;
+-}
+-
+-static pixman_bool_t
+-arm_neon_blt (pixman_implementation_t *imp,
+-              uint32_t *               src_bits,
+-              uint32_t *               dst_bits,
+-              int                      src_stride,
+-              int                      dst_stride,
+-              int                      src_bpp,
+-              int                      dst_bpp,
+-              int                      src_x,
+-              int                      src_y,
+-              int                      dst_x,
+-              int                      dst_y,
+-              int                      width,
+-              int                      height)
+-{
+-    if (pixman_blt_neon (
+-            src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+-            src_x, src_y, dst_x, dst_y, width, height))
+-    {
+-      return TRUE;
+-    }
+-
+-    return _pixman_implementation_blt (
+-               imp->delegate,
+-               src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+-               src_x, src_y, dst_x, dst_y, width, height);
+-}
+-
+-static pixman_bool_t
+ arm_neon_fill (pixman_implementation_t *imp,
+                uint32_t *               bits,
+                int                      stride,
+@@ -2771,9 +1988,6 @@ _pixman_implementation_create_arm_neon (void)
+     pixman_implementation_t *imp = _pixman_implementation_create (general);
+     imp->composite = arm_neon_composite;
+-#if 0 /* this code has some bugs */
+-    imp->blt = arm_neon_blt;
+-#endif
+     imp->fill = arm_neon_fill;
+     return imp;
+-- 
+1.6.2.4
+
diff --git a/recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch b/recipes/xorg-lib/pixman/0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch
new file mode 100644
index 0000000..af0a8aa
--- /dev/null
@@ -0,0 +1,1061 @@
+From d9d9173581331a3bf7e5d123db32025588b7f044 Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Sat, 10 Oct 2009 00:20:51 +0300
+Subject: [PATCH 2/7] ARM: Introduction of the new framework for NEON fast path optimizations
+
+The GNU assembler and its macro preprocessor are now used to generate
+NEON-optimized functions from a common template. This automatically
+takes care of nuisances like ensuring optimal alignment, dealing with
+leading/trailing pixels, doing prefetch, etc.
+
+As the first use of this framework, this commit also includes an
+implementation of the pixman_composite_over_8888_0565_asm_neon function.
+---
+ configure.ac                 |    1 +
+ pixman/Makefile.am           |    4 +-
+ pixman/pixman-arm-neon-asm.S |  309 +++++++++++++++++++++
+ pixman/pixman-arm-neon-asm.h |  620 ++++++++++++++++++++++++++++++++++++++++++
+ pixman/pixman-arm-neon.c     |   55 ++++
+ 5 files changed, 988 insertions(+), 1 deletions(-)
+ create mode 100644 pixman/pixman-arm-neon-asm.S
+ create mode 100644 pixman/pixman-arm-neon-asm.h
+
+diff --git a/configure.ac b/configure.ac
+index c548174..522af15 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -71,6 +71,7 @@ AC_CANONICAL_HOST
+ test_CFLAGS=${CFLAGS+set} # We may override autoconf default CFLAGS.
+ AC_PROG_CC
++AM_PROG_AS
+ AC_PROG_LIBTOOL
+ AC_CHECK_FUNCS([getisax])
+ AC_C_BIGENDIAN
+diff --git a/pixman/Makefile.am b/pixman/Makefile.am
+index 6020623..2543c6a 100644
+--- a/pixman/Makefile.am
++++ b/pixman/Makefile.am
+@@ -109,7 +109,9 @@ endif
+ if USE_ARM_NEON
+ noinst_LTLIBRARIES += libpixman-arm-neon.la
+ libpixman_arm_neon_la_SOURCES = \
+-        pixman-arm-neon.c
++      pixman-arm-neon.c \
++      pixman-arm-neon-asm.S \
++      pixman-arm-neon-asm.h
+ libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS) $(ARM_NEON_CFLAGS)
+ libpixman_arm_neon_la_LIBADD = $(DEP_LIBS)
+ libpixman_1_la_LIBADD += libpixman-arm-neon.la
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+new file mode 100644
+index 0000000..843899f
+--- /dev/null
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -0,0 +1,309 @@
++/*
++ * Copyright © 2009 Nokia Corporation
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that
++ * copyright notice and this permission notice appear in supporting
++ * documentation, and that the name of Nokia Corporation not be used in
++ * advertising or publicity pertaining to distribution of the software without
++ * specific, written prior permission.  Nokia Corporation makes no
++ * representations about the suitability of this software for any purpose.
++ * It is provided "as is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
++ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
++ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
++ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
++ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
++ * SOFTWARE.
++ *
++ * Author:  Siarhei Siamashka (siarhei.siamashka@nokia.com)
++ */
++
++/* Prevent the stack from becoming executable for no reason... */
++#if defined(__linux__) && defined(__ELF__)
++.section .note.GNU-stack,"",%progbits
++#endif
++
++    .text
++    .fpu neon
++    .altmacro
++
++#include "pixman-arm-neon-asm.h"
++
++/*
++ * This file contains implementations of NEON optimized pixel processing
++ * functions. There is no full and detailed tutorial, but some functions
++ * (those which expose some new or interesting features) are extensively
++ * commented and can be used as examples.
++ *
++ * You may want to have a look at the following functions:
++ *  - pixman_composite_over_8888_0565_asm_neon
++ */
++
++/*
++ * Implementation of pixman_composite_over_8888_0565_asm_neon
++ *
++ * This function takes an a8r8g8b8 source buffer and an r5g6b5 destination
++ * buffer and performs the OVER compositing operation. The function
++ * fast_composite_over_8888_0565 from pixman-fast-path.c does the same in C
++ * and can be used as a reference.
++ *
++ * First we need to have some NEON assembly code which can do the actual
++ * operation on the pixels, and provide it to the template macro.
++ *
++ * The template macro quite conveniently takes care of all the necessary code
++ * for memory reads and writes (including the rather tricky handling of
++ * unaligned leading/trailing pixels), so we only need to deal with the data
++ * in NEON registers.
++ *
++ * The recommended NEON register allocation is, in general, the following:
++ * d0,  d1,  d2,  d3  - contain loaded source pixel data
++ * d4,  d5,  d6,  d7  - contain loaded destination pixels (if they are needed)
++ * d24, d25, d26, d27 - contain loaded mask pixel data (if a mask is used)
++ * d28, d29, d30, d31 - place for storing the result (destination pixels)
++ *
++ * As can be seen above, four 64-bit NEON registers are used for keeping
++ * intermediate pixel data and up to 8 pixels can be processed in one step
++ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
++ *
++ * This particular function uses the following allocation:
++ * d0,  d1,  d2,  d3  - contain loaded source pixel data
++ * d4,  d5            - contain loaded destination pixels (they are needed)
++ * d28, d29           - place for storing the result (destination pixels)
++ */
++
++/*
++ * Step one. We need some code that does the arithmetic on pixel data.
++ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
++ * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
++ * perform all the needed calculations and write the result to {d28, d29}.
++ * The rationale for having two macros and not just one will be explained
++ * later. In practice, any single monolithic function which does the work can
++ * be split into two parts at any arbitrary point without affecting correctness.
++ *
++ * There is one special trick here too. The common template macro already makes
++ * our life a bit easier by deinterleaving the R, G, B, A color components
++ * for 32bpp pixel formats. This means that instead of having 8 packed
++ * pixels in the {d0, d1, d2, d3} registers, we actually use the d0 register
++ * for the blue channel (a vector of eight 8-bit values), d1 for green,
++ * d2 for red and d3 for alpha. There is no magic: the conversion can be
++ * done either way with a few NEON instructions.
++ *
++ * Packed to planar conversion:
++ *  vuzp.8 d0, d1
++ *  vuzp.8 d2, d3
++ *  vuzp.8 d1, d3
++ *  vuzp.8 d0, d2
++ *
++ * Planar to packed conversion:
++ *  vzip.8 d0, d2
++ *  vzip.8 d1, d3
++ *  vzip.8 d2, d3
++ *  vzip.8 d0, d1
++ *
++ * Pixels can be loaded directly in planar format using the VLD4.8 NEON
++ * instruction. But it is 1 cycle slower than VLD1.32, and sometimes the
++ * code can be scheduled so that the four extra VUZP.8 after VLD1.32 are
++ * dual-issued with the other instructions, resulting in an overall
++ * 1 cycle improvement.
++ *
++ * But anyway, here is the code:
++ */
++.macro pixman_composite_over_8888_0565_process_pixblock_head
++    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
++       and put data into d6 - red, d7 - green, d30 - blue */
++    vshrn.u16   d6, q2, #8
++    vshrn.u16   d7, q2, #3
++    vsli.u16    q2, q2, #5
++    vsri.u8     d6, d6, #5
++    vmvn.8      d3, d3      /* invert source alpha */
++    vsri.u8     d7, d7, #6
++    vshrn.u16   d30, q2, #2
++    /* now do alpha blending, storing results in 8-bit planar format
++       into d16 - red, d19 - green, d18 - blue */
++    vmull.u8    q10, d3, d6
++    vmull.u8    q11, d3, d7
++    vmull.u8    q12, d3, d30
++    vrshr.u16   q13, q10, #8
++    vrshr.u16   q3, q11, #8
++    vrshr.u16   q15, q12, #8
++    vraddhn.u16 d20, q10, q13
++    vraddhn.u16 d23, q11, q3
++    vraddhn.u16 d22, q12, q15
++.endm
++
++.macro pixman_composite_over_8888_0565_process_pixblock_tail
++    /* ... continue alpha blending */
++    vqadd.u8    d16, d2, d20
++    vqadd.u8    q9, q0, q11
++    /* convert the result to r5g6b5 and store it into {d28, d29} */
++    vshll.u8    q14, d16, #8
++    vshll.u8    q8, d19, #8
++    vshll.u8    q9, d18, #8
++    vsri.u16    q14, q8, #5
++    vsri.u16    q14, q9, #11
++.endm
++
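++/*
++ * Before moving on, it may help to see the same computation in plain
++ * scalar C. Below is a rough model (only a sketch for illustration, not
++ * code from this patch) of what the head/tail pair above computes for a
++ * single pixel. Pixman uses premultiplied alpha, and the vrshr/vraddhn
++ * pair implements the usual rounding division by 255:
++ *
++ *   uint16_t over_8888_0565 (uint32_t src, uint16_t dst)
++ *   {
++ *       uint8_t ia = ~(src >> 24);                           vmvn
++ *       uint8_t r  = (dst >> 8 & 0xf8) | (dst >> 13);        vshrn, vsri
++ *       uint8_t g  = (dst >> 3 & 0xfc) | (dst >> 9 & 0x03);
++ *       uint8_t b  = (dst << 3 & 0xf8) | (dst >> 2 & 0x07);  vsli, vshrn
++ *       r = sat_add_u8 (src >> 16 & 0xff, div255 (r * ia));  vmull, vqadd
++ *       g = sat_add_u8 (src >> 8  & 0xff, div255 (g * ia));
++ *       b = sat_add_u8 (src       & 0xff, div255 (b * ia));
++ *       return (r >> 3 << 11) | (g >> 2 << 5) | (b >> 3);    vshll, vsri
++ *   }
++ *
++ * Here div255 (x) stands for (x + ((x + 128) >> 8) + 128) >> 8 and
++ * sat_add_u8 for an addition saturated to 255; both helpers are
++ * hypothetical names used only in this sketch. The NEON code above does
++ * exactly this, but for eight pixels at a time.
++ */
++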
++/*
++ * OK, now we have almost everything that we need. Using the above two
++ * macros, the work can already be done correctly. But now we want to
++ * optimize it a bit. The ARM Cortex-A8 is an in-order core, and benefits
++ * a great deal from good code scheduling and software pipelining.
++ *
++ * Let's construct the code which will run in the core main loop.
++ * Some pseudo-code of the main loop will look like this:
++ *   head
++ *   while (...) {
++ *     tail
++ *     head
++ *   }
++ *   tail
++ *
++ * It may look a bit weird, but this setup allows instruction latencies to
++ * be hidden better and utilizes the dual-issue capability more efficiently.
++ *
++ * So what we need now is a '*_tail_head' macro, which will be used
++ * in the core main loop. A trivial straightforward implementation
++ * of this macro would look like this:
++ *
++ *   pixman_composite_over_8888_0565_process_pixblock_tail
++ *   vst1.16     {d28, d29}, [DST_W, :128]!
++ *   vld1.16     {d4, d5}, [DST_R, :128]!
++ *   vld4.32     {d0, d1, d2, d3}, [SRC]!
++ *   pixman_composite_over_8888_0565_process_pixblock_head
++ *   cache_preload 8, 8
++ *
++ * Note that it now also contains VLD/VST instructions. We simply can't move
++ * from processing one block of pixels to the next with arithmetic alone.
++ * The previously processed data needs to be written to memory and new
++ * data needs to be fetched. Fortunately, this main loop does not deal
++ * with partial leading/trailing pixels and can load/store full blocks
++ * of pixels in bulk. Additionally, the destination buffer is 16-byte
++ * aligned here (which is good for performance).
++ *
++ * New things here are the DST_R, DST_W, SRC and MASK identifiers. These
++ * are aliases for the ARM registers which are used as pointers for
++ * accessing data. We maintain separate pointers for reading and writing
++ * the destination buffer.
++ *
++ * Another new thing is the 'cache_preload' macro. It prefetches data into
++ * the CPU cache to improve performance when dealing with images far
++ * larger than the cache. It takes one argument (actually two, but they
++ * need to be the same here) - the number of pixels in a block. Looking
++ * into 'pixman-arm-neon-asm.h' can provide some details about this macro.
++ * Moreover, if good performance is needed, the code from this macro needs
++ * to be copied into the '*_tail_head' macro and mixed with the rest of
++ * the code for optimal instruction scheduling. We actually do that below.
++ *
++ * Now, after all the explanations, here is the optimized code.
++ * Different instruction streams (originating from the '*_head', '*_tail'
++ * and 'cache_preload' macros) use different indentation levels for
++ * better readability. Taking the code from one of these indentation
++ * levels and ignoring a few VLD/VST instructions would in fact yield
++ * exactly the code of the '*_head', '*_tail' or 'cache_preload' macro!
++ */
++
++#if 1
++
++.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
++        vqadd.u8    d16, d2, d20
++    vld1.16     {d4, d5}, [DST_R, :128]!
++        vqadd.u8    q9, q0, q11
++    vshrn.u16   d6, q2, #8
++    vld4.8      {d0, d1, d2, d3}, [SRC]!
++    vshrn.u16   d7, q2, #3
++    vsli.u16    q2, q2, #5
++        vshll.u8    q14, d16, #8
++                                    add PF_X, PF_X, #8
++        vshll.u8    q8, d19, #8
++                                    tst PF_CTL, #0xF
++    vsri.u8     d6, d6, #5
++                                    addne PF_X, PF_X, #8
++    vmvn.8      d3, d3
++                                    subne PF_CTL, PF_CTL, #1
++    vsri.u8     d7, d7, #6
++    vshrn.u16   d30, q2, #2
++    vmull.u8    q10, d3, d6
++                                    pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++    vmull.u8    q11, d3, d7
++    vmull.u8    q12, d3, d30
++                                    pld [PF_DST, PF_X, lsl #dst_bpp_shift]
++        vsri.u16    q14, q8, #5
++                                    cmp PF_X, ORIG_W
++        vshll.u8    q9, d18, #8
++    vrshr.u16   q13, q10, #8
++                                    subge PF_X, PF_X, ORIG_W
++    vrshr.u16   q3, q11, #8
++    vrshr.u16   q15, q12, #8
++                                    subges PF_CTL, PF_CTL, #0x10
++        vsri.u16    q14, q9, #11
++                                    ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++    vraddhn.u16 d20, q10, q13
++    vraddhn.u16 d23, q11, q3
++                                    ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
++    vraddhn.u16 d22, q12, q15
++        vst1.16     {d28, d29}, [DST_W, :128]!
++.endm
++
++#else
++
++/* If we did not care much about the performance, we would just use this... */
++.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
++    pixman_composite_over_8888_0565_process_pixblock_tail
++    vst1.16     {d28, d29}, [DST_W, :128]!
++    vld1.16     {d4, d5}, [DST_R, :128]!
++    vld4.32     {d0, d1, d2, d3}, [SRC]!
++    pixman_composite_over_8888_0565_process_pixblock_head
++    cache_preload 8, 8
++.endm
++
++#endif
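++
++/*
++ * For reference, the loop structure which 'generate_composite_function'
++ * builds around these three macros looks roughly like this in C-style
++ * pseudo-code (a sketch only; the unaligned leading and trailing pixels
++ * are handled separately, with smaller partial blocks):
++ *
++ *   pixblock_head (block[0]);
++ *   for (i = 1; i < nblocks; i++)
++ *       pixblock_tail_head (block[i]);      tail of [i-1] + head of [i]
++ *   pixblock_tail (block[nblocks - 1]);
++ */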
++
++/*
++ * And now the final part. We use the 'generate_composite_function' macro
++ * to put all the stuff together. We specify the name of the function
++ * which we want to get, and the number of bits per pixel for the source,
++ * mask and destination (0 if unused, like the mask in this case). Next
++ * come some bit flags:
++ *   FLAG_DST_READWRITE      - indicates that the destination buffer is both
++ *                             read and written; for a write-only buffer we
++ *                             would use the FLAG_DST_WRITEONLY flag instead
++ *   FLAG_DEINTERLEAVE_32BPP - indicates that we prefer to work with planar
++ *                             data and separate color channels for the
++ *                             32bpp format.
++ * The next things are:
++ *  - the number of pixels processed per iteration (8 in this case, because
++ *    that's the maximum that can fit into four 64-bit NEON registers).
++ *  - the prefetch distance, measured in pixel blocks. In this case it is
++ *    5 blocks of 8 pixels, i.e. 40 pixels, or up to 160 bytes. The optimal
++ *    prefetch distance can be selected by running some benchmarks.
++ *
++ * After that we specify some macros. These are 'default_init' and
++ * 'default_cleanup' (custom init/cleanup macros are possible, e.g. to
++ * save/restore some extra NEON registers like d8-d15 or do anything
++ * else), followed by
++ * 'pixman_composite_over_8888_0565_process_pixblock_head',
++ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
++ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
++ * which we got implemented above.
++ *
++ * The last part is the NEON register allocation scheme.
++ */
++generate_composite_function \
++    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
++    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
++    8, /* number of pixels, processed in a single block */ \
++    5, /* prefetch distance */ \
++    default_init, \
++    default_cleanup, \
++    pixman_composite_over_8888_0565_process_pixblock_head, \
++    pixman_composite_over_8888_0565_process_pixblock_tail, \
++    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
++    28, /* dst_w_basereg */ \
++    4,  /* dst_r_basereg */ \
++    0,  /* src_basereg   */ \
++    24  /* mask_basereg  */
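++
++/*
++ * The C side (the pixman-arm-neon.c part of this patch) then only needs
++ * to declare the generated function and hook it into the fast path table.
++ * The declaration looks roughly like this (a sketch; the exact glue code
++ * is in the pixman-arm-neon.c hunk of this patch):
++ *
++ *   void
++ *   pixman_composite_over_8888_0565_asm_neon (int32_t   w,
++ *                                             int32_t   h,
++ *                                             uint16_t *dst,
++ *                                             int32_t   dst_stride,
++ *                                             uint32_t *src,
++ *                                             int32_t   src_stride);
++ */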
+diff --git a/pixman/pixman-arm-neon-asm.h b/pixman/pixman-arm-neon-asm.h
+new file mode 100644
+index 0000000..d276ab9
+--- /dev/null
++++ b/pixman/pixman-arm-neon-asm.h
+@@ -0,0 +1,620 @@
++/*
++ * Copyright © 2009 Nokia Corporation
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that
++ * copyright notice and this permission notice appear in supporting
++ * documentation, and that the name of Nokia Corporation not be used in
++ * advertising or publicity pertaining to distribution of the software without
++ * specific, written prior permission.  Nokia Corporation makes no
++ * representations about the suitability of this software for any purpose.
++ * It is provided "as is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
++ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
++ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
++ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
++ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
++ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
++ * SOFTWARE.
++ *
++ * Author:  Siarhei Siamashka (siarhei.siamashka@nokia.com)
++ */
++
++/*
++ * This file contains a macro ('generate_composite_function') which can
++ * construct 2D image processing functions, based on a common template.
++ * Any combination of source, destination and mask images with 8bpp,
++ * 16bpp or 32bpp color formats is supported.
++ *
++ * This macro takes care of:
++ *  - handling leading and trailing unaligned pixels
++ *  - doing most of the work related to L2 cache preload
++ *  - encouraging the use of software pipelining for better instruction
++ *    scheduling
++ *
++ * The user of this macro has to provide some configuration parameters
++ * (bit depths for the images, prefetch distance, etc.) and a set of
++ * macros which implement the basic code chunks responsible for pixel
++ * processing. See the 'pixman-arm-neon-asm.S' file for usage examples.
++ *
++ * TODO:
++ *  - support for 24bpp formats
++ *  - try overlapped pixel method (from Ian Rickards) when processing
++ *    exactly two blocks of pixels
++ */
++
++.set FLAG_DST_WRITEONLY,       0
++.set FLAG_DST_READWRITE,       1
++.set FLAG_DEINTERLEAVE_32BPP,  2
++
++/*
++ * It is possible to set this to 0 and improve performance a bit if unaligned
++ * memory accesses are supported
++ */
++#define RESPECT_STRICT_ALIGNMENT 1
++
++/*
++ * Definitions of supplementary pixld/pixst macros (for partial load/store of
++ * pixel data)
++ */
++
++.macro pixldst1 op, elem_size, reg1, mem_operand, abits
++.if abits > 0
++    op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
++.else
++    op&.&elem_size {d&reg1}, [&mem_operand&]!
++.endif
++.endm
++
++.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
++.if abits > 0
++    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
++.else
++    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
++.endif
++.endm
++
++.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
++.if abits > 0
++    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
++.else
++    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
++.endif
++.endm
++
++.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
++    op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
++.endm
++
++.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
++.if numbytes == 32
++    pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
++                              %(basereg+6), %(basereg+7), mem_operand, abits
++.elseif numbytes == 16
++    pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
++.elseif numbytes == 8
++    pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
++.elseif numbytes == 4
++    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
++        pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
++    .elseif elem_size == 16
++        pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
++        pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
++    .else
++        pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
++        pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
++        pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
++        pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
++    .endif
++.elseif numbytes == 2
++    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
++        pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
++    .else
++        pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
++        pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
++    .endif
++.elseif numbytes == 1
++    pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
++.else
++    .error "unsupported size: numbytes"
++.endif
++.endm
++
++.macro pixld numpix, bpp, basereg, mem_operand, abits=0
++.if bpp > 0
++.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++    pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
++                      %(basereg+6), %(basereg+7), mem_operand, abits
++.else
++    pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
++.endif
++.endif
++.endm
++
++.macro pixst numpix, bpp, basereg, mem_operand, abits=0
++.if bpp > 0
++.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++    pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
++                      %(basereg+6), %(basereg+7), mem_operand, abits
++.else
++    pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
++.endif
++.endif
++.endm
++
++.macro pixld_a numpix, bpp, basereg, mem_operand
++.if (bpp * numpix) <= 128
++    pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
++.else
++    pixld numpix, bpp, basereg, mem_operand, 128
++.endif
++.endm
++
++.macro pixst_a numpix, bpp, basereg, mem_operand
++.if (bpp * numpix) <= 128
++    pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
++.else
++    pixst numpix, bpp, basereg, mem_operand, 128
++.endif
++.endm
++
++.macro vuzp8 reg1, reg2
++    vuzp.8 d&reg1, d&reg2
++.endm
++
++.macro vzip8 reg1, reg2
++    vzip.8 d&reg1, d&reg2
++.endm
++
++/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
++.macro pixdeinterleave bpp, basereg
++.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++    vuzp8 %(basereg+0), %(basereg+1)
++    vuzp8 %(basereg+2), %(basereg+3)
++    vuzp8 %(basereg+1), %(basereg+3)
++    vuzp8 %(basereg+0), %(basereg+2)
++.endif
++.endm
++
++/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
++.macro pixinterleave bpp, basereg
++.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
++    vzip8 %(basereg+0), %(basereg+2)
++    vzip8 %(basereg+1), %(basereg+3)
++    vzip8 %(basereg+2), %(basereg+3)
++    vzip8 %(basereg+0), %(basereg+1)
++.endif
++.endm
++
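With FLAG_DEINTERLEAVE_32BPP in effect, each d register holds one color channel for all eight pixels. A rough C model of the planar layout produced by vld4.8 or by the vuzp8 sequence above, assuming the little-endian byte order used on ARM (illustrative only):

    /* eight packed a8r8g8b8 pixels -> planar channels (d0..d3 hold
     * blue, green, red and alpha respectively) */
    uint32_t px[8];                    /* packed pixels   */
    uint8_t  b[8], g[8], r[8], a[8];   /* planar channels */
    for (int i = 0; i < 8; i++)
    {
        b[i] = (uint8_t) (px[i] >>  0);
        g[i] = (uint8_t) (px[i] >>  8);
        r[i] = (uint8_t) (px[i] >> 16);
        a[i] = (uint8_t) (px[i] >> 24);
    }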
++/*
++ * This is a macro for implementing cache preload. The main idea is that
++ * the cache preload logic is mostly independent from the rest of the
++ * pixel processing code. It starts at the top left pixel, moves forward
++ * across pixels and can jump across lines. The prefetch distance is
++ * handled in an 'incremental' way: it starts from 0 and advances to the
++ * optimal distance over time. After reaching the optimal prefetch
++ * distance, it is kept constant. There are some checks which prevent
++ * prefetching unneeded pixel lines below the image (though it still
++ * prefetches a bit more data on the right side of the image - not a big
++ * issue, and it may actually be helpful when rendering text glyphs). An
++ * additional trick is the use of an LDR instruction for prefetch instead
++ * of PLD when moving to the next line: the point is that we have a high
++ * chance of getting a TLB miss in this case, and PLD would be useless.
++ *
++ * This sounds like it may introduce a noticeable overhead (when working
++ * with fully cached data). But in reality, because the NEON unit in the
++ * ARM Cortex-A8 has its own pipeline and instruction queue, normal ARM
++ * code can execute simultaneously with NEON code and be completely
++ * shadowed by it. Thus we get no performance overhead at all (*). This
++ * is a very nice feature of the Cortex-A8, if used wisely. We don't have
++ * a hardware prefetcher, but we can still implement some rather advanced
++ * prefetch logic in software for almost zero cost!
++ *
++ * (*) The overhead of the prefetcher is visible when running some trivial
++ * pixel processing such as a simple copy. In any case, having prefetch
++ * is a must when working with graphics data.
++ */
++.macro cache_preload std_increment, boost_increment
++.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
++.if regs_shortage
++    ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
++.endif
++.if std_increment != 0
++    add PF_X, PF_X, #std_increment
++.endif
++    tst PF_CTL, #0xF
++    addne PF_X, PF_X, #boost_increment
++    subne PF_CTL, PF_CTL, #1
++    cmp PF_X, ORIG_W
++.if src_bpp_shift >= 0
++    pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++.endif
++.if dst_r_bpp != 0
++    pld [PF_DST, PF_X, lsl #dst_bpp_shift]
++.endif
++.if mask_bpp_shift >= 0
++    pld [PF_MASK, PF_X, lsl #mask_bpp_shift]
++.endif
++    subge PF_X, PF_X, ORIG_W
++    subges PF_CTL, PF_CTL, #0x10
++.if src_bpp_shift >= 0
++    ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++.endif
++.if dst_r_bpp != 0
++    ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
++.endif
++.if mask_bpp_shift >= 0
++    ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
++.endif
++.endif
++.endm
++
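The control flow of the macro is easier to follow in scalar form. Here is a C model of a single cache_preload step, using the encoding PF_CTL = prefetch_distance | ((height - 1) << 4) set up at function entry (an illustrative sketch only, not part of the patch):

    static void
    cache_preload_model (uint32_t std_increment, uint32_t boost_increment,
                         uint32_t *pf_x, uint32_t *pf_ctl, uint32_t orig_w)
    {
        *pf_x += std_increment;        /* advance together with the pixels */
        if (*pf_ctl & 0xf)             /* still ramping up the distance?   */
        {
            *pf_x += boost_increment;  /* grow the prefetch distance       */
            *pf_ctl -= 1;
        }
        /* PLD hints are issued at buffer + (*pf_x << bpp_shift) here */
        if (*pf_x >= orig_w)           /* ran past the end of the scanline */
        {
            *pf_x -= orig_w;
            *pf_ctl -= 0x10;           /* one less line left to prefetch   */
            /* if lines remain, an LDR (not PLD) touches the next line,
             * so that a potential TLB miss gets handled */
        }
    }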
++/*
++ * Registers are allocated in the following way by default:
++ * d0, d1, d2, d3     - reserved for loading source pixel data
++ * d4, d5, d6, d7     - reserved for loading destination pixel data
++ * d24, d25, d26, d27 - reserved for loading mask pixel data
++ * d28, d29, d30, d31 - final destination pixel data for writeback to memory
++ */
++.macro generate_composite_function fname, \
++                                   src_bpp, \
++                                   mask_bpp, \
++                                   dst_w_bpp, \
++                                   flags, \
++                                   pixblock_size, \
++                                   prefetch_distance, \
++                                   init, \
++                                   cleanup, \
++                                   process_pixblock_head, \
++                                   process_pixblock_tail, \
++                                   process_pixblock_tail_head, \
++                                   dst_w_basereg = 28, \
++                                   dst_r_basereg = 4, \
++                                   src_basereg   = 0, \
++                                   mask_basereg  = 24
++
++    .global fname
++fname:
++
++    W           .req        r0      /* width (is updated during processing) */
++    H           .req        r1      /* height (is updated during processing) */
++    DST_W       .req        r2      /* destination buffer pointer for writes */
++    DST_STRIDE  .req        r3      /* destination image stride */
++    SRC         .req        r4      /* source buffer pointer */
++    SRC_STRIDE  .req        r5      /* source image stride */
++    DST_R       .req        r6      /* destination buffer pointer for reads */
++
++    MASK        .req        r7      /* mask pointer */
++    MASK_STRIDE .req        r8      /* mask stride */
++
++    PF_CTL      .req        r9
++    PF_X        .req        r10
++    PF_SRC      .req        r11
++    PF_DST      .req        r12
++    PF_MASK     .req        r14
++
++.if mask_bpp == 0
++    ORIG_W      .req        r7      /* saved original width */
++    DUMMY       .req        r8      /* temporary register */
++    .set        regs_shortage, 0
++.elseif src_bpp == 0
++    ORIG_W      .req        r4      /* saved original width */
++    DUMMY       .req        r5      /* temporary register */
++    .set        regs_shortage, 0
++.else
++    ORIG_W      .req        r1      /* saved original width */
++    DUMMY       .req        r1      /* temporary register */
++    .set        regs_shortage, 1
++.endif
++
++    push        {r4-r12, lr}
++
++    .set mask_bpp_shift, -1
++
++.if src_bpp == 32
++    .set src_bpp_shift, 2
++.elseif src_bpp == 16
++    .set src_bpp_shift, 1
++.elseif src_bpp == 8
++    .set src_bpp_shift, 0
++.elseif src_bpp == 0
++    .set src_bpp_shift, -1
++.else
++    .error "requested src bpp (src_bpp) is not supported"
++.endif
++.if mask_bpp == 32
++    .set mask_bpp_shift, 2
++.elseif mask_bpp == 8
++    .set mask_bpp_shift, 0
++.elseif mask_bpp == 0
++    .set mask_bpp_shift, -1
++.else
++    .error "requested mask bpp (mask_bpp) is not supported"
++.endif
++.if dst_w_bpp == 32
++    .set dst_bpp_shift, 2
++.elseif dst_w_bpp == 16
++    .set dst_bpp_shift, 1
++.elseif dst_w_bpp == 8
++    .set dst_bpp_shift, 0
++.else
++    .error "requested dst bpp (dst_w_bpp) is not supported"
++.endif
++
++.if (((flags) & FLAG_DST_READWRITE) != 0)
++    .set dst_r_bpp, dst_w_bpp
++.else
++    .set dst_r_bpp, 0
++.endif
++.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
++    .set DEINTERLEAVE_32BPP_ENABLED, 1
++.else
++    .set DEINTERLEAVE_32BPP_ENABLED, 0
++.endif
++
++.if prefetch_distance < 0 || prefetch_distance > 15
++    .error "invalid prefetch distance (prefetch_distance)"
++.endif
++
++.if src_bpp > 0
++    ldr         SRC, [sp, #40]
++.endif
++.if mask_bpp > 0
++    ldr         MASK, [sp, #48]
++.endif
++    mov         PF_X, #0
++.if src_bpp > 0
++    ldr         SRC_STRIDE, [sp, #44]
++.endif
++.if mask_bpp > 0
++    ldr         MASK_STRIDE, [sp, #52]
++.endif
++    mov         DST_R, DST_W
++    mov         PF_SRC, SRC
++    mov         PF_DST, DST_R
++    mov         PF_MASK, MASK
++    mov         PF_CTL, H, lsl #4
++    /* pf_ctl = prefetch_distance | ((h - 1) << 4) */
++    add         PF_CTL, #(prefetch_distance - 0x10)
++
++    init
++.if regs_shortage
++    push        {r0, r1}
++.endif
++    subs        H, H, #1
++.if regs_shortage
++    str         H, [sp, #4] /* save updated height to stack */
++.else
++    mov         ORIG_W, W
++.endif
++    blt         9f
++    cmp         W, #(pixblock_size * 2)
++    blt         8f
++0:
++    /* ensure 16 byte alignment of the destination buffer */
++    tst         DST_R, #0xF
++    beq         2f
++
++.irp lowbit, 1, 2, 4, 8, 16
++.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
++.if lowbit < 16 /* we don't need more than 16-byte alignment */
++    tst         DST_R, #lowbit
++    beq         1f
++.endif
++    pixld       (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
++    pixld       (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
++.if dst_r_bpp > 0
++    pixld_a     (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
++.else
++    add         DST_R, DST_R, #lowbit
++.endif
++    add         PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
++    sub         W, W, #(lowbit * 8 / dst_w_bpp)
++1:
++.endif
++.endr
++    pixdeinterleave src_bpp, src_basereg
++    pixdeinterleave mask_bpp, mask_basereg
++    pixdeinterleave dst_r_bpp, dst_r_basereg
++
++    process_pixblock_head
++    cache_preload 0, pixblock_size
++    process_pixblock_tail
++
++    pixinterleave dst_w_bpp, dst_w_basereg
++.irp lowbit, 1, 2, 4, 8, 16
++.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
++.if lowbit < 16 /* we don't need more than 16-byte alignment */
++    tst         DST_W, #lowbit
++    beq         1f
++.endif
++    pixst_a     (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
++1:
++.endif
++.endr
++2:
++
++    pixld_a     pixblock_size, dst_r_bpp, \
++                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
++    pixld       pixblock_size, src_bpp, \
++                (src_basereg - pixblock_size * src_bpp / 64), SRC
++    pixld       pixblock_size, mask_bpp, \
++                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
++    add         PF_X, PF_X, #pixblock_size
++    process_pixblock_head
++    cache_preload 0, pixblock_size
++    subs        W, W, #(pixblock_size * 2)
++    blt         2f
++1: /* innermost pipelined loop */
++    process_pixblock_tail_head
++    subs        W, W, #pixblock_size
++    bge         1b
++2:
++    process_pixblock_tail
++    pixst_a     pixblock_size, dst_w_bpp, \
++                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
++
++    /* process up to (pixblock_size - 1) remaining pixels */
++    tst         W, #(pixblock_size - 1)
++    beq         2f
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++    tst         W, #chunk_size
++    beq         1f
++    pixld       chunk_size, src_bpp, src_basereg, SRC
++    pixld       chunk_size, mask_bpp, mask_basereg, MASK
++    pixld_a     chunk_size, dst_r_bpp, dst_r_basereg, DST_R
++    add         PF_X, PF_X, #chunk_size
++1:
++.endif
++.endr
++    pixdeinterleave src_bpp, src_basereg
++    pixdeinterleave mask_bpp, mask_basereg
++    pixdeinterleave dst_r_bpp, dst_r_basereg
++
++    process_pixblock_head
++    cache_preload 0, pixblock_size
++    process_pixblock_tail
++
++    pixinterleave dst_w_bpp, dst_w_basereg
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++    tst         W, #chunk_size
++    beq         1f
++    pixst_a     chunk_size, dst_w_bpp, dst_w_basereg, DST_W
++1:
++.endif
++.endr
++2:
++
++.if regs_shortage
++    ldrd        W, [sp] /* load W and H (width and height) from stack */
++.else
++    mov         W, ORIG_W
++.endif
++    add         DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
++.if src_bpp != 0
++    add         SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++    add         MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
++.endif
++    sub         DST_W, DST_W, W, lsl #dst_bpp_shift
++.if src_bpp != 0
++    sub         SRC, SRC, W, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++    sub         MASK, MASK, W, lsl #mask_bpp_shift
++.endif
++    subs        H, H, #1
++    mov         DST_R, DST_W
++.if regs_shortage
++    str         H, [sp, #4] /* save updated height to stack */
++.endif
++    bge         0b
++.if regs_shortage
++    pop         {r0, r1}
++.endif
++    cleanup
++    pop         {r4-r12, pc}  /* exit */
++
++8: /* handle a small rectangle, width less than (pixblock_size * 2) pixels */
++    tst         W, #pixblock_size
++    beq         1f
++    pixld       pixblock_size, dst_r_bpp, \
++                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
++    pixld       pixblock_size, src_bpp, \
++                (src_basereg - pixblock_size * src_bpp / 64), SRC
++    pixld       pixblock_size, mask_bpp, \
++                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
++    process_pixblock_head
++    process_pixblock_tail
++    pixst       pixblock_size, dst_w_bpp, \
++                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
++1: /* process the remaining pixels, which do not fully fill one block */
++    tst         W, #(pixblock_size - 1)
++    beq         2f
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++    tst         W, #chunk_size
++    beq         1f
++    pixld       chunk_size, src_bpp, src_basereg, SRC
++    pixld       chunk_size, mask_bpp, mask_basereg, MASK
++    pixld       chunk_size, dst_r_bpp, dst_r_basereg, DST_R
++1:
++.endif
++.endr
++    pixdeinterleave src_bpp, src_basereg
++    pixdeinterleave mask_bpp, mask_basereg
++    pixdeinterleave dst_r_bpp, dst_r_basereg
++    process_pixblock_head
++    process_pixblock_tail
++    pixinterleave dst_w_bpp, dst_w_basereg
++.irp chunk_size, 16, 8, 4, 2, 1
++.if pixblock_size > chunk_size
++    tst         W, #chunk_size
++    beq         1f
++    pixst       chunk_size, dst_w_bpp, dst_w_basereg, DST_W
++1:
++.endif
++.endr
++2:
++.if regs_shortage
++    ldrd        W, [sp] /* load W and H (width and height) from stack */
++.else
++    mov         W, ORIG_W
++.endif
++    add         DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
++.if src_bpp != 0
++    add         SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++    add         MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
++.endif
++    sub         DST_W, DST_W, W, lsl #dst_bpp_shift
++.if src_bpp != 0
++    sub         SRC, SRC, W, lsl #src_bpp_shift
++.endif
++.if mask_bpp != 0
++    sub         MASK, MASK, W, lsl #mask_bpp_shift
++.endif
++    subs        H, H, #1
++    mov         DST_R, DST_W
++.if regs_shortage
++    str         H, [sp, #4] /* save updated height to stack */
++.endif
++    bge         8b
++9:
++.if regs_shortage
++    pop         {r0, r1}
++.endif
++    cleanup
++    pop         {r4-r12, pc}  /* exit */
++
++    .unreq      SRC
++    .unreq      MASK
++    .unreq      DST_R
++    .unreq      DST_W
++    .unreq      ORIG_W
++    .unreq      W
++    .unreq      H
++    .unreq      SRC_STRIDE
++    .unreq      DST_STRIDE
++    .unreq      MASK_STRIDE
++    .unreq      PF_CTL
++    .unreq      PF_X
++    .unreq      PF_SRC
++    .unreq      PF_DST
++    .unreq      PF_MASK
++    .unreq      DUMMY
++.endm
++
++.macro default_init
++.endm
++
++.macro default_cleanup
++.endm
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index 9caef61..fe57daa 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -1901,8 +1901,63 @@ pixman_fill_neon (uint32_t *bits,
+ #endif
+ }
++/*
++ * Use GNU assembler optimizations only if we are completely sure that
++ * the target system has compatible ABI and calling conventions. This
++ * check can be updated/extended if more systems turn out to be actually
++ * compatible.
++ */
++#if defined(__linux__) && defined(__ARM_EABI__) && defined(USE_GCC_INLINE_ASM)
++#define USE_GNU_ASSEMBLER_ASM
++#endif
++
++#ifdef USE_GNU_ASSEMBLER_ASM
++
++void
++pixman_composite_over_8888_0565_asm_neon (int32_t   w,
++                                          int32_t   h,
++                                          uint16_t *dst,
++                                          int32_t   dst_stride,
++                                          uint32_t *src,
++                                          int32_t   src_stride);
++
++static void
++neon_composite_over_8888_0565 (pixman_implementation_t *imp,
++                               pixman_op_t              op,
++                               pixman_image_t *         src_image,
++                               pixman_image_t *         mask_image,
++                               pixman_image_t *         dst_image,
++                               int32_t                  src_x,
++                               int32_t                  src_y,
++                               int32_t                  mask_x,
++                               int32_t                  mask_y,
++                               int32_t                  dest_x,
++                               int32_t                  dest_y,
++                               int32_t                  width,
++                               int32_t                  height)
++{
++    uint16_t *dst_line;
++    uint32_t *src_line;
++    int32_t dst_stride, src_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
++                         src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
++                         dst_stride, dst_line, 1);
++
++    pixman_composite_over_8888_0565_asm_neon (width, height,
++                                              dst_line, dst_stride,
++                                              src_line, src_stride);
++}
++
++#endif
++
+ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ {
++#ifdef USE_GNU_ASSEMBLER_ASM
++    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565,   0 },
++    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565,   0 },
++#endif
+     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
+     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000,    0 },
+     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,    0 },
+-- 
+1.6.2.4
+
diff --git a/recipes/xorg-lib/pixman/0003-ARM-Added-pixman_composite_src_8888_0565_asm_neon-f.patch b/recipes/xorg-lib/pixman/0003-ARM-Added-pixman_composite_src_8888_0565_asm_neon-f.patch
new file mode 100644 (file)
index 0000000..00b682e
--- /dev/null
@@ -0,0 +1,63 @@
+From b17297cf15122e5b38c082c9fe6f1ff708b7efa4 Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 12 Oct 2009 21:50:37 +0300
+Subject: [PATCH 3/7] ARM: Added pixman_composite_src_8888_0565_asm_neon function
+
+---
+ pixman/pixman-arm-neon-asm.S |   43 ++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 43 insertions(+), 0 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+index 843899f..20d2587 100644
+--- a/pixman/pixman-arm-neon-asm.S
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -307,3 +307,46 @@ generate_composite_function \
+     4,  /* dst_r_basereg */ \
+     0,  /* src_basereg   */ \
+     24  /* mask_basereg  */
++
++/******************************************************************************/
++
++.macro pixman_composite_src_8888_0565_process_pixblock_head
++    vshll.u8    q8, d1, #8
++    vshll.u8    q14, d2, #8
++    vshll.u8    q9, d0, #8
++.endm
++
++.macro pixman_composite_src_8888_0565_process_pixblock_tail
++    vsri.u16    q14, q8, #5
++    vsri.u16    q14, q9, #11
++.endm
++
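The head/tail pair above performs the a8r8g8b8 -> r5g6b5 conversion with widening shifts (vshll.u8) followed by shift-right-and-insert (vsri.u16); numerically this matches the following C sketch (illustrative):

    /* pack one pixel into r5g6b5, keeping the top 5/6/5 bits of the
     * red/green/blue channels, as the vshll/vsri sequence does */
    static inline uint16_t
    pack_0565 (uint8_t r, uint8_t g, uint8_t b)
    {
        return (uint16_t) (((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3));
    }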
++.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
++        vsri.u16    q14, q8, #5
++                                    add PF_X, PF_X, #8
++                                    tst PF_CTL, #0xF
++    vld4.8      {d0, d1, d2, d3}, [SRC]!
++                                    addne PF_X, PF_X, #8
++                                    subne PF_CTL, PF_CTL, #1
++        vsri.u16    q14, q9, #11
++                                    cmp PF_X, ORIG_W
++                                    pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++    vshll.u8    q8, d1, #8
++        vst1.16     {d28, d29}, [DST_W, :128]!
++                                    subge PF_X, PF_X, ORIG_W
++                                    subges PF_CTL, PF_CTL, #0x10
++    vshll.u8    q14, d2, #8
++                                    ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++    vshll.u8    q9, d0, #8
++.endm
++
++generate_composite_function \
++    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
++    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
++    8, /* number of pixels processed in a single block */ \
++    10, /* prefetch distance */ \
++    default_init, \
++    default_cleanup, \
++    pixman_composite_src_8888_0565_process_pixblock_head, \
++    pixman_composite_src_8888_0565_process_pixblock_tail, \
++    pixman_composite_src_8888_0565_process_pixblock_tail_head
+-- 
+1.6.2.4
+
diff --git a/recipes/xorg-lib/pixman/0004-ARM-Added-pixman_composite_add_8000_8000_asm_neon-f.patch b/recipes/xorg-lib/pixman/0004-ARM-Added-pixman_composite_add_8000_8000_asm_neon-f.patch
new file mode 100644 (file)
index 0000000..445697a
--- /dev/null
@@ -0,0 +1,60 @@
+From 9cfedd684bdeabe9e97303e6f432c3ffb440426c Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 12 Oct 2009 21:51:54 +0300
+Subject: [PATCH 4/7] ARM: Added pixman_composite_add_8000_8000_asm_neon function
+
+---
+ pixman/pixman-arm-neon-asm.S |   40 ++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 40 insertions(+), 0 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+index 20d2587..373cbd0 100644
+--- a/pixman/pixman-arm-neon-asm.S
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -350,3 +350,43 @@ generate_composite_function \
+     pixman_composite_src_8888_0565_process_pixblock_head, \
+     pixman_composite_src_8888_0565_process_pixblock_tail, \
+     pixman_composite_src_8888_0565_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_add_8000_8000_process_pixblock_head
++    vqadd.u8    q14, q0, q2
++    vqadd.u8    q15, q1, q3
++.endm
++
++.macro pixman_composite_add_8000_8000_process_pixblock_tail
++.endm
++
++.macro pixman_composite_add_8000_8000_process_pixblock_tail_head
++    vld1.8      {d0, d1, d2, d3}, [SRC]!
++                                    add PF_X, PF_X, #32
++                                    tst PF_CTL, #0xF
++    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
++                                    addne PF_X, PF_X, #32
++                                    subne PF_CTL, PF_CTL, #1
++        vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
++                                    cmp PF_X, ORIG_W
++                                    pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++                                    pld [PF_DST, PF_X, lsl #dst_bpp_shift]
++                                    subge PF_X, PF_X, ORIG_W
++                                    subges PF_CTL, PF_CTL, #0x10
++    vqadd.u8    q14, q0, q2
++                                    ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++                                    ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
++    vqadd.u8    q15, q1, q3
++.endm
++
++generate_composite_function \
++    pixman_composite_add_8000_8000_asm_neon, 8, 0, 8, \
++    FLAG_DST_READWRITE, \
++    32, /* number of pixels processed in a single block */ \
++    10, /* prefetch distance */ \
++    default_init, \
++    default_cleanup, \
++    pixman_composite_add_8000_8000_process_pixblock_head, \
++    pixman_composite_add_8000_8000_process_pixblock_tail, \
++    pixman_composite_add_8000_8000_process_pixblock_tail_head
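For this 8bpp ADD operator the per-pixel work is just a per-byte saturating add, which is exactly what the vqadd.u8 instructions in the head macro perform; a scalar C equivalent (illustrative):

    /* scalar equivalent of vqadd.u8: add with saturation at 255 */
    static inline uint8_t
    add_sat_u8 (uint8_t s, uint8_t d)
    {
        unsigned int t = (unsigned int) s + d;
        return (uint8_t) (t > 255 ? 255 : t);
    }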
+-- 
+1.6.2.4
+
diff --git a/recipes/xorg-lib/pixman/0005-ARM-Added-pixman_composite_over_8888_8888_asm_neon.patch b/recipes/xorg-lib/pixman/0005-ARM-Added-pixman_composite_over_8888_8888_asm_neon.patch
new file mode 100644 (file)
index 0000000..e4c8936
--- /dev/null
@@ -0,0 +1,87 @@
+From 9eb4ecf68cf5609240222d10f1a4c9dfebdb3498 Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 12 Oct 2009 21:52:49 +0300
+Subject: [PATCH 5/7] ARM: Added pixman_composite_over_8888_8888_asm_neon function
+
+---
+ pixman/pixman-arm-neon-asm.S |   67 ++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 67 insertions(+), 0 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+index 373cbd0..b11a9a7 100644
+--- a/pixman/pixman-arm-neon-asm.S
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -390,3 +390,70 @@ generate_composite_function \
+     pixman_composite_add_8000_8000_process_pixblock_head, \
+     pixman_composite_add_8000_8000_process_pixblock_tail, \
+     pixman_composite_add_8000_8000_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_over_8888_8888_process_pixblock_head
++    vmvn.8      d24, d3  /* get inverted alpha */
++    /* do alpha blending */
++    vmull.u8    q8, d24, d4
++    vmull.u8    q9, d24, d5
++    vmull.u8    q10, d24, d6
++    vmull.u8    q11, d24, d7
++.endm
++
++.macro pixman_composite_over_8888_8888_process_pixblock_tail
++    vrshr.u16   q14, q8, #8
++    vrshr.u16   q15, q9, #8
++    vrshr.u16   q12, q10, #8
++    vrshr.u16   q13, q11, #8
++    vraddhn.u16 d28, q14, q8
++    vraddhn.u16 d29, q15, q9
++    vraddhn.u16 d30, q12, q10
++    vraddhn.u16 d31, q13, q11
++    vqadd.u8    q14, q0, q14
++    vqadd.u8    q15, q1, q15
++.endm
++
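In the head/tail pair above, each vrshr.u16/vraddhn.u16 combination turns a 16-bit product t = dst_channel * (255 - src_alpha) into an exactly rounded division by 255, and the final vqadd.u8 adds the (premultiplied) source channel, giving OVER: dst = src + div255 (dst * (255 - src_alpha)). A scalar C sketch of the division step (illustrative):

    /* exact rounded t / 255 for t <= 255 * 255, matching the
     * vrshr.u16 #8 + vraddhn.u16 sequence */
    static inline uint8_t
    div255 (uint32_t t)
    {
        return (uint8_t) ((t + 128 + ((t + 128) >> 8)) >> 8);
    }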
++.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
++    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
++        vrshr.u16   q14, q8, #8
++                                    add PF_X, PF_X, #8
++                                    tst PF_CTL, #0xF
++        vrshr.u16   q15, q9, #8
++        vrshr.u16   q12, q10, #8
++        vrshr.u16   q13, q11, #8
++                                    addne PF_X, PF_X, #8
++                                    subne PF_CTL, PF_CTL, #1
++        vraddhn.u16 d28, q14, q8
++        vraddhn.u16 d29, q15, q9
++                                    cmp PF_X, ORIG_W
++        vraddhn.u16 d30, q12, q10
++        vraddhn.u16 d31, q13, q11
++        vqadd.u8    q14, q0, q14
++        vqadd.u8    q15, q1, q15
++    vld4.8      {d0, d1, d2, d3}, [SRC]!
++                                    pld [PF_SRC, PF_X, lsl #src_bpp_shift]
++    vmvn.8      d22, d3
++                                    pld [PF_DST, PF_X, lsl #dst_bpp_shift]
++        vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
++                                    subge PF_X, PF_X, ORIG_W
++    vmull.u8    q8, d22, d4
++                                    subges PF_CTL, PF_CTL, #0x10
++    vmull.u8    q9, d22, d5
++                                    ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
++    vmull.u8    q10, d22, d6
++                                    ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
++    vmull.u8    q11, d22, d7
++.endm
++
++generate_composite_function \
++    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
++    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
++    8, /* number of pixels processed in a single block */ \
++    5, /* prefetch distance */ \
++    default_init, \
++    default_cleanup, \
++    pixman_composite_over_8888_8888_process_pixblock_head, \
++    pixman_composite_over_8888_8888_process_pixblock_tail, \
++    pixman_composite_over_8888_8888_process_pixblock_tail_head
+-- 
+1.6.2.4
+
diff --git a/recipes/xorg-lib/pixman/0006-ARM-Added-a-set-of-NEON-functions-not-fully-optimi.patch b/recipes/xorg-lib/pixman/0006-ARM-Added-a-set-of-NEON-functions-not-fully-optimi.patch
new file mode 100644 (file)
index 0000000..0f89c88
--- /dev/null
@@ -0,0 +1,540 @@
+From 606a73203318e44af4362684368bc24d2aed841d Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 12 Oct 2009 21:57:17 +0300
+Subject: [PATCH 6/7] ARM: Added a set of NEON functions (not fully optimized)
+
+---
+ pixman/pixman-arm-neon-asm.S |  520 ++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 520 insertions(+), 0 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
+index b11a9a7..bca499a 100644
+--- a/pixman/pixman-arm-neon-asm.S
++++ b/pixman/pixman-arm-neon-asm.S
+@@ -457,3 +457,523 @@ generate_composite_function \
+     pixman_composite_over_8888_8888_process_pixblock_head, \
+     pixman_composite_over_8888_8888_process_pixblock_tail, \
+     pixman_composite_over_8888_8888_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_over_n_8_0565_process_pixblock_head
++    /* in */
++    vmull.u8    q0, d24, d8
++    vmull.u8    q1, d24, d9
++    vmull.u8    q6, d24, d10
++    vmull.u8    q7, d24, d11
++    vrshr.u16   q10, q0, #8
++    vrshr.u16   q11, q1, #8
++    vrshr.u16   q12, q6, #8
++    vrshr.u16   q13, q7, #8
++    vraddhn.u16 d0, q0, q10
++    vraddhn.u16 d1, q1, q11
++    vraddhn.u16 d2, q6, q12
++    vraddhn.u16 d3, q7, q13
++
++    vshrn.u16   d6, q2, #8
++    vshrn.u16   d7, q2, #3
++    vsli.u16    q2, q2, #5
++    vsri.u8     d6, d6, #5
++    vmvn.8      d3, d3
++    vsri.u8     d7, d7, #6
++    vshrn.u16   d30, q2, #2
++    /* now do alpha blending */
++    vmull.u8    q10, d3, d6
++    vmull.u8    q11, d3, d7
++    vmull.u8    q12, d3, d30
++    vrshr.u16   q13, q10, #8
++    vrshr.u16   q3, q11, #8
++    vrshr.u16   q15, q12, #8
++    vraddhn.u16 d20, q10, q13
++    vraddhn.u16 d23, q11, q3
++    vraddhn.u16 d22, q12, q15
++.endm
++
++.macro pixman_composite_over_n_8_0565_process_pixblock_tail
++    vqadd.u8    d16, d2, d20
++    vqadd.u8    q9, q0, q11
++    /* convert to r5g6b5 */
++    vshll.u8    q14, d16, #8
++    vshll.u8    q8, d19, #8
++    vshll.u8    q9, d18, #8
++    vsri.u16    q14, q8, #5
++    vsri.u16    q14, q9, #11
++.endm
++
++/* TODO: expand macros and do better instruction scheduling */
++.macro pixman_composite_over_n_8_0565_process_pixblock_tail_head
++    pixman_composite_over_n_8_0565_process_pixblock_tail
++    vst1.16     {d28, d29}, [DST_W, :128]!
++    vld1.16     {d4, d5}, [DST_R, :128]!
++    vld1.8      {d24}, [MASK]!
++    cache_preload 8, 8
++    pixman_composite_over_n_8_0565_process_pixblock_head
++.endm
++
++.macro pixman_composite_over_n_8_0565_init
++    add         DUMMY, sp, #40
++    vpush       {d8-d15}
++    vld1.32     {d11[0]}, [DUMMY]
++    vdup.8      d8, d11[0]
++    vdup.8      d9, d11[1]
++    vdup.8      d10, d11[2]
++    vdup.8      d11, d11[3]
++.endm
++
++.macro pixman_composite_over_n_8_0565_cleanup
++    vpop        {d8-d15}
++.endm
++
++generate_composite_function \
++    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
++    FLAG_DST_READWRITE, \
++    8, /* number of pixels processed in a single block */ \
++    5, /* prefetch distance */ \
++    pixman_composite_over_n_8_0565_init, \
++    pixman_composite_over_n_8_0565_cleanup, \
++    pixman_composite_over_n_8_0565_process_pixblock_head, \
++    pixman_composite_over_n_8_0565_process_pixblock_tail, \
++    pixman_composite_over_n_8_0565_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_src_0565_0565_process_pixblock_head
++.endm
++
++.macro pixman_composite_src_0565_0565_process_pixblock_tail
++.endm
++
++.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
++    vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
++    vld1.16 {d0, d1, d2, d3}, [SRC]!
++    cache_preload 16, 16
++.endm
++
++generate_composite_function \
++    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
++    FLAG_DST_WRITEONLY, \
++    16, /* number of pixels processed in a single block */ \
++    10, /* prefetch distance */ \
++    default_init, \
++    default_cleanup, \
++    pixman_composite_src_0565_0565_process_pixblock_head, \
++    pixman_composite_src_0565_0565_process_pixblock_tail, \
++    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
++    0, /* dst_w_basereg */ \
++    0, /* dst_r_basereg */ \
++    0, /* src_basereg   */ \
++    0  /* mask_basereg  */
++
++/******************************************************************************/
++
++.macro pixman_composite_src_n_8_process_pixblock_head
++.endm
++
++.macro pixman_composite_src_n_8_process_pixblock_tail
++.endm
++
++.macro pixman_composite_src_n_8_process_pixblock_tail_head
++    vst1.8  {d0, d1, d2, d3}, [DST_W, :128]!
++.endm
++
++.macro pixman_composite_src_n_8_init
++    add         DUMMY, sp, #40
++    vld1.32     {d0[0]}, [DUMMY]
++    vsli.u64    d0, d0, #8
++    vsli.u64    d0, d0, #16
++    vsli.u64    d0, d0, #32
++    vmov        d1, d0
++    vmov        q1, q0
++.endm
++
++.macro pixman_composite_src_n_8_cleanup
++.endm
++
++generate_composite_function \
++    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
++    FLAG_DST_WRITEONLY, \
++    32, /* number of pixels processed in a single block */ \
++    0,  /* prefetch distance */ \
++    pixman_composite_src_n_8_init, \
++    pixman_composite_src_n_8_cleanup, \
++    pixman_composite_src_n_8_process_pixblock_head, \
++    pixman_composite_src_n_8_process_pixblock_tail, \
++    pixman_composite_src_n_8_process_pixblock_tail_head, \
++    0, /* dst_w_basereg */ \
++    0, /* dst_r_basereg */ \
++    0, /* src_basereg   */ \
++    0  /* mask_basereg  */
++
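The init macro above broadcasts the low byte of the solid color across a full 64-bit register using a chain of shift-left-and-insert operations; in scalar terms (an illustrative sketch):

    /* replicate the low byte across all 8 bytes, mirroring the
     * vsli.u64 #8 / #16 / #32 chain in pixman_composite_src_n_8_init */
    static inline uint64_t
    replicate_byte (uint64_t x)
    {
        x = (x << 8)  | (x & 0xff);         /* vsli.u64 d0, d0, #8  */
        x = (x << 16) | (x & 0xffff);       /* vsli.u64 d0, d0, #16 */
        x = (x << 32) | (x & 0xffffffff);   /* vsli.u64 d0, d0, #32 */
        return x;
    }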
++/******************************************************************************/
++
++.macro pixman_composite_src_n_0565_process_pixblock_head
++.endm
++
++.macro pixman_composite_src_n_0565_process_pixblock_tail
++.endm
++
++.macro pixman_composite_src_n_0565_process_pixblock_tail_head
++    vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
++.endm
++
++.macro pixman_composite_src_n_0565_init
++    add         DUMMY, sp, #40
++    vld1.32     {d0[0]}, [DUMMY]
++    vsli.u64    d0, d0, #16
++    vsli.u64    d0, d0, #32
++    vmov        d1, d0
++    vmov        q1, q0
++.endm
++
++.macro pixman_composite_src_n_0565_cleanup
++.endm
++
++generate_composite_function \
++    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
++    FLAG_DST_WRITEONLY, \
++    16, /* number of pixels processed in a single block */ \
++    0,  /* prefetch distance */ \
++    pixman_composite_src_n_0565_init, \
++    pixman_composite_src_n_0565_cleanup, \
++    pixman_composite_src_n_0565_process_pixblock_head, \
++    pixman_composite_src_n_0565_process_pixblock_tail, \
++    pixman_composite_src_n_0565_process_pixblock_tail_head, \
++    0, /* dst_w_basereg */ \
++    0, /* dst_r_basereg */ \
++    0, /* src_basereg   */ \
++    0  /* mask_basereg  */
++
++/******************************************************************************/
++
++.macro pixman_composite_src_n_8888_process_pixblock_head
++.endm
++
++.macro pixman_composite_src_n_8888_process_pixblock_tail
++.endm
++
++.macro pixman_composite_src_n_8888_process_pixblock_tail_head
++    vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
++.endm
++
++.macro pixman_composite_src_n_8888_init
++    add         DUMMY, sp, #40
++    vld1.32     {d0[0]}, [DUMMY]
++    vsli.u64    d0, d0, #32
++    vmov        d1, d0
++    vmov        q1, q0
++.endm
++
++.macro pixman_composite_src_n_8888_cleanup
++.endm
++
++generate_composite_function \
++    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
++    FLAG_DST_WRITEONLY, \
++    8, /* number of pixels processed in a single block */ \
++    0, /* prefetch distance */ \
++    pixman_composite_src_n_8888_init, \
++    pixman_composite_src_n_8888_cleanup, \
++    pixman_composite_src_n_8888_process_pixblock_head, \
++    pixman_composite_src_n_8888_process_pixblock_tail, \
++    pixman_composite_src_n_8888_process_pixblock_tail_head, \
++    0, /* dst_w_basereg */ \
++    0, /* dst_r_basereg */ \
++    0, /* src_basereg   */ \
++    0  /* mask_basereg  */
++
++/******************************************************************************/
++
++.macro pixman_composite_src_8888_8888_process_pixblock_head
++.endm
++
++.macro pixman_composite_src_8888_8888_process_pixblock_tail
++.endm
++
++.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
++    vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
++    vld1.32 {d0, d1, d2, d3}, [SRC]!
++    cache_preload 8, 8
++.endm
++
++generate_composite_function \
++    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
++    FLAG_DST_WRITEONLY, \
++    8, /* number of pixels processed in a single block */ \
++    10, /* prefetch distance */ \
++    default_init, \
++    default_cleanup, \
++    pixman_composite_src_8888_8888_process_pixblock_head, \
++    pixman_composite_src_8888_8888_process_pixblock_tail, \
++    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
++    0, /* dst_w_basereg */ \
++    0, /* dst_r_basereg */ \
++    0, /* src_basereg   */ \
++    0  /* mask_basereg  */
++
++/******************************************************************************/
++
++.macro pixman_composite_over_n_8_8888_process_pixblock_head
++    /* expecting deinterleaved source data in {d8, d9, d10, d11} */
++    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
++    /* and destination data in {d4, d5, d6, d7} */
++    /* mask is in d24 (d25, d26, d27 are unused) */
++
++    /* in */
++    vmull.u8    q0, d24, d8
++    vmull.u8    q1, d24, d9
++    vmull.u8    q6, d24, d10
++    vmull.u8    q7, d24, d11
++    vrshr.u16   q10, q0, #8
++    vrshr.u16   q11, q1, #8
++    vrshr.u16   q12, q6, #8
++    vrshr.u16   q13, q7, #8
++    vraddhn.u16 d0, q0, q10
++    vraddhn.u16 d1, q1, q11
++    vraddhn.u16 d2, q6, q12
++    vraddhn.u16 d3, q7, q13
++    vmvn.8      d24, d3  /* get inverted alpha */
++    /* source:      d0 - blue, d1 - green, d2 - red, d3 - alpha */
++    /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
++    /* now do alpha blending */
++    vmull.u8    q8, d24, d4
++    vmull.u8    q9, d24, d5
++    vmull.u8    q10, d24, d6
++    vmull.u8    q11, d24, d7
++.endm
++
++.macro pixman_composite_over_n_8_8888_process_pixblock_tail
++    vrshr.u16   q14, q8, #8
++    vrshr.u16   q15, q9, #8
++    vrshr.u16   q12, q10, #8
++    vrshr.u16   q13, q11, #8
++    vraddhn.u16 d28, q14, q8
++    vraddhn.u16 d29, q15, q9
++    vraddhn.u16 d30, q12, q10
++    vraddhn.u16 d31, q13, q11
++    vqadd.u8    q14, q0, q14
++    vqadd.u8    q15, q1, q15
++.endm
++
++/* TODO: expand macros and do better instruction scheduling */
++.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
++    pixman_composite_over_n_8_8888_process_pixblock_tail
++    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
++    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
++    vld1.8      {d24}, [MASK]!
++    cache_preload 8, 8
++    pixman_composite_over_n_8_8888_process_pixblock_head
++.endm
++
++.macro pixman_composite_over_n_8_8888_init
++    add         DUMMY, sp, #40
++    vpush       {d8-d15}
++    vld1.32     {d11[0]}, [DUMMY]
++    vdup.8      d8, d11[0]
++    vdup.8      d9, d11[1]
++    vdup.8      d10, d11[2]
++    vdup.8      d11, d11[3]
++.endm
++
++.macro pixman_composite_over_n_8_8888_cleanup
++    vpop        {d8-d15}
++.endm
++
++generate_composite_function \
++    pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
++    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
++    8, /* number of pixels processed in a single block */ \
++    5, /* prefetch distance */ \
++    pixman_composite_over_n_8_8888_init, \
++    pixman_composite_over_n_8_8888_cleanup, \
++    pixman_composite_over_n_8_8888_process_pixblock_head, \
++    pixman_composite_over_n_8_8888_process_pixblock_tail, \
++    pixman_composite_over_n_8_8888_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_add_n_8_8_process_pixblock_head
++    /* expecting source data in {d8, d9, d10, d11} */
++    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
++    /* and destination data in {d4, d5, d6, d7} */
++    /* mask is in d24, d25, d26, d27 */
++    vmull.u8    q0, d24, d11
++    vmull.u8    q1, d25, d11
++    vmull.u8    q6, d26, d11
++    vmull.u8    q7, d27, d11
++    vrshr.u16   q10, q0, #8
++    vrshr.u16   q11, q1, #8
++    vrshr.u16   q12, q6, #8
++    vrshr.u16   q13, q7, #8
++    vraddhn.u16 d0, q0, q10
++    vraddhn.u16 d1, q1, q11
++    vraddhn.u16 d2, q6, q12
++    vraddhn.u16 d3, q7, q13
++    vqadd.u8    q14, q0, q2
++    vqadd.u8    q15, q1, q3
++.endm
++
++.macro pixman_composite_add_n_8_8_process_pixblock_tail
++.endm
++
++/* TODO: expand macros and do better instruction scheduling */
++.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
++    pixman_composite_add_n_8_8_process_pixblock_tail
++    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
++    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
++    vld1.8      {d24, d25, d26, d27}, [MASK]!
++    cache_preload 32, 32
++    pixman_composite_add_n_8_8_process_pixblock_head
++.endm
++
++.macro pixman_composite_add_n_8_8_init
++    add         DUMMY, sp, #40
++    vpush       {d8-d15}
++    vld1.32     {d11[0]}, [DUMMY]
++    vdup.8      d11, d11[3]
++.endm
++
++.macro pixman_composite_add_n_8_8_cleanup
++    vpop        {d8-d15}
++.endm
++
++generate_composite_function \
++    pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
++    FLAG_DST_READWRITE, \
++    32, /* number of pixels processed in a single block */ \
++    5, /* prefetch distance */ \
++    pixman_composite_add_n_8_8_init, \
++    pixman_composite_add_n_8_8_cleanup, \
++    pixman_composite_add_n_8_8_process_pixblock_head, \
++    pixman_composite_add_n_8_8_process_pixblock_tail, \
++    pixman_composite_add_n_8_8_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_add_8_8_8_process_pixblock_head
++    /* expecting source data in {d0, d1, d2, d3} */
++    /* destination data in {d4, d5, d6, d7} */
++    /* mask in {d24, d25, d26, d27} */
++    vmull.u8    q8, d24, d0
++    vmull.u8    q9, d25, d1
++    vmull.u8    q10, d26, d2
++    vmull.u8    q11, d27, d3
++    vrshr.u16   q0, q8, #8
++    vrshr.u16   q1, q9, #8
++    vrshr.u16   q12, q10, #8
++    vrshr.u16   q13, q11, #8
++    vraddhn.u16 d0, q0, q8
++    vraddhn.u16 d1, q1, q9
++    vraddhn.u16 d2, q12, q10
++    vraddhn.u16 d3, q13, q11
++    vqadd.u8    q14, q0, q2
++    vqadd.u8    q15, q1, q3
++.endm
++
++.macro pixman_composite_add_8_8_8_process_pixblock_tail
++.endm
++
++/* TODO: expand macros and do better instruction scheduling */
++.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
++    pixman_composite_add_8_8_8_process_pixblock_tail
++    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
++    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
++    vld1.8      {d24, d25, d26, d27}, [MASK]!
++    vld1.8      {d0, d1, d2, d3}, [SRC]!
++    cache_preload 32, 32
++    pixman_composite_add_8_8_8_process_pixblock_head
++.endm
++
++.macro pixman_composite_add_8_8_8_init
++.endm
++
++.macro pixman_composite_add_8_8_8_cleanup
++.endm
++
++generate_composite_function \
++    pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
++    FLAG_DST_READWRITE, \
++    32, /* number of pixels processed in a single block */ \
++    5, /* prefetch distance */ \
++    pixman_composite_add_8_8_8_init, \
++    pixman_composite_add_8_8_8_cleanup, \
++    pixman_composite_add_8_8_8_process_pixblock_head, \
++    pixman_composite_add_8_8_8_process_pixblock_tail, \
++    pixman_composite_add_8_8_8_process_pixblock_tail_head
++
++/******************************************************************************/
++
++.macro pixman_composite_over_8888_n_8888_process_pixblock_head
++    /* expecting source data in {d0, d1, d2, d3} */
++    /* destination data in {d4, d5, d6, d7} */
++    /* solid mask is in d15 */
++
++    /* 'in' */
++    vmull.u8    q8, d15, d3
++    vmull.u8    q6, d15, d2
++    vmull.u8    q5, d15, d1
++    vmull.u8    q4, d15, d0
++    vrshr.u16   q13, q8, #8
++    vrshr.u16   q12, q6, #8
++    vrshr.u16   q11, q5, #8
++    vrshr.u16   q10, q4, #8
++    vraddhn.u16 d3, q8, q13
++    vraddhn.u16 d2, q6, q12
++    vraddhn.u16 d1, q5, q11
++    vraddhn.u16 d0, q4, q10
++    vmvn.8      d24, d3  /* get inverted alpha */
++    /* now do alpha blending */
++    vmull.u8    q8, d24, d4
++    vmull.u8    q9, d24, d5
++    vmull.u8    q10, d24, d6
++    vmull.u8    q11, d24, d7
++.endm
++
++.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
++    vrshr.u16   q14, q8, #8
++    vrshr.u16   q15, q9, #8
++    vrshr.u16   q12, q10, #8
++    vrshr.u16   q13, q11, #8
++    vraddhn.u16 d28, q14, q8
++    vraddhn.u16 d29, q15, q9
++    vraddhn.u16 d30, q12, q10
++    vraddhn.u16 d31, q13, q11
++    vqadd.u8    q14, q0, q14
++    vqadd.u8    q15, q1, q15
++.endm
++
++/* TODO: expand macros and do better instruction scheduling */
++.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
++    vld4.8     {d4, d5, d6, d7}, [DST_R, :128]!
++    pixman_composite_over_8888_n_8888_process_pixblock_tail
++    vld4.8     {d0, d1, d2, d3}, [SRC]!
++    cache_preload 8, 8
++    pixman_composite_over_8888_n_8888_process_pixblock_head
++    vst4.8     {d28, d29, d30, d31}, [DST_W, :128]!
++.endm
++
++.macro pixman_composite_over_8888_n_8888_init
++    add         DUMMY, sp, #48
++    vpush       {d8-d15}
++    vld1.32     {d15[0]}, [DUMMY]
++    vdup.8      d15, d15[3]
++.endm
++
++.macro pixman_composite_over_8888_n_8888_cleanup
++    vpop        {d8-d15}
++.endm
++
++generate_composite_function \
++    pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
++    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
++    8, /* number of pixels processed in a single block */ \
++    5, /* prefetch distance */ \
++    pixman_composite_over_8888_n_8888_init, \
++    pixman_composite_over_8888_n_8888_cleanup, \
++    pixman_composite_over_8888_n_8888_process_pixblock_head, \
++    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
++    pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+-- 
+1.6.2.4
+
diff --git a/recipes/xorg-lib/pixman/0007-ARM-Enabled-new-NEON-optimizations.patch b/recipes/xorg-lib/pixman/0007-ARM-Enabled-new-NEON-optimizations.patch
new file mode 100644 (file)
index 0000000..c14bac1
--- /dev/null
@@ -0,0 +1,592 @@
+From 98d458dea913d7d76c48c48de9ef3aee85cced3a Mon Sep 17 00:00:00 2001
+From: Siarhei Siamashka <siarhei.siamashka@nokia.com>
+Date: Mon, 12 Oct 2009 22:25:38 +0300
+Subject: [PATCH 7/7] ARM: Enabled new NEON optimizations
+
+---
+ pixman/pixman-arm-neon.c |  535 ++++++++++++++++++++++++++++++++++++++++++++--
+ 1 files changed, 520 insertions(+), 15 deletions(-)
+
+diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
+index fe57daa..2811099 100644
+--- a/pixman/pixman-arm-neon.c
++++ b/pixman/pixman-arm-neon.c
+@@ -34,6 +34,18 @@
+ #include <string.h>
+ #include "pixman-private.h"
++/*
++ * Use GNU assembler optimizations only if we are completely sure that
++ * the target system has compatible ABI and calling conventions. This
++ * check can be updated/extended if more systems turn out to be actually
++ * compatible.
++ */
++#if defined(__linux__) && defined(__ARM_EABI__) && defined(USE_GCC_INLINE_ASM)
++#define USE_GNU_ASSEMBLER_ARM_NEON
++#endif
++
++#ifndef USE_GNU_ASSEMBLER_ARM_NEON
++
+ /* Deal with an intrinsic that is defined differently in GCC */
+ #if !defined(__ARMCC_VERSION) && !defined(__pld)
+ #define __pld(_x) __builtin_prefetch (_x)
+@@ -1901,17 +1913,7 @@ pixman_fill_neon (uint32_t *bits,
+ #endif
+ }
+-/*
+- * Use GNU assembler optimizations only if we are completely sure that
+- * the target system has compatible ABI and calling conventions. This
+- * check can be updated/extended if more systems turn out to be actually
+- * compatible.
+- */
+-#if defined(__linux__) && defined(__ARM_EABI__) && defined(USE_GCC_INLINE_ASM)
+-#define USE_GNU_ASSEMBLER_ASM
+-#endif
+-
+-#ifdef USE_GNU_ASSEMBLER_ASM
++#else /* USE_GNU_ASSEMBLER_ARM_NEON */
+ void
+ pixman_composite_over_8888_0565_asm_neon (int32_t   w,
+@@ -1941,23 +1943,525 @@ neon_composite_over_8888_0565 (pixman_implementation_t *imp,
+     int32_t dst_stride, src_stride;
+     PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
+-                         src_stride, src_line, 1);
++                           src_stride, src_line, 1);
+     PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
+-                         dst_stride, dst_line, 1);
++                           dst_stride, dst_line, 1);
+     pixman_composite_over_8888_0565_asm_neon (width, height,
+                                               dst_line, dst_stride,
+                                               src_line, src_stride);
+ }
++void
++pixman_composite_src_8888_0565_asm_neon (int32_t   w,
++                                         int32_t   h,
++                                         uint16_t *dst,
++                                         int32_t   dst_stride,
++                                         uint32_t *src,
++                                         int32_t   src_stride);
++
++static void
++neon_composite_src_8888_0565 (pixman_implementation_t *imp,
++                              pixman_op_t              op,
++                              pixman_image_t *         src_image,
++                              pixman_image_t *         mask_image,
++                              pixman_image_t *         dst_image,
++                              int32_t                  src_x,
++                              int32_t                  src_y,
++                              int32_t                  mask_x,
++                              int32_t                  mask_y,
++                              int32_t                  dest_x,
++                              int32_t                  dest_y,
++                              int32_t                  width,
++                              int32_t                  height)
++{
++    uint16_t *dst_line;
++    uint32_t *src_line;
++    int32_t dst_stride, src_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
++                           dst_stride, dst_line, 1);
++
++    pixman_composite_src_8888_0565_asm_neon (width, height,
++                                             dst_line, dst_stride,
++                                             src_line, src_stride);
++}
++
++void
++pixman_composite_src_0565_0565_asm_neon (int32_t   w,
++                                         int32_t   h,
++                                         uint16_t *dst,
++                                         int32_t   dst_stride,
++                                         uint16_t *src,
++                                         int32_t   src_stride);
++
++static void
++neon_composite_src_0565_0565 (pixman_implementation_t *imp,
++                              pixman_op_t              op,
++                              pixman_image_t *         src_image,
++                              pixman_image_t *         mask_image,
++                              pixman_image_t *         dst_image,
++                              int32_t                  src_x,
++                              int32_t                  src_y,
++                              int32_t                  mask_x,
++                              int32_t                  mask_y,
++                              int32_t                  dest_x,
++                              int32_t                  dest_y,
++                              int32_t                  width,
++                              int32_t                  height)
++{
++    uint16_t *dst_line;
++    uint16_t *src_line;
++    int32_t dst_stride, src_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
++                           dst_stride, dst_line, 1);
++
++    pixman_composite_src_0565_0565_asm_neon (width, height,
++                                             dst_line, dst_stride,
++                                             src_line, src_stride);
++}
++
++
++void
++pixman_composite_src_8888_8888_asm_neon (int32_t   w,
++                                         int32_t   h,
++                                         uint32_t *dst,
++                                         int32_t   dst_stride,
++                                         uint32_t *src,
++                                         int32_t   src_stride);
++
++static void
++neon_composite_src_8888_8888 (pixman_implementation_t *imp,
++                              pixman_op_t              op,
++                              pixman_image_t *         src_image,
++                              pixman_image_t *         mask_image,
++                              pixman_image_t *         dst_image,
++                              int32_t                  src_x,
++                              int32_t                  src_y,
++                              int32_t                  mask_x,
++                              int32_t                  mask_y,
++                              int32_t                  dest_x,
++                              int32_t                  dest_y,
++                              int32_t                  width,
++                              int32_t                  height)
++{
++    uint32_t *dst_line;
++    uint32_t *src_line;
++    int32_t dst_stride, src_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
++                           dst_stride, dst_line, 1);
++
++    pixman_composite_src_8888_8888_asm_neon (width, height,
++                                             dst_line, dst_stride,
++                                             src_line, src_stride);
++}
++
++void
++pixman_composite_over_8888_8888_asm_neon (int32_t   w,
++                                          int32_t   h,
++                                          uint32_t *dst,
++                                          int32_t   dst_stride,
++                                          uint32_t *src,
++                                          int32_t   src_stride);
++
++static void
++neon_composite_over_8888_8888 (pixman_implementation_t *imp,
++                               pixman_op_t              op,
++                               pixman_image_t *         src_image,
++                               pixman_image_t *         mask_image,
++                               pixman_image_t *         dst_image,
++                               int32_t                  src_x,
++                               int32_t                  src_y,
++                               int32_t                  mask_x,
++                               int32_t                  mask_y,
++                               int32_t                  dest_x,
++                               int32_t                  dest_y,
++                               int32_t                  width,
++                               int32_t                  height)
++{
++    uint32_t *dst_line;
++    uint32_t *src_line;
++    int32_t dst_stride, src_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
++                           dst_stride, dst_line, 1);
++
++    pixman_composite_over_8888_8888_asm_neon (width, height,
++                                              dst_line, dst_stride,
++                                              src_line, src_stride);
++}
++
++void
++pixman_composite_add_8000_8000_asm_neon (int32_t   w,
++                                         int32_t   h,
++                                         uint8_t  *dst,
++                                         int32_t   dst_stride,
++                                         uint8_t  *src,
++                                         int32_t   src_stride);
++
++static void
++neon_composite_add_8000_8000 (pixman_implementation_t *imp,
++                              pixman_op_t              op,
++                              pixman_image_t *         src_image,
++                              pixman_image_t *         mask_image,
++                              pixman_image_t *         dst_image,
++                              int32_t                  src_x,
++                              int32_t                  src_y,
++                              int32_t                  mask_x,
++                              int32_t                  mask_y,
++                              int32_t                  dest_x,
++                              int32_t                  dest_y,
++                              int32_t                  width,
++                              int32_t                  height)
++{
++    uint8_t *dst_line;
++    uint8_t *src_line;
++    int32_t dst_stride, src_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t,
++                           dst_stride, dst_line, 1);
++
++    pixman_composite_add_8000_8000_asm_neon (width, height,
++                                             dst_line, dst_stride,
++                                             src_line, src_stride);
++}
++
++void
++pixman_composite_over_n_8_0565_asm_neon (int32_t   w,
++                                         int32_t   h,
++                                         uint16_t *dst,
++                                         int32_t   dst_stride,
++                                         uint32_t  src,
++                                         int32_t   unused,
++                                         uint8_t  *mask,
++                                         int32_t   mask_stride);
++
++static void
++neon_composite_over_n_8_0565 (pixman_implementation_t *imp,
++                              pixman_op_t              op,
++                              pixman_image_t *         src_image,
++                              pixman_image_t *         mask_image,
++                              pixman_image_t *         dst_image,
++                              int32_t                  src_x,
++                              int32_t                  src_y,
++                              int32_t                  mask_x,
++                              int32_t                  mask_y,
++                              int32_t                  dest_x,
++                              int32_t                  dest_y,
++                              int32_t                  width,
++                              int32_t                  height)
++{
++    uint16_t *dst_line;
++    uint8_t  *mask_line;
++    int32_t   dst_stride, mask_stride;
++    uint32_t  src;
++
++    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
++
++    if (src == 0)
++      return;
++
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t,
++                           dst_stride, dst_line, 1);
++    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t,
++                           mask_stride, mask_line, 1);
++
++    pixman_composite_over_n_8_0565_asm_neon (width, height,
++                                             dst_line, dst_stride,
++                                             src, 0,
++                                             mask_line, mask_stride);
++}
++
++void
++pixman_composite_over_n_8_8888_asm_neon (int32_t   w,
++                                         int32_t   h,
++                                         uint32_t *dst,
++                                         int32_t   dst_stride,
++                                         uint32_t  src,
++                                         int32_t   unused,
++                                         uint8_t  *mask,
++                                         int32_t   mask_stride);
++
++static void
++neon_composite_over_n_8_8888 (pixman_implementation_t *imp,
++                              pixman_op_t              op,
++                              pixman_image_t *         src_image,
++                              pixman_image_t *         mask_image,
++                              pixman_image_t *         dst_image,
++                              int32_t                  src_x,
++                              int32_t                  src_y,
++                              int32_t                  mask_x,
++                              int32_t                  mask_y,
++                              int32_t                  dest_x,
++                              int32_t                  dest_y,
++                              int32_t                  width,
++                              int32_t                  height)
++{
++    uint32_t *dst_line;
++    uint8_t  *mask_line;
++    int32_t   dst_stride, mask_stride;
++    uint32_t  src;
++
++    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
++
++    if (src == 0)
++      return;
++
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
++                           dst_stride, dst_line, 1);
++    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t,
++                           mask_stride, mask_line, 1);
++
++    pixman_composite_over_n_8_8888_asm_neon (width, height,
++                                             dst_line, dst_stride,
++                                             src, 0,
++                                             mask_line, mask_stride);
++}
++
++void
++pixman_composite_add_8_8_8_asm_neon (int32_t   w,
++                                     int32_t   h,
++                                     uint8_t  *dst,
++                                     int32_t   dst_stride,
++                                     uint8_t  *src,
++                                     int32_t   src_stride,
++                                     uint8_t  *mask,
++                                     int32_t   mask_stride);
++
++static void
++neon_composite_add_8_8_8 (pixman_implementation_t *imp,
++                          pixman_op_t              op,
++                          pixman_image_t *         src_image,
++                          pixman_image_t *         mask_image,
++                          pixman_image_t *         dst_image,
++                          int32_t                  src_x,
++                          int32_t                  src_y,
++                          int32_t                  mask_x,
++                          int32_t                  mask_y,
++                          int32_t                  dest_x,
++                          int32_t                  dest_y,
++                          int32_t                  width,
++                          int32_t                  height)
++{
++    uint8_t *src_line;
++    uint8_t *dst_line;
++    uint8_t *mask_line;
++    int32_t  src_stride, dst_stride, mask_stride;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t,
++                           dst_stride, dst_line, 1);
++    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t,
++                           mask_stride, mask_line, 1);
++
++    pixman_composite_add_8_8_8_asm_neon (width, height,
++                                         dst_line, dst_stride,
++                                         src_line, src_stride,
++                                         mask_line, mask_stride);
++}
++
++void
++pixman_composite_add_n_8_8_asm_neon (int32_t   w,
++                                     int32_t   h,
++                                     uint8_t  *dst,
++                                     int32_t   dst_stride,
++                                     uint32_t  src,
++                                     int32_t   unused,
++                                     uint8_t  *mask,
++                                     int32_t   mask_stride);
++
++static void
++neon_composite_add_n_8_8 (pixman_implementation_t *imp,
++                          pixman_op_t              op,
++                          pixman_image_t *         src_image,
++                          pixman_image_t *         mask_image,
++                          pixman_image_t *         dst_image,
++                          int32_t                  src_x,
++                          int32_t                  src_y,
++                          int32_t                  mask_x,
++                          int32_t                  mask_y,
++                          int32_t                  dest_x,
++                          int32_t                  dest_y,
++                          int32_t                  width,
++                          int32_t                  height)
++{
++    uint8_t *dst_line;
++    uint8_t *mask_line;
++    int32_t  dst_stride, mask_stride;
++    uint32_t src;
++
++    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
++
++    if (src == 0)
++      return;
++
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t,
++                           dst_stride, dst_line, 1);
++    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t,
++                           mask_stride, mask_line, 1);
++
++    pixman_composite_add_n_8_8_asm_neon (width, height,
++                                         dst_line, dst_stride,
++                                         src, 0,
++                                         mask_line, mask_stride);
++}
++
++void
++pixman_composite_over_8888_n_8888_asm_neon (int32_t   w,
++                                            int32_t   h,
++                                            uint32_t *dst,
++                                            int32_t   dst_stride,
++                                            uint32_t *src,
++                                            int32_t   src_stride,
++                                            uint32_t  mask);
++
++static void
++neon_composite_over_8888_n_8888 (pixman_implementation_t *imp,
++                                 pixman_op_t              op,
++                                 pixman_image_t *         src_image,
++                                 pixman_image_t *         mask_image,
++                                 pixman_image_t *         dst_image,
++                                 int32_t                  src_x,
++                                 int32_t                  src_y,
++                                 int32_t                  mask_x,
++                                 int32_t                  mask_y,
++                                 int32_t                  dest_x,
++                                 int32_t                  dest_y,
++                                 int32_t                  width,
++                                 int32_t                  height)
++{
++    uint32_t *dst_line;
++    uint32_t *src_line;
++    int32_t   dst_stride, src_stride;
++    uint32_t  mask;
++
++    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
++
++    if (mask == 0)
++      return;
++
++    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
++                           src_stride, src_line, 1);
++    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
++                           dst_stride, dst_line, 1);
++
++    pixman_composite_over_8888_n_8888_asm_neon (width, height,
++                                                dst_line, dst_stride,
++                                                src_line, src_stride,
++                                                mask);
++}
++
++void
++pixman_composite_src_n_8_asm_neon (int32_t   w,
++                                   int32_t   h,
++                                   uint8_t  *dst,
++                                   int32_t   dst_stride,
++                                   uint8_t   src);
++
++void
++pixman_composite_src_n_0565_asm_neon (int32_t   w,
++                                      int32_t   h,
++                                      uint16_t *dst,
++                                      int32_t   dst_stride,
++                                      uint16_t  src);
++
++void
++pixman_composite_src_n_8888_asm_neon (int32_t   w,
++                                      int32_t   h,
++                                      uint32_t *dst,
++                                      int32_t   dst_stride,
++                                      uint32_t  src);
++
++static pixman_bool_t
++pixman_fill_neon (uint32_t *bits,
++                  int       stride,
++                  int       bpp,
++                  int       x,
++                  int       y,
++                  int       width,
++                  int       height,
++                  uint32_t  _xor)
++{
++    /* stride is always a multiple of 32-bit units in pixman */
++    uint32_t byte_stride = stride * sizeof(uint32_t);
++
++    switch (bpp)
++    {
++    case 8:
++      pixman_composite_src_n_8_asm_neon (
++              width,
++              height,
++              (uint8_t *)(((char *) bits) + y * byte_stride + x),
++              byte_stride,
++              _xor & 0xff);
++      return TRUE;
++    case 16:
++      pixman_composite_src_n_0565_asm_neon (
++              width,
++              height,
++              (uint16_t *)(((char *) bits) + y * byte_stride + x * 2),
++              byte_stride / 2,
++              _xor & 0xffff);
++      return TRUE;
++    case 32:
++      pixman_composite_src_n_8888_asm_neon (
++              width,
++              height,
++              (uint32_t *)(((char *) bits) + y * byte_stride + x * 4),
++              byte_stride / 4,
++              _xor);
++      return TRUE;
++    default:
++      return FALSE;
++    }
++}
++
+ #endif
+ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+ {
+-#ifdef USE_GNU_ASSEMBLER_ASM
++#ifdef USE_GNU_ASSEMBLER_ARM_NEON
++    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_0565_0565,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565,    0 },
++    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_8_8_8,        0 },
++    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
++    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000,    0 },
++    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_composite_over_n_8_0565,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_8888_8888,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_8888_8888,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888,    0 },
++    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_0565_0565,    0 },
+     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565,   0 },
+     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565,   0 },
+-#endif
++    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888,   0 },
++    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,   0 },
++    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888,   0 },
++    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,   0 },
++    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
++    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
++    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888,    0 },
++    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888,    0 },
++    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888,    0 },
++    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,    0 },
++#else
+     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
+     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000,    0 },
+     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,    0 },
+@@ -1980,6 +2484,7 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
+     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888,    0 },
+     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888,    0 },
+     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,    0 },
++#endif
+     { PIXMAN_OP_NONE },
+ };
+-- 
+1.6.2.4
+
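Each row in the arm_neon_fast_path_array above binds an (operator, source format, mask format, destination format) tuple to one of the NEON composite routines; PIXMAN_null marks "no mask", PIXMAN_solid a solid fill, and the { PIXMAN_OP_NONE } entry terminates the array, with the first matching row winning. A minimal, self-contained C sketch of that lookup (not pixman's actual dispatch code; fast_path_t, lookup_fast_path and the string stand-ins are invented for illustration):

    /* sketch.c -- illustrative only; build with: cc -o sketch sketch.c */
    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for pixman's operator and format enums. */
    typedef enum { OP_NONE, OP_SRC, OP_OVER, OP_ADD } op_t;
    typedef enum { FMT_null, FMT_solid, FMT_a8, FMT_r5g6b5, FMT_a8r8g8b8 } fmt_t;

    typedef struct {
        op_t        op;
        fmt_t       src, mask, dst;
        const char *func;   /* stands in for the composite function pointer */
    } fast_path_t;

    /* A miniature version of arm_neon_fast_path_array. */
    static const fast_path_t table[] = {
        { OP_OVER, FMT_a8r8g8b8, FMT_null, FMT_r5g6b5, "neon_composite_over_8888_0565" },
        { OP_OVER, FMT_solid,    FMT_a8,   FMT_r5g6b5, "neon_composite_over_n_8_0565"  },
        { OP_ADD,  FMT_a8,       FMT_null, FMT_a8,     "neon_composite_add_8000_8000"  },
        { OP_NONE }   /* sentinel, like { PIXMAN_OP_NONE } above */
    };

    /* Scan for the first matching row; NULL means "fall back to the
     * generic C path". */
    static const char *
    lookup_fast_path (op_t op, fmt_t src, fmt_t mask, fmt_t dst)
    {
        const fast_path_t *p;

        for (p = table; p->op != OP_NONE; p++)
            if (p->op == op && p->src == src && p->mask == mask && p->dst == dst)
                return p->func;
        return NULL;
    }

    int
    main (void)
    {
        const char *f = lookup_fast_path (OP_OVER, FMT_a8r8g8b8, FMT_null, FMT_r5g6b5);
        printf ("%s\n", f ? f : "generic path");
        return 0;
    }

For the re-added pixman_fill_neon, note that pixman counts stride in uint32_t units, so byte_stride = stride * 4; with stride = 100 and bpp = 16, pixel (x, y) starts at ((char *) bits) + y * 400 + x * 2, matching the case 16 branch in the patch.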
diff --git a/recipes/xorg-lib/pixman_0.15.18.bb b/recipes/xorg-lib/pixman_0.15.18.bb
new file mode 100644 (file)
index 0000000..2517f8f
--- /dev/null
@@ -0,0 +1,13 @@
+SECTION = "libs"
+PRIORITY = "optional"
+DESCRIPTION = "Low-level pixel manipulation library."
+LICENSE = "X11"
+
+DEFAULT_PREFERENCE = "-1"
+
+SRC_URI = "http://cairographics.org/releases/pixman-${PV}.tar.gz \
+         "
+
+inherit autotools_stage
+AUTOTOOLS_STAGE_PKGCONFIG = "1"
+
diff --git a/recipes/xorg-lib/pixman_0.16.2.bb b/recipes/xorg-lib/pixman_0.16.2.bb
new file mode 100644 (file)
index 0000000..2517f8f
--- /dev/null
@@ -0,0 +1,13 @@
+SECTION = "libs"
+PRIORITY = "optional"
+DESCRIPTION = "Low-level pixel manipulation library."
+LICENSE = "X11"
+
+DEFAULT_PREFERENCE = "-1"
+
+SRC_URI = "http://cairographics.org/releases/pixman-${PV}.tar.gz \
+         "
+
+inherit autotools_stage
+AUTOTOOLS_STAGE_PKGCONFIG = "1"
+
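Like the 0.15.18 recipe, this one carries DEFAULT_PREFERENCE = "-1", so BitBake keeps preferring the existing default pixman; a build that wants one of these new versions has to opt in explicitly. A minimal example for local.conf or a distro .conf (version string shown for the 0.16.2 recipe above):

    # Opt in to a version whose recipe sets DEFAULT_PREFERENCE = "-1"
    PREFERRED_VERSION_pixman = "0.16.2"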
index b8aa2a4..8a0ee26 100644 (file)
@@ -7,15 +7,20 @@ PV = "0.17.1"
 PR = "r2"
 PR_append = "+gitr${SRCREV}"
 
-SRCREV = "7af985a69a9147e54dd5946a8062dbc2e534b735"
+SRCREV = "dc46ad274a47d351bacf3c2167c359d23dbaf8b3"
 
 DEFAULT_PREFERENCE = "-1"
 DEFAULT_PREFERENCE_angstrom = "1"
 
 SRC_URI = "git://anongit.freedesktop.org/pixman;protocol=git \
+           file://0002-ARM-Introduction-of-the-new-framework-for-NEON-fast.patch;patch=1 \
+           file://0003-ARM-Added-pixman_composite_src_8888_0565_asm_neon-f.patch;patch=1 \
+           file://0004-ARM-Added-pixman_composite_add_8000_8000_asm_neon-f.patch;patch=1 \
+           file://0005-ARM-Added-pixman_composite_over_8888_8888_asm_neon.patch;patch=1 \
+           file://0006-ARM-Added-a-set-of-NEON-functions-not-fully-optimi.patch;patch=1 \
+           file://0007-ARM-Enabled-new-NEON-optimizations.patch;patch=1 \
            file://pixman-28986.patch;patch=1 \
            file://nearest-neighbour.patch;patch=1 \
-           file://remove-broken.patch;patch=1 \
            file://over-8888-0565.patch;patch=1 \
 "