/* filter_neon_intrinsics.c - NEON optimised filter functions
 *
 * Copyright (c) 2013 Glenn Randers-Pehrson
 * Written by James Yu <james.yu at linaro.org>, October 2013.
 * Based on filter_neon.S, written by Mans Rullgard, 2011.
 *
 * Last changed in libpng 1.6.8 [December 19, 2013]
 *
 * This code is released under the libpng license.
 * For conditions of distribution and use, see the disclaimer
 * and license in png.h
 */
#include "../pngpriv.h"

/* This code requires -mfpu=neon on the command line: */
#if PNG_ARM_NEON_IMPLEMENTATION == 1 /* intrinsics code from pngpriv.h */
/* libpng row pointers are not necessarily aligned to any particular boundary,
 * however this code will only work with appropriate alignment. arm/arm_init.c
 * checks for this (and will not compile unless it is done). This code uses
 * variants of png_aligncast to avoid compiler warnings.
 */
#define png_ptr(type,pointer) png_aligncast(type *,pointer)
#define png_ptrc(type,pointer) png_aligncastconst(const type *,pointer)
/* The following relies on a variable 'temp_pointer' being declared with type
 * 'type'. This is written this way just to hide the GCC strict aliasing
 * warning; note that the code is safe because there never is an alias between
 * the input and output pointers.
 */
#define png_ldr(type,pointer)\
   (temp_pointer = png_ptr(type,pointer), *temp_pointer)

#ifdef PNG_READ_SUPPORTED
#if PNG_ARM_NEON_OPT > 0
42 png_read_filter_row_up_neon(png_row_infop row_info
, png_bytep row
,
43 png_const_bytep prev_row
)
46 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
47 png_const_bytep pp
= prev_row
;
49 for (; rp
< rp_stop
; rp
+= 16, pp
+= 16)
55 qrp
= vaddq_u8(qrp
, qpp
);
61 png_read_filter_row_sub3_neon(png_row_infop row_info
, png_bytep row
,
62 png_const_bytep prev_row
)
65 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
67 uint8x16_t vtmp
= vld1q_u8(rp
);
68 uint8x8x2_t
*vrpt
= png_ptr(uint8x8x2_t
, &vtmp
);
69 uint8x8x2_t vrp
= *vrpt
;
72 vdest
.val
[3] = vdup_n_u8(0);
76 uint8x8_t vtmp1
, vtmp2
;
77 uint32x2_t
*temp_pointer
;
79 vtmp1
= vext_u8(vrp
.val
[0], vrp
.val
[1], 3);
80 vdest
.val
[0] = vadd_u8(vdest
.val
[3], vrp
.val
[0]);
81 vtmp2
= vext_u8(vrp
.val
[0], vrp
.val
[1], 6);
82 vdest
.val
[1] = vadd_u8(vdest
.val
[0], vtmp1
);
84 vtmp1
= vext_u8(vrp
.val
[1], vrp
.val
[1], 1);
85 vdest
.val
[2] = vadd_u8(vdest
.val
[1], vtmp2
);
86 vdest
.val
[3] = vadd_u8(vdest
.val
[2], vtmp1
);
88 vtmp
= vld1q_u8(rp
+ 12);
89 vrpt
= png_ptr(uint8x8x2_t
, &vtmp
);
92 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[0]), 0);
94 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[1]), 0);
96 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[2]), 0);
98 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[3]), 0);
106 png_read_filter_row_sub4_neon(png_row_infop row_info
, png_bytep row
,
107 png_const_bytep prev_row
)
110 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
113 vdest
.val
[3] = vdup_n_u8(0);
115 for (; rp
< rp_stop
; rp
+= 16)
117 uint32x2x4_t vtmp
= vld4_u32(png_ptr(uint32_t,rp
));
118 uint8x8x4_t
*vrpt
= png_ptr(uint8x8x4_t
,&vtmp
);
119 uint8x8x4_t vrp
= *vrpt
;
120 uint32x2x4_t
*temp_pointer
;
122 vdest
.val
[0] = vadd_u8(vdest
.val
[3], vrp
.val
[0]);
123 vdest
.val
[1] = vadd_u8(vdest
.val
[0], vrp
.val
[1]);
124 vdest
.val
[2] = vadd_u8(vdest
.val
[1], vrp
.val
[2]);
125 vdest
.val
[3] = vadd_u8(vdest
.val
[2], vrp
.val
[3]);
126 vst4_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2x4_t
,&vdest
), 0);
133 png_read_filter_row_avg3_neon(png_row_infop row_info
, png_bytep row
,
134 png_const_bytep prev_row
)
137 png_const_bytep pp
= prev_row
;
138 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
144 vdest
.val
[3] = vdup_n_u8(0);
147 vrpt
= png_ptr(uint8x8x2_t
,&vtmp
);
150 for (; rp
< rp_stop
; pp
+= 12)
152 uint8x8_t vtmp1
, vtmp2
, vtmp3
;
157 uint32x2_t
*temp_pointer
;
160 vppt
= png_ptr(uint8x8x2_t
,&vtmp
);
163 vtmp1
= vext_u8(vrp
.val
[0], vrp
.val
[1], 3);
164 vdest
.val
[0] = vhadd_u8(vdest
.val
[3], vpp
.val
[0]);
165 vdest
.val
[0] = vadd_u8(vdest
.val
[0], vrp
.val
[0]);
167 vtmp2
= vext_u8(vpp
.val
[0], vpp
.val
[1], 3);
168 vtmp3
= vext_u8(vrp
.val
[0], vrp
.val
[1], 6);
169 vdest
.val
[1] = vhadd_u8(vdest
.val
[0], vtmp2
);
170 vdest
.val
[1] = vadd_u8(vdest
.val
[1], vtmp1
);
172 vtmp2
= vext_u8(vpp
.val
[0], vpp
.val
[1], 6);
173 vtmp1
= vext_u8(vrp
.val
[1], vrp
.val
[1], 1);
175 vtmp
= vld1q_u8(rp
+ 12);
176 vrpt
= png_ptr(uint8x8x2_t
,&vtmp
);
179 vdest
.val
[2] = vhadd_u8(vdest
.val
[1], vtmp2
);
180 vdest
.val
[2] = vadd_u8(vdest
.val
[2], vtmp3
);
182 vtmp2
= vext_u8(vpp
.val
[1], vpp
.val
[1], 1);
184 vdest
.val
[3] = vhadd_u8(vdest
.val
[2], vtmp2
);
185 vdest
.val
[3] = vadd_u8(vdest
.val
[3], vtmp1
);
187 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[0]), 0);
189 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[1]), 0);
191 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[2]), 0);
193 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[3]), 0);
199 png_read_filter_row_avg4_neon(png_row_infop row_info
, png_bytep row
,
200 png_const_bytep prev_row
)
203 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
204 png_const_bytep pp
= prev_row
;
207 vdest
.val
[3] = vdup_n_u8(0);
209 for (; rp
< rp_stop
; rp
+= 16, pp
+= 16)
212 uint8x8x4_t
*vrpt
, *vppt
;
213 uint8x8x4_t vrp
, vpp
;
214 uint32x2x4_t
*temp_pointer
;
216 vtmp
= vld4_u32(png_ptr(uint32_t,rp
));
217 vrpt
= png_ptr(uint8x8x4_t
,&vtmp
);
219 vtmp
= vld4_u32(png_ptrc(uint32_t,pp
));
220 vppt
= png_ptr(uint8x8x4_t
,&vtmp
);
223 vdest
.val
[0] = vhadd_u8(vdest
.val
[3], vpp
.val
[0]);
224 vdest
.val
[0] = vadd_u8(vdest
.val
[0], vrp
.val
[0]);
225 vdest
.val
[1] = vhadd_u8(vdest
.val
[0], vpp
.val
[1]);
226 vdest
.val
[1] = vadd_u8(vdest
.val
[1], vrp
.val
[1]);
227 vdest
.val
[2] = vhadd_u8(vdest
.val
[1], vpp
.val
[2]);
228 vdest
.val
[2] = vadd_u8(vdest
.val
[2], vrp
.val
[2]);
229 vdest
.val
[3] = vhadd_u8(vdest
.val
[2], vpp
.val
[3]);
230 vdest
.val
[3] = vadd_u8(vdest
.val
[3], vrp
.val
[3]);
232 vst4_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2x4_t
,&vdest
), 0);
237 paeth(uint8x8_t a
, uint8x8_t b
, uint8x8_t c
)
240 uint16x8_t p1
, pa
, pb
, pc
;
242 p1
= vaddl_u8(a
, b
); /* a + b */
243 pc
= vaddl_u8(c
, c
); /* c * 2 */
244 pa
= vabdl_u8(b
, c
); /* pa */
245 pb
= vabdl_u8(a
, c
); /* pb */
246 pc
= vabdq_u16(p1
, pc
); /* pc */
248 p1
= vcleq_u16(pa
, pb
); /* pa <= pb */
249 pa
= vcleq_u16(pa
, pc
); /* pa <= pc */
250 pb
= vcleq_u16(pb
, pc
); /* pb <= pc */
252 p1
= vandq_u16(p1
, pa
); /* pa <= pb && pa <= pc */
257 d
= vbsl_u8(d
, b
, c
);
258 e
= vbsl_u8(e
, a
, d
);
264 png_read_filter_row_paeth3_neon(png_row_infop row_info
, png_bytep row
,
265 png_const_bytep prev_row
)
268 png_const_bytep pp
= prev_row
;
269 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
274 uint8x8_t vlast
= vdup_n_u8(0);
276 vdest
.val
[3] = vdup_n_u8(0);
279 vrpt
= png_ptr(uint8x8x2_t
,&vtmp
);
282 for (; rp
< rp_stop
; pp
+= 12)
286 uint8x8_t vtmp1
, vtmp2
, vtmp3
;
287 uint32x2_t
*temp_pointer
;
290 vppt
= png_ptr(uint8x8x2_t
,&vtmp
);
293 vdest
.val
[0] = paeth(vdest
.val
[3], vpp
.val
[0], vlast
);
294 vdest
.val
[0] = vadd_u8(vdest
.val
[0], vrp
.val
[0]);
296 vtmp1
= vext_u8(vrp
.val
[0], vrp
.val
[1], 3);
297 vtmp2
= vext_u8(vpp
.val
[0], vpp
.val
[1], 3);
298 vdest
.val
[1] = paeth(vdest
.val
[0], vtmp2
, vpp
.val
[0]);
299 vdest
.val
[1] = vadd_u8(vdest
.val
[1], vtmp1
);
301 vtmp1
= vext_u8(vrp
.val
[0], vrp
.val
[1], 6);
302 vtmp3
= vext_u8(vpp
.val
[0], vpp
.val
[1], 6);
303 vdest
.val
[2] = paeth(vdest
.val
[1], vtmp3
, vtmp2
);
304 vdest
.val
[2] = vadd_u8(vdest
.val
[2], vtmp1
);
306 vtmp1
= vext_u8(vrp
.val
[1], vrp
.val
[1], 1);
307 vtmp2
= vext_u8(vpp
.val
[1], vpp
.val
[1], 1);
309 vtmp
= vld1q_u8(rp
+ 12);
310 vrpt
= png_ptr(uint8x8x2_t
,&vtmp
);
313 vdest
.val
[3] = paeth(vdest
.val
[2], vtmp2
, vtmp3
);
314 vdest
.val
[3] = vadd_u8(vdest
.val
[3], vtmp1
);
318 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[0]), 0);
320 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[1]), 0);
322 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[2]), 0);
324 vst1_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2_t
,&vdest
.val
[3]), 0);
330 png_read_filter_row_paeth4_neon(png_row_infop row_info
, png_bytep row
,
331 png_const_bytep prev_row
)
334 png_bytep rp_stop
= row
+ row_info
->rowbytes
;
335 png_const_bytep pp
= prev_row
;
337 uint8x8_t vlast
= vdup_n_u8(0);
339 vdest
.val
[3] = vdup_n_u8(0);
341 for (; rp
< rp_stop
; rp
+= 16, pp
+= 16)
344 uint8x8x4_t
*vrpt
, *vppt
;
345 uint8x8x4_t vrp
, vpp
;
346 uint32x2x4_t
*temp_pointer
;
348 vtmp
= vld4_u32(png_ptr(uint32_t,rp
));
349 vrpt
= png_ptr(uint8x8x4_t
,&vtmp
);
351 vtmp
= vld4_u32(png_ptrc(uint32_t,pp
));
352 vppt
= png_ptr(uint8x8x4_t
,&vtmp
);
355 vdest
.val
[0] = paeth(vdest
.val
[3], vpp
.val
[0], vlast
);
356 vdest
.val
[0] = vadd_u8(vdest
.val
[0], vrp
.val
[0]);
357 vdest
.val
[1] = paeth(vdest
.val
[0], vpp
.val
[1], vpp
.val
[0]);
358 vdest
.val
[1] = vadd_u8(vdest
.val
[1], vrp
.val
[1]);
359 vdest
.val
[2] = paeth(vdest
.val
[1], vpp
.val
[2], vpp
.val
[1]);
360 vdest
.val
[2] = vadd_u8(vdest
.val
[2], vrp
.val
[2]);
361 vdest
.val
[3] = paeth(vdest
.val
[2], vpp
.val
[3], vpp
.val
[2]);
362 vdest
.val
[3] = vadd_u8(vdest
.val
[3], vrp
.val
[3]);
366 vst4_lane_u32(png_ptr(uint32_t,rp
), png_ldr(uint32x2x4_t
,&vdest
), 0);
#endif /* PNG_ARM_NEON_OPT > 0 */
#endif /* PNG_READ_SUPPORTED */
#endif /* PNG_ARM_NEON_IMPLEMENTATION == 1 (intrinsics) */