+#endif /* HAVE_VSX */
+
+#if HAVE_VSX
+/* Compute the element-wise difference of two 8x8 blocks of pixels.
+ *
+ * For each of the 8 rows, 8 unsigned bytes are loaded from s1 and s2,
+ * zero-extended to signed 16-bit, subtracted (s1 - s2), and stored to
+ * block.  Rows of s1/s2 are 'stride' bytes apart; block is written
+ * contiguously (64 int16_t) and is assumed to be 16-byte aligned.
+ */
+static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
+                                const uint8_t *s2, int stride)
+{
+    int i;
+    const vector unsigned char zero =
+        (const vector unsigned char) vec_splat_u8(0);
+    vector signed short shorts1, shorts2;
+
+    for (i = 0; i < 8; i++) {
+        /* Read potentially unaligned pixels.
+         * We're reading 16 pixels, and actually only want 8,
+         * but we simply ignore the extras. */
+        vector unsigned char bytes = vec_vsx_ld(0, s1);
+
+        /* Zero-extend the low 8 bytes into shorts.
+         * NOTE(review): vec_mergeh(bytes, zero) zero-extends correctly on
+         * little-endian (LE element order); the big-endian path presumably
+         * uses the mirrored merge — confirm against the non-VSX variant. */
+        shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+        // Do the same for the second block of pixels.
+        bytes   = vec_vsx_ld(0, s2);
+        shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+        // Do the subtraction.
+        shorts1 = vec_sub(shorts1, shorts2);
+
+        // Save the data to the block; we assume the block is 16-byte aligned.
+        vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+        s1    += stride;
+        s2    += stride;
+        block += 8;
+    }
+}
+#else