diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6fdfaa4a82b8bc20f588190e9185071c3c14e495..bfd499ee37530807a070886409b03a504ccca8e9 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1202,14 +1202,13 @@ struct cpu_spec *identify_cpu(unsigned long offset)
 	return NULL;
 }
 
-void do_feature_fixups(unsigned long offset, unsigned long value,
-		       void *fixup_start, void *fixup_end)
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
 	struct fixup_entry {
 		unsigned long	mask;
 		unsigned long	value;
-		unsigned int	*start;
-		unsigned int	*end;
+		long		start_off;
+		long		end_off;
 	} *fcur, *fend;
 
 	fcur = fixup_start;
@@ -1224,8 +1223,5 @@ void do_feature_fixups(unsigned long offset, unsigned long value,
-		/* These PTRRELOCs will disappear once the new scheme for
-		 * modules and vdso is implemented
-		 */
-		pstart = PTRRELOC(fcur->start);
-		pend = PTRRELOC(fcur->end);
+		pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+		pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
 
 		for (p = pstart; p < pend; p++) {
 			*p = 0x60000000u;
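
The point of the do_feature_fixups() change above is that a fixup entry no longer carries absolute pointers to the code it covers but byte offsets relative to the entry itself, so the table keeps working wherever the image containing it ends up. The sketch below is a stand-alone user-space illustration of that resolution, not kernel code: fixup_one(), the toy image and the feature bit are made up, the match test mirrors the mask/value comparison that do_feature_fixups() performs outside this hunk, and 0x60000000 is the PowerPC nop encoding (ori 0,0,0).

	#include <stdio.h>

	/* Same layout as the new fixup_entry on a 64-bit kernel: the two
	 * offsets are relative to the entry itself and expected to be
	 * negative, since the table is linked after the code it covers. */
	struct fixup_entry {
		unsigned long	mask;
		unsigned long	value;
		long		start_off;
		long		end_off;
	};

	/* Hypothetical stand-in for one iteration of do_feature_fixups():
	 * if the feature word does not match the entry, resolve the covered
	 * range from the entry's own address and overwrite it with nops. */
	static void fixup_one(struct fixup_entry *fcur, unsigned long features)
	{
		unsigned int *pstart, *pend, *p;

		if ((features & fcur->mask) == fcur->value)
			return;				/* keep the code */

		pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
		pend   = ((unsigned int *)fcur) + (fcur->end_off / 4);

		for (p = pstart; p < pend; p++)
			*p = 0x60000000u;		/* ori 0,0,0 = nop */
	}

	int main(void)
	{
		/* Toy image: four "instructions" followed by one entry that
		 * covers the middle two, mimicking code followed by its
		 * __ftr_fixup table. */
		struct {
			unsigned int		code[4];
			struct fixup_entry	entry;
		} image = { .code = { 1, 2, 3, 4 } };

		image.entry.mask      = 0x8;	/* pretend feature bit */
		image.entry.value     = 0x8;	/* keep code only if set */
		image.entry.start_off = (char *)&image.code[1] - (char *)&image.entry;
		image.entry.end_off   = (char *)&image.code[3] - (char *)&image.entry;

		fixup_one(&image.entry, 0);	/* CPU lacks the feature */
		printf("%x %x %x %x\n", image.code[0], image.code[1],
		       image.code[2], image.code[3]);
		/* prints: 1 60000000 60000000 4 */
		return 0;
	}

Because pstart and pend are derived from fcur, the walk produces the right addresses no matter where the block of code plus table sits in memory, which is what makes the same routine usable on the vDSO images further down in this patch.
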
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 769e511783b0f201b4a9f24347bb2c7a63d3f440..a4c2964a3ca6b325f86a52eb1c7880c534ff43d7 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -103,7 +103,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
 	 */
 	spec = identify_cpu(offset);
 
-	do_feature_fixups(offset, spec->cpu_features,
+	do_feature_fixups(spec->cpu_features,
 			  PTRRELOC(&__start___ftr_fixup),
 			  PTRRELOC(&__stop___ftr_fixup));
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 1969b5686eee79d08127589685a6e24941ff964a..16278968dab68e2153a284709e2b4dfdd067adb4 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -354,9 +354,9 @@ void __init setup_system(void)
 	/* Apply the CPU-specific and firmware-specific fixups to kernel
 	 * text (nop out sections not relevant to this CPU or this firmware)
 	 */
-	do_feature_fixups(0, cur_cpu_spec->cpu_features,
+	do_feature_fixups(cur_cpu_spec->cpu_features,
 			  &__start___ftr_fixup, &__stop___ftr_fixup);
-	do_feature_fixups(0, powerpc_firmware_features,
+	do_feature_fixups(powerpc_firmware_features,
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
 
 	/*
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1a7e19cdab39c37a9f0e759a6b2177744ef3a967..c913ad5cad2918e3daebe1251de37a18292f67c9 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -36,6 +36,8 @@
 #include <asm/vdso.h>
 #include <asm/vdso_datapage.h>
 
+#include "setup.h"
+
 #undef DEBUG
 
 #ifdef DEBUG
@@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
 	return 0;
 }
 
+
+static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
+				      struct lib64_elfinfo *v64)
+{
+	void *start32;
+	unsigned long size32;
+
+#ifdef CONFIG_PPC64
+	void *start64;
+	unsigned long size64;
+
+	start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
+	if (start64)
+		do_feature_fixups(cur_cpu_spec->cpu_features,
+				  start64, start64 + size64);
+
+	start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
+	if (start64)
+		do_feature_fixups(powerpc_firmware_features,
+				  start64, start64 + size64);
+#endif /* CONFIG_PPC64 */
+
+	start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
+	if (start32)
+		do_feature_fixups(cur_cpu_spec->cpu_features,
+				  start32, start32 + size32);
+
+#ifdef CONFIG_PPC64
+	start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
+	if (start32)
+		do_feature_fixups(powerpc_firmware_features,
+				  start32, start32 + size32);
+#endif /* CONFIG_PPC64 */
+
+	return 0;
+}
+
 static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
 				       struct lib64_elfinfo *v64)
 {
@@ -634,6 +673,9 @@ static __init int vdso_setup(void)
 	if (vdso_fixup_datapage(&v32, &v64))
 		return -1;
 
+	if (vdso_fixup_features(&v32, &v64))
+		return -1;
+
 	if (vdso_fixup_alt_funcs(&v32, &v64))
 		return -1;
 
@@ -714,6 +756,7 @@ void __init vdso_init(void)
 	 * Setup the syscall map in the vDSO
 	 */
 	vdso_setup_syscall_map();
+
 	/*
 	 * Initialize the vDSO images in memory, that is do necessary
 	 * fixups of vDSO symbols, locate trampolines, etc...
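
vdso_fixup_features() runs the same do_feature_fixups() walk over the kernel's own copy of each vDSO image, locating the tables by ELF section name through the pre-existing find_section32()/find_section64() helpers in vdso.c (which this patch calls but does not add). For readers who have not met those helpers, here is a rough sketch of the kind of by-name lookup they perform; the function name and the userspace <elf.h> types are illustrative assumptions, and it presumes the image is a verbatim in-memory copy of the ELF file whose byte order matches the host.

	#include <elf.h>
	#include <string.h>

	/* Illustrative lookup of a section such as "__ftr_fixup" inside an
	 * ELF32 image sitting in memory: walk the section headers, compare
	 * names against the section-header string table, and return the
	 * section contents plus their size. */
	static void *find_elf32_section(void *image, const char *name,
					unsigned long *size)
	{
		Elf32_Ehdr *ehdr = image;
		Elf32_Shdr *sechdrs = (Elf32_Shdr *)((char *)image + ehdr->e_shoff);
		const char *secnames =
			(char *)image + sechdrs[ehdr->e_shstrndx].sh_offset;
		int i;

		for (i = 1; i < ehdr->e_shnum; i++) {
			if (strcmp(secnames + sechdrs[i].sh_name, name) != 0)
				continue;
			*size = sechdrs[i].sh_size;
			return (char *)image + sechdrs[i].sh_offset;
		}
		*size = 0;
		return NULL;
	}

Once the section has been found, a start pointer and a size are all do_feature_fixups() needs: the self-relative offsets inside the table reach the vDSO code correctly even though the vDSO is linked at its own address and is being patched here through the kernel's mapping of its pages.
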
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 6187af2d54c393ea825e523172636ebbcd062427..26e138c4ce1756fe73ac995fc6c86e26cf5328ed 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -32,6 +32,18 @@ SECTIONS
   PROVIDE (_etext = .);
   PROVIDE (etext = .);
 
+  . = ALIGN(8);
+  __ftr_fixup : {
+    *(__ftr_fixup)
+  }
+
+#ifdef CONFIG_PPC64
+  . = ALIGN(8);
+  __fw_ftr_fixup : {
+    *(__fw_ftr_fixup)
+  }
+#endif
+
   /* Other stuff is appended to the text segment: */
   .rodata		: { *(.rodata .rodata.* .gnu.linkonce.r.*) }
   .rodata1		: { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4a2b6dc0960c582b3b0f0f9c8ba73a171195ee0c..2d70f35d50b520bdd2d81bf78d0b2e1eb9a7601b 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -31,6 +31,16 @@ SECTIONS
   PROVIDE (_etext = .);
   PROVIDE (etext = .);
 
+  . = ALIGN(8);
+  __ftr_fixup : {
+    *(__ftr_fixup)
+  }
+
+  . = ALIGN(8);
+  __fw_ftr_fixup : {
+    *(__fw_ftr_fixup)
+  }
+
   /* Other stuff is appended to the text segment: */
   .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
   .rodata1        : { *(.rodata1) }
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 41a640f16bddd681fb8d912514891c20a4615716..27faeca2c7a209438eca4fb394b2ce868ce357c2 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -314,7 +314,7 @@ early_init(int r3, int r4, int r5)
 	 * that depend on which cpu we have.
 	 */
 	spec = identify_cpu(offset);
-	do_feature_fixups(offset, spec->cpu_features,
+	do_feature_fixups(spec->cpu_features,
 			  PTRRELOC(&__start___ftr_fixup),
 			  PTRRELOC(&__stop___ftr_fixup));
 
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index 8e64be0cc47d3d0f7579445af6726a661f50a17b..c89bd58ee2839be66fc98059172523d246d412b4 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -14,6 +14,58 @@
 #  define ASM_CONST(x)		__ASM_CONST(x)
 #endif
 
+
+/*
+ * Feature section common macros
+ *
+ * Note that the entries now contain offsets between the table entry
+ * and the code rather than absolute code pointers, in order to be
+ * usable with the vdso shared library. There is also an assumption
+ * that the offsets will be negative, that is, the fixup table has to
+ * be located after the code it fixes up.
+ */
+#ifdef CONFIG_PPC64
+#ifdef __powerpc64__
+/* 64-bit kernel, 64-bit code */
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
+99:							\
+	.section sect,"a";				\
+	.align 3;					\
+98:						       	\
+	.llong msk;					\
+	.llong val;					\
+	.llong label##b-98b;				\
+	.llong 99b-98b;		 			\
+	.previous
+#else /* __powerpc64__ */
+/* 64-bit kernel, 32-bit code (i.e. vdso32) */
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
+99:							\
+	.section sect,"a";				\
+	.align 3;					\
+98:						       	\
+	.llong msk;					\
+	.llong val;					\
+	.long 0xffffffff;      				\
+	.long label##b-98b;				\
+	.long 0xffffffff;	       			\
+	.long 99b-98b;		 			\
+	.previous
+#endif /* !__powerpc64__ */
+#else /* CONFIG_PPC64 */
+/* 32-bit kernel, 32-bit code */
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
+99:						       	\
+	.section sect,"a";			       	\
+	.align 2;				       	\
+98:						       	\
+	.long msk;				       	\
+	.long val;				       	\
+	.long label##b-98b;			       	\
+	.long 99b-98b;				       	\
+	.previous
+#endif /* !CONFIG_PPC64 */
+
 #ifdef __powerpc64__
 
 /* operations for longs and pointers */
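
All three MAKE_FTR_SECTION_ENTRY variants above must emit something the fixup_entry structure walked by do_feature_fixups() can read back. The first and last cases are direct: .llong or .long fields match the 8- or 4-byte structure members, and .align 3 / .align 2 match the structure's alignment. The vdso32-on-a-64-bit-kernel case is the subtle one: mask and value stay .llong, but each offset is emitted as a 0xffffffff pad word followed by the 32-bit difference, and the big-endian ppc64 kernel reads those eight bytes back as a single signed long. Because the offsets are assumed negative, the pad word is exactly their sign extension. The snippet below is only a host-side check of that byte-level equivalence; read_be64() and the sample offset are illustrative, and the big-endian decoding is done by hand rather than relying on the host's byte order.

	#include <stdint.h>
	#include <stdio.h>

	/* Decode eight bytes as a big-endian signed 64-bit value, the way a
	 * big-endian ppc64 kernel reads one `long` field of fixup_entry. */
	static int64_t read_be64(const uint8_t *p)
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 8; i++)
			v = (v << 8) | p[i];
		return (int64_t)v;
	}

	int main(void)
	{
		int32_t off = -0x1c4;	/* sample negative label##b-98b offset */
		uint8_t field[8];
		int i;

		/* .long 0xffffffff: the pad word the vdso32 variant emits */
		for (i = 0; i < 4; i++)
			field[i] = 0xff;
		/* .long label##b-98b: the 32-bit offset, big-endian */
		for (i = 0; i < 4; i++)
			field[4 + i] = ((uint32_t)off >> (24 - 8 * i)) & 0xff;

		/* Prints "-452 -452": the padded 64-bit field and the original
		 * 32-bit offset agree, so start_off/end_off come out right. */
		printf("%lld %d\n", (long long)read_be64(field), (int)off);
		return 0;
	}
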
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 65faf322ace06140313f8777f9fa7bd3332c9854..02e52d68cbbecffd7abdeaaa0242bdeb3ae575dc 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -92,8 +92,8 @@ extern struct cpu_spec		*cur_cpu_spec;
 extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
 
 extern struct cpu_spec *identify_cpu(unsigned long offset);
-extern void do_feature_fixups(unsigned long offset, unsigned long value,
-			      void *fixup_start, void *fixup_end);
+extern void do_feature_fixups(unsigned long value, void *fixup_start,
+			      void *fixup_end);
 
 #endif /* __ASSEMBLY__ */
 
@@ -435,32 +435,11 @@ static inline int cpu_has_feature(unsigned long feature)
 #ifdef __ASSEMBLY__
 
 #define BEGIN_FTR_SECTION_NESTED(label)	label:
-#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(98)
-
-#ifndef __powerpc64__
-#define END_FTR_SECTION_NESTED(msk, val, label) \
-99:						\
-	.section __ftr_fixup,"a";		\
-	.align 2;				\
-	.long msk;				\
-	.long val;				\
-	.long label##b;				\
-	.long 99b;				\
-	.previous
-#else /* __powerpc64__ */
+#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(97)
 #define END_FTR_SECTION_NESTED(msk, val, label) \
-99:						\
-	.section __ftr_fixup,"a";		\
-	.align 3;				\
-	.llong msk;				\
-	.llong val;				\
-	.llong label##b;				\
-	.llong 99b;	 			\
-	.previous
-#endif /* __powerpc64__ */
-
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
 #define END_FTR_SECTION(msk, val)		\
-	END_FTR_SECTION_NESTED(msk, val, 98)
+	END_FTR_SECTION_NESTED(msk, val, 97)
 
 #define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
 #define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index c16e0a6b9dab8490b031e96722173fdb937f652e..fdf9aff71150412a9d351241b3c10d65652eb38a 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -100,17 +100,12 @@ extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
 
 #else /* __ASSEMBLY__ */
 
-#define BEGIN_FW_FTR_SECTION		96:
-
+#define BEGIN_FW_FTR_SECTION_NESTED(label)	label:
+#define BEGIN_FW_FTR_SECTION			BEGIN_FW_FTR_SECTION_NESTED(97)
+#define END_FW_FTR_SECTION_NESTED(msk, val, label) \
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
 #define END_FW_FTR_SECTION(msk, val)		\
-97:						\
-	.section __fw_ftr_fixup,"a";		\
-	.align 3;				\
-	.llong msk;				\
-	.llong val;				\
-	.llong 96b;				\
-	.llong 97b;				\
-	.previous
+	END_FW_FTR_SECTION_NESTED(msk, val, 97)
 
 #define END_FW_FTR_SECTION_IFSET(msk)	END_FW_FTR_SECTION((msk), (msk))
 #define END_FW_FTR_SECTION_IFCLR(msk)	END_FW_FTR_SECTION((msk), 0)
diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h
index 3b9a8e78680646f3ee64f2582b0c14b26d0a08c9..e3f08cf91486afdc7b5e1b8cd33da1de5b3144a3 100644
--- a/include/asm-powerpc/timex.h
+++ b/include/asm-powerpc/timex.h
@@ -30,13 +30,15 @@ static inline cycles_t get_cycles(void)
 	ret = 0;
 
 	__asm__ __volatile__(
-		"98:	mftb %0\n"
+		"97:	mftb %0\n"
 		"99:\n"
 		".section __ftr_fixup,\"a\"\n"
+		".align 2\n"
+		"98:\n"
 		"	.long %1\n"
 		"	.long 0\n"
-		"	.long 98b\n"
-		"	.long 99b\n"
+		"	.long 97b-98b\n"
+		"	.long 99b-98b\n"
 		".previous"
 		: "=r" (ret) : "i" (CPU_FTR_601));
 #endif