about summary refs log tree commit diff stats
path: root/arch/x86/include/asm
diff options
context:
space:
mode:
author: Jeremy Fitzhardinge <jeremy@goop.org> 2009-01-28 17:35:07 -0500
committer: H. Peter Anvin <hpa@linux.intel.com> 2009-01-30 17:51:45 -0500
commit: da5de7c22eb705be709a57e486e7475a6969b994 (patch)
tree: 29b3655e38fea6bd6ef11437d2fea14b397c8b03 /arch/x86/include/asm
parent: 791bad9d28d405d9397ea0c370ffb7c7bdd2aa6e (diff)
x86/paravirt: use callee-saved convention for pte_val/make_pte/etc
Impact: Optimization.

In the native case, pte_val, make_pte, etc are all just identity functions, so there's no need to clobber a lot of registers over them. (This changes the 32-bit callee-save calling convention to return both EAX and EDX so functions can return 64-bit values.) Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r-- arch/x86/include/asm/paravirt.h | 78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index beb10ecdbe67..2d098b78bc10 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -19,7 +19,7 @@
19#define CLBR_ANY ((1 << 4) - 1) 19#define CLBR_ANY ((1 << 4) - 1)
20 20
21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX) 21#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
22#define CLBR_RET_REG (CLBR_EAX) 22#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
23#define CLBR_SCRATCH (0) 23#define CLBR_SCRATCH (0)
24#else 24#else
25#define CLBR_RAX CLBR_EAX 25#define CLBR_RAX CLBR_EAX
@@ -308,11 +308,11 @@ struct pv_mmu_ops {
308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, 308 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
309 pte_t *ptep, pte_t pte); 309 pte_t *ptep, pte_t pte);
310 310
311 pteval_t (*pte_val)(pte_t); 311 struct paravirt_callee_save pte_val;
312 pte_t (*make_pte)(pteval_t pte); 312 struct paravirt_callee_save make_pte;
313 313
314 pgdval_t (*pgd_val)(pgd_t); 314 struct paravirt_callee_save pgd_val;
315 pgd_t (*make_pgd)(pgdval_t pgd); 315 struct paravirt_callee_save make_pgd;
316 316
317#if PAGETABLE_LEVELS >= 3 317#if PAGETABLE_LEVELS >= 3
318#ifdef CONFIG_X86_PAE 318#ifdef CONFIG_X86_PAE
@@ -327,12 +327,12 @@ struct pv_mmu_ops {
327 327
328 void (*set_pud)(pud_t *pudp, pud_t pudval); 328 void (*set_pud)(pud_t *pudp, pud_t pudval);
329 329
330 pmdval_t (*pmd_val)(pmd_t); 330 struct paravirt_callee_save pmd_val;
331 pmd_t (*make_pmd)(pmdval_t pmd); 331 struct paravirt_callee_save make_pmd;
332 332
333#if PAGETABLE_LEVELS == 4 333#if PAGETABLE_LEVELS == 4
334 pudval_t (*pud_val)(pud_t); 334 struct paravirt_callee_save pud_val;
335 pud_t (*make_pud)(pudval_t pud); 335 struct paravirt_callee_save make_pud;
336 336
337 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval); 337 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
338#endif /* PAGETABLE_LEVELS == 4 */ 338#endif /* PAGETABLE_LEVELS == 4 */
@@ -1155,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
1155 pteval_t ret; 1155 pteval_t ret;
1156 1156
1157 if (sizeof(pteval_t) > sizeof(long)) 1157 if (sizeof(pteval_t) > sizeof(long))
1158 ret = PVOP_CALL2(pteval_t, 1158 ret = PVOP_CALLEE2(pteval_t,
1159 pv_mmu_ops.make_pte, 1159 pv_mmu_ops.make_pte,
1160 val, (u64)val >> 32); 1160 val, (u64)val >> 32);
1161 else 1161 else
1162 ret = PVOP_CALL1(pteval_t, 1162 ret = PVOP_CALLEE1(pteval_t,
1163 pv_mmu_ops.make_pte, 1163 pv_mmu_ops.make_pte,
1164 val); 1164 val);
1165 1165
1166 return (pte_t) { .pte = ret }; 1166 return (pte_t) { .pte = ret };
1167} 1167}
@@ -1171,11 +1171,11 @@ static inline pteval_t pte_val(pte_t pte)
1171 pteval_t ret; 1171 pteval_t ret;
1172 1172
1173 if (sizeof(pteval_t) > sizeof(long)) 1173 if (sizeof(pteval_t) > sizeof(long))
1174 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val, 1174 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
1175 pte.pte, (u64)pte.pte >> 32); 1175 pte.pte, (u64)pte.pte >> 32);
1176 else 1176 else
1177 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, 1177 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
1178 pte.pte); 1178 pte.pte);
1179 1179
1180 return ret; 1180 return ret;
1181} 1181}
@@ -1185,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
1185 pgdval_t ret; 1185 pgdval_t ret;
1186 1186
1187 if (sizeof(pgdval_t) > sizeof(long)) 1187 if (sizeof(pgdval_t) > sizeof(long))
1188 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd, 1188 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
1189 val, (u64)val >> 32); 1189 val, (u64)val >> 32);
1190 else 1190 else
1191 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, 1191 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
1192 val); 1192 val);
1193 1193
1194 return (pgd_t) { ret }; 1194 return (pgd_t) { ret };
1195} 1195}
@@ -1199,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
1199 pgdval_t ret; 1199 pgdval_t ret;
1200 1200
1201 if (sizeof(pgdval_t) > sizeof(long)) 1201 if (sizeof(pgdval_t) > sizeof(long))
1202 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val, 1202 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
1203 pgd.pgd, (u64)pgd.pgd >> 32); 1203 pgd.pgd, (u64)pgd.pgd >> 32);
1204 else 1204 else
1205 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, 1205 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
1206 pgd.pgd); 1206 pgd.pgd);
1207 1207
1208 return ret; 1208 return ret;
1209} 1209}
@@ -1267,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
1267 pmdval_t ret; 1267 pmdval_t ret;
1268 1268
1269 if (sizeof(pmdval_t) > sizeof(long)) 1269 if (sizeof(pmdval_t) > sizeof(long))
1270 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd, 1270 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
1271 val, (u64)val >> 32); 1271 val, (u64)val >> 32);
1272 else 1272 else
1273 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, 1273 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
1274 val); 1274 val);
1275 1275
1276 return (pmd_t) { ret }; 1276 return (pmd_t) { ret };
1277} 1277}
@@ -1281,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
1281 pmdval_t ret; 1281 pmdval_t ret;
1282 1282
1283 if (sizeof(pmdval_t) > sizeof(long)) 1283 if (sizeof(pmdval_t) > sizeof(long))
1284 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val, 1284 ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
1285 pmd.pmd, (u64)pmd.pmd >> 32); 1285 pmd.pmd, (u64)pmd.pmd >> 32);
1286 else 1286 else
1287 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, 1287 ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
1288 pmd.pmd); 1288 pmd.pmd);
1289 1289
1290 return ret; 1290 return ret;
1291} 1291}
@@ -1307,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
1307 pudval_t ret; 1307 pudval_t ret;
1308 1308
1309 if (sizeof(pudval_t) > sizeof(long)) 1309 if (sizeof(pudval_t) > sizeof(long))
1310 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud, 1310 ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
1311 val, (u64)val >> 32); 1311 val, (u64)val >> 32);
1312 else 1312 else
1313 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud, 1313 ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
1314 val); 1314 val);
1315 1315
1316 return (pud_t) { ret }; 1316 return (pud_t) { ret };
1317} 1317}