/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ELF_H
#define _ASM_X86_ELF_H

/*
 * ELF register definitions.
 */
#include <linux/thread_info.h>

#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
#include <asm/fsgsbase.h>

typedef unsigned long elf_greg_t;

#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef struct user_i387_struct elf_fpregset_t;
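
/*
 * Editor's note: ELF_NGREG is the number of elf_greg_t-sized slots in
 * struct user_regs_struct, so elf_gregset_t is exactly the general-purpose
 * register block that ends up in an NT_PRSTATUS core-dump note.  As a
 * cross-check, the ELF_CORE_COPY_REGS macros below fill slots 0..16 on
 * 32-bit (17 registers) and 0..26 on 64-bit (27 registers), matching the
 * respective user_regs_struct layouts.
 */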

#ifdef __i386__

#define R_386_NONE	0
#define R_386_32	1
#define R_386_PC32	2
#define R_386_GOT32	3
#define R_386_PLT32	4
#define R_386_COPY	5
#define R_386_GLOB_DAT	6
#define R_386_JMP_SLOT	7
#define R_386_RELATIVE	8
#define R_386_GOTOFF	9
#define R_386_GOTPC	10
#define R_386_NUM	11

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS32
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_386

#else

/* x86-64 relocation types */
#define R_X86_64_NONE		0	/* No reloc */
#define R_X86_64_64		1	/* Direct 64 bit  */
#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
#define R_X86_64_PLT32		4	/* 32 bit PLT address */
#define R_X86_64_COPY		5	/* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
#define R_X86_64_RELATIVE	8	/* Adjust by program base */
#define R_X86_64_GOTPCREL	9	/* 32 bit signed pc relative
					   offset to GOT */
#define R_X86_64_32		10	/* Direct 32 bit zero extended */
#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
#define R_X86_64_16		12	/* Direct 16 bit zero extended */
#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
#define R_X86_64_8		14	/* Direct 8 bit sign extended */
#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */
#define R_X86_64_PC64		24	/* Place relative 64-bit signed */

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_X86_64

#endif

#include <asm/vdso.h>

#ifdef CONFIG_X86_64
extern unsigned int vdso64_enabled;
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
extern unsigned int vdso32_enabled;
#endif

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch_ia32(x) \
	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))

#include <asm/processor.h>

#ifdef CONFIG_X86_32
#include <asm/desc.h>

#define elf_check_arch(x)	elf_check_arch_ia32(x)

/* The SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts,
   %edx contains a pointer to a function which might be registered using
   `atexit'.  This provides a means for the dynamic linker to call DT_FINI
   functions for shared libraries that have been loaded before the code runs.

   A value of 0 tells us there is no such handler.

   We might as well make sure everything else is cleared too (except for
   %esp), just to make things more deterministic.
 */
#define ELF_PLAT_INIT(_r, load_addr)		\
	do {					\
	_r->bx = 0; _r->cx = 0; _r->dx = 0;	\
	_r->si = 0; _r->di = 0; _r->bp = 0;	\
	_r->ax = 0;				\
} while (0)
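
/*
 * Rough sketch of how this is consumed (editor's note, not part of this
 * header): the generic ELF loader in fs/binfmt_elf.c invokes it once the
 * image is mapped, roughly as
 *
 *	ELF_PLAT_INIT(regs, reloc_func_desc);
 *	...
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * so a freshly exec'ed i386 task enters userspace with the registers above
 * zeroed and only the stack and instruction pointers set by start_thread().
 */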

/*
 * regs is struct pt_regs, while pr_reg is elf_gregset_t, which is laid
 * out like struct user_regs_struct; they are different types.
 */

#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
do {						\
	pr_reg[0] = regs->bx;			\
	pr_reg[1] = regs->cx;			\
	pr_reg[2] = regs->dx;			\
	pr_reg[3] = regs->si;			\
	pr_reg[4] = regs->di;			\
	pr_reg[5] = regs->bp;			\
	pr_reg[6] = regs->ax;			\
	pr_reg[7] = regs->ds;			\
	pr_reg[8] = regs->es;			\
	pr_reg[9] = regs->fs;			\
	savesegment(gs, pr_reg[10]);		\
	pr_reg[11] = regs->orig_ax;		\
	pr_reg[12] = regs->ip;			\
	pr_reg[13] = regs->cs;			\
	pr_reg[14] = regs->flags;		\
	pr_reg[15] = regs->sp;			\
	pr_reg[16] = regs->ss;			\
} while (0);
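
/*
 * Editor's note: the slot order above mirrors the i386 struct
 * user_regs_struct (ebx, ecx, edx, esi, edi, ebp, eax, xds, xes, xfs,
 * xgs, orig_eax, eip, xcs, eflags, esp, xss), i.e. exactly the
 * ELF_NGREG = 17 entries of elf_gregset_t in this configuration.
 */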

#define ELF_PLATFORM	(utsname()->machine)
#define set_personality_64bit()	do { } while (0)

#else /* CONFIG_X86_32 */

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x)			\
	((x)->e_machine == EM_X86_64)

#define compat_elf_check_arch(x)					\
	(elf_check_arch_ia32(x) ||					\
	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))

static inline void elf_common_init(struct thread_struct *t,
				   struct pt_regs *regs, const u16 ds)
{
	/* ax gets execve's return value. */
	/*regs->ax = */ regs->bx = regs->cx = regs->dx = 0;
	regs->si = regs->di = regs->bp = 0;
	regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
	regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
	t->fsbase = t->gsbase = 0;
	t->fsindex = t->gsindex = 0;
	t->ds = t->es = ds;
}
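
/*
 * Editor's note: ax is deliberately left untouched because it carries
 * execve()'s return value (0 in the new image); everything else, including
 * the FS/GS bases and selectors, starts out zeroed, with ds/es set by the
 * callers below (0 for native 64-bit tasks, __USER_DS for compat tasks).
 */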

#define ELF_PLAT_INIT(_r, load_addr)		\
	elf_common_init(&current->thread, _r, 0)

#define COMPAT_ELF_PLAT_INIT(regs, load_addr)		\
	elf_common_init(&current->thread, regs, __USER_DS)

void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32);
#define COMPAT_START_THREAD(ex, regs, new_ip, new_sp)	\
	compat_start_thread(regs, new_ip, new_sp, ex->e_machine == EM_X86_64)

void set_personality_ia32(bool);
#define COMPAT_SET_PERSONALITY(ex)			\
	set_personality_ia32((ex).e_machine == EM_X86_64)

#define COMPAT_ELF_PLATFORM		("i686")

/*
 * regs is struct pt_regs, while pr_reg is elf_gregset_t, which is laid
 * out like struct user_regs_struct; they are different types.  Assumes
 * current is the process getting dumped.
 */

#define ELF_CORE_COPY_REGS(pr_reg, regs)			\
do {								\
	unsigned v;						\
	(pr_reg)[0] = (regs)->r15;				\
	(pr_reg)[1] = (regs)->r14;				\
	(pr_reg)[2] = (regs)->r13;				\
	(pr_reg)[3] = (regs)->r12;				\
	(pr_reg)[4] = (regs)->bp;				\
	(pr_reg)[5] = (regs)->bx;				\
	(pr_reg)[6] = (regs)->r11;				\
	(pr_reg)[7] = (regs)->r10;				\
	(pr_reg)[8] = (regs)->r9;				\
	(pr_reg)[9] = (regs)->r8;				\
	(pr_reg)[10] = (regs)->ax;				\
	(pr_reg)[11] = (regs)->cx;				\
	(pr_reg)[12] = (regs)->dx;				\
	(pr_reg)[13] = (regs)->si;				\
	(pr_reg)[14] = (regs)->di;				\
	(pr_reg)[15] = (regs)->orig_ax;				\
	(pr_reg)[16] = (regs)->ip;				\
	(pr_reg)[17] = (regs)->cs;				\
	(pr_reg)[18] = (regs)->flags;				\
	(pr_reg)[19] = (regs)->sp;				\
	(pr_reg)[20] = (regs)->ss;				\
	(pr_reg)[21] = x86_fsbase_read_cpu();			\
	(pr_reg)[22] = x86_gsbase_read_cpu_inactive();		\
	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
	asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v;	\
} while (0);
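
/*
 * Editor's note: slots 21 and 22 read FSBASE and GSBASE through the
 * helpers from <asm/fsgsbase.h> (included above) rather than from
 * pt_regs, since the bases are not part of the trap frame; this only
 * works because, as noted above, current is the task being dumped and
 * its register state is live on this CPU.
 */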

/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM	("x86_64")
extern void set_personality_64bit(void);
extern int force_personality32;

#endif /* !CONFIG_X86_32 */

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is above 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
						  (DEFAULT_MAP_WINDOW / 3 * 2))
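
/*
 * Worked example (editor's note): with the usual 47-bit user address
 * space, DEFAULT_MAP_WINDOW is ((1UL << 47) - PAGE_SIZE) = 0x7ffffffff000,
 * so ELF_ET_DYN_BASE comes out to roughly 0x555555554aaa; after page
 * alignment by the loader this is why non-randomized 64-bit PIE
 * executables typically map near 0x555555554000, while ia32 tasks use a
 * fixed 4 MiB base.
 */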

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. This could be done in user space,
   but it's not easy, and we've already done it here. */

#define ELF_HWCAP		(boot_cpu_data.x86_capability[CPUID_1_EDX])

extern u32 elf_hwcap2;

/*
 * HWCAP2 supplies a mask of kernel-enabled CPU features, so that
 * the application can discover which of them it can safely use.
 * The bits are defined in uapi/asm/hwcap2.h.
 */
#define ELF_HWCAP2		(elf_hwcap2)

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization. This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we have only optimizations for the Intel generations,
   but that could change... */

#define SET_PERSONALITY(ex) set_personality_64bit()

/*
 * An executable for which elf_read_implies_exec() returns TRUE will
 * have the READ_IMPLIES_EXEC personality flag set automatically.
 *
 * The decision process for determining the result is:
 *
 *                 CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
 * ELF:                 |            |                  |                |
 * ---------------------|------------|------------------|----------------|
 * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
 * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
 * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
 *
 *  exec-all  : all PROT_READ user mappings are executable, except when
 *              backed by files on a noexec-filesystem.
 *  exec-none : only PROT_EXEC user mappings are executable.
 *  exec-stack: only the stack and PROT_EXEC user mappings are executable.
 *
 *  *this column has no architectural effect: NX markings are ignored by
 *   hardware, but may have behavioral effects when "wants X" collides with
 *   "cannot be X" constraints in memory permission flags, as in
 *   https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
 *
 */
#define elf_read_implies_exec(ex, executable_stack)	\
	(mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT)
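
/*
 * Editor's note: reading the macro against the table above, the kernel
 * only sets READ_IMPLIES_EXEC for ia32 binaries that lack a PT_GNU_STACK
 * program header (EXSTACK_DEFAULT); 64-bit binaries never get it,
 * regardless of their stack markings.
 */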

struct task_struct;

#define	ARCH_DLINFO_IA32					\
do {								\
	if (VDSO_CURRENT_BASE) {				\
		NEW_AUX_ENT(AT_SYSINFO,	VDSO_ENTRY);		\
		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
	}							\
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size());	\
} while (0)

/*
 * True on X86_32 or when emulating IA32 on X86_64
 */
static inline int mmap_is_ia32(void)
{
	return IS_ENABLED(CONFIG_X86_32) ||
	       (IS_ENABLED(CONFIG_COMPAT) &&
		test_thread_flag(TIF_ADDR32));
}
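
/*
 * Editor's note: this predicate drives most of the 32-bit/64-bit policy
 * split in this file; for instance, ELF_ET_DYN_BASE above and
 * STACK_RND_MASK below both key off mmap_is_ia32() for the current task.
 */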

extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
extern unsigned long get_sigframe_size(void);

#ifdef CONFIG_X86_32

#define __STACK_RND_MASK(is32bit) (0x7ff)
#define STACK_RND_MASK (0x7ff)

#define ARCH_DLINFO		ARCH_DLINFO_IA32

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */

#else /* CONFIG_X86_32 */

/* 1GB for 64bit, 8MB for 32bit */
#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())

#define ARCH_DLINFO							\
do {									\
	if (vdso64_enabled)						\
		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
			    (unsigned long __force)current->mm->context.vdso); \
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size());		\
} while (0)

/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
#define ARCH_DLINFO_X32							\
do {									\
	if (vdso64_enabled)						\
		NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
			    (unsigned long __force)current->mm->context.vdso); \
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size());		\
} while (0)

#define AT_SYSINFO		32

#define COMPAT_ARCH_DLINFO						\
if (exec->e_machine == EM_X86_64)					\
	ARCH_DLINFO_X32;						\
else if (IS_ENABLED(CONFIG_IA32_EMULATION))				\
	ARCH_DLINFO_IA32

#define COMPAT_ELF_ET_DYN_BASE	(TASK_UNMAPPED_BASE + 0x1000000)

#endif /* !CONFIG_X86_32 */

#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)

#define VDSO_ENTRY							\
	((unsigned long)current->mm->context.vdso +			\
	 vdso_image_32.sym___kernel_vsyscall)
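
/*
 * Editor's note: AT_SYSINFO is only emitted through ARCH_DLINFO_IA32
 * above, and VDSO_ENTRY resolves it to __kernel_vsyscall inside the
 * 32-bit vDSO image, which is the fast system-call entry point that the
 * ia32 C library jumps through.
 */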

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
					      int uses_interp, bool x32);
#define COMPAT_ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter)	\
	compat_arch_setup_additional_pages(bprm, interpreter,		\
					   (ex->e_machine == EM_X86_64))

extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs);

/* Do not change the values. See get_align_mask() */
enum align_flags {
	ALIGN_VA_32	= BIT(0),
	ALIGN_VA_64	= BIT(1),
};

struct va_alignment {
	int flags;
	unsigned long mask;
	unsigned long bits;
} ____cacheline_aligned;

extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
#endif /* _ASM_X86_ELF_H */