Lines 2378-2384: unsigned long expandable_stack_area(struct vm_area_struct *vma, ...)

```diff
 	if (!next)
 		goto out;

-	if (next->vm_flags & VM_GROWSUP) {
+	/* see comment in !CONFIG_STACK_GROWSUP */
+	if ((next->vm_flags & VM_GROWSUP) || !(next->vm_flags & (VM_WRITE|VM_READ))) {
 		guard_gap = min(guard_gap, next->vm_start - address);
 		goto out;
 	}
```
|
Lines 2457-2465: unsigned long expandable_stack_area(struct vm_area_struct *vma, ...)

```diff
 	 * That's only ok if it's the same stack mapping
 	 * that has gotten split or there is sufficient gap
 	 * between mappings
+	 *
+	 * Please note that some applications (e.g. Java) punch
+	 * MAP_FIXED inside the stack and then PROT_NONE it
+	 * to mimic a stack guard which will clash with our protection,
+	 * so pretend that PROT_NONE vmas are OK
 	 */
-	if (prev->vm_flags & VM_GROWSDOWN) {
+	if ((prev->vm_flags & VM_GROWSDOWN) || !(prev->vm_flags & (VM_WRITE|VM_READ))) {
 		guard_gap = min(guard_gap, address - prev->vm_end);
 		goto out;
 	}
-
```