1 |
/* |
2 |
* fs/realpath.c |
3 |
* |
4 |
* Get the canonicalized absolute pathnames. The basis for SAKURA and TOMOYO. |
5 |
* |
6 |
* Copyright (C) 2005-2008 NTT DATA CORPORATION |
7 |
* |
8 |
* Version: 1.5.3-pre 2008/01/03 |
9 |
* |
10 |
* This file is applicable to both 2.4.30 and 2.6.11 and later. |
11 |
* See README.ccs for ChangeLog. |
12 |
* |
13 |
*/ |
14 |
#include <linux/string.h> |
15 |
#include <linux/mm.h> |
16 |
#include <linux/utime.h> |
17 |
#include <linux/file.h> |
18 |
#include <linux/smp_lock.h> |
19 |
#include <linux/module.h> |
20 |
#include <linux/slab.h> |
21 |
#include <asm/uaccess.h> |
22 |
#include <asm/atomic.h> |
23 |
#include <linux/version.h> |
24 |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) |
25 |
#include <linux/namei.h> |
26 |
#include <linux/mount.h> |
27 |
static const int lookup_flags = LOOKUP_FOLLOW; |
28 |
#else |
29 |
static const int lookup_flags = LOOKUP_FOLLOW | LOOKUP_POSITIVE; |
30 |
#endif |
31 |
#include <linux/realpath.h> |
32 |
#include <linux/proc_fs.h> |
33 |
#include <linux/ccs_common.h> |
34 |
|
35 |
/* Nonzero once /sbin/init has started; defined elsewhere in the CCS patch.
 * Before that point, allocation failures are fatal (see panic() calls below). */
extern int sbin_init_started;
36 |
|
37 |
/***** realpath handler *****/ |
38 |
|
39 |
/*
 * GetAbsolutePath - return the path of a dentry but ignores chroot'ed root.
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Returns 0 on success, -ENOMEM if the buffer is too small.
 *
 * Caller holds the dcache_lock.
 * Based on __d_path() in fs/dcache.c
 *
 * If dentry is a directory, trailing '/' is appended.
 * Characters other than ' ' < c < 127 are converted to \ooo style octal string.
 * Character \ is converted to \\ string.
 */
static int GetAbsolutePath(struct dentry *dentry, struct vfsmount *vfsmnt, char *buffer, int buflen)
{
	char *start = buffer;
	char *end = buffer + buflen;
	/* Remember whether to append a trailing '/' for the deepest component. */
	bool is_dir = (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode));

	/* Reject buffers too small to hold a useful pathname. */
	if (buflen < 256) goto out;

	/* The string is built backwards, starting from the end of the buffer. */
	*--end = '\0';
	buflen--;

	for (;;) {
		struct dentry *parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			spin_lock(&vfsmount_lock);
#endif
			if (vfsmnt->mnt_parent == vfsmnt) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
				spin_unlock(&vfsmount_lock);
#endif
				break;
			}
			/* Cross the mount point and continue walking upwards. */
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			spin_unlock(&vfsmount_lock);
#endif
			continue;
		}
		if (is_dir) {
			/* Emit the trailing '/' exactly once. */
			is_dir = 0; *--end = '/'; buflen--;
		}
		parent = dentry->d_parent;
		{
			const char *sp = dentry->d_name.name;
			const char *cp = sp + dentry->d_name.len - 1;
			unsigned char c;

			/* Exception: Use /proc/self/ rather than /proc/\$/ for current process. */
			if (IS_ROOT(parent) && *sp > '0' && *sp <= '9' && parent->d_sb && parent->d_sb->s_magic == PROC_SUPER_MAGIC) {
				char *ep;
				const pid_t pid = (pid_t) simple_strtoul(sp, &ep, 10);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
				if (!*ep && pid == current->tgid) { sp = "self"; cp = sp + 3; }
#else
				if (!*ep && pid == current->pid) { sp = "self"; cp = sp + 3; }
#endif
			}

			/* Copy this component backwards, escaping as documented above. */
			while (sp <= cp) {
				c = * (unsigned char *) cp;
				if (c == '\\') {
					buflen -= 2;
					if (buflen < 0) goto out;
					*--end = '\\';
					*--end = '\\';
				} else if (c > ' ' && c < 127) {
					if (--buflen < 0) goto out;
					*--end = (char) c;
				} else {
					/* Non-printable byte: emit as \ooo octal. */
					buflen -= 4;
					if (buflen < 0) goto out;
					*--end = (c & 7) + '0';
					*--end = ((c >> 3) & 7) + '0';
					*--end = (c >> 6) + '0';
					*--end = '\\';
				}
				cp--;
			}
			if (--buflen < 0) goto out;
			*--end = '/';
		}
		dentry = parent;
	}
	/* Skip the leading '/' before prepending the root dentry's own name. */
	if (*end == '/') { buflen++; end++; }
	{
		const char *sp = dentry->d_name.name;
		const char *cp = sp + dentry->d_name.len - 1;
		unsigned char c;
		/* Prepend the root dentry's name, with the same escaping rules. */
		while (sp <= cp) {
			c = * (unsigned char *) cp;
			if (c == '\\') {
				buflen -= 2;
				if (buflen < 0) goto out;
				*--end = '\\';
				*--end = '\\';
			} else if (c > ' ' && c < 127) {
				if (--buflen < 0) goto out;
				*--end = (char) c;
			} else {
				buflen -= 4;
				if (buflen < 0) goto out;
				*--end = (c & 7) + '0';
				*--end = ((c >> 3) & 7) + '0';
				*--end = (c >> 6) + '0';
				*--end = '\\';
			}
			cp--;
		}
	}
	/* Move the pathname to the top of the buffer. */
	memmove(start, end, strlen(end) + 1);
	return 0;
 out:
	return -ENOMEM;
}
162 |
|
163 |
/* Returns realpath(3) of the given dentry but ignores chroot'ed root. */ |
164 |
int realpath_from_dentry2(struct dentry *dentry, struct vfsmount *mnt, char *newname, int newname_len) |
165 |
{ |
166 |
int error; |
167 |
struct dentry *d_dentry; |
168 |
struct vfsmount *d_mnt; |
169 |
if (!dentry || !mnt || !newname || newname_len <= 0) return -EINVAL; |
170 |
d_dentry = dget(dentry); |
171 |
d_mnt = mntget(mnt); |
172 |
/***** CRITICAL SECTION START *****/ |
173 |
spin_lock(&dcache_lock); |
174 |
error = GetAbsolutePath(d_dentry, d_mnt, newname, newname_len); |
175 |
spin_unlock(&dcache_lock); |
176 |
/***** CRITICAL SECTION END *****/ |
177 |
dput(d_dentry); |
178 |
mntput(d_mnt); |
179 |
return error; |
180 |
} |
181 |
|
182 |
/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */ |
183 |
/* These functions use ccs_alloc(), so caller must ccs_free() if these functions didn't return NULL. */ |
184 |
char *realpath_from_dentry(struct dentry *dentry, struct vfsmount *mnt) |
185 |
{ |
186 |
char *buf = ccs_alloc(CCS_MAX_PATHNAME_LEN); |
187 |
if (buf && realpath_from_dentry2(dentry, mnt, buf, CCS_MAX_PATHNAME_LEN - 1) == 0) return buf; |
188 |
ccs_free(buf); |
189 |
return NULL; |
190 |
} |
191 |
|
192 |
char *realpath(const char *pathname) |
193 |
{ |
194 |
struct nameidata nd; |
195 |
if (pathname && path_lookup(pathname, lookup_flags, &nd) == 0) { |
196 |
char *buf = realpath_from_dentry(nd.dentry, nd.mnt); |
197 |
path_release(&nd); |
198 |
return buf; |
199 |
} |
200 |
return NULL; |
201 |
} |
202 |
|
203 |
char *realpath_nofollow(const char *pathname) |
204 |
{ |
205 |
struct nameidata nd; |
206 |
if (pathname && path_lookup(pathname, lookup_flags ^ LOOKUP_FOLLOW, &nd) == 0) { |
207 |
char *buf = realpath_from_dentry(nd.dentry, nd.mnt); |
208 |
path_release(&nd); |
209 |
return buf; |
210 |
} |
211 |
return NULL; |
212 |
} |
213 |
|
214 |
/***** Private memory allocator. *****/ |
215 |
|
216 |
/*
 * Round up an integer so that the returned pointers are appropriately aligned.
 * FIXME: Are there more requirements that is needed for assigning value atomically?
 */
static inline unsigned int ROUNDUP(const unsigned int size) {
	/* Align to the wider of a pointer and a long. */
	const unsigned int align = (sizeof(void *) >= sizeof(long))
		? sizeof(void *) : sizeof(long);
	return ((size + align - 1) / align) * align;
}
227 |
|
228 |
/* Total bytes of pages reserved by alloc_element() (grows in PAGE_SIZE steps). */
static unsigned int allocated_memory_for_elements = 0;

/* Report how much memory alloc_element() has reserved so far. */
unsigned int GetMemoryUsedForElements(void)
{
	return allocated_memory_for_elements;
}
234 |
|
235 |
/* Allocate memory for structures. The RAM is chunked, so NEVER try to kfree() the returned pointer. */
void *alloc_element(const unsigned int size)
{
	static DEFINE_MUTEX(lock);
	/* Current page being carved up; buf_used_len bytes already handed out. */
	static char *buf = NULL;
	static unsigned int buf_used_len = PAGE_SIZE; /* Forces a fresh page on first call. */
	char *ptr = NULL;
	/* Round up so consecutive allocations stay word-aligned (see ROUNDUP). */
	const unsigned int word_aligned_size = ROUNDUP(size);
	if (word_aligned_size > PAGE_SIZE) return NULL;
	mutex_lock(&lock);
	if (buf_used_len + word_aligned_size > PAGE_SIZE) {
		/* Not enough room left in the current page; start a new one. */
		if ((ptr = kmalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) {
			printk("ERROR: Out of memory for alloc_element().\n");
			/* Running out of memory before /sbin/init starts is fatal. */
			if (!sbin_init_started) panic("MAC Initialization failed.\n");
		} else {
			memset(ptr, 0, PAGE_SIZE);
			buf = ptr;
			allocated_memory_for_elements += PAGE_SIZE;
			buf_used_len = word_aligned_size;
			ptr = buf;
		}
	} else if (word_aligned_size) {
		int i;
		ptr = buf + buf_used_len;
		buf_used_len += word_aligned_size;
		/* The page was zeroed when allocated; scrub and warn if corrupted. */
		for (i = 0; i < word_aligned_size; i++) {
			if (ptr[i]) {
				printk(KERN_ERR "WARNING: Reserved memory was tainted! The system might go wrong.\n");
				ptr[i] = '\0';
			}
		}
	}
	mutex_unlock(&lock);
	return ptr;
}
270 |
|
271 |
/***** Shared memory allocator. *****/ |
272 |
|
273 |
/* Total bytes of pages reserved by SaveName() (grows in PAGE_SIZE steps). */
static unsigned int allocated_memory_for_savename = 0;

/* Report how much memory SaveName() has reserved so far. */
unsigned int GetMemoryUsedForSaveName(void)
{
	return allocated_memory_for_savename;
}
279 |
|
280 |
/* Number of buckets in the saved-name hash table. */
#define MAX_HASH 256

/* An interned name; never modified or freed once registered. */
struct name_entry {
	struct list1_head list;  /* Chain in one name_list[] bucket. */
	struct path_info entry;  /* Saved name plus data filled by fill_path_info(). */
};

/* Records the unused tail of a page obtained by SaveName(). */
struct free_memory_block_list {
	struct list_head list;
	char *ptr; /* Pointer to a free area. */
	int len; /* Length of the area. */
};

static struct list1_head name_list[MAX_HASH]; /* The list of names. */
294 |
|
295 |
/* Keep the given name on the RAM. The RAM is shared, so NEVER try to modify or kfree() the returned name. */
const struct path_info *SaveName(const char *name)
{
	static LIST_HEAD(fmb_list);  /* Free space left over in pages obtained below. */
	static DEFINE_MUTEX(lock);   /* Serializes all lookups and insertions. */
	struct name_entry *ptr;
	unsigned int hash;
	struct free_memory_block_list *fmb;
	int len;
	char *cp;
	if (!name) return NULL;
	len = strlen(name) + 1; /* Include the trailing '\0'. */
	if (len > CCS_MAX_PATHNAME_LEN) {
		printk("ERROR: Name too long for SaveName().\n");
		return NULL;
	}
	hash = full_name_hash((const unsigned char *) name, len - 1);
	mutex_lock(&lock);
	/* Return the existing entry if this name was saved before. */
	list1_for_each_entry(ptr, &name_list[hash % MAX_HASH], list) {
		if (hash == ptr->entry.hash && strcmp(name, ptr->entry.name) == 0) goto out;
	}
	/* Find a free block large enough to hold the name. */
	list_for_each_entry(fmb, &fmb_list, list) {
		if (len <= fmb->len) goto ready;
	}
	/* No room anywhere: grab a fresh page and register it as a free block. */
	cp = kmalloc(PAGE_SIZE, GFP_KERNEL);
	fmb = kmalloc(sizeof(*fmb), GFP_KERNEL);
	if (!cp || !fmb) {
		kfree(cp);
		kfree(fmb);
		printk("ERROR: Out of memory for SaveName().\n");
		/* Running out of memory before /sbin/init starts is fatal. */
		if (!sbin_init_started) panic("MAC Initialization failed.\n");
		ptr = NULL;
		goto out;
	}
	memset(cp, 0, PAGE_SIZE);
	allocated_memory_for_savename += PAGE_SIZE;
	list_add(&fmb->list, &fmb_list);
	fmb->ptr = cp;
	fmb->len = PAGE_SIZE;
 ready:
	/* Carve the name out of the free block and index it. */
	ptr = alloc_element(sizeof(*ptr));
	if (!ptr) goto out;
	ptr->entry.name = fmb->ptr;
	memmove(fmb->ptr, name, len);
	fill_path_info(&ptr->entry);
	fmb->ptr += len;
	fmb->len -= len;
	list1_add_tail_mb(&ptr->list, &name_list[hash % MAX_HASH]);
	/* A fully consumed free block is discarded. */
	if (fmb->len == 0) {
		list_del(&fmb->list);
		kfree(fmb);
	}
 out:
	mutex_unlock(&lock);
	return ptr ? &ptr->entry : NULL;
}
351 |
|
352 |
/***** Dynamic memory allocator. *****/ |
353 |
|
354 |
/* Bookkeeping record for one ccs_alloc() allocation. */
struct cache_entry {
	struct list_head list; /* Chain in cache_list. */
	void *ptr;             /* The kmalloc()ed region handed to the caller. */
	int size;              /* Accounted size of that region. */
};

/* Slab cache for struct cache_entry; the cache type was renamed in 2.6.20. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static struct kmem_cache *ccs_cachep = NULL;
#else
static kmem_cache_t *ccs_cachep = NULL;
#endif
365 |
|
366 |
/* Boot-time setup: create the slab cache, initialize the name hash table,
 * and register the kernel domain. Panics on any failure. */
void __init realpath_Init(void)
{
	int i;
	/* kmem_cache_create() lost its destructor argument in 2.6.23. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
	ccs_cachep = kmem_cache_create("ccs_cache", sizeof(struct cache_entry), 0, 0, NULL);
#else
	ccs_cachep = kmem_cache_create("ccs_cache", sizeof(struct cache_entry), 0, 0, NULL, NULL);
#endif
	if (!ccs_cachep) panic("Can't create cache.\n");
	for (i = 0; i < MAX_HASH; i++) {
		INIT_LIST1_HEAD(&name_list[i]);
	}
	/* SaveName() assumes any name fits within a single page. */
	if (CCS_MAX_PATHNAME_LEN > PAGE_SIZE) panic("Bad size.");
	/* Register the kernel domain and verify it can be looked up again. */
	INIT_LIST1_HEAD(&KERNEL_DOMAIN.acl_info_list);
	KERNEL_DOMAIN.domainname = SaveName(ROOT_NAME);
	list1_add_tail_mb(&KERNEL_DOMAIN.list, &domain_list);
	if (FindDomain(ROOT_NAME) != &KERNEL_DOMAIN) panic("Can't register KERNEL_DOMAIN");
}
384 |
|
385 |
/* All live ccs_alloc() allocations, guarded by cache_list_lock. */
static LIST_HEAD(cache_list);
static spinlock_t cache_list_lock = SPIN_LOCK_UNLOCKED;
/* Total accounted bytes currently held by ccs_alloc() callers. */
static unsigned int dynamic_memory_size = 0;

/* Report how much dynamically allocated memory is currently in use. */
unsigned int GetMemoryUsedForDynamic(void)
{
	return dynamic_memory_size;
}
393 |
|
394 |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) |
395 |
/*
 * Approximate the slab size that kmalloc() would use for @size on 2.4
 * kernels (which lack ksize()): the smallest power-of-two block,
 * starting from the minimum slab size, that holds the request.
 */
static int round2(size_t size)
{
#if PAGE_SIZE == 4096
	size_t block = 32;
#else
	size_t block = 64;
#endif
	while (block < size)
		block <<= 1;
	return block;
}
405 |
#endif |
406 |
|
407 |
/* Allocate zeroed, accounting-tracked memory. Release with ccs_free(). */
void *ccs_alloc(const size_t size)
{
	void *ret = kmalloc(size, GFP_KERNEL);
	if (ret) {
		struct cache_entry *new_entry = kmem_cache_alloc(ccs_cachep, GFP_KERNEL);
		if (!new_entry) {
			/* Without a tracking entry, ccs_free() would reject this pointer. */
			kfree(ret); ret = NULL;
		} else {
			INIT_LIST_HEAD(&new_entry->list);
			new_entry->ptr = ret;
			/* 2.4 kernels lack ksize(); estimate the slab size instead. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			new_entry->size = ksize(ret);
#else
			new_entry->size = round2(size);
#endif
			spin_lock(&cache_list_lock);
			list_add_tail(&new_entry->list, &cache_list);
			dynamic_memory_size += new_entry->size;
			spin_unlock(&cache_list_lock);
			memset(ret, 0, size);
		}
	}
	return ret;
}
431 |
|
432 |
/* Release memory obtained from ccs_alloc(). Passing NULL is a no-op. */
void ccs_free(const void *p)
{
	struct list_head *v;
	struct cache_entry *entry = NULL;
	if (!p) return;
	/* Find and unlink the tracking entry for this pointer;
	 * entry stays NULL if the pointer is unknown. */
	spin_lock(&cache_list_lock);
	list_for_each(v, &cache_list) {
		entry = list_entry(v, struct cache_entry, list);
		if (entry->ptr != p) {
			entry = NULL; continue;
		}
		list_del(&entry->list);
		dynamic_memory_size -= entry->size;
		break;
	}
	spin_unlock(&cache_list_lock);
	if (entry) {
		kfree(p);
		kmem_cache_free(ccs_cachep, entry);
	} else {
		/* Pointer was not returned by ccs_alloc(), or was already freed. */
		printk("BUG: ccs_free() with invalid pointer.\n");
	}
}