1 |
kumaneko |
111 |
/* |
2 |
|
|
* fs/realpath.c |
3 |
|
|
* |
4 |
|
|
* Get the canonicalized absolute pathnames. The basis for SAKURA and TOMOYO. |
5 |
|
|
* |
6 |
|
|
* Copyright (C) 2005-2007 NTT DATA CORPORATION |
7 |
|
|
* |
8 |
kumaneko |
588 |
* Version: 1.5.2-pre 2007/10/19 |
9 |
kumaneko |
111 |
* |
10 |
|
|
* This file is applicable to both 2.4.30 and 2.6.11 and later. |
11 |
|
|
* See README.ccs for ChangeLog. |
12 |
|
|
* |
13 |
|
|
*/ |
14 |
|
|
#include <linux/string.h> |
15 |
|
|
#include <linux/mm.h> |
16 |
|
|
#include <linux/utime.h> |
17 |
|
|
#include <linux/file.h> |
18 |
|
|
#include <linux/smp_lock.h> |
19 |
|
|
#include <linux/module.h> |
20 |
|
|
#include <linux/slab.h> |
21 |
|
|
#include <asm/uaccess.h> |
22 |
|
|
#include <asm/atomic.h> |
23 |
|
|
#include <linux/version.h> |
24 |
|
|
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) |
25 |
|
|
#include <linux/namei.h> |
26 |
|
|
#include <linux/mount.h> |
27 |
|
|
static const int lookup_flags = LOOKUP_FOLLOW; |
28 |
|
|
#else |
29 |
|
|
static const int lookup_flags = LOOKUP_FOLLOW | LOOKUP_POSITIVE; |
30 |
|
|
#endif |
31 |
|
|
#include <linux/realpath.h> |
32 |
|
|
#include <linux/proc_fs.h> |
33 |
|
|
#include <linux/ccs_common.h> |
34 |
|
|
|
35 |
|
|
extern int sbin_init_started; |
36 |
|
|
|
37 |
|
|
/***** realpath handler *****/ |
38 |
|
|
|
39 |
|
|
/*
 * GetAbsolutePath - return the path of a dentry but ignores chroot'ed root.
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Returns 0 on success, -ENOMEM if @buffer is too small.
 *
 * Caller holds the dcache_lock.
 * Based on __d_path() in fs/dcache.c
 *
 * If dentry is a directory, trailing '/' is appended.
 * Characters other than ' ' < c < 127 are converted to \ooo style octal string.
 * Character \ is converted to \\ string.
 *
 * The path is built backwards from the end of @buffer (component by
 * component while walking up d_parent/mnt_parent), then shifted to the
 * front of @buffer before returning.
 */
static int GetAbsolutePath(struct dentry *dentry, struct vfsmount *vfsmnt, char *buffer, int buflen)
{
	char *start = buffer;
	char *end = buffer + buflen;
	u8 is_dir = (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode));

	/* Refuse tiny buffers up front rather than failing mid-walk. */
	if (buflen < 256) goto out;

	*--end = '\0';
	buflen--;

	for (;;) {
		struct dentry *parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			spin_lock(&vfsmount_lock);
#endif
			if (vfsmnt->mnt_parent == vfsmnt) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
				spin_unlock(&vfsmount_lock);
#endif
				break;
			}
			/* Cross the mount point and keep walking upwards. */
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			spin_unlock(&vfsmount_lock);
#endif
			continue;
		}
		if (is_dir) {
			/* Emit the trailing '/' once, before the last component. */
			is_dir = 0; *--end = '/'; buflen--;
		}
		parent = dentry->d_parent;
		{
			const char *sp = dentry->d_name.name;
			const char *cp = sp + dentry->d_name.len - 1;
			unsigned char c;

			/* Exception: Use /proc/self/ rather than /proc/\$/ for current process. */
			if (IS_ROOT(parent) && *sp > '0' && *sp <= '9' && parent->d_sb && parent->d_sb->s_magic == PROC_SUPER_MAGIC) {
				char *ep;
				const pid_t pid = (pid_t) simple_strtoul(sp, &ep, 10);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
				if (!*ep && pid == current->tgid) { sp = "self"; cp = sp + 3; }
#else
				if (!*ep && pid == current->pid) { sp = "self"; cp = sp + 3; }
#endif
			}

			/* Copy the component backwards, escaping as documented above. */
			while (sp <= cp) {
				c = * (unsigned char *) cp;
				if (c == '\\') {
					buflen -= 2;
					if (buflen < 0) goto out;
					*--end = '\\';
					*--end = '\\';
				} else if (c > ' ' && c < 127) {
					if (--buflen < 0) goto out;
					*--end = (char) c;
				} else {
					/* Non-printable byte -> \ooo octal escape (4 chars). */
					buflen -= 4;
					if (buflen < 0) goto out;
					*--end = (c & 7) + '0';
					*--end = ((c >> 3) & 7) + '0';
					*--end = (c >> 6) + '0';
					*--end = '\\';
				}
				cp--;
			}
			if (--buflen < 0) goto out;
			*--end = '/';
		}
		dentry = parent;
	}
	/* Drop the duplicate leading '/' left by the loop above. */
	if (*end == '/') { buflen++; end++; }
	{
		/* Prepend the name of the (global) root dentry itself. */
		const char *sp = dentry->d_name.name;
		const char *cp = sp + dentry->d_name.len - 1;
		unsigned char c;
		while (sp <= cp) {
			c = * (unsigned char *) cp;
			if (c == '\\') {
				buflen -= 2;
				if (buflen < 0) goto out;
				*--end = '\\';
				*--end = '\\';
			} else if (c > ' ' && c < 127) {
				if (--buflen < 0) goto out;
				*--end = (char) c;
			} else {
				buflen -= 4;
				if (buflen < 0) goto out;
				*--end = (c & 7) + '0';
				*--end = ((c >> 3) & 7) + '0';
				*--end = (c >> 6) + '0';
				*--end = '\\';
			}
			cp--;
		}
	}
	/* Move the pathname to the top of the buffer. */
	memmove(start, end, strlen(end) + 1);
	return 0;
 out:
	return -ENOMEM;
}
162 |
|
|
|
163 |
|
|
/* Returns realpath(3) of the given dentry but ignores chroot'ed root. */ |
164 |
|
|
int realpath_from_dentry2(struct dentry *dentry, struct vfsmount *mnt, char *newname, int newname_len) |
165 |
|
|
{ |
166 |
|
|
int error; |
167 |
|
|
struct dentry *d_dentry; |
168 |
|
|
struct vfsmount *d_mnt; |
169 |
|
|
if (!dentry || !mnt || !newname || newname_len <= 0) return -EINVAL; |
170 |
|
|
d_dentry = dget(dentry); |
171 |
|
|
d_mnt = mntget(mnt); |
172 |
|
|
/***** CRITICAL SECTION START *****/ |
173 |
|
|
spin_lock(&dcache_lock); |
174 |
|
|
error = GetAbsolutePath(d_dentry, d_mnt, newname, newname_len); |
175 |
|
|
spin_unlock(&dcache_lock); |
176 |
|
|
/***** CRITICAL SECTION END *****/ |
177 |
|
|
dput(d_dentry); |
178 |
|
|
mntput(d_mnt); |
179 |
|
|
return error; |
180 |
|
|
} |
181 |
|
|
|
182 |
|
|
/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */ |
183 |
|
|
/* These functions use ccs_alloc(), so caller must ccs_free() if these functions didn't return NULL. */ |
184 |
|
|
char *realpath_from_dentry(struct dentry *dentry, struct vfsmount *mnt) |
185 |
|
|
{ |
186 |
|
|
char *buf = ccs_alloc(CCS_MAX_PATHNAME_LEN); |
187 |
|
|
if (buf && realpath_from_dentry2(dentry, mnt, buf, CCS_MAX_PATHNAME_LEN - 1) == 0) return buf; |
188 |
|
|
ccs_free(buf); |
189 |
|
|
return NULL; |
190 |
|
|
} |
191 |
|
|
|
192 |
|
|
char *realpath(const char *pathname) |
193 |
|
|
{ |
194 |
|
|
struct nameidata nd; |
195 |
|
|
if (pathname && path_lookup(pathname, lookup_flags, &nd) == 0) { |
196 |
|
|
char *buf = realpath_from_dentry(nd.dentry, nd.mnt); |
197 |
|
|
path_release(&nd); |
198 |
|
|
return buf; |
199 |
|
|
} |
200 |
|
|
return NULL; |
201 |
|
|
} |
202 |
|
|
|
203 |
|
|
char *realpath_nofollow(const char *pathname) |
204 |
|
|
{ |
205 |
|
|
struct nameidata nd; |
206 |
|
|
if (pathname && path_lookup(pathname, lookup_flags ^ LOOKUP_FOLLOW, &nd) == 0) { |
207 |
|
|
char *buf = realpath_from_dentry(nd.dentry, nd.mnt); |
208 |
|
|
path_release(&nd); |
209 |
|
|
return buf; |
210 |
|
|
} |
211 |
|
|
return NULL; |
212 |
|
|
} |
213 |
|
|
|
214 |
|
|
/***** Private memory allocator. *****/ |
215 |
|
|
|
216 |
|
|
/*
 * Round @size up to a multiple of the wider of sizeof(void *) and
 * sizeof(long), so pointers stored at the returned offsets are naturally
 * aligned.
 * FIXME: Are there more requirements that is needed for assigning value atomically?
 */
static inline unsigned int ROUNDUP(const unsigned int size) {
	const unsigned int align = (sizeof(void *) >= sizeof(long))
		? sizeof(void *) : sizeof(long);
	return ((size + align - 1) / align) * align;
}
227 |
|
|
|
228 |
|
|
/* Bytes handed out by alloc_element(), in whole pages. */
static unsigned int allocated_memory_for_elements = 0;

/* Report how much memory alloc_element() has consumed so far. */
unsigned int GetMemoryUsedForElements(void)
{
	return allocated_memory_for_elements;
}
234 |
|
|
|
235 |
|
|
/* Allocate memory for structures. The RAM is chunked, so NEVER try to kfree() the returned pointer. */
void *alloc_element(const unsigned int size)
{
	static DECLARE_MUTEX(lock);
	static char *buf = NULL;
	/* Starts "full" so the very first call allocates the initial page. */
	static unsigned int buf_used_len = PAGE_SIZE;
	char *ptr = NULL;
	/* Round up so consecutive carve-outs stay word aligned. */
	const unsigned int word_aligned_size = ROUNDUP(size);
	if (word_aligned_size > PAGE_SIZE) return NULL;
	down(&lock);
	if (buf_used_len + word_aligned_size > PAGE_SIZE) {
		/* Current page exhausted: start a fresh zeroed page.
		 * NOTE(review): the remainder of the old page is abandoned. */
		if ((ptr = kmalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) {
			printk("ERROR: Out of memory for alloc_element().\n");
			/* Allocation failure before /sbin/init runs is fatal. */
			if (!sbin_init_started) panic("MAC Initialization failed.\n");
		} else {
			memset(ptr, 0, PAGE_SIZE);
			buf = ptr;
			allocated_memory_for_elements += PAGE_SIZE;
			buf_used_len = word_aligned_size;
			ptr = buf;
		}
	} else if (word_aligned_size) {
		int i;
		ptr = buf + buf_used_len;
		buf_used_len += word_aligned_size;
		/* The page was zeroed when allocated; a non-zero byte here means
		 * some caller scribbled past its own allocation. */
		for (i = 0; i < word_aligned_size; i++) {
			if (ptr[i]) {
				printk(KERN_ERR "WARNING: Reserved memory was tainted! The system might go wrong.\n");
				ptr[i] = '\0';
			}
		}
	}
	up(&lock);
	return ptr;
}
270 |
|
|
|
271 |
|
|
/***** Shared memory allocator. *****/ |
272 |
|
|
|
273 |
|
|
/* Bytes handed out by SaveName(), in whole pages. */
static unsigned int allocated_memory_for_savename = 0;

/* Report how much memory SaveName() has consumed so far. */
unsigned int GetMemoryUsedForSaveName(void)
{
	return allocated_memory_for_savename;
}
279 |
|
|
|
280 |
|
|
/* Number of hash buckets used by SaveName(). */
#define MAX_HASH 256

/* One interned pathname, chained within its hash bucket. */
struct name_entry {
	struct name_entry *next; /* Pointer to next record. NULL if none. */
	struct path_info entry; /* The interned name plus its precomputed info. */
};

/* A region of page memory not yet carved up by SaveName(). */
struct free_memory_block_list {
	struct free_memory_block_list *next; /* Pointer to next record. NULL if none. */
	char *ptr; /* Pointer to a free area. */
	int len; /* Length of the area. */
};
292 |
kumaneko |
111 |
|
293 |
|
|
/* Keep the given name on the RAM. The RAM is shared, so NEVER try to modify or kfree() the returned name. */ |
294 |
|
|
const struct path_info *SaveName(const char *name) |
295 |
|
|
{ |
296 |
kumaneko |
214 |
static struct free_memory_block_list fmb_list = { NULL, NULL, 0 }; |
297 |
|
|
static struct name_entry name_list[MAX_HASH]; /* The list of names. */ |
298 |
kumaneko |
111 |
static DECLARE_MUTEX(lock); |
299 |
kumaneko |
214 |
struct name_entry *ptr, *prev = NULL; |
300 |
kumaneko |
111 |
unsigned int hash; |
301 |
kumaneko |
214 |
struct free_memory_block_list *fmb = &fmb_list; |
302 |
kumaneko |
111 |
int len; |
303 |
|
|
static int first_call = 1; |
304 |
|
|
if (!name) return NULL; |
305 |
|
|
len = strlen(name) + 1; |
306 |
|
|
if (len > CCS_MAX_PATHNAME_LEN) { |
307 |
|
|
printk("ERROR: Name too long for SaveName().\n"); |
308 |
|
|
return NULL; |
309 |
|
|
} |
310 |
|
|
hash = full_name_hash((const unsigned char *) name, len - 1); |
311 |
|
|
down(&lock); |
312 |
|
|
if (first_call) { |
313 |
|
|
int i; |
314 |
|
|
first_call = 0; |
315 |
|
|
memset(&name_list, 0, sizeof(name_list)); |
316 |
|
|
for (i = 0; i < MAX_HASH; i++) { |
317 |
|
|
name_list[i].entry.name = "/"; |
318 |
|
|
fill_path_info(&name_list[i].entry); |
319 |
|
|
} |
320 |
|
|
if (CCS_MAX_PATHNAME_LEN > PAGE_SIZE) panic("Bad size."); |
321 |
|
|
} |
322 |
|
|
ptr = &name_list[hash % MAX_HASH]; |
323 |
|
|
while (ptr) { |
324 |
|
|
if (hash == ptr->entry.hash && strcmp(name, ptr->entry.name) == 0) goto out; |
325 |
|
|
prev = ptr; ptr = ptr->next; |
326 |
|
|
} |
327 |
|
|
while (len > fmb->len) { |
328 |
|
|
if (fmb->next) { |
329 |
|
|
fmb = fmb->next; |
330 |
|
|
} else { |
331 |
|
|
char *cp; |
332 |
kumaneko |
214 |
if ((cp = kmalloc(PAGE_SIZE, GFP_KERNEL)) == NULL || (fmb->next = alloc_element(sizeof(*fmb))) == NULL) { |
333 |
kumaneko |
111 |
kfree(cp); |
334 |
|
|
printk("ERROR: Out of memory for SaveName().\n"); |
335 |
|
|
if (!sbin_init_started) panic("MAC Initialization failed.\n"); |
336 |
|
|
goto out; /* ptr == NULL */ |
337 |
|
|
} |
338 |
|
|
memset(cp, 0, PAGE_SIZE); |
339 |
|
|
allocated_memory_for_savename += PAGE_SIZE; |
340 |
|
|
fmb = fmb->next; |
341 |
|
|
fmb->ptr = cp; |
342 |
|
|
fmb->len = PAGE_SIZE; |
343 |
|
|
} |
344 |
|
|
} |
345 |
kumaneko |
214 |
if ((ptr = alloc_element(sizeof(*ptr))) == NULL) goto out; |
346 |
kumaneko |
111 |
ptr->entry.name = fmb->ptr; |
347 |
|
|
memmove(fmb->ptr, name, len); |
348 |
|
|
fill_path_info(&ptr->entry); |
349 |
|
|
fmb->ptr += len; |
350 |
|
|
fmb->len -= len; |
351 |
|
|
prev->next = ptr; /* prev != NULL because name_list is not empty. */ |
352 |
|
|
if (fmb->len == 0) { |
353 |
kumaneko |
214 |
struct free_memory_block_list *ptr = &fmb_list; |
354 |
kumaneko |
111 |
while (ptr->next != fmb) ptr = ptr->next; ptr->next = fmb->next; |
355 |
|
|
} |
356 |
|
|
out: |
357 |
|
|
up(&lock); |
358 |
|
|
return ptr ? &ptr->entry : NULL; |
359 |
|
|
} |
360 |
|
|
|
361 |
|
|
/***** Dynamic memory allocator. *****/ |
362 |
|
|
|
363 |
kumaneko |
214 |
/* Bookkeeping record for one ccs_alloc() allocation. */
struct cache_entry {
	struct list_head list; /* Link within cache_list. */
	void *ptr; /* The kmalloc()ed block handed to the caller. */
	int size; /* Accounted size of that block. */
};
368 |
kumaneko |
111 |
|
369 |
|
|
/* Slab cache for struct cache_entry; the struct's name changed in 2.6.20. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
static struct kmem_cache *ccs_cachep = NULL;
#else
static kmem_cache_t *ccs_cachep = NULL;
#endif

/* Create the cache_entry slab cache; called once at boot. */
void __init realpath_Init(void)
{
	/* kmem_cache_create() lost its destructor argument in 2.6.23. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
	ccs_cachep = kmem_cache_create("ccs_cache", sizeof(struct cache_entry), 0, 0, NULL);
#else
	ccs_cachep = kmem_cache_create("ccs_cache", sizeof(struct cache_entry), 0, 0, NULL, NULL);
#endif
	if (!ccs_cachep) panic("Can't create cache.\n");
}
384 |
|
|
|
385 |
|
|
/* All live ccs_alloc() allocations, guarded by cache_list_lock. */
static LIST_HEAD(cache_list);
static spinlock_t cache_list_lock = SPIN_LOCK_UNLOCKED;
/* Total accounted bytes currently held by ccs_alloc() callers. */
static unsigned int dynamic_memory_size = 0;

/* Report how much dynamically allocated memory is outstanding. */
unsigned int GetMemoryUsedForDynamic(void)
{
	return dynamic_memory_size;
}
393 |
|
|
|
394 |
kumaneko |
581 |
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) |
395 |
|
|
/*
 * Approximate the size class kmalloc() would use for @size on pre-2.5
 * kernels (which lack ksize()): the smallest power of two, starting from
 * the minimum slab size, that holds @size.
 */
static int round2(size_t size)
{
#if PAGE_SIZE == 4096
	size_t block;
	for (block = 32; block < size; block <<= 1)
		;
#else
	size_t block;
	for (block = 64; block < size; block <<= 1)
		;
#endif
	return block;
}
405 |
|
|
#endif |
406 |
|
|
|
407 |
kumaneko |
214 |
/*
 * Allocate @size zeroed bytes and record the allocation in cache_list so
 * that ccs_free() can validate pointers and GetMemoryUsedForDynamic()
 * can account usage. Returns NULL on failure.
 */
void *ccs_alloc(const size_t size)
{
	void *ret = kmalloc(size, GFP_KERNEL);
	if (ret) {
		struct cache_entry *new_entry = kmem_cache_alloc(ccs_cachep, GFP_KERNEL);
		if (!new_entry) {
			/* No tracking record -> give the memory back too. */
			kfree(ret); ret = NULL;
		} else {
			INIT_LIST_HEAD(&new_entry->list);
			new_entry->ptr = ret;
			/* Account the real slab size where ksize() exists;
			 * otherwise estimate it with round2(). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
			new_entry->size = ksize(ret);
#else
			new_entry->size = round2(size);
#endif
			spin_lock(&cache_list_lock);
			list_add_tail(&new_entry->list, &cache_list);
			dynamic_memory_size += new_entry->size;
			spin_unlock(&cache_list_lock);
			memset(ret, 0, size);
		}
	}
	return ret;
}
431 |
|
|
|
432 |
|
|
/*
 * Release memory obtained from ccs_alloc(). A pointer that was not
 * handed out by ccs_alloc() (or was already freed) is rejected with a
 * printk instead of being passed to kfree(). NULL is a no-op.
 */
void ccs_free(const void *p)
{
	struct list_head *v;
	struct cache_entry *entry = NULL;
	if (!p) return;
	spin_lock(&cache_list_lock);
	/* Linear scan for the tracking record of @p. */
	list_for_each(v, &cache_list) {
		entry = list_entry(v, struct cache_entry, list);
		if (entry->ptr != p) {
			/* Keep entry NULL-on-exit unless a match is found. */
			entry = NULL; continue;
		}
		list_del(&entry->list);
		dynamic_memory_size -= entry->size;
		break;
	}
	spin_unlock(&cache_list_lock);
	if (entry) {
		kfree(p);
		kmem_cache_free(ccs_cachep, entry);
	} else {
		printk("BUG: ccs_free() with invalid pointer.\n");
	}
}