
Browsing the Subversion repository

Diff of /trunk/1.7.x/ccs-patch/security/ccsecurity/realpath.c


revision 1054 by kumaneko, Mon Mar 24 09:38:11 2008 UTC → revision 1055 by kumaneko, Tue Mar 25 09:01:31 2008 UTC
# Line 366 (v.1054) / Line 366 (v.1055): void *ccs_alloc_element(const unsigned i
 }
 
 static unsigned int allocated_memory_for_savename;
-static unsigned int allocated_memory_for_pool;
-
 
 /**
  * ccs_get_memory_used_for_save_name - Get memory used for keeping string data.

# Line 376 (v.1054) / Line 374 (v.1055): static unsigned int allocated_memory_for
  */
 unsigned int ccs_get_memory_used_for_save_name(void)
 {
-        return allocated_memory_for_savename + allocated_memory_for_pool;
+        return allocated_memory_for_savename;
 }
 
 #define MAX_HASH 256

# Line 481 (v.1054) / Line 479 (v.1055): static struct kmem_cache *ccs_cachep;
 static kmem_cache_t *ccs_cachep;
 #endif
 
-#ifdef CCS_MAX_RESERVED_PAGES
-#define MAX_CCS_PAGE_BUFFER_POOL (CCS_MAX_RESERVED_PAGES)
-#else
-#define MAX_CCS_PAGE_BUFFER_POOL 10
-#endif
-
-static struct ccs_page_buffer *ccs_page_buffer_pool[MAX_CCS_PAGE_BUFFER_POOL];
-static bool ccs_page_buffer_pool_in_use[MAX_CCS_PAGE_BUFFER_POOL];
-
 /**
  * ccs_realpath_init - Initialize realpath related code.
  *

# Line 518 (v.1054) / Line 507 (v.1055): static int __init ccs_realpath_init(void
         list1_add_tail_mb(&KERNEL_DOMAIN.list, &domain_list);
         if (ccs_find_domain(ROOT_NAME) != &KERNEL_DOMAIN)
                 panic("Can't register KERNEL_DOMAIN");
-        memset(ccs_page_buffer_pool, 0, sizeof(ccs_page_buffer_pool));
-        for (i = 0; i < MAX_CCS_PAGE_BUFFER_POOL; i++)
-                ccs_page_buffer_pool_in_use[i] = false;
         return 0;
 }
 

# Line 563 (v.1054) / Line 549 (v.1055): static int round2(size_t size)
 }
 #endif
 
-static DEFINE_SPINLOCK(ccs_page_buffer_pool_lock);
-
 /**
  * ccs_alloc - Allocate memory for temporal purpose.
  *

# Line 574 (v.1054) / Line 558 (v.1055): static DEFINE_SPINLOCK(ccs_page_buffer_p
  */
 void *ccs_alloc(const size_t size)
 {
-        int i;
-        void *ret;
         struct cache_entry *new_entry;
-        if (size != sizeof(struct ccs_page_buffer))
-                goto normal;
-        for (i = 0; i < MAX_CCS_PAGE_BUFFER_POOL; i++) {
-                struct ccs_page_buffer *ptr;
-                bool in_use;
-                if (ccs_page_buffer_pool_in_use[i])
-                        continue;
-                /***** CRITICAL SECTION START *****/
-                spin_lock(&ccs_page_buffer_pool_lock);
-                in_use = ccs_page_buffer_pool_in_use[i];
-                if (!in_use)
-                        ccs_page_buffer_pool_in_use[i] = true;
-                spin_unlock(&ccs_page_buffer_pool_lock);
-                /***** CRITICAL SECTION END *****/
-                if (in_use)
-                        continue;
-                ptr = ccs_page_buffer_pool[i];
-                if (ptr)
-                        goto ok;
-                ptr = kmalloc(sizeof(struct ccs_page_buffer), GFP_KERNEL);
-                /***** CRITICAL SECTION START *****/
-                spin_lock(&ccs_page_buffer_pool_lock);
-                if (ptr) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0)
-                        allocated_memory_for_pool += ksize(ptr);
-#else
-                        allocated_memory_for_pool += round2(size);
-#endif
-                } else {
-                        ccs_page_buffer_pool_in_use[i] = false;
-                }
-                spin_unlock(&ccs_page_buffer_pool_lock);
-                /***** CRITICAL SECTION END *****/
-                if (!ptr)
-                        goto normal;
-                ccs_page_buffer_pool[i] = ptr;
-                printk(KERN_DEBUG "Allocated permanent buffer %d/%d\n",
-                       i, MAX_CCS_PAGE_BUFFER_POOL);
- ok:
-                memset(ptr, 0, sizeof(struct ccs_page_buffer));
-                return ptr;
-        }
- normal:
-        ret = kzalloc(size, GFP_KERNEL);
+        void *ret = kzalloc(size, GFP_KERNEL);
         if (!ret)
                 goto out;
         new_entry = kmem_cache_alloc(ccs_cachep, GFP_KERNEL);

# Line 654 (v.1054) / Line 593 (v.1055): void *ccs_alloc(const size_t size)
  */
 void ccs_free(const void *p)
 {
-        int i;
         struct list_head *v;
         struct cache_entry *entry = NULL;
         if (!p)
                 return;
-        for (i = 0; i < MAX_CCS_PAGE_BUFFER_POOL; i++) {
-                bool done;
-                if (p != ccs_page_buffer_pool[i])
-                        continue;
-                /***** CRITICAL SECTION START *****/
-                spin_lock(&ccs_page_buffer_pool_lock);
-                done = ccs_page_buffer_pool_in_use[i];
-                if (done)
-                        ccs_page_buffer_pool_in_use[i] = false;
-                spin_unlock(&ccs_page_buffer_pool_lock);
-                /***** CRITICAL SECTION END *****/
-                if (done)
-                        return;
-        }
         /***** CRITICAL SECTION START *****/
         spin_lock(&cache_list_lock);
         list_for_each(v, &cache_list) {

Legend: lines prefixed with "-" were removed from v.1054, lines prefixed with "+" were added in v.1055, and unprefixed lines are unchanged context.
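In short, r1055 drops the ccs_page_buffer_pool fast path and its bookkeeping (allocated_memory_for_pool), so every ccs_alloc() request now goes straight through kzalloc() and is registered on cache_list so that ccs_free() can find and release it later. The following is a minimal sketch of that remaining allocate-and-track pattern, not the actual implementation: the fields of struct cache_entry, the local definitions of cache_list, cache_list_lock and ccs_cachep, and the error handling are assumptions for illustration; only the kzalloc()/kmem_cache_alloc() calls and the identifiers cache_list, cache_list_lock and ccs_cachep are visible in this diff.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical layout; the real struct cache_entry is defined elsewhere in realpath.c. */
struct cache_entry {
        struct list_head list;
        const void *ptr;
        size_t size;
};

/* Stand-ins for the file-scope objects referenced by the diff. */
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct kmem_cache *ccs_cachep;  /* created with kmem_cache_create() at init time */

/* Sketch of the path that remains after r1055: zero-allocate the buffer,
 * then record it on cache_list under cache_list_lock so a later ccs_free()
 * can look the pointer up and release both the buffer and its entry. */
static void *ccs_alloc_sketch(const size_t size)
{
        struct cache_entry *new_entry;
        void *ret = kzalloc(size, GFP_KERNEL);
        if (!ret)
                return NULL;
        new_entry = kmem_cache_alloc(ccs_cachep, GFP_KERNEL);
        if (!new_entry) {
                kfree(ret);
                return NULL;
        }
        new_entry->ptr = ret;
        new_entry->size = size;
        spin_lock(&cache_list_lock);
        list_add_tail(&new_entry->list, &cache_list);
        spin_unlock(&cache_list_lock);
        return ret;
}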
