/*
 * Group-list allocation helpers (Linux kernel style, as in kernel/groups.c).
 * The headers below are assumed for kmalloc()/kfree(), __get_free_page()/
 * free_page(), EXPORT_SYMBOL() and struct group_info.
 */
#include <linux/cred.h>
#include <linux/export.h>
#include <linux/slab.h>

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks * sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;

	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	/* Small sets fit in the embedded array; larger ones get one page per block */
	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* Release the pages allocated before the failure, then the header */
	while (--i >= 0)
		free_page((unsigned long)group_info->blocks[i]);
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	/* Per-page blocks exist only when the embedded array was too small */
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}
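/*
 * For context, a minimal usage sketch (NOT kernel API): fill a freshly
 * allocated group_info using the same two-level block layout groups_alloc()
 * sets up, then drop it with groups_free(). fill_groups_example() and its
 * parameters are hypothetical; -ENOMEM assumes <linux/errno.h>, pulled in
 * transitively by the headers above.
 */
static int fill_groups_example(const gid_t *gids, int count)
{
	struct group_info *gi = groups_alloc(count);
	int i;

	if (!gi)
		return -ENOMEM;

	/* Entry i lives in block i / NGROUPS_PER_BLOCK at offset i % NGROUPS_PER_BLOCK */
	for (i = 0; i < count; i++)
		gi->blocks[i / NGROUPS_PER_BLOCK][i % NGROUPS_PER_BLOCK] = gids[i];

	/* ... hand gi to a consumer here ... */

	groups_free(gi);
	return 0;
}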