/* kbdb_shmops.c */
#include <linux/mman.h>#include <linux/mm.h>#include <linux/slab.h>#include <linux/shm.h>#include <linux/ipc.h>#include <linux/fs.h>#include <linux/file.h>#include <linux/vmalloc.h>#include <linux/version.h>#include "db.h"/********************* from Linux/ipc/util.h ******************/#define USHRT_MAX 0xffff#define SEQ_MULTIPLIER (IPCMNI)void sem_init (void);void msg_init (void);void shm_init (void);struct ipc_ids { int size; int in_use; int max_id; unsigned short seq; unsigned short seq_max; struct semaphore sem; spinlock_t ary; struct ipc_id* entries;};struct ipc_id { struct kern_ipc_perm* p;};int ipc_parse_version (int *cmd){#ifdef __x86_64__ if (!(current->thread.flags & THREAD_IA32)) return IPC_64; #endif if (*cmd & IPC_64) { *cmd ^= IPC_64; return IPC_64; } else { return IPC_OLD; }}void* ipc_alloc(int size){ void* out; if(size > PAGE_SIZE) out = vmalloc(size); else out = kmalloc(size, GFP_KERNEL); return out;}void __init ipc_init_ids(struct ipc_ids* ids, int size);/* must be called with ids->sem acquired.*/int ipc_findkey(struct ipc_ids* ids, key_t key);int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);/* must be called with both locks acquired. */struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);int ipcperms (struct kern_ipc_perm *ipcp, short flg);/* for rare, potentially huge allocations. 
* both function can sleep */void* ipc_alloc(int size);void ipc_free(void* ptr, int size);extern inline void ipc_lockall(struct ipc_ids* ids){ spin_lock(&ids->ary);}extern inline struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id){ struct kern_ipc_perm* out; int lid = id % SEQ_MULTIPLIER; if(lid >= ids->size) return NULL; out = ids->entries[lid].p; return out;}extern inline void ipc_unlockall(struct ipc_ids* ids){ spin_unlock(&ids->ary);}extern inline struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id){ struct kern_ipc_perm* out; int lid = id % SEQ_MULTIPLIER; if(lid >= ids->size) return NULL; spin_lock(&ids->ary); out = ids->entries[lid].p; if(out==NULL) spin_unlock(&ids->ary); return out;}extern inline void ipc_unlock(struct ipc_ids* ids, int id){ spin_unlock(&ids->ary);}extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq){ return SEQ_MULTIPLIER*seq + id;}extern inline int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid){ if(uid/SEQ_MULTIPLIER != ipcp->seq) return 1; return 0;}void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); #if defined(__ia64__) || defined(__hppa__)/* On IA-64 and PA-RISC, we always use the "64-bit version" of the IPC structures. 
*/ #define ipc_parse_version(cmd) IPC_64# elseint ipc_parse_version (int *cmd);#endif struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) { struct kern_ipc_perm* p; int lid = id % SEQ_MULTIPLIER; if(lid >= ids->size) BUG(); p = ids->entries[lid].p; ids->entries[lid].p = NULL; if(p==NULL) BUG(); ids->in_use--; if (lid == ids->max_id) { do { lid--; if(lid == -1) break; } while (ids->entries[lid].p == NULL); ids->max_id = lid; } return p; }int ipc_findkey(struct ipc_ids* ids, key_t key){ int id; struct kern_ipc_perm* p; for (id = 0; id <= ids->max_id; id++) { p = ids->entries[id].p; if(p==NULL) continue; if (key == p->key) return id; } return -1; }void ipc_free(void* ptr, int size){ if(size > PAGE_SIZE) vfree(ptr); else kfree(ptr);}static int grow_ary(struct ipc_ids* ids, int newsize){ struct ipc_id* new; struct ipc_id* old; int i; if(newsize > IPCMNI) newsize = IPCMNI; if(newsize <= ids->size) return newsize; new = ipc_alloc(sizeof(struct ipc_id)*newsize); if(new == NULL) return ids->size; memcpy(new, ids->entries, sizeof(struct ipc_id)*ids->size); for(i=ids->size;i<newsize;i++) { new[i].p = NULL; } spin_lock(&ids->ary); old = ids->entries; ids->entries = new; i = ids->size; ids->size = newsize; spin_unlock(&ids->ary); ipc_free(old, sizeof(struct ipc_id)*i); return ids->size;}int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size){ int id; size = grow_ary(ids,size); for (id = 0; id < size; id++) { if(ids->entries[id].p == NULL) goto found; } return -1; found: ids->in_use++; if (id > ids->max_id) ids->max_id = id; new->cuid = new->uid = current->euid; new->gid = new->cgid = current->egid; new->seq = ids->seq++; if(ids->seq > ids->seq_max) ids->seq = 0; spin_lock(&ids->ary); ids->entries[id].p = new; return id; }/*************************************************************************//*************************************************************************/ 
/*************************************************************************//******************** below copied from ipc/shm.c ************************/struct shmid_kernel /* private to the kernel */{ struct kern_ipc_perm shm_perm; struct file * shm_file; int id; unsigned long shm_nattch; unsigned long shm_segsz; time_t shm_atim; time_t shm_dtim; time_t shm_ctim; pid_t shm_cprid; pid_t shm_lprid;};#define shm_flags shm_perm.modestatic struct file_operations shm_file_operations;static struct vm_operations_struct shm_vm_ops;static struct ipc_ids shm_ids;static struct ipc_ids shm_ids;static struct file_operations shm_file_operations;static struct vm_operations_struct shm_vm_ops;static int shm_tot; /* total number of shared memory pages */#define shm_lock(id) ((struct shmid_kernel*)ipc_lock(&shm_ids,id))#define shm_unlock(id) ipc_unlock(&shm_ids,id)#define shm_buildid(id, seq) \ ipc_buildid(&shm_ids, id, seq)size_t shm_ctlmax = SHMMAX;size_t shm_ctlall = SHMALL;int shm_ctlmni = SHMMNI; static int shm_tot; /* total number of shared memory pages */extern void *malloc(size_t);extern struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);static inline void shm_inc (int id){ struct shmid_kernel *shp; if(!(shp = shm_lock(id))) BUG(); shp->shm_atim = CURRENT_TIME; shp->shm_lprid = current->pid; shp->shm_nattch++; shm_unlock(id);}static inline struct shmid_kernel *shm_rmid(int id){ return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);}void shmem_lock(struct file * file, int lock){ struct inode * inode = file->f_dentry->d_inode; struct shmem_inode_info * info = SHMEM_I(inode); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22) spin_lock(&info->lock); if (lock) info->flags |= VM_LOCKED; else info->flags &= ~VM_LOCKED; spin_unlock(&info->lock);#else down(&info->sem); info->locked = lock; up(&info->sem);#endif}static void shm_destroy (struct shmid_kernel *shp){ shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid (shp->id); 
shm_unlock(shp->id); shmem_lock(shp->shm_file, 0); fput (shp->shm_file); kfree (shp);}static int shm_mmap(struct file * file, struct vm_area_struct * vma){ UPDATE_ATIME(file->f_dentry->d_inode); vma->vm_ops = &shm_vm_ops; shm_inc(file->f_dentry->d_inode->i_ino); return 0;}static struct file_operations shm_file_operations = { mmap: shm_mmap};static inline int shm_addid(struct shmid_kernel *shp){ return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni+1);}static int newseg (key_t key, int shmflg, size_t size){ int error; struct shmid_kernel *shp; int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; if (size < SHMMIN || size > shm_ctlmax) return -EINVAL; if (shm_tot + numpages >= shm_ctlall) return -ENOSPC; shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER); if (!shp) return -ENOMEM; sprintf (name, "SYSV%08x", key); file = shmem_file_setup(name, size); error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; error = -ENOSPC; id = shm_addid(shp); if(id == -1) goto no_id; shp->shm_perm.key = key; shp->shm_flags = (shmflg & S_IRWXUGO); shp->shm_cprid = current->pid; shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = CURRENT_TIME; shp->shm_segsz = size; shp->shm_nattch = 0; shp->id = shm_buildid(id,shp->shm_perm.seq); shp->shm_file = file; file->f_dentry->d_inode->i_ino = shp->id; file->f_op = &shm_file_operations; shm_tot += numpages; shm_unlock (id); return shp->id; no_id: fput(file);no_file: kfree(shp); return error;}long shmget (key_t key, size_t size, int shmflg){ struct shmid_kernel *shp; int err, id = 0; print_entry_location(); down(&shm_ids.sem); if (key == IPC_PRIVATE) { err = newseg(key, shmflg, size); } else if ((id = ipc_findkey(&shm_ids, key)) == -1) { if (!(shmflg & IPC_CREAT)) err = -ENOENT; else err = newseg(key, shmflg, size); } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) { err = -EEXIST; } else { shp = shm_lock(id); if(shp==NULL) BUG(); if (shp->shm_segsz < size) err = 
-EINVAL; /*else if (ipcperms(&shp->shm_perm, shmflg)) *err = -EACCES; */ else err = shm_buildid(id, shp->shm_perm.seq); shm_unlock(id); } up(&shm_ids.sem); print_exit_location(err); return err;}static inline int shm_checkid(struct shmid_kernel *s, int id){ if (ipc_checkid(&shm_ids,&s->shm_perm,id)) return -EIDRM; return 0;}long shmctl (int shmid, int cmd, struct shmid_ds *buf){ struct shmid_kernel *shp; int err, version; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); /* * We cannot simply remove the file. The SVID states * that the block remains until the last person * detaches from it, then is deleted. A shmat() on * an RMID segment is legal in older Linux and if * we change it apps break... * * Instead we set a destroyed flag, and then blow * the name away when the usage hits zero. */ down(&shm_ids.sem); shp = shm_lock(shmid); err = -EINVAL; if (shp == NULL) goto out_up; err = shm_checkid(shp, shmid); if(err) goto out_unlock_up; /*if (current->euid != shp->shm_perm.uid && * current->euid != shp->shm_perm.cuid && * !capable(CAP_SYS_ADMIN)) { *err=-EPERM; *goto out_unlock_up; * *} */ if (shp->shm_nattch){ shp->shm_flags |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shmid); } else shm_destroy (shp); up(&shm_ids.sem); return err; err = 0;out_unlock_up: shm_unlock(shmid);out_up: up(&shm_ids.sem); return err;}ulong *shmat (int shmid, char *shmaddr, int shmflg)/*XXX: Need to look into this further.*/{ struct shmid_kernel *shp; struct file *file; unsigned long size; int err; void *user_addr; ulong *raddr = NULL; unsigned long addr = 0; unsigned long prot = 0; unsigned long flags = 0; print_entry_location(); if (shmid < 0) return NULL; shp = shm_lock(shmid); if(shp == NULL) return NULL; err = shm_checkid(shp,shmid); if (err) { shm_unlock(shmid); return NULL; } file = shp->shm_file; size = file->f_dentry->d_inode->i_size; shp->shm_nattch++; shm_unlock(shmid); down_write(¤t->mm->mmap_sem); if (addr && 
!(shmflg & SHM_REMAP)) { user_addr = ERR_PTR(-EINVAL); if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* * If shm segment goes below stack, make sure there is some * space left for the stack to grow (at least 4 pages). */ if (addr < current->mm->start_stack && addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0); invalid: up_write(¤t->mm->mmap_sem); down (&shm_ids.sem); if(!(shp = shm_lock(shmid))) BUG(); shp->shm_nattch--; if(shp->shm_nattch == 0 && shp->shm_flags & SHM_DEST) shm_destroy (shp); else shm_unlock(shmid); up (&shm_ids.sem); *raddr = (unsigned long) user_addr; err = 0; if (IS_ERR(user_addr)) err = PTR_ERR(user_addr); print_exit_location(err); return (long *)err;}long shmdt (char *shmaddr){ struct mm_struct *mm = current->mm; struct vm_area_struct *shmd, *shmdnext; int retval = -EINVAL; print_entry_location(); down_write(&mm->mmap_sem); for (shmd = mm->mmap; shmd; shmd = shmdnext) { shmdnext = shmd->vm_next; if (shmd->vm_ops == &shm_vm_ops && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) { do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start); retval = 0; } } up_write(&mm->mmap_sem); print_exit_location(retval); return retval;}/*************************************************************************//*************************************************************************/
/* (scraped code-viewer chrome removed here: keyboard-shortcut help for
 * copy / search / fullscreen / theme / font-size — not part of the source) */