zcrypt_api.c
来自「linux 内核源代码」· C语言 代码 · 共 1,091 行 · 第 1/2 页
C
1,091 行
/*
 * linux/drivers/s390/crypto/zcrypt_api.c
 *
 * zcrypt 2.1.0
 *
 * Copyright (C) 2001, 2006 IBM Corporation
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>

#include "zcrypt_api.h"

/**
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
		   "Copyright 2001, 2006 IBM Corporation");
MODULE_LICENSE("GPL");

/* Protects zcrypt_device_list and zcrypt_device_count below. */
static DEFINE_SPINLOCK(zcrypt_device_lock);
/* All registered crypto devices, kept sorted by preference. */
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;

/* Number of open file handles on the zcrypt misc device. */
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

/**
 * Device attributes common for all crypto devices.
*/static ssize_t zcrypt_type_show(struct device *dev, struct device_attribute *attr, char *buf){ struct zcrypt_device *zdev = to_ap_dev(dev)->private; return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);}static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);static ssize_t zcrypt_online_show(struct device *dev, struct device_attribute *attr, char *buf){ struct zcrypt_device *zdev = to_ap_dev(dev)->private; return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);}static ssize_t zcrypt_online_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count){ struct zcrypt_device *zdev = to_ap_dev(dev)->private; int online; if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) return -EINVAL; zdev->online = online; if (!online) ap_flush_queue(zdev->ap_dev); return count;}static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);static struct attribute * zcrypt_device_attrs[] = { &dev_attr_type.attr, &dev_attr_online.attr, NULL,};static struct attribute_group zcrypt_device_attr_group = { .attrs = zcrypt_device_attrs,};/** * Move the device towards the head of the device list. * Need to be called while holding the zcrypt device list lock. * Note: cards with speed_rating of 0 are kept at the end of the list. */static void __zcrypt_increase_preference(struct zcrypt_device *zdev){ struct zcrypt_device *tmp; struct list_head *l; if (zdev->speed_rating == 0) return; for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { tmp = list_entry(l, struct zcrypt_device, list); if ((tmp->request_count + 1) * tmp->speed_rating <= (zdev->request_count + 1) * zdev->speed_rating && tmp->speed_rating != 0) break; } if (l == zdev->list.prev) return; /* Move zdev behind l */ list_del(&zdev->list); list_add(&zdev->list, l);}/** * Move the device towards the tail of the device list. * Need to be called while holding the zcrypt device list lock. * Note: cards with speed_rating of 0 are kept at the end of the list. 
*/static void __zcrypt_decrease_preference(struct zcrypt_device *zdev){ struct zcrypt_device *tmp; struct list_head *l; if (zdev->speed_rating == 0) return; for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { tmp = list_entry(l, struct zcrypt_device, list); if ((tmp->request_count + 1) * tmp->speed_rating > (zdev->request_count + 1) * zdev->speed_rating || tmp->speed_rating == 0) break; } if (l == zdev->list.next) return; /* Move zdev before l */ list_del(&zdev->list); list_add_tail(&zdev->list, l);}static void zcrypt_device_release(struct kref *kref){ struct zcrypt_device *zdev = container_of(kref, struct zcrypt_device, refcount); zcrypt_device_free(zdev);}void zcrypt_device_get(struct zcrypt_device *zdev){ kref_get(&zdev->refcount);}EXPORT_SYMBOL(zcrypt_device_get);int zcrypt_device_put(struct zcrypt_device *zdev){ return kref_put(&zdev->refcount, zcrypt_device_release);}EXPORT_SYMBOL(zcrypt_device_put);struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size){ struct zcrypt_device *zdev; zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); if (!zdev) return NULL; zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); if (!zdev->reply.message) goto out_free; zdev->reply.length = max_response_size; spin_lock_init(&zdev->lock); INIT_LIST_HEAD(&zdev->list); return zdev;out_free: kfree(zdev); return NULL;}EXPORT_SYMBOL(zcrypt_device_alloc);void zcrypt_device_free(struct zcrypt_device *zdev){ kfree(zdev->reply.message); kfree(zdev);}EXPORT_SYMBOL(zcrypt_device_free);/** * Register a crypto device. */int zcrypt_device_register(struct zcrypt_device *zdev){ int rc; rc = sysfs_create_group(&zdev->ap_dev->device.kobj, &zcrypt_device_attr_group); if (rc) goto out; get_device(&zdev->ap_dev->device); kref_init(&zdev->refcount); spin_lock_bh(&zcrypt_device_lock); zdev->online = 1; /* New devices are online by default. 
*/ list_add_tail(&zdev->list, &zcrypt_device_list); __zcrypt_increase_preference(zdev); zcrypt_device_count++; spin_unlock_bh(&zcrypt_device_lock);out: return rc;}EXPORT_SYMBOL(zcrypt_device_register);/** * Unregister a crypto device. */void zcrypt_device_unregister(struct zcrypt_device *zdev){ spin_lock_bh(&zcrypt_device_lock); zcrypt_device_count--; list_del_init(&zdev->list); spin_unlock_bh(&zcrypt_device_lock); sysfs_remove_group(&zdev->ap_dev->device.kobj, &zcrypt_device_attr_group); put_device(&zdev->ap_dev->device); zcrypt_device_put(zdev);}EXPORT_SYMBOL(zcrypt_device_unregister);/** * zcrypt_read is not be supported beyond zcrypt 1.3.1 */static ssize_t zcrypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos){ return -EPERM;}/** * Write is is not allowed */static ssize_t zcrypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos){ return -EPERM;}/** * Device open/close functions to count number of users. */static int zcrypt_open(struct inode *inode, struct file *filp){ atomic_inc(&zcrypt_open_count); return 0;}static int zcrypt_release(struct inode *inode, struct file *filp){ atomic_dec(&zcrypt_open_count); return 0;}/** * zcrypt ioctls. 
 */
/*
 * Run an RSA modular exponentiation on the first (most preferred) device
 * that is online, implements rsa_modexpo and accepts the modulus size.
 * Returns the driver's result, -EAGAIN if the driver module is going
 * away, or -ENODEV if no usable device exists.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_device *zdev;
	int rc;

	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo ||
		    zdev->min_mod_size > mex->inputdatalength ||
		    zdev->max_mod_size < mex->inputdatalength)
			continue;
		/* Pin device and AP bus device for the whole request. */
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			/* List lock is dropped while the request runs. */
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo(zdev, mex);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

/*
 * Run an RSA CRT operation on the first suitable device. Same device
 * selection and pinning scheme as zcrypt_rsa_modexpo; additionally,
 * "short CRT" cards are skipped when the key components have non-zero
 * leading bytes they cannot handle.
 */
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_device *zdev;
	unsigned long long z1, z2, z3;
	int rc, copied;

	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;
	copied = 0;
 restart:
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo_crt ||
		    zdev->min_mod_size > crt->inputdatalength ||
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
			/*
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
			 */
			if (copied == 0) {
				int len;
				spin_unlock_bh(&zcrypt_device_lock);
				/* len is max 256 / 2 - 120 = 8 */
				len = crt->inputdatalength / 2 - 120;
				z1 = z2 = z3 = 0;
				if (copy_from_user(&z1, crt->np_prime, len) ||
				    copy_from_user(&z2, crt->bp_key, len) ||
				    copy_from_user(&z3, crt->u_mult_inv, len))
					return -EFAULT;
				copied = 1;
				/*
				 * We have to restart device lookup -
				 * the device list may have changed by now.
				 */
				goto restart;
			}
			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
				/* The device can't handle this request. */
				continue;
		}
		/* Pin device and AP bus device for the whole request. */
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			/* List lock is dropped while the request runs. */
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

/*
 * Send a CPRB to either the user-selected AP device (xcRB->user_defined)
 * or, with AUTOSELECT, to the first device that supports send_cprb.
 */
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->send_cprb ||
		    (xcRB->user_defined != AUTOSELECT &&
		     AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
			continue;
		/* Pin device and AP bus device for the whole request. */
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			/* List lock is dropped while the request runs. */
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->send_cprb(zdev, xcRB);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

/*
 * Fill a per-AP-device status byte array: the user space type for
 * online devices, 0x0d for offline ones, 0 for absent slots.
 */
static void zcrypt_status_mask(char status[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(status, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->online ? zdev->user_space_type : 0x0d;
	spin_unlock_bh(&zcrypt_device_lock);
}

/*
 * Fill a per-AP-device array with the number of requests pending or
 * queued on each device.
 */
static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(qdepth, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		/* Per-device lock guards the two queue counters. */
		spin_lock(&zdev->ap_dev->lock);
		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->pendingq_count +
			zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

/*
 * Fill a per-AP-device array with the lifetime request count of each
 * device.
 */
static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->total_request_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

/* Sum of pending (submitted to hardware) requests over all devices. */
static int zcrypt_pendingq_count(void)
{
	struct zcrypt_device *zdev;
	int pendingq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		pendingq_count += zdev->ap_dev->pendingq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return pendingq_count;
}

/* Sum of queued (not yet submitted) requests over all devices. */
static int zcrypt_requestq_count(void)
{
	struct zcrypt_device *zdev;
	int requestq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		requestq_count += zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return requestq_count;
}

/* Count registered devices of the given user space type. */
static int zcrypt_count_type(int type)
{
	struct zcrypt_device *zdev;
	int device_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (zdev->user_space_type == type)
			device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	return device_count;
}

/**
 * Old, deprecated combi status call.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}

/* NOTE(review): function continues on the next page of this listing. */
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?