📄 tdma_ioctl.c
字号:
/* NOTE(review): this is the interior of tdma_ioctl_set_slot() — the function
 * head (parameter checks, req_cal setup, slot allocation) lies above this
 * excerpt. Variables req_cal, slot, tdma, job, context, id, jnt_id, cfg,
 * rtdev, old_slot are declared there.
 */
    if (!req_cal.result_buffer) {
        /* no buffer for calibration results — undo the slot allocation */
        kfree(slot);
        return -ENOMEM;
    }

    /* run the calibration handshake via a real-time procedure call;
     * copyback/cleanup callbacks handle the user-space result transfer */
    ret = rtpc_dispatch_call(start_calibration, 0, &req_cal, sizeof(req_cal),
                             copyback_calibration, cleanup_calibration);
    if (ret < 0) {
        /* kick out any pending calibration job before returning */
        rtdm_lock_get_irqsave(&tdma->lock, context);

        /* the calibration job, if still queued, sits right after first_job */
        job = list_entry(tdma->first_job->entry.next, struct tdma_job, entry);
        if (job != tdma->first_job) {
            __list_del(job->entry.prev, job->entry.next);

            /* wait until the RT worker drops its reference; the lock must be
             * released around msleep() so the worker can make progress */
            while (job->ref_count > 0) {
                rtdm_lock_put_irqrestore(&tdma->lock, context);
                msleep(100);
                rtdm_lock_get_irqsave(&tdma->lock, context);
            }
        }
        rtdm_lock_put_irqrestore(&tdma->lock, context);

        kfree(slot);
        return ret;
    }

#ifdef CONFIG_RTNET_TDMA_MASTER
    if (test_bit(TDMA_FLAG_MASTER, &tdma->flags)) {
        /* snapshot the cycle counter to detect progress later on */
        u32 cycle_no = (volatile u32)tdma->current_cycle;
        u64 cycle_ms;

        /* switch back to [backup] master mode */
        if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
            tdma->sync_job.id = BACKUP_SYNC;
        else
            tdma->sync_job.id = XMIT_SYNC;

        /* wait 2 cycle periods for the mode switch;
         * cycle_period is in nanoseconds, convert to ms via do_div
         * (64-bit division helper usable on 32-bit kernels) */
        cycle_ms = tdma->cycle_period;
        do_div(cycle_ms, 1000000);
        if (cycle_ms == 0)
            cycle_ms = 1; /* never sleep 0 ms for sub-ms cycles */
        msleep(2*cycle_ms);

        /* catch the very unlikely case that the current master died
         * while we just switched the mode: no cycle progress means no sync */
        if (cycle_no == (volatile u32)tdma->current_cycle) {
            kfree(slot);
            return -ETIME;
        }
    }
#endif /* CONFIG_RTNET_TDMA_MASTER */

    set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
    }

    /* populate the new slot descriptor from the user-provided config */
    slot->head.id = id;
    slot->head.ref_count = 0;
    slot->period = cfg->args.set_slot.period;
    slot->phasing = cfg->args.set_slot.phasing;
    slot->mtu = cfg->args.set_slot.size;
    /* on-wire size includes the link-layer header */
    slot->size = cfg->args.set_slot.size + rtdev->hard_header_len;
    slot->offset = cfg->args.set_slot.offset;
    slot->queue = &slot->local_queue;
    rtskb_prio_queue_init(&slot->local_queue);

    /* joint slot requested: share the other slot's output queue */
    if (jnt_id >= 0)
        /* all other validation tests performed above */
        slot->queue = tdma->slot_table[jnt_id]->queue;

    old_slot = tdma->slot_table[id];
    /* the NRT slot may alias the default slot; treat that as "no old slot"
     * so the shared entry is not unlinked twice */
    if ((id == DEFAULT_NRT_SLOT) &&
        (old_slot == tdma->slot_table[DEFAULT_SLOT]))
        old_slot = NULL;

restart:
/* Find the insertion point lock-free, then validate under the lock that the
 * job list did not change in between (revision counter); retry otherwise. */
job_list_revision = tdma->job_list_revision;

if (!old_slot) {
    /* new slot: walk the cyclic job list to find the position that keeps
     * jobs ordered by offset (ties broken by slot id) */
    job = tdma->first_job;
    while (1) {
        prev_job = job;
        job = list_entry(job->entry.next, struct tdma_job, entry);

        if (((job->id >= 0) &&
             ((slot->offset < SLOT_JOB(job)->offset) ||
              ((slot->offset == SLOT_JOB(job)->offset) &&
               (slot->head.id <= SLOT_JOB(job)->head.id)))) ||
#ifdef CONFIG_RTNET_TDMA_MASTER
            ((job->id == XMIT_RPL_CAL) &&
             (slot->offset < REPLY_CAL_JOB(job)->reply_offset)) ||
#endif /* CONFIG_RTNET_TDMA_MASTER */
            (job == tdma->first_job)) /* wrapped around: append at the end */
            break;
    }
} else
    /* replacing an existing slot: reuse its position in the list */
    prev_job = list_entry(old_slot->head.entry.prev, struct tdma_job, entry);

rtdm_lock_get_irqsave(&tdma->lock, context);

if (job_list_revision != tdma->job_list_revision) {
    /* list changed while we searched without the lock — start over */
    rtdm_lock_put_irqrestore(&tdma->lock, context);
    msleep(100);
    goto restart;
}

if (old_slot)
    __list_del(old_slot->head.entry.prev, old_slot->head.entry.next);

list_add(&slot->head.entry, &prev_job->entry);
tdma->slot_table[id] = slot;
/* keep the NRT alias in sync when it pointed at the replaced default slot */
if ((id == DEFAULT_SLOT) &&
    (tdma->slot_table[DEFAULT_NRT_SLOT] == old_slot))
    tdma->slot_table[DEFAULT_NRT_SLOT] = slot;

if (old_slot) {
    /* wait for the RT worker to drop its reference on the old slot,
     * cycling the lock around msleep() so it can run */
    while (old_slot->head.ref_count > 0) {
        rtdm_lock_put_irqrestore(&tdma->lock, context);
        msleep(100);
        rtdm_lock_get_irqsave(&tdma->lock, context);
    }
    rtdm_lock_put_irqrestore(&tdma->lock, context);

    /* search for other slots linked to the old one
     * NOTE(review): loop bound uses '<' while the remove-slot path accepts
     * id == max_slot_id; upstream RTnet uses '<=' here — possible off-by-one
     * that would skip the highest slot id, verify against the original. */
    for (jnt_id = 0; jnt_id < tdma->max_slot_id; jnt_id++)
        if ((tdma->slot_table[jnt_id] != 0) &&
            (tdma->slot_table[jnt_id]->queue == &old_slot->local_queue)) {
            /* found a joint slot, move or detach it now */
            rtdm_lock_get_irqsave(&tdma->lock, context);
            while (tdma->slot_table[jnt_id]->head.ref_count > 0) {
                rtdm_lock_put_irqrestore(&tdma->lock, context);
                msleep(100);
                rtdm_lock_get_irqsave(&tdma->lock, context);
            }
            /* If the new slot size is larger, detach the other slot,
             * update it otherwise.
             */
            if (slot->mtu > tdma->slot_table[jnt_id]->mtu)
                tdma->slot_table[jnt_id]->queue =
                    &tdma->slot_table[jnt_id]->local_queue;
            else {
                tdma->slot_table[jnt_id]->mtu = slot->mtu;
                tdma->slot_table[jnt_id]->queue = slot->queue;
            }
            rtdm_lock_put_irqrestore(&tdma->lock, context);
        }
} else
    rtdm_lock_put_irqrestore(&tdma->lock, context);

/* propagate the (possibly reduced) slot payload size to the virtual NIC */
rtmac_vnic_set_max_mtu(rtdev, cfg->args.set_slot.size);

if (old_slot) {
    /* avoid that the formerly joint queue gets purged */
    old_slot->queue = &old_slot->local_queue;

    /* Without any reference to the old job and no joint slots we can
     * safely purge its queue without lock protection.
     * NOTE: Reconfiguring a slot during runtime may lead to packet
     * drops!
     */
    while ((rtskb = __rtskb_prio_dequeue(old_slot->queue)))
        kfree_rtskb(rtskb);

    kfree(old_slot);
}

return 0;
}

/**
 * tdma_cleanup_slot - unlink a slot from the job list and release it.
 * @tdma: TDMA discipline state
 * @slot: slot to remove (may be NULL)
 *
 * Waits until the real-time worker no longer references the slot, detaches
 * any joint slots that shared its queue, purges queued packets and frees the
 * slot. Returns 0 on success, -EINVAL if @slot is NULL.
 */
int tdma_cleanup_slot(struct tdma_priv *tdma, struct tdma_slot *slot)
{
    struct rtskb *rtskb;
    unsigned int id, jnt_id;
    rtdm_lockctx_t context;

    if (!slot)
        return -EINVAL;

    id = slot->head.id;

    rtdm_lock_get_irqsave(&tdma->lock, context);

    __list_del(slot->head.entry.prev, slot->head.entry.next);

    if (id == DEFAULT_NRT_SLOT)
        /* fall back: NRT traffic reuses the default RT slot */
        tdma->slot_table[DEFAULT_NRT_SLOT] = tdma->slot_table[DEFAULT_SLOT];
    else {
        /* drop a stale NRT alias when the default slot goes away */
        if ((id == DEFAULT_SLOT) &&
            (tdma->slot_table[DEFAULT_NRT_SLOT] == slot))
            tdma->slot_table[DEFAULT_NRT_SLOT] = NULL;
        tdma->slot_table[id] = NULL;
    }

    /* wait for the RT worker to release the slot; cycle the lock around
     * msleep() so it can make progress */
    while (slot->head.ref_count > 0) {
        rtdm_lock_put_irqrestore(&tdma->lock, context);
        msleep(100);
        rtdm_lock_get_irqsave(&tdma->lock, context);
    }

    rtdm_lock_put_irqrestore(&tdma->lock, context);

    /* search for other slots linked to this one
     * NOTE(review): '<' vs '<=' — same possible off-by-one as in the
     * set-slot path; upstream RTnet iterates up to and including
     * max_slot_id. Verify before relying on this bound. */
    for (jnt_id = 0; jnt_id < tdma->max_slot_id; jnt_id++)
        if ((tdma->slot_table[jnt_id] != 0) &&
            (tdma->slot_table[jnt_id]->queue == &slot->local_queue)) {
            /* found a joint slot, detach it now under lock protection */
            rtdm_lock_get_irqsave(&tdma->lock, context);
            while (tdma->slot_table[jnt_id]->head.ref_count > 0) {
                rtdm_lock_put_irqrestore(&tdma->lock, context);
                msleep(100);
                rtdm_lock_get_irqsave(&tdma->lock, context);
            }
            /* redirect the joint slot back onto its own local queue */
            tdma->slot_table[jnt_id]->queue =
                &tdma->slot_table[jnt_id]->local_queue;
            rtdm_lock_put_irqrestore(&tdma->lock, context);
        }

    /* avoid that the formerly joint queue gets purged */
    slot->queue = &slot->local_queue;

    /* No need to protect the queue access here -
     * no one is referring to this job anymore
     * (ref_count == 0, all joint slots detached).
     */
    while ((rtskb = __rtskb_prio_dequeue(slot->queue)))
        kfree_rtskb(rtskb);

    kfree(slot);

    return 0;
}

/*
 * TDMA_IOC_REMOVE_SLOT handler: validate the slot id and delegate to
 * tdma_cleanup_slot(). Returns -ENOTTY when TDMA is not attached to the
 * device, -EINVAL for an out-of-range id or for removing the NRT alias of
 * the default slot.
 */
static int tdma_ioctl_remove_slot(struct rtnet_device *rtdev,
                                  struct tdma_config *cfg)
{
    struct tdma_priv *tdma;
    int id;

    if (rtdev->mac_priv == NULL)
        return -ENOTTY;

    tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
    /* magic check guards against a different MAC discipline being attached */
    if (tdma->magic != TDMA_MAGIC)
        return -ENOTTY;

    id = cfg->args.remove_slot.id;
    if (id > tdma->max_slot_id)
        return -EINVAL;
    /* the NRT slot currently aliases the default slot — nothing to remove */
    if ((id == DEFAULT_NRT_SLOT) &&
        (tdma->slot_table[DEFAULT_NRT_SLOT] == tdma->slot_table[DEFAULT_SLOT]))
        return -EINVAL;

    return tdma_cleanup_slot(tdma, tdma->slot_table[id]);
}

/*
 * TDMA_IOC_DETACH handler: detach the TDMA discipline from the device.
 * Returns -ENOTTY when TDMA is not attached, otherwise the result of
 * rtmac_disc_detach().
 */
static int tdma_ioctl_detach(struct rtnet_device *rtdev)
{
    struct tdma_priv *tdma;
    int ret;

    if (rtdev->mac_priv == NULL)
        return -ENOTTY;
    tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
    if (tdma->magic != TDMA_MAGIC)
        return -ENOTTY;

    ret = rtmac_disc_detach(rtdev);

    return ret;
}

/*
 * Top-level TDMA ioctl dispatcher (non-real-time context).
 * Copies the configuration structure from user space, serializes all
 * configuration calls via the device's nrt_lock semaphore, then dispatches
 * on the request code. Returns -EFAULT on a bad user pointer, -ERESTARTSYS
 * when interrupted while acquiring the lock, -ENOTTY for unknown requests.
 */
int tdma_ioctl(struct rtnet_device *rtdev, unsigned int request,
               unsigned long arg)
{
    struct tdma_config cfg;
    int ret;

    ret = copy_from_user(&cfg, (void *)arg, sizeof(cfg));
    if (ret != 0)
        return -EFAULT;

    if (down_interruptible(&rtdev->nrt_lock))
        return -ERESTARTSYS;

    switch (request) {
#ifdef CONFIG_RTNET_TDMA_MASTER
    case TDMA_IOC_MASTER:
        ret = tdma_ioctl_master(rtdev, &cfg);
        break;
#endif
    case TDMA_IOC_SLAVE:
        ret = tdma_ioctl_slave(rtdev, &cfg);
        break;

    case TDMA_IOC_CAL_RESULT_SIZE:
        ret = tdma_ioctl_cal_result_size(rtdev, &cfg);
        break;

    case TDMA_IOC_SET_SLOT:
        ret = tdma_ioctl_set_slot(rtdev, &cfg);
        break;

    case TDMA_IOC_REMOVE_SLOT:
        ret = tdma_ioctl_remove_slot(rtdev, &cfg);
        break;

    case TDMA_IOC_DETACH:
        ret = tdma_ioctl_detach(rtdev);
        break;

    default:
        /* unknown request code */
        ret = -ENOTTY;
    }

    up(&rtdev->nrt_lock);

    return ret;
}
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -