pcibr_rrb.c
/*
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>

void do_pcibr_rrb_clear(bridge_t *, int);
void do_pcibr_rrb_flush(bridge_t *, int);
int do_pcibr_rrb_count_valid(bridge_t *, pciio_slot_t, int);
int do_pcibr_rrb_count_avail(bridge_t *, pciio_slot_t);
int do_pcibr_rrb_alloc(bridge_t *, pciio_slot_t, int, int);
int do_pcibr_rrb_free(bridge_t *, pciio_slot_t, int, int);
void do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
int pcibr_wrb_flush(devfs_handle_t);
int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
int pcibr_rrb_check(devfs_handle_t, int *, int *, int *, int *);
void pcibr_rrb_flush(devfs_handle_t);
int pcibr_slot_initial_rrb_alloc(devfs_handle_t, pciio_slot_t);
void pcibr_rrb_debug(char *, pcibr_soft_t);

/*
 * RRB Management
 *
 * All the do_pcibr_rrb_ routines manipulate the Read Response Buffer (rrb)
 * registers within the Bridge.  Two 32-bit registers (b_rrb_map[2], also
 * known as the b_even_resp & b_odd_resp registers) are used to allocate the
 * 16 rrbs to devices.  The b_even_resp register represents even-numbered
 * devices, and b_odd_resp represents odd-numbered devices.  Each rrb is
 * represented by 4 bits within a register:
 *   BRIDGE & XBRIDGE: 1 enable bit, 1 virtual channel bit,  2 device bits
 *   PIC:              1 enable bit, 2 virtual channel bits, 1 device bit
 * PIC has 4 devices per bus and 4 virtual channels (1 normal & 3 virtual)
 * per device.  BRIDGE & XBRIDGE have 8 devices per bus and 2 virtual
 * channels (1 normal & 1 virtual) per device.  See the BRIDGE and PIC ASIC
 * Programmer's Reference guides for more information.
 */

#define RRB_MASK (0xf)                  /* mask a single rrb within reg */
#define RRB_SIZE (4)                    /* sizeof rrb within reg (bits) */

#define RRB_ENABLE_BIT(bridge)          (0x8)  /* [BRIDGE | PIC]_RRB_EN */
#define NUM_PDEV_BITS(bridge)           (is_pic((bridge)) ? 1 : 2)
#define NUM_VDEV_BITS(bridge)           (is_pic((bridge)) ? 2 : 1)
#define NUMBER_VCHANNELS(bridge)        (is_pic((bridge)) ? 4 : 2)
#define SLOT_2_PDEV(bridge, slot)       ((slot) >> 1)
#define SLOT_2_RRB_REG(bridge, slot)    ((slot) & 0x1)

/* validate that the slot and virtual channel are valid for a given bridge */
#define VALIDATE_SLOT_n_VCHAN(bridge, s, v) \
    (is_pic((bridge)) ? \
        (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && \
          (((v) >= 0) && ((v) <= 3))) ? 1 : 0) : \
        (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)7)) && \
          (((v) >= 0) && ((v) <= 1))) ? 1 : 0))
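/*
 * Worked example of the encoding above (illustrative, derived from the
 * macros; not from the original comments).  The routines below build a
 * 4-bit rrb value as
 *     rrb_bits = RRB_ENABLE_BIT(bridge) | (vchan << NUM_PDEV_BITS(bridge))
 *                                       | SLOT_2_PDEV(bridge, slot)
 * So slot 3 / vchan 2 on a PIC bus encodes as 0x8 | (2 << 1) | (3 >> 1) = 0xd,
 * tracked in b_odd_resp (since 3 & 0x1 == 1), while slot 5 / vchan 1 on a
 * BRIDGE/XBRIDGE bus encodes as 0x8 | (1 << 2) | (5 >> 1) = 0xe, also in
 * b_odd_resp.
 */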
/*
 * Count how many RRBs are marked valid for the specified PCI slot
 * and virtual channel.  Return the count.
 */
int
do_pcibr_rrb_count_valid(bridge_t *bridge, pciio_slot_t slot, int vchan)
{
    bridgereg_t tmp;
    uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
    int rrb_index, cnt = 0;

    if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
        printk(KERN_WARNING "do_pcibr_rrb_count_valid() invalid slot/vchan [%d/%d]\n", slot, vchan);
        return 0;
    }

    enable_bit = RRB_ENABLE_BIT(bridge);
    vchan_bits = vchan << NUM_PDEV_BITS(bridge);
    pdev_bits = SLOT_2_PDEV(bridge, slot);
    rrb_bits = enable_bit | vchan_bits | pdev_bits;

    if (is_pic(bridge)) {
        tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
    } else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
            tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
        } else {
            tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
        }
    }

    for (rrb_index = 0; rrb_index < 8; rrb_index++) {
        if ((tmp & RRB_MASK) == rrb_bits)
            cnt++;
        tmp = (tmp >> RRB_SIZE);
    }
    return cnt;
}

/*
 * Count how many RRBs are available to be allocated to the specified
 * slot.  Return the count.
 */
int
do_pcibr_rrb_count_avail(bridge_t *bridge, pciio_slot_t slot)
{
    bridgereg_t tmp;
    uint16_t enable_bit;
    int rrb_index, cnt = 0;

    if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, 0)) {
        printk(KERN_WARNING "do_pcibr_rrb_count_avail() invalid slot/vchan");
        return 0;
    }

    enable_bit = RRB_ENABLE_BIT(bridge);

    if (is_pic(bridge)) {
        tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
    } else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
            tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
        } else {
            tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
        }
    }

    for (rrb_index = 0; rrb_index < 8; rrb_index++) {
        if ((tmp & enable_bit) != enable_bit)
            cnt++;
        tmp = (tmp >> RRB_SIZE);
    }
    return cnt;
}

/*
 * Allocate some additional RRBs for the specified slot and the specified
 * virtual channel.  Returns -1 if there were insufficient free RRBs to
 * satisfy the request, or 0 if the request was fulfilled.
 *
 * Note that if a request can be partially filled, it will be, even if
 * we return failure.
 */
int
do_pcibr_rrb_alloc(bridge_t *bridge, pciio_slot_t slot, int vchan, int more)
{
    bridgereg_t reg, tmp = (bridgereg_t)0;
    uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
    int rrb_index;

    if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
        printk(KERN_WARNING "do_pcibr_rrb_alloc() invalid slot/vchan");
        return -1;
    }

    enable_bit = RRB_ENABLE_BIT(bridge);
    vchan_bits = vchan << NUM_PDEV_BITS(bridge);
    pdev_bits = SLOT_2_PDEV(bridge, slot);
    rrb_bits = enable_bit | vchan_bits | pdev_bits;

    if (is_pic(bridge)) {
        reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
    } else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
            reg = tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
        } else {
            reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
        }
    }

    for (rrb_index = 0; ((rrb_index < 8) && (more > 0)); rrb_index++) {
        if ((tmp & enable_bit) != enable_bit) {
            /* clear the rrb and OR the new rrb value into 'reg' */
            reg = reg & ~(RRB_MASK << (RRB_SIZE * rrb_index));
            reg = reg | (rrb_bits << (RRB_SIZE * rrb_index));
            more--;
        }
        tmp = (tmp >> RRB_SIZE);
    }

    if (is_pic(bridge)) {
        bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
    } else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
            BRIDGE_REG_SET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg)) = reg;
        } else {
            bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
        }
    }
    return (more ? -1 : 0);
}
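/*
 * Worked example (illustrative, not from the original comments): a call
 * like do_pcibr_rrb_alloc(bridge, 2, 0, 2) scans the eight nibbles of
 * b_even_resp (2 & 0x1 == 0); any nibble whose enable bit is clear is free,
 * and the first two free nibbles are rewritten to
 *     0x8 | (0 << NUM_PDEV_BITS(bridge)) | (2 >> 1) = 0x9
 * after which the routine returns 0.  If fewer than two free nibbles exist,
 * the ones found are still claimed and -1 is returned (partial allocation).
 */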
/*
 * Release some of the RRBs that have been allocated for the specified
 * slot.  Returns zero for success, or negative if it was unable to free
 * that many RRBs.
 *
 * Note that if a request can be partially fulfilled, it will be, even
 * if we return failure.
 */
int
do_pcibr_rrb_free(bridge_t *bridge, pciio_slot_t slot, int vchan, int less)
{
    bridgereg_t reg, tmp = (bridgereg_t)0, clr = 0;
    uint16_t enable_bit, vchan_bits, pdev_bits, rrb_bits;
    int rrb_index;

    if (!VALIDATE_SLOT_n_VCHAN(bridge, slot, vchan)) {
        printk(KERN_WARNING "do_pcibr_rrb_free() invalid slot/vchan");
        return -1;
    }

    enable_bit = RRB_ENABLE_BIT(bridge);
    vchan_bits = vchan << NUM_PDEV_BITS(bridge);
    pdev_bits = SLOT_2_PDEV(bridge, slot);
    rrb_bits = enable_bit | vchan_bits | pdev_bits;

    if (is_pic(bridge)) {
        reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
    } else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
            reg = tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
        } else {
            reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
        }
    }

    for (rrb_index = 0; ((rrb_index < 8) && (less > 0)); rrb_index++) {
        if ((tmp & RRB_MASK) == rrb_bits) {
            /*
             * The old do_pcibr_rrb_free() code only clears the enable bit,
             * but I say we should clear the whole rrb, i.e.:
             *     reg = reg & ~(RRB_MASK << (RRB_SIZE * rrb_index));
             * To stay compatible with the old code we only clear enable.
             */
            reg = reg & ~(RRB_ENABLE_BIT(bridge) << (RRB_SIZE * rrb_index));
            clr = clr | (enable_bit << (RRB_SIZE * rrb_index));
            less--;
        }
        tmp = (tmp >> RRB_SIZE);
    }

    if (is_pic(bridge)) {
        bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
    } else {
        if (io_get_sh_swapper(NASID_GET(bridge))) {
            BRIDGE_REG_SET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg)) = reg;
        } else {
            bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
        }
    }

    /* call do_pcibr_rrb_clear() for all the rrbs we've freed */
    for (rrb_index = 0; rrb_index < 8; rrb_index++) {
        int evn_odd = SLOT_2_RRB_REG(bridge, slot);
        if (clr & (enable_bit << (RRB_SIZE * rrb_index)))
            do_pcibr_rrb_clear(bridge, (2 * rrb_index) + evn_odd);
    }

    return (less ? -1 : 0);
}

/*
 * Free all the rrbs (both the normal and virtual channels) for the
 * specified slot.
 */
void
do_pcibr_rrb_free_all(pcibr_soft_t pcibr_soft, bridge_t *bridge, pciio_slot_t slot)
{
    int vchan;
    int vchan_total = NUMBER_VCHANNELS(bridge);

    /* pretend we own all 8 rrbs and just ignore the return value */
    for (vchan = 0; vchan < vchan_total; vchan++) {
        (void)do_pcibr_rrb_free(bridge, slot, vchan, 8);
        pcibr_soft->bs_rrb_valid[slot][vchan] = 0;
    }
}

/*
 * Wait for the specified rrb to have no outstanding XIO pkts
 * and for all data to be drained.  Mark the rrb as no longer being
 * valid.
 */
void
do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
{
    bridgereg_t status;

    /* bridge_lock must be held;
     * this RRB must be disabled.
     */
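    /*
     * Note on numbering (inferred from do_pcibr_rrb_free() above, added for
     * illustration): the 'rrb' argument is the physical buffer number 0..15,
     * where nibble 'rrb_index' of b_even_resp controls RRB (2 * rrb_index)
     * and the same nibble of b_odd_resp controls RRB (2 * rrb_index) + 1.
     */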