/* iph5526.c — Interphase 5526 (HP Tachyon) Fibre Channel driver, excerpt.
 * NOTE(review): this copy was extracted from a web code viewer; the original
 * formatting was lost and the file is truncated mid-function at the end. */
/* build_queues() (continued — the function head is above this excerpt):
 * carve out the DMA queues and buffer pools that the Tachyon chip reads
 * via bus addresses.  Returns 0 on any allocation failure, 1 on success.
 * NOTE(review): every failure path below returns 0 without freeing the
 * pages/buffers acquired by earlier steps — leaks on partial failure;
 * confirm the caller tears everything down on a 0 return. */
/* Allocate one huge chunk of memory... helps while reassembling
 * frames.  Order-5 == 2^5 contiguous pages. */
if ( (addr = (u_char *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
	T_MSG("failed to get MFSBQ page");
	return 0;
}
/* fill in addresses of empty buffers: each MFSBQ entry holds the
 * big-endian bus address of one MFS_BUFFER_SIZE slice of the chunk */
for (i = 0; i < MFSBQ_LENGTH; i++) {
	for (j = 0; j < NO_OF_ENTRIES; j++) {
		*(fi->q.ptr_mfsbq_base + i*NO_OF_ENTRIES + j) =
				htonl(virt_to_bus(addr));
		addr += MFS_BUFFER_SIZE;
	}
}
/* The number of entries in each MFS buffer is 8. There are 8
 * MFS buffers. That leaves us with 4096-256 bytes. We use them
 * as temporary space for ELS frames. This is done to make sure that
 * the addresses are aligned.
 */
fi->g.els_buffer[0] = fi->q.ptr_mfsbq_base + MFSBQ_LENGTH*NO_OF_ENTRIES;
/* consecutive ELS buffers are 64 elements apart (presumably u_int
 * elements, i.e. 256 bytes — confirm against the struct declaration) */
for (i = 1; i < MAX_PENDING_FRAMES; i++)
	fi->g.els_buffer[i] = fi->g.els_buffer[i-1] + 64;

/* build SFSBQ: a single zeroed page of queue entries */
if ( (fi->q.ptr_sfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
	T_MSG("failed to get SFSBQ page");
	return 0;
}
memset((char *)fi->q.ptr_sfsbq_base, 0, SFSBQ_LENGTH * 32);
/* fill in addresses of empty buffers.  Each SFS buffer is kmalloc'ed
 * at twice SFS_BUFFER_SIZE so an aligned address can be carved out of
 * it: ptr_sfs_buffers[] keeps the raw kmalloc pointer (presumably for
 * a later kfree — confirm), while the queue entry gets the bus address
 * of the ALIGNED_SFS_ADDR-adjusted pointer. */
for (i = 0; i < SFSBQ_LENGTH; i++)
	for (j = 0; j < NO_OF_ENTRIES; j++){
		addr = kmalloc(SFS_BUFFER_SIZE*2, GFP_KERNEL);
		if (addr == NULL){
			T_MSG("ptr_sfs_buffer : memory not allocated");
			return 0;
		}
		else {
			int offset = ALIGNED_SFS_ADDR(addr);
			memset((char *)addr, 0, SFS_BUFFER_SIZE);
			fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES +j] = (u_int *)addr;
			addr += offset;
			*(fi->q.ptr_sfsbq_base + i*NO_OF_ENTRIES + j) =
					htonl(virt_to_bus(addr));
		}
	}
/* The number of entries in each SFS buffer is 8. There are 8
 * MFS buffers. That leaves us with 4096-256 bytes. We use them
 * as temporary space for ARP frames. This is done in order to
 * support HW_Types of 0x1 and 0x6.
 */
fi->g.arp_buffer = (char *)fi->q.ptr_sfsbq_base + SFSBQ_LENGTH*NO_OF_ENTRIES*4;

/* build EDB */
if ((fi->q.ptr_edb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
	T_MSG("failed to get EDB page");
	return 0;
}
/* each EDB entry is two u_ints wide */
for (i = 0; i < EDB_LEN; i++)
	fi->q.ptr_edb[i] = fi->q.ptr_edb_base + 2*i;

/* build SEST */
/* OX_IDs range from 0x0 - 0x4FFF. */
if ((fi->q.ptr_sest_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
	T_MSG("failed to get SEST page");
	return 0;
}
for (i = 0; i < SEST_LENGTH; i++)
	fi->q.ptr_sest[i] = fi->q.ptr_sest_base + NO_OF_ENTRIES*i;

/* SDB slots: SDB_SIZE bytes each, i.e. SDB_SIZE/4 u_ints apart */
if ((fi->q.ptr_sdb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
	T_MSG("failed to get SDB page");
	return 0;
}
for (i = 0 ; i < NO_OF_SDB_ENTRIES; i++)
	fi->q.ptr_sdb_slot[i] = fi->q.ptr_sdb_base + (SDB_SIZE/4)*i;

if ((fi->q.ptr_fcp_cmnd_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
	T_MSG("failed to get FCP_CMND page");
	return 0;
}
for (i = 0; i < NO_OF_FCP_CMNDS; i++)
	fi->q.ptr_fcp_cmnd[i] = fi->q.ptr_fcp_cmnd_base + NO_OF_ENTRIES*i;

/* Allocate space for Tachyon Header as well... one header every
 * 16 u_ints */
if ((fi->q.ptr_tachyon_header_base = (u_int *)__get_free_pages(GFP_KERNEL, 0) ) == 0) {
	T_MSG("failed to get tachyon_header page");
	return 0;
}
for (i = 0; i < NO_OF_TACH_HEADERS; i++)
	fi->q.ptr_tachyon_header[i] = fi->q.ptr_tachyon_header_base + 16*i;

/* Allocate memory for indices.
 * Indices should be aligned on 32 byte boundaries.
*/
/* Each host-memory index word is kmalloc'ed at 64 bytes (2*32) so a
 * 32-byte-aligned slot is guaranteed to exist inside the allocation.
 * ptr_host_* keeps the raw kmalloc pointer; host_* is the working
 * pointer that may be bumped forward.
 * NOTE(review): the "indx++" advances by one element, not to the next
 * 32-byte boundary — this only lands on an aligned address for
 * particular kmalloc results/element sizes; confirm the element type. */
fi->q.host_ocq_cons_indx = kmalloc(2*32, GFP_KERNEL);
if (fi->q.host_ocq_cons_indx == NULL){
	T_MSG("fi->q.host_ocq_cons_indx : memory not allocated");
	return 0;
}
fi->q.ptr_host_ocq_cons_indx = fi->q.host_ocq_cons_indx;
if ((u_long)(fi->q.host_ocq_cons_indx) % 32)
	fi->q.host_ocq_cons_indx++;

fi->q.host_hpcq_cons_indx = kmalloc(2*32, GFP_KERNEL);
if (fi->q.host_hpcq_cons_indx == NULL){
	T_MSG("fi->q.host_hpcq_cons_indx : memory not allocated");
	return 0;
}
fi->q.ptr_host_hpcq_cons_indx= fi->q.host_hpcq_cons_indx;
if ((u_long)(fi->q.host_hpcq_cons_indx) % 32)
	fi->q.host_hpcq_cons_indx++;

fi->q.host_imq_prod_indx = kmalloc(2*32, GFP_KERNEL);
if (fi->q.host_imq_prod_indx == NULL){
	T_MSG("fi->q.host_imq_prod_indx : memory not allocated");
	return 0;
}
fi->q.ptr_host_imq_prod_indx = fi->q.host_imq_prod_indx;
if ((u_long)(fi->q.host_imq_prod_indx) % 32)
	fi->q.host_imq_prod_indx++;

LEAVE("build_queues");
return 1;
}

/*
 * Program the Tachyon's queue base/length/index registers and the
 * Frame Manager, then kick loop initialization (soft-assigned AL_PA,
 * L_Port).  Called after build_queues() and again on chip reset —
 * the host-side queue memory is re-cleared every time.
 */
static void write_to_tachyon_registers(struct fc_info *fi)
{
u_int bus_addr, bus_indx_addr, i;

	ENTER("write_to_tachyon_registers");

	/* Clear Queues each time Tachyon is reset */
	memset((char *)fi->q.ptr_ocq_base, 0, OCQ_LENGTH * 32);
	memset((char *)fi->q.ptr_imq_base, 0, IMQ_LENGTH * 32);
	memset((char *)fi->q.ptr_edb_base, 0, EDB_LEN * 8);
	memset((char *)fi->q.ptr_sest_base, 0, SEST_LENGTH * 32);
	memset((char *)fi->q.ptr_sdb_base, 0, NO_OF_SDB_ENTRIES * SDB_SIZE);
	/* note: header area is filled with 0xFF, not zeroed */
	memset((char *)fi->q.ptr_tachyon_header_base, 0xFF,
	       NO_OF_TACH_HEADERS * TACH_HEADER_SIZE);

	/* all OX_IDs and SDB slots start out free */
	for (i = 0; i < SEST_LENGTH; i++)
		fi->q.free_scsi_oxid[i] = OXID_AVAILABLE;
	for (i = 0; i < NO_OF_SDB_ENTRIES; i++)
		fi->q.sdb_slot_status[i] = SDB_FREE;

	/* take the chip offline before reprogramming it */
	take_tachyon_offline(fi);
	writel(readl(fi->t_r.ptr_tach_config_reg) | SCSI_ENABLE |
	       WRITE_STREAM_SIZE | READ_STREAM_SIZE | PARITY_EVEN |
	       OOO_REASSEMBLY_DISABLE, fi->t_r.ptr_tach_config_reg);

	/* Write OCQ registers */
	fi->q.ocq_prod_indx = 0;
	*(fi->q.host_ocq_cons_indx) = 0;
	/* The Tachyon needs to be passed the "real" (bus) address */
	bus_addr = virt_to_bus(fi->q.ptr_ocq_base);
	writel(bus_addr, fi->t_r.ptr_ocq_base_reg);
	writel(OCQ_LENGTH - 1, fi->t_r.ptr_ocq_len_reg);
	bus_indx_addr = virt_to_bus(fi->q.host_ocq_cons_indx);
	writel(bus_indx_addr, fi->t_r.ptr_ocq_cons_indx_reg);

	/* Write IMQ registers */
	fi->q.imq_cons_indx = 0;
	*(fi->q.host_imq_prod_indx) = 0;
	bus_addr = virt_to_bus(fi->q.ptr_imq_base);
	writel(bus_addr, fi->t_r.ptr_imq_base_reg);
	writel(IMQ_LENGTH - 1, fi->t_r.ptr_imq_len_reg);
	bus_indx_addr = virt_to_bus(fi->q.host_imq_prod_indx);
	writel(bus_indx_addr, fi->t_r.ptr_imq_prod_indx_reg);

	/* Write MFSBQ registers */
	fi->q.mfsbq_prod_indx = MFSBQ_LENGTH - 1;
	fi->q.mfsbuff_end = MFS_BUFFER_SIZE - 1;
	fi->q.mfsbq_cons_indx = 0;
	bus_addr = virt_to_bus(fi->q.ptr_mfsbq_base);
	writel(bus_addr, fi->t_r.ptr_mfsbq_base_reg);
	writel(MFSBQ_LENGTH - 1, fi->t_r.ptr_mfsbq_len_reg);
	writel(fi->q.mfsbuff_end, fi->t_r.ptr_mfsbuff_len_reg);
	/* Do this last as tachyon will prefetch the
	 * first entry as soon as we write to it.
	 */
	writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);

	/* Write SFSBQ registers */
	fi->q.sfsbq_prod_indx = SFSBQ_LENGTH - 1;
	fi->q.sfsbuff_end = SFS_BUFFER_SIZE - 1;
	fi->q.sfsbq_cons_indx = 0;
	bus_addr = virt_to_bus(fi->q.ptr_sfsbq_base);
	writel(bus_addr, fi->t_r.ptr_sfsbq_base_reg);
	writel(SFSBQ_LENGTH - 1, fi->t_r.ptr_sfsbq_len_reg);
	writel(fi->q.sfsbuff_end, fi->t_r.ptr_sfsbuff_len_reg);
	/* Do this last as tachyon will prefetch the first
	 * entry as soon as we write to it.
*/ writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg); /* Write SEST registers */ bus_addr = virt_to_bus(fi->q.ptr_sest_base); writel(bus_addr, fi->t_r.ptr_sest_base_reg); writel(SEST_LENGTH - 1, fi->t_r.ptr_sest_len_reg); /* the last 2 bits _should_ be 1 */ writel(SEST_BUFFER_SIZE - 1, fi->t_r.ptr_scsibuff_len_reg); /* write AL_TIME & E_D_TOV into the registers */ writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg); /* Tell Tachyon to pick a Soft Assigned AL_PA */ writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg); /* Read the WWN from EEPROM . But, for now we assign it here. */ writel(WORLD_WIDE_NAME_LOW, fi->t_r.ptr_fm_wwn_low_reg); writel(WORLD_WIDE_NAME_HIGH, fi->t_r.ptr_fm_wwn_hi_reg); DPRINTK1("TACHYON initializing as L_Port...\n"); writel(INITIALIZE, fi->t_r.ptr_fm_control_reg); LEAVE("write_to_tachyon_registers");}static irqreturn_t tachyon_interrupt(int irq, void* dev_id, struct pt_regs* regs){struct Scsi_Host *host = dev_id;struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;struct fc_info *fi = hostdata->fi; u_long flags; spin_lock_irqsave(&fi->fc_lock, flags); tachyon_interrupt_handler(irq, dev_id, regs); spin_unlock_irqrestore(&fi->fc_lock, flags); return IRQ_HANDLED;}static void tachyon_interrupt_handler(int irq, void* dev_id, struct pt_regs* regs){struct Scsi_Host *host = dev_id;struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;struct fc_info *fi = hostdata->fi; u_int *ptr_imq_entry;u_int imq_int_type, current_IMQ_index = 0, prev_IMQ_index;int index, no_of_entries = 0; DPRINTK("\n"); ENTER("tachyon_interrupt"); if (fi->q.host_imq_prod_indx != NULL) { current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx)); } else { /* _Should not_ happen */ T_MSG("IMQ_indx NULL. 
DISABLING INTERRUPTS!!!\n"); writel(0x0, fi->i_r.ptr_ichip_hw_control_reg); } if (current_IMQ_index > fi->q.imq_cons_indx) no_of_entries = current_IMQ_index - fi->q.imq_cons_indx; else if (current_IMQ_index < fi->q.imq_cons_indx) no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index); if (no_of_entries == 0) { u_int ichip_status; ichip_status = readl(fi->i_r.ptr_ichip_hw_status_reg); if (ichip_status & 0x20) { /* Should _never_ happen. Might require a hard reset */ T_MSG("Too bad... PCI Bus Error. Resetting (i)chip"); reset_ichip(fi); T_MSG("DISABLING INTERRUPTS!!!\n"); writel(0x0, fi->i_r.ptr_ichip_hw_control_reg); } } prev_IMQ_index = current_IMQ_index; for (index = 0; index < no_of_entries; index++) { ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx]; imq_int_type = ntohl(*ptr_imq_entry); completion_message_handler(fi, imq_int_type); if ((fi->g.link_up == FALSE) && ((imq_int_type == MFS_BUF_WARN) || (imq_int_type == SFS_BUF_WARN) || (imq_int_type == IMQ_BUF_WARN))) break; update_IMQ_indx(fi, 1); /* Check for more entries */ current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx)); if (current_IMQ_index != prev_IMQ_index) { no_of_entries++; prev_IMQ_index = current_IMQ_index; } } /*end of for loop*/ LEAVE("tachyon_interrupt"); return;}static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi){int i; ENTER("handle_SFS_BUF_WARN_interrupt"); if (fi->g.link_up == FALSE) { reset_tachyon(fi, SOFTWARE_RESET); return; } /* Free up all but one entry in the Q. */ for (i = 0; i < ((SFSBQ_LENGTH - 1) * NO_OF_ENTRIES); i++) { handle_SFS_interrupt(fi); update_IMQ_indx(fi, 1); } LEAVE("handle_SFS_BUF_WARN_interrupt");}/* Untested_Code_Begin */ static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi){int i; ENTER("handle_MFS_BUF_WARN_interrupt"); if (fi->g.link_up == FALSE) { reset_tachyon(fi, SOFTWARE_RESET); return; } /* FIXME: freeing up 8 entries. 
*/ for (i = 0; i < NO_OF_ENTRIES; i++) { handle_MFS_interrupt(fi); update_IMQ_indx(fi, 1); } LEAVE("handle_MFS_BUF_WARN_interrupt");}/*Untested_Code_End */static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi){u_int *ptr_imq_entry;u_int imq_int_type, current_IMQ_index = 0, temp_imq_cons_indx;int index, no_of_entries = 0; ENTER("handle_IMQ_BUF_WARN_interrupt"); if (fi->g.link_up == FALSE) { reset_tachyon(fi, SOFTWARE_RESET); return; } current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx)); if (current_IMQ_index > fi->q.imq_cons_indx) no_of_entries = current_IMQ_index - fi->q.imq_cons_indx; else if (current_IMQ_index < fi->q.imq_cons_indx) no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index); /* We don't want to look at the same IMQ entry again. */ temp_imq_cons_indx = fi->q.imq_cons_indx + 1; if (no_of_entries != 0) no_of_entries -= 1; for (index = 0; index < no_of_entries; index++) { ptr_imq_entry = fi->q.ptr_imqe[temp_imq_cons_indx]; imq_int_type = ntohl(*ptr_imq_entry); if (imq_int_type != IMQ_BUF_WARN) completion_message_handler(fi, imq_int_type); temp_imq_cons_indx++; if (temp_imq_cons_indx == IMQ_LENGTH) temp_imq_cons_indx = 0; } /*end of for loop*/ if (no_of_entries != 0) update_IMQ_indx(fi, no_of_entries); LEAVE("handle_IMQ_BUF_WARN_interrupt");}static void completion_message_handler(struct fc_info *fi, u_int imq_int_type){ switch(imq_int_type) { case OUTBOUND_COMPLETION: DPRINTK("OUTBOUND_COMPLETION message received"); break; case OUTBOUND_COMPLETION_I: DPRINTK("OUTBOUND_COMPLETION_I message received"); handle_OCI_interrupt(fi); break; case OUT_HI_PRI_COMPLETION: DPRINTK("OUT_HI_PRI_COMPLETION message received"); break; case OUT_HI_PRI_COMPLETION_I: DPRINTK("OUT_HI_PRI_COMPLETION_I message received"); break;
/* NOTE(review): the remainder of completion_message_handler() and the
 * rest of the file were lost in extraction (web-viewer UI text removed
 * here).  Recover the tail from the original driver source. */