📄 tg3.c
字号:
	/* NOTE(review): this chunk begins mid-function -- the enclosing
	 * hardware-init routine's header (and the first arguments of the call
	 * below) are outside this view.  Do not treat this as the start of a
	 * definition. */
		       virt_to_bus(tp->rx_std));

	/* 5705 has a single fixed-size standard RX producer ring; other chips
	 * take their BD settings from a table defined earlier in the file. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
	} else {
		TG3_WRITE_SETTINGS(table_not_5705);
	}
	}

	/* There is only one send ring on 5705, no need to explicitly
	 * disable the others. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		/* Clear out send RCB ring in SRAM. */
		for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB;
		     i += TG3_BDINFO_SIZE)
			tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS,
				      BDINFO_FLAGS_DISABLED);
	}

	/* Reset both the host- and NIC-side send producer indices to 0,
	 * then point the send ring control block at the TX ring. */
	tp->tx_prod = 0;
	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
	tw32_mailbox2(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
		       virt_to_bus(tp->tx_ring),
		       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
		       NIC_SRAM_TX_BUFFER_DESC);

	/* There is only one receive return ring on 5705, no need to explicitly
	 * disable the others. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
		     i += TG3_BDINFO_SIZE) {
			tg3_write_mem(i + TG3_BDINFO_MAXLEN_FLAGS,
				      BDINFO_FLAGS_DISABLED);
		}
	}

	/* Reset the RX return ring consumer index and install its RCB. */
	tp->rx_rcb_ptr = 0;
	tw32_mailbox2(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
	tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
		       virt_to_bus(tp->rx_rcb),
		       (TG3_RX_RCB_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
		       0);

	/* Publish the standard RX producer index; jumbo ring stays empty. */
	tp->rx_std_ptr = TG3_DEF_RX_RING_PENDING;
	tw32_mailbox2(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
		      tp->rx_std_ptr);
	tw32_mailbox2(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 0);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask. */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);
	if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
		rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;

	/* 5705 (except A0) on a slow bus, and not the 5788 variant:
	 * enable long FIFO bursts for the read DMA engine. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		if (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
			if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			    !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
				rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
			}
		}
	}

	/* Setup host coalescing engine: disable it and poll (up to ~20 ms)
	 * for the enable bit to clear before reprogramming it below. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	/* Bring up the MAC with TX/RX engines and clear the stat counters. */
	tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
		MAC_MODE_FHDE_ENABLE;
	tw32_carefully(MAC_MODE,
		       tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
		       MAC_MODE_TXSTAT_CLEAR);

	/* GPIO/local-control setup; 5700 needs GPIO1 driven as an output. */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	tw32_carefully(GRC_LOCAL_CTRL, tp->grc_local_ctrl);

	/* Ack/clear the interrupt mailbox (write then read back to flush). */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
	tr32(MAILBOX_INTERRUPT_0);

	/* 5705 has no separate DMA completion engine. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32_carefully(DMAC_MODE, DMAC_MODE_ENABLE);
	}

	/* Write DMA engine mode; RX acceleration only on fast buses and
	 * never on the 5788. */
	val = ( WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
		WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
		WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
		WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
		WDMAC_MODE_LNGREAD_ENAB);
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) &&
	    ((tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) != 0) &&
	    !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
		val |= WDMAC_MODE_RX_ACCEL;
	}
	tw32_carefully(WDMAC_MODE, val);

	/* PCI-X burst / split-transaction tuning per ASIC revision. */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
		val = tr32(TG3PCI_X_CAPS);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			/* NOTE(review): this masks with PCIX_CAPS_BURST_MASK,
			 * while the 5704 branch below masks with the
			 * complement (~).  The missing '~' here looks like a
			 * dropped bit-clear -- confirm against the reference
			 * driver before changing. */
			val &= PCIX_CAPS_BURST_MASK;
			val |= (PCIX_CAPS_MAX_BURST_CPIOB <<
				PCIX_CAPS_BURST_SHIFT);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
			val |= (PCIX_CAPS_MAX_BURST_CPIOB <<
				PCIX_CAPS_BURST_SHIFT);
			if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
				val |= (tp->split_mode_max_reqs <<
					PCIX_CAPS_SPLIT_SHIFT);
		}
		tw32(TG3PCI_X_CAPS, val);
	}

	tw32_carefully(RDMAC_MODE, rdmac_mode);

	/* Bulk register programming: register/value pairs consumed by
	 * TG3_WRITE_SETTINGS. */
	{
		static const uint32_t table_all[] = {
			/* MTU + ethernet header + FCS + optional VLAN tag */
			MAC_RX_MTU_SIZE, ETH_MAX_MTU + ETH_HLEN + 8,
			/* The slot time is changed by tg3_setup_phy if we
			 * run at gigabit with half duplex. */
			MAC_TX_LENGTHS, (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
			(6 << TX_LENGTHS_IPG_SHIFT) |
			(32 << TX_LENGTHS_SLOT_TIME_SHIFT),
			/* Receive rules. */
			MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS,
			RCVLPC_CONFIG, 0x0181,
			/* Receive/send statistics. */
			RCVLPC_STATS_ENABLE, 0xffffff,
			RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE,
			SNDDATAI_STATSENAB, 0xffffff,
			SNDDATAI_STATSCTRL,
			(SNDDATAI_SCTRL_ENABLE |SNDDATAI_SCTRL_FASTUPD),
			/* Host coalescing engine */
			HOSTCC_RXCOL_TICKS, 0,
			HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS,
			HOSTCC_RXMAX_FRAMES, 1,
			/* NOTE(review): TXMAX_FRAMES is seeded with an
			 * RX-named constant -- presumably intentional reuse
			 * of the same low threshold; verify. */
			HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES,
			HOSTCC_RXCOAL_MAXF_INT, 1,
			HOSTCC_TXCOAL_MAXF_INT, 0,
			/* Status/statistics block address. */
			/* Etherboot lives below 4GB, so HIGH == 0 */
			HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
			/* No need to enable 32byte coalesce mode. */
			HOSTCC_MODE, HOSTCC_MODE_ENABLE | 0,
			RCVCC_MODE,
			RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE,
			RCVLPC_MODE, RCVLPC_MODE_ENABLE,
			RCVDCC_MODE,
			RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE,
			SNDDATAC_MODE, SNDDATAC_MODE_ENABLE,
			SNDBDC_MODE,
			SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE,
			RCVBDI_MODE,
			RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB,
			RCVDBDI_MODE,
			RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ,
			SNDDATAI_MODE, SNDDATAI_MODE_ENABLE,
			SNDBDI_MODE,
			SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE,
			SNDBDS_MODE,
			SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE,
			/* Accept all multicast frames. */
			MAC_HASH_REG_0, 0xffffffff,
			MAC_HASH_REG_1, 0xffffffff,
			MAC_HASH_REG_2, 0xffffffff,
			MAC_HASH_REG_3, 0xffffffff,
		};
		static const uint32_t table_not_5705[] = {
			/* Host coalescing engine */
			HOSTCC_RXCOAL_TICK_INT, 0,
			HOSTCC_TXCOAL_TICK_INT, 0,
			/* Status/statistics block address. */
			/* Etherboot lives below 4GB, so HIGH == 0 */
			HOSTCC_STAT_COAL_TICKS, DEFAULT_STAT_COAL_TICKS,
			HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, 0,
			HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK,
			HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK,
			RCVLSC_MODE,
			RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE,
			MBFREE_MODE, MBFREE_MODE_ENABLE,
		};
		TG3_WRITE_SETTINGS(table_all);
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     virt_to_bus(tp->hw_stats));
		tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     virt_to_bus(tp->hw_status));
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
			TG3_WRITE_SETTINGS(table_not_5705);
		}
	}

	/* Enable TX/RX MACs and the MI (MDIO) interface. */
	tp->tx_mode = TX_MODE_ENABLE;
	tw32_carefully(MAC_TX_MODE, tp->tx_mode);
	tp->rx_mode = RX_MODE_ENABLE;
	tw32_carefully(MAC_RX_MODE, tp->rx_mode);
	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_carefully(MAC_MI_MODE, tp->mi_mode);
	tw32(MAC_LED_CTRL, 0);
	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_id == PHY_ID_SERDES) {
		tw32_carefully(MAC_RX_MODE, RX_MODE_RESET);
	}
	tp->rx_mode |= RX_MODE_KEEP_VLAN_TAG; /* drop tagged vlan packets */
	tw32_carefully(MAC_RX_MODE, tp->rx_mode);

	/* 5703 A1 SerDes workaround (magic value from the vendor driver). */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
		tw32(MAC_SERDES_CFG, 0x616000);

	/* Prevent chip from dropping frames when flow control
	 * is enabled. */
	tw32(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
	tr32(MAC_LOW_WMARK_MAX_RX_FRAME);

	err = tg3_setup_phy(tp);

	/* Ignore CRC stats */

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	/* Number of receive rule slots: 8 on 5705, 16 elsewhere; ASF
	 * firmware reserves the last 4. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	/* Deliberate cascade: clear every unused rule slot from
	 * (limit - 1) down to 4; slots 2-3 stay untouched. */
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
		/* fallthrough */
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
		/* fallthrough */
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
		/* fallthrough */
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
		/* fallthrough */
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
		/* fallthrough */
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
		/* fallthrough */
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
		/* fallthrough */
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
		/* fallthrough */
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
		/* fallthrough */
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
		/* fallthrough */
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
		/* fallthrough */
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
		/* fallthrough */
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	};

	return err;
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the EEPROM state machine and detect whether the chip has a real
 * NVRAM interface (and whether it runs in buffered-page mode), recording
 * the result in tp->tg3_flags. */
static void tg3_nvram_init(struct tg3 *tp)
{
	tw32(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD << EEPROM_ADDR_CLKPERD_SHIFT)));
	mdelay(1);

	/* Enable seeprom accesses. */
	tw32_carefully(GRC_LOCAL_CTRL,
		       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		uint32_t nvcfg1 = tr32(NVRAM_CFG1);

		tp->tg3_flags |= TG3_FLAG_NVRAM;
		if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
			if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		} else {
			/* No flash interface: force direct (non-bypass)
			 * access mode. */
			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
		}
	} else {
		/* 5700/5701 use the legacy EEPROM path only. */
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
	}
}

/* Read one 32-bit word from the legacy serial EEPROM via the GRC EEPROM
 * state machine.  offset must be word-aligned and within the address mask.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout
 * (~1 s worst case). */
static int tg3_nvram_read_using_eeprom(
	struct tg3 *tp __unused, uint32_t offset, uint32_t *val)
{
	uint32_t tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0) {
		return -EINVAL;
	}

	/* Preserve unrelated bits, clear address/device/read fields. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);
		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE)) {
		return -EBUSY;
	}

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}

/* Read one 32-bit word from NVRAM.  Falls back to the legacy EEPROM path
 * when no NVRAM interface was detected; otherwise arbitrates for the NVRAM
 * (software arbitration slot 1), issues a read command, and byte-swaps the
 * result.  Returns 0 on success, -EINVAL on a bad offset, -EBUSY on
 * command timeout. */
static int tg3_nvram_read(struct tg3 *tp, uint32_t offset, uint32_t *val)
{
	int i, saw_done_clear;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Buffered flash addresses are (page number, offset-in-page). */
	if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
		offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
			  NVRAM_BUFFERED_PAGE_POS) +
			(offset % NVRAM_BUFFERED_PAGE_SIZE);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Request software arbitration slot 1.
	 * NOTE(review): if the grant never arrives within ~20 ms the code
	 * proceeds anyway -- the grant bit is not re-checked after the
	 * loop; confirm this is acceptable. */
	tw32(NVRAM_SWARB, SWARB_REQ_SET1);
	for (i = 0; i < 1000; i++) {
		if (tr32(NVRAM_SWARB) & SWARB_GNT1)
			break;
		udelay(20);
	}

	tw32(NVRAM_ADDR, offset);
	tw32(NVRAM_CMD,
	     NVRAM_CMD_RD | NVRAM_CMD_GO |
	     NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Wait for done bit to clear then set again. */
	saw_done_clear = 0;
	for (i = 0; i < 1000; i++) {
		udelay(10);
		if (!saw_done_clear &&
		    !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
			saw_done_clear = 1;
		else if (saw_done_clear &&
			 (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
			break;
	}
	if (i >= 1000) {
		tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
		return -EBUSY;
	}

	/* NVRAM data is big-endian relative to the host view. */
	*val = bswap_32(tr32(NVRAM_RDDATA));
	/* NOTE(review): magic 0x20 -- presumably the SWARB_REQ_CLR1 release
	 * value used in the timeout path above; confirm and use the named
	 * constant. */
	tw32(NVRAM_SWARB, 0x20);
	return 0;
}

/* Maps (PCI subsystem vendor, device) to the PHY chip on that board. */
struct subsys_tbl_ent {
	uint16_t subsys_vendor, subsys_devid;
	uint32_t phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ 0x14e4, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ 0x14e4, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ 0x14e4, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ 0x14e4, 0x0003, PHY_ID_SERDES }, /* BCM95700A9 */
	{ 0x14e4, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ 0x14e4, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ 0x14e4, 0x0007, PHY_ID_SERDES }, /* BCM95701A7 */
	{ 0x14e4, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ 0x14e4, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ 0x14e4, 0x0009, PHY_ID_BCM5701 }, /* BCM95703Ax1 */
	{ 0x14e4, 0x8009, PHY_ID_BCM5701 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	/* { PCI_VENDOR_ID_3COM, 0x1002, PHY_ID_XXX }, 3C996CT */
	/* { PCI_VENDOR_ID_3COM, 0x1003, PHY_ID_XXX }, 3C997T */
	{ PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES }, /* 3C996SX */
	/* { PCI_VENDOR_ID_3COM, 0x1005, PHY_ID_XXX }, 3C997SZ */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES }, /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 } /* NC7780_2 */
};

/* Determine which PHY is attached: first by PCI subsystem ID lookup in
 * the table above, then refined from the NIC's SRAM configuration data.
 * NOTE(review): this function is truncated in the visible chunk -- the
 * remainder is outside this view. */
static int tg3_phy_probe(struct tg3 *tp)
{
	uint32_t eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
	uint32_t hw_phy_id, hw_phy_id_masked;
	enum phy_led_mode eeprom_led_mode;
	uint32_t val;
	unsigned i;
	int eeprom_signature_found, err;

	tp->phy_id = PHY_ID_INVALID;
	for (i = 0;
	     i < sizeof(subsys_id_to_phy_id)/sizeof(subsys_id_to_phy_id[0]);
	     i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->subsystem_device)) {
			tp->phy_id = subsys_id_to_phy_id[i].phy_id;
			break;
		}
	}

	eeprom_phy_id = PHY_ID_INVALID;
	eeprom_led_mode = led_mode_auto;
	eeprom_signature_found = 0;
	tg3_read_mem(NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		uint32_t nic_cfg;

		tg3_read_mem(NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;
		eeprom_signature_found = 1;
		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
			eeprom_phy_id = PHY_ID_SERDES;
		} else {
			uint32_t nic_phy_id;

			tg3_read_mem(NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
			/* NOTE(review): the source chunk ends mid-token
			 * on the next line. */
			if (nic_p
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -