
gemm_kernel_2x4_3dnow.s

Optimized GotoBLAS libraries
Page 1 of 3
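The listing below is the single-precision 2x4 GEMM micro-kernel from GotoBLAS, written for 32-bit x86 using 3DNow! instructions (pfmul/pfadd operate on two packed floats per MMX register). As a reading aid, here is a minimal C sketch of the computation the kernel performs; the function name, signature, and packed-panel layout are assumptions inferred from the assembly, not part of the original source:

#include <stddef.h>

/* Hypothetical reference for the 2x4 micro-kernel below.
 * a: packed 2-row panel of A (2 floats per k step, as AA advances 2*SIZE);
 * b: packed 4-column panel of B (4 floats per k step, before duplication);
 * c: a 2x4 block of C with column stride ldc, updated as C += alpha*A*B. */
static void gemm_kernel_2x4_ref(size_t k, float alpha,
                                const float *a, const float *b,
                                float *c, size_t ldc)
{
    float acc[4][2] = {{0}};             /* mirrors accumulators %mm4..%mm7 */

    for (size_t l = 0; l < k; l++)       /* k loop (.L12 unrolled 16x, .L16 remainder) */
        for (int j = 0; j < 4; j++)      /* four columns from the B panel */
            for (int i = 0; i < 2; i++)  /* two rows from the A panel */
                acc[j][i] += a[2 * l + i] * b[4 * l + j];

    for (int j = 0; j < 4; j++)          /* .L18: scale by alpha and update C */
        for (int i = 0; i < 2; i++)
            c[j * ldc + i] += alpha * acc[j][i];
}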
/*********************************************************************/
/*                                                                   */
/*             Optimized BLAS libraries                              */
/*                     By Kazushige Goto <kgoto@tacc.utexas.edu>     */
/*                                                                   */
/* Copyright (c) The University of Texas, 2005. All rights reserved. */
/* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING  */
/* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF      */
/* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE,              */
/* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY  */
/* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF     */
/* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO   */
/* THE USE OF THE SOFTWARE OR DOCUMENTATION.                         */
/* Under no circumstances shall University be liable for incidental, */
/* special, indirect, direct or consequential damages or loss of     */
/* profits, interruption of business, or related expenses which may  */
/* arise from use of Software or Documentation, including but not    */
/* limited to those resulting from defects in Software and/or        */
/* Documentation, or loss or inaccuracy of data of any kind.         */
/*********************************************************************/

#define ASSEMBLER
#include "common.h"

#define STACK	16
#define ARGS	 0

#define OLD_M	 4 + STACK + ARGS(%esi)
#define OLD_N	 8 + STACK + ARGS(%esi)
#define OLD_K	12 + STACK + ARGS(%esi)
#define OLD_ALPHA	16 + STACK + ARGS(%esi)
#define OLD_A	20 + STACK + ARGS(%esi)
#define OLD_B	24 + STACK + ARGS(%esi)
#define OLD_C	28 + STACK + ARGS(%esi)
#define OLD_LDC	32 + STACK + ARGS(%esi)
#define OLD_OFFSET 36 + STACK + ARGS(%esi)

#define ALPHA	 0(%esp)
#define K	 8(%esp)
#define N	12(%esp)
#define M	16(%esp)
#define A	20(%esp)
#define C	24(%esp)
#define J	28(%esp)
#define OLD_STACK 32(%esp)
#define OFFSET	36(%esp)
#define KK	40(%esp)
#define KKK	44(%esp)
#define BUFFER  64(%esp)

#define AA	%edx
#define BB	%ecx

#define PREFETCHSIZE (16 * 2 + 6)

#define LOCAL_BUFFER_SIZE  GEMM_Q * GEMM_UNROLL_N * COMPSIZE * 16

#define AOFFSET -32
#define BOFFSET 128

/*
  A scheduling hint was taken from the following URL:
https://sourceforge.net/mailarchive/forum.php?forum_id=426&max_rows=25&style=flat&viewmonth=200309&viewday=11
*/

	PROLOGUE

	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%ebx

	PROFCODE

	EMMS

	movl	%esp, %esi	# save old stack

	subl	$128 + LOCAL_BUFFER_SIZE, %esp
	movl	OLD_M, %ebx
	andl	$-1024, %esp	# align stack

#ifdef WINDOWS_ABI
#if LOCAL_BUFFER_SIZE > 12288
	movl	$0,  4096 * 3(%esp)
#endif
#if LOCAL_BUFFER_SIZE >  8192
	movl	$0,  4096 * 2(%esp)
#endif
#if LOCAL_BUFFER_SIZE >  4096
	movl	$0,  4096 * 1(%esp)
#endif
	movl	$0,  4096 * 0(%esp)
#endif

	movl	OLD_N, %eax
	movl	OLD_K, %ecx
	movl	OLD_A, %edx
	movd	OLD_ALPHA,  %mm3

	movl	%ebx, M
	movl	%eax, N
	movl	%ecx, K
	subl	$AOFFSET * SIZE, %edx	# bias A pointer (AOFFSET is negative)
	movl	%edx, A
	movl	%esi, OLD_STACK

	movl	OLD_B, %edi
	movl	OLD_C, %ebx

	punpckldq %mm3, %mm3	# broadcast alpha into both lanes
	movq	 %mm3, ALPHA

	movl	%ebx, C
	movl	OLD_LDC, %ebp
	leal	(, %ebp, SIZE), %ebp

#ifdef TRMMKERNEL
	movl	OLD_OFFSET, %eax
	movl	%eax, OFFSET
#ifndef LEFT
	negl	%eax
	movl	%eax, KK
#endif
#endif

	movl	N, %eax
	sarl	$2, %eax	# j = (n >> 2)
	movl	%eax, J
	jle	.L30
	ALIGN_3

.L01:
/* Copying to Sub Buffer */
	leal	BUFFER, %ecx

#if defined(TRMMKERNEL) && defined(LEFT)
	movl	OFFSET, %eax
	movl	%eax, KK
#endif

	movl	K, %eax
	sarl	$2, %eax
	jle	.L03
	ALIGN_3

.L02:
	movd	 0 * SIZE(%edi), %mm0
	movd	 1 * SIZE(%edi), %mm1
	movd	 2 * SIZE(%edi), %mm2
	movd	 3 * SIZE(%edi), %mm3
	movd	 4 * SIZE(%edi), %mm4
	movd	 5 * SIZE(%edi), %mm5
	movd	 6 * SIZE(%edi), %mm6
	movd	 7 * SIZE(%edi), %mm7

	prefetchnta	72 * SIZE(%edi)

	punpckldq %mm0, %mm0	# duplicate each element into both lanes
	punpckldq %mm1, %mm1
	punpckldq %mm2, %mm2
	punpckldq %mm3, %mm3
	punpckldq %mm4, %mm4
	punpckldq %mm5, %mm5
	punpckldq %mm6, %mm6
	punpckldq %mm7, %mm7

	movq	%mm0,  0 * SIZE(%ecx)
	movq	%mm1,  2 * SIZE(%ecx)
	movq	%mm2,  4 * SIZE(%ecx)
	movq	%mm3,  6 * SIZE(%ecx)
	movq	%mm4,  8 * SIZE(%ecx)
	movq	%mm5, 10 * SIZE(%ecx)
	movq	%mm6, 12 * SIZE(%ecx)
	movq	%mm7, 14 * SIZE(%ecx)

	movd	 8 * SIZE(%edi), %mm0
	movd	 9 * SIZE(%edi), %mm1
	movd	10 * SIZE(%edi), %mm2
	movd	11 * SIZE(%edi), %mm3
	movd	12 * SIZE(%edi), %mm4
	movd	13 * SIZE(%edi), %mm5
	movd	14 * SIZE(%edi), %mm6
	movd	15 * SIZE(%edi), %mm7

	punpckldq %mm0, %mm0
	punpckldq %mm1, %mm1
	punpckldq %mm2, %mm2
	punpckldq %mm3, %mm3
	punpckldq %mm4, %mm4
	punpckldq %mm5, %mm5
	punpckldq %mm6, %mm6
	punpckldq %mm7, %mm7

	movq	%mm0, 16 * SIZE(%ecx)
	movq	%mm1, 18 * SIZE(%ecx)
	movq	%mm2, 20 * SIZE(%ecx)
	movq	%mm3, 22 * SIZE(%ecx)
	movq	%mm4, 24 * SIZE(%ecx)
	movq	%mm5, 26 * SIZE(%ecx)
	movq	%mm6, 28 * SIZE(%ecx)
	movq	%mm7, 30 * SIZE(%ecx)

	addl	$16 * SIZE, %edi
	addl	$32 * SIZE, %ecx
	decl	%eax
	jne	.L02

.L03:
	movl	K, %eax
	andl	$3, %eax
	BRANCH
	jle	.L10
	ALIGN_2

.L04:
	movd	 0 * SIZE(%edi), %mm0
	movd	 1 * SIZE(%edi), %mm1
	movd	 2 * SIZE(%edi), %mm2
	movd	 3 * SIZE(%edi), %mm3

	punpckldq %mm0, %mm0
	punpckldq %mm1, %mm1
	punpckldq %mm2, %mm2
	punpckldq %mm3, %mm3

	movq	%mm0,  0 * SIZE(%ecx)
	movq	%mm1,  2 * SIZE(%ecx)
	movq	%mm2,  4 * SIZE(%ecx)
	movq	%mm3,  6 * SIZE(%ecx)

	addl	$4 * SIZE, %edi
	addl	$8 * SIZE, %ecx
	decl	%eax
	jne	.L04
	ALIGN_4

.L10:
	movl	C, %esi		# coffset = c
	movl	A, %edx		# aoffset = a
	movl	M,  %ebx
	sarl	$1, %ebx	# i = (m >> 1)
	jle	.L20
	ALIGN_4

.L11:
	leal	- BOFFSET * SIZE + BUFFER, BB

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
#else
	movl	KK,   %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 8), BB
#endif

	movq	        (  0 + AOFFSET) * SIZE(AA), %mm0
	pxor	%mm4, %mm4
	movq	        ( 16 + AOFFSET) * SIZE(AA), %mm1
	pxor	%mm5, %mm5
	PADDING movq	(  0 + BOFFSET) * SIZE(BB), %mm2
	pxor	%mm6, %mm6
	PADDING movq	( 16 + BOFFSET) * SIZE(BB), %mm3
	pxor	%mm7, %mm7

	leal	(%ebp, %ebp, 2), %eax

	prefetchw 2 * SIZE(%esi)
	prefetchw 2 * SIZE(%esi, %ebp)
	prefetchw 2 * SIZE(%esi, %ebp, 2)
	prefetchw 2 * SIZE(%esi, %eax)

#ifndef TRMMKERNEL
	movl	K,  %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$2, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$4, %eax
	je	.L15
	ALIGN_4

/* Main 2x4 kernel: k loop unrolled 16 times; %mm4..%mm7 accumulate the four columns */
.L12:
	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	(  2 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	(  4 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm6
	PADDING prefetch	(PREFETCHSIZE +  0) * SIZE(AA)
	PADDING movq	(  8 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        (  6 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        (  2 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	( 10 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	( 12 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	( 32 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        ( 14 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        (  4 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	( 18 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	( 20 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	( 24 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        ( 22 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        (  6 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	( 26 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	( 28 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	( 48 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        ( 30 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        (  8 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	( 34 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	( 36 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	( 40 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        ( 38 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        ( 10 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	( 42 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	( 44 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	( 64 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        ( 46 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        ( 12 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	( 50 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	( 52 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	( 56 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        ( 54 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        ( 14 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	( 58 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	( 60 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm0, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	( 80 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        ( 62 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        ( 32 + AOFFSET) * SIZE(AA), %mm0

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	( 66 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	( 68 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	( 72 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        ( 70 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 18 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	( 74 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	( 76 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	( 96 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        ( 78 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 20 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	( 82 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	( 84 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	( 88 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        ( 86 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 22 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	( 90 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	( 92 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	(112 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        ( 94 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 24 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	( 98 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	(100 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	(104 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        (102 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 26 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	(106 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	(108 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm1, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	(128 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        (110 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 28 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	(114 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	(116 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	(120 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        (118 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 30 + AOFFSET) * SIZE(AA), %mm1

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm4
	PADDING movq	(122 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm5
	PADDING movq	(124 + BOFFSET) * SIZE(BB), %mm3

	pfmul	%mm1, %mm3
	pfadd	%mm3, %mm6
	PADDING movq	(144 + BOFFSET) * SIZE(BB), %mm3

	pfmul	        (126 + BOFFSET) * SIZE(BB), %mm1
	pfadd	%mm1, %mm7
	movq	        ( 48 + AOFFSET) * SIZE(AA), %mm1

	subl	$-32 * SIZE, AA		# AA += 32 * SIZE; the negative immediate fits in one byte when SIZE is 4
	addl	$128 * SIZE, BB
	decl	%eax
	jne	.L12
	ALIGN_3

/* k remainder (k mod 16) */
.L15:
	movq	ALPHA,  %mm3

#ifndef TRMMKERNEL
	movl	K, %eax
#else
	movl	KKK, %eax
#endif
	andl	$15, %eax
	BRANCH
	je .L18
	ALIGN_3

.L16:
	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm4
	PADDING movq	(  2 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm5
	PADDING movq	(  4 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm6
	PADDING movq	(  8 + BOFFSET) * SIZE(BB), %mm2

	pfmul	        (  6 + BOFFSET) * SIZE(BB), %mm0
	pfadd	%mm0, %mm7
	movq	        (  2 + AOFFSET) * SIZE(AA), %mm0

	addl	$2 * SIZE, AA
	addl	$8 * SIZE, BB
	decl	%eax
	jg	.L16
	ALIGN_3

/* Scale by alpha and update a 2x4 block of C */
.L18:
	leal	(%ebp, %ebp, 2), %eax

#ifndef TRMMKERNEL
	pfmul	%mm3, %mm4
	pfadd	0 * SIZE(%esi), %mm4
	pfmul	%mm3, %mm5
	pfadd	0 * SIZE(%esi, %ebp, 1), %mm5
	pfmul	%mm3, %mm6
	pfadd	0 * SIZE(%esi, %ebp, 2), %mm6
	pfmul	%mm3, %mm7
	pfadd	0 * SIZE(%esi, %eax, 1), %mm7
#else
	pfmul	%mm3, %mm4
	pfmul	%mm3, %mm5
	pfmul	%mm3, %mm6
	pfmul	%mm3, %mm7
#endif

	movq	%mm4, 0 * SIZE(%esi)
	movq	%mm5, 0 * SIZE(%esi, %ebp, 1)
	movq	%mm6, 0 * SIZE(%esi, %ebp, 2)
	movq	%mm7, 0 * SIZE(%esi, %eax, 1)

#if (defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
    (defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
	movl	K, %eax
	subl	KKK, %eax
	leal	(,%eax, SIZE), %eax
	leal	(AA, %eax, 2), AA
	leal	(BB, %eax, 8), BB
#endif

#if defined(TRMMKERNEL) && defined(LEFT)
	addl	$2, KK
#endif

	addl	$2 * SIZE, %esi		# coffset += 2
	decl	%ebx			# i --
	jg	.L11
	ALIGN_4

/* 1x4 kernel for the remaining row when M is odd */
.L20:
	movl	M,  %ebx
	testl	$1, %ebx	# m & 1
	jle	.L29
	ALIGN_4

.L21:
	leal	- BOFFSET * SIZE + BUFFER, BB

#if !defined(TRMMKERNEL) || \
	(defined(TRMMKERNEL) &&  defined(LEFT) &&  defined(TRANSA)) || \
	(defined(TRMMKERNEL) && !defined(LEFT) && !defined(TRANSA))
#else
	movl	KK,   %eax
	leal	(, %eax, SIZE), %eax
	leal	(AA, %eax, 1), AA
	leal	(BB, %eax, 8), BB
#endif

	movq	        (  0 + AOFFSET) * SIZE(AA), %mm0
	pxor	%mm4, %mm4
	movq	        (  8 + AOFFSET) * SIZE(AA), %mm1
	pxor	%mm5, %mm5
	PADDING movq	(  0 + BOFFSET) * SIZE(BB), %mm2
	pxor	%mm6, %mm6
	PADDING movq	( 16 + BOFFSET) * SIZE(BB), %mm3
	pxor	%mm7, %mm7

#ifndef TRMMKERNEL
	movl	K,  %eax
#elif (defined(LEFT) && !defined(TRANSA)) || (!defined(LEFT) && defined(TRANSA))
	movl	K, %eax
	subl	KK, %eax
	movl	%eax, KKK
#else
	movl	KK, %eax
#ifdef LEFT
	addl	$1, %eax
#else
	addl	$4, %eax
#endif
	movl	%eax, KKK
#endif
	sarl	$4, %eax
	je	.L25
	ALIGN_4

.L22:
	pfmul	%mm0, %mm2
	pfadd	%mm2, %mm4
	PADDING movd	(  2 + BOFFSET) * SIZE(BB), %mm2

	pfmul	%mm0, %mm2
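For orientation, here is a hedged C sketch of the B-panel packing performed by the .L02/.L04 loops above (the function name is illustrative): each element of the 4-wide B panel is written twice, mirroring punpckldq %mmN, %mmN, so a later movq loads the value already broadcast across both 3DNow! lanes and one pfmul multiplies it against two rows of A at once.

#include <stddef.h>

/* Hypothetical sketch of the packing in .L02/.L04: the source panel holds
 * 4*k consecutive floats (four B columns per k step); every element is
 * duplicated into an adjacent pair, doubling the buffer to 8*k floats.  */
static void pack_b_4n(size_t k, const float *b, float *buffer)
{
    for (size_t l = 0; l < 4 * k; l++) {
        buffer[2 * l + 0] = b[l];   /* low 32-bit lane  */
        buffer[2 * l + 1] = b[l];   /* high 32-bit lane */
    }
}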
