t1tokens.c
来自「Qt/Embedded是一个多平台的C++图形用户界面应用程序框架」· C语言 代码 · 共 996 行 · 第 1/2 页
C
996 行
/* skip whitespace/line feed after "eexec" */ base = (char*)tokzer->base + tokzer->cursor + 1; if ( ( hexa_value( base[0] ) | hexa_value( base[1] ) | hexa_value( base[2] ) | hexa_value( base[3] ) ) < 0 ) { /* binary encoding - "simply" read the stream */ /* if it's a memory-based resource, we need to allocate a new */ /* storage buffer for the private dictionary, as it needs to */ /* be decrypted later.. */ if ( stream->base ) { size = stream->size - tokzer->cursor-1; /* remaining bytes */ if ( ALLOC( private, size ) ) /* allocate private dict buffer */ goto Fail; /* copy eexec-encrypted bytes */ MEM_Copy( private, tokzer->base + tokzer->cursor+1, size ); /* reset pointers - forget about file mapping */ tokzer->base = private; tokzer->limit = size; tokzer->max = size; tokzer->cursor = 0; } /* on the opposite, for disk based resources, we simply grow */ /* the current buffer until its completion, and decrypt the */ /* bytes within it. In all cases, the "base" buffer will be */ /* discarded on DoneTokenizer if we're in the private dict.. */ else { /* grow the read buffer to the full file.. */ while ( tokzer->limit < tokzer->max ) { error = grow( tokenizer ); if (error) goto Fail; } /* set up cursor to first encrypted byte */ tokzer->cursor++; } } else { /* ASCII hexadecimal encoding.. This sucks.. */ T1_Byte* write; T1_Byte* cur; T1_Byte* limit; T1_Int count; /* Allocate a buffer, read each one byte at a time .. */ count = ( stream->size - tokzer->cursor ); size = count/2; if ( ALLOC( private, size ) ) /* allocate private dict buffer */ goto Fail; write = private; cur = tokzer->base + tokzer->cursor; limit = tokzer->base + tokzer->limit; /* read each bytes */ while ( count > 0 ) { /* ensure that we can read the next 2 bytes !! 
*/ while ( cur+2 > limit ) { int cursor = cur - tokzer->base; error = grow( tokzer ); if (error) goto Fail_Private; cur = tokzer->base + cursor; limit = tokzer->base + tokzer->limit; } /* check for new line */ if ( cur[0] == '\r' || cur[0] == '\n' ) { cur++; count--; } else { int hex1 = hexa_value(cur[0]); /* exit if we have a non hexa-decimal digit which isn't */ /* a new-line character.. */ if (hex1 < 0) break; /* otherwise, store byte */ *write++ = ( hex1 << 4 ) | hexa_value( cur[1] ); cur += 2; count -= 2; } } /* get rid of old buffer in the case of disk-based resources */ if ( !stream->base ) FREE( tokzer->base ); /* set up pointers */ tokzer->base = private; tokzer->limit = size; tokzer->max = size; tokzer->cursor = 0; } } /* finally, decrypt the private dictionary - and skip the lenIV bytes */ t1_decrypt( tokzer->base, tokzer->limit, 55665 ); tokzer->cursor += 4; Fail: return error; Fail_Private: FREE( private ); goto Fail; } /*************************************************************************/ /* */ /* <Function> Read_Token */ /* */ /* <Description> */ /* Read a new token from the current input stream. This function */ /* extracts a token from the font program until "Open_PrivateDict" */ /* has been called. After this, it returns tokens from the */ /* (eexec-encrypted) private dictionnary.. */ /* */ /* <Input> */ /* tokenizer :: target tokenizer object */ /* */ /* <Return> */ /* Type1 error code. 0 means success.. */ /* */ /* <Note> */ /* One should use the function Read_CharStrings to read the binary */ /* charstrings from the private dict.. 
*/
/*                                                                       */

  LOCAL_FUNC
  T1_Error  Read_Token( T1_Tokenizer  tokenizer )
  {
    T1_Tokenizer  tok = tokenizer;
    T1_Long       cur, limit;
    T1_Byte*      base;
    char          c, starter, ender;
    T1_Bool       token_started;
    T1_TokenType  kind;

    tok->error      = T1_Err_Ok;
    tok->token.kind = tok_any;

    /* work on local copies of the read window; they are re-loaded */
    /* after every call to grow(), which may reallocate/refill it  */
    base  = tok->base;
    limit = tok->limit;
    cur   = tok->cursor;

    token_started = 0;

    for (;;)
    {
      if ( cur >= limit )
      {
        if ( grow( tok ) ) goto Exit;
        base  = tok->base;
        limit = tok->limit;
      }

      c = (char)base[cur++];

      /* check that we have an ASCII character */
      if ( (T1_Byte)c > 127 )
      {
        FT_ERROR(( "Unexpected binary data in Type1 fragment !!\n" ));
        tok->error = T1_Err_Invalid_File_Format;
        goto Exit;
      }

      switch (c)
      {
        case '\r' :
        case '\n' :
        case ' '  :
        case '\t' :
          /* skip initial whitespace => skip to next */
          if (token_started)
          {
            /* whitespace terminates a pending token:           */
            /* possibly a name, keyword, whatever               */
            /* (len excludes the terminating whitespace itself) */
            tok->token.kind = tok_any;
            tok->token.len  = cur-tok->token.start-1;
            goto Exit;
          }
          /* otherwise, skip everything */
          break;

        case '%' :
          /* this is a comment - skip everything up to end-of-line */
          for (;;)
          {
            T1_Int  left = limit - cur;

            while (left > 0)
            {
              c = (char)base[cur++];
              if ( c == '\r' || c == '\n' ) goto Next;
              left--;
            }
            if ( grow( tokenizer ) ) goto Exit;
            base  = tok->base;
            limit = tok->limit;
          }
          /* NOTE(review): this loop only leaves via goto Next/Exit, */
          /* so the apparent fall-through into '(' is unreachable    */

        case '(' :
          /* a Postscript string */
          kind  = tok_string;
          ender = ')';

        L1:
          if (!token_started)
          {
            token_started    = 1;
            tok->token.start = cur-1;
          }

          /* scan for the matching closer, honouring nesting of the */
          /* starter/ender pair (e.g. parentheses inside strings)   */
          {
            T1_Int  nest_level = 1;

            starter = c;
            for (;;)
            {
              T1_Int  left = limit-cur;

              while (left > 0)
              {
                c = (char)base[cur++];

                if ( c == starter )
                  nest_level++;

                else if ( c == ender )
                {
                  nest_level--;
                  if (nest_level <= 0)
                  {
                    tok->token.kind = kind;
                    tok->token.len  = cur - tok->token.start;
                    goto Exit;
                  }
                }
                left--;
              }
              if ( grow( tok ) ) goto Exit;
              base  = tok->base;
              limit = tok->limit;
            }
          }

        case '[' :   /* a Postscript array */
          if (token_started) goto Any_Token;
          kind  = tok_array;
          ender = ']';
          goto L1;
          break;

        case '{' :   /* a Postscript program */
          if (token_started) goto Any_Token;
          kind  = tok_program;
          ender = '}';
          goto L1;
          break;

        case '<' :   /* a Postscript hex byte array ?? */
          if (token_started) goto Any_Token;
          kind  = tok_hexarray;
          ender = '>';
          goto L1;
          break;

        case '0':  /* any number */
        case '1':
        case '2':
        case '3':
        case '4':
        case '5':
        case '6':
        case '7':
        case '8':
        case '9':
          if (token_started) goto Next;
          tok->token.kind  = tok_number;
          token_started    = 1;
          tok->token.start = cur-1;

        L2:
          /* consume characters until a delimiter or whitespace */
          for (;;)
          {
            T1_Int  left = limit-cur;

            while (left > 0)
            {
              c = (char)base[cur++];

              switch (c)
              {
                case '[':   /* delimiters end the token, and are */
                case '{':   /* pushed back for the next call     */
                case '(':
                case '<':
                case '/':
                  goto Any_Token;

                case ' ':
                case '\r':
                case '\t':
                case '\n':
                  tok->token.len = cur - tok->token.start - 1;
                  goto Exit;

                default:
                  ;
              }
              left--;
            }
            if (grow( tok )) goto Exit;
            base  = tok->base;
            limit = tok->limit;
          }

        case '.':   /* maybe a number */
        case '-':
        case '+':
          if (token_started) goto Next;
          token_started    = 1;
          tok->token.start = cur-1;
          for (;;)
          {
            T1_Int  left = limit-cur;

            if ( left > 0 )
            {
              /* test for any following digit, interpreted as number */
              c = (char)base[cur];
              tok->token.kind = ( c >= '0' && c <= '9' ? tok_number : tok_any );
              goto L2;
            }
            if (grow( tok )) goto Exit;
            base  = tok->base;
            limit = tok->limit;
          }

        case '/':   /* maybe an immediate name */
          if (!token_started)
          {
            token_started    = 1;
            tok->token.start = cur-1;

            for (;;)
            {
              T1_Int  left = limit-cur;

              if ( left > 0 )
              {
                /* test for single '/', interpreted as garbage */
                c = (char)base[cur];
                tok->token.kind = ( c == ' '  || c == '\t' ||
                                    c == '\r' || c == '\n' ?
                                    tok_any : tok_immediate );
                goto L2;
              }
              if (grow( tok )) goto Exit;
              base  = tok->base;
              limit = tok->limit;
            }
          }
          else
          {
          Any_Token:   /* possibly a name or whatever; push the */
                       /* delimiter back into the stream        */
            cur--;
            tok->token.len = cur - tok->token.start;
            goto Exit;
          }

        default:
          /* any other character starts (or continues) a plain token */
          if (!token_started)
          {
            token_started    = 1;
            tok->token.start = cur-1;
          }
      }

    Next:
      ;
    }

  Exit:
    tok->cursor = cur;

    if (!tok->error)
    {
      /* now, tries to match keywords and immediate names */
      T1_Int  index;

      switch ( tok->token.kind )
      {
        case tok_immediate :   /* immediate name - skip leading '/' */
          index = Find_Name( (char*)(tok->base + tok->token.start+1),
                             tok->token.len-1,
                             t1_immediates,
                             imm_max - imm_first_ );
          tok->token.kind2 = ( index >= 0 ? imm_first_ + index : 0 );
          break;

        case tok_any :         /* test for keyword */
          index = Find_Name( (char*)(tok->base + tok->token.start),
                             tok->token.len,
                             t1_keywords,
                             key_max - key_first_ );
          if ( index >= 0 )
          {
            tok->token.kind  = tok_keyword;
            tok->token.kind2 = key_first_ + index;
          }
          else
            tok->token.kind2 = 0;
          break;

        default:
          tok->token.kind2 = 0;
      }
    }
    return tokenizer->error;
  }

#if 0
  /*************************************************************************/
  /*                                                                       */
  /*  <Function>  Read_CharStrings                                         */
  /*                                                                       */
  /*  <Description>                                                        */
  /*     Read a charstrings from the current input stream. These are       */
  /*     binary bytes that encode each individual glyph outline.           */
  /*                                                                       */
  /*     The caller is responsible for skipping the "lenIV" bytes at       */
  /*     the start of the record..                                         */
  /*                                                                       */
  /*  <Input>                                                              */
  /*     tokenizer :: target tokenizer object                              */
  /*     num_chars :: number of binary bytes to read                       */
  /*                                                                       */
  /*  <Output>                                                             */
  /*     buffer :: target array of bytes. These are eexec-decrypted..      */
  /*                                                                       */
  /*  <Return>                                                             */
  /*     Type1 error code. 0 means success..                               */
  /*                                                                       */
  /*  <Note>                                                               */
  /*     One should use the function Read_CharStrings to read the binary   */
  /*     charstrings from the private dict..
*/ /* */ LOCAL_FUNC T1_Error Read_CharStrings( T1_Tokenizer tokenizer, T1_Int num_chars, T1_Byte* buffer ) { for (;;) { T1_Int left = tokenizer->limit - tokenizer->cursor; if ( left >= num_chars ) { MEM_Copy( buffer, tokenizer->base + tokenizer->cursor, num_chars ); t1_decrypt( buffer, num_chars, 4330 ); tokenizer->cursor += num_chars; return T1_Err_Ok; } if ( grow(tokenizer) ) return tokenizer->error; } }#endif
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?