t1tokens.c

来自「Qt/Embedded是一个多平台的C++图形用户界面应用程序框架」· C语言 代码 · 共 996 行 · 第 1/2 页

C
996
字号
/******************************************************************* * *  t1tokens.c * *  Type 1 tokenizer * *  Copyright 1996 David Turner, Robert Wilhelm and Werner Lemberg. * *  This file is part of the FreeType project, and may only be used *  modified and distributed under the terms of the FreeType project *  license, LICENSE.TXT. By continuing to use, modify or distribute *  this file you indicate that you have read the license and *  understand and accept it fully. * * *  The tokenizer is in charge of loading and reading a Type1 font *  file (either in PFB or PFA format), and extract successive tokens *  and keywords from its two streams (i.e. the font program, and the *  private dictionary). * *  Eexec decryption is performed automatically when entering the *  private dictionary, or when retrieving char strings.. * ******************************************************************/#include <ftstream.h>#include <ftdebug.h>#include <t1tokens.h>#include <t1load.h>#undef  READ_BUFFER_INCREMENT#define READ_BUFFER_INCREMENT  0x400#undef  FT_COMPONENT#define FT_COMPONENT  trace_t1load  /* array of Type1 keywords supported by this engine. This table places */  /* the keyword in lexicographical order. It should always correspond   */  /* to the enums key_XXXX !!                                            
*/
  /*                                                                     */

  /* Keywords recognized by the tokenizer, sorted lexicographically so   */
  /* that Find_Name() can binary-search them. Indices must stay in sync  */
  /* with the key_XXXX enumeration (see header comment above).           */
  const  char*  t1_keywords[ key_max - key_first_ ] =
  {
    "-|", "ExpertEncoding", "ND", "NP", "RD", "StandardEncoding", "array",
    "begin", "closefile", "currentdict", "currentfile", "def", "dict", "dup",
    "eexec", "end", "executeonly", "false", "for", "index", "noaccess",
    "put", "readonly", "true", "userdict", "|", "|-"
  };

  /* Immediate names (dictionary keys, font-info fields, ...), also      */
  /* sorted lexicographically; indices must stay in sync with the        */
  /* imm_XXXX enumeration.                                               */
  const  char*  t1_immediates[ imm_max - imm_first_ ] =
  {
    "-|", ".notdef", "BlueFuzz", "BlueScale", "BlueShift", "BlueValues",
    "CharStrings", "Encoding", "FamilyBlues", "FamilyName", "FamilyOtherBlues",
    "FID", "FontBBox", "FontID", "FontInfo", "FontMatrix", "FontName",
    "FontType", "ForceBold", "FullName", "ItalicAngle", "LanguageGroup",
    "Metrics", "MinFeature", "ND", "NP", "Notice", "OtherBlues", "OtherSubrs",
    "PaintType", "Private", "RD", "RndStemUp", "StdHW", "StdVW", "StemSnapH",
    "StemSnapV", "StrokeWidth", "Subrs", "UnderlinePosition",
    "UnderlineThickness", "UniqueID", "Weight", "isFixedPitch", "lenIV",
    "password", "version", "|", "|-"
  };

  /* Lexicographic comparison of a counted string (str1, str1_len bytes, */
  /* not NUL-terminated) against a NUL-terminated string str2. Returns   */
  /* <0, 0 or >0 with strcmp() semantics.                                */
  static
  int  lexico_strcmp( const char*  str1,
                      int          str1_len,
                      const char*  str2 )
  {
    int  c2 = 0;

    for ( ; str1_len > 0; str1_len-- )
    {
      int c1, diff;

      c1 = *str1++;
      c2 = *str2++;

      diff = c1 - c2;
      if (diff) return diff;
    };
    /* all str1_len bytes matched: str1 compares lower iff str2 has more */
    /* characters left (a matching prefix sorts before the full string)  */
    return -*str2;
  }

  /* Find a given token/name in a lexicographically sorted string table  */
  /* via binary search. Returns the table index, or -1 if not found.     */
  static
  int  Find_Name( char*  base, int  length,
                  const char** table, int  table_len )
  {
    /* performs a binary search */
    T1_Int  left, right;

    left  = 0;
    right = table_len-1;

    while ( right-left > 1 )
    {
      T1_Int  middle = left + (( right-left ) >> 1);
      T1_Int  cmp;

      cmp = lexico_strcmp( base, length, table[middle] );
      if (!cmp) return middle;

      if ( cmp < 0 )
        right = middle;
      else
        left  = middle;
    }

    /* the loop narrows the range down to two candidates; test both */
    if ( !lexico_strcmp( base, length, table[left ] ) ) return left;
    if ( !lexico_strcmp( base, length, table[right] ) ) return right;
    return -1;
  }

  /* Read the small 6-byte PFB section header: a 16-bit tag followed by  */
  /* a 32-bit segment size. The size is byte-swapped after GET_ULong()   */
  /* because PFB stores it in the opposite byte order from the one the   */
  /* frame accessor reads -- NOTE(review): presumably little-endian on   */
  /* disk vs. big-endian GET_ULong(); confirm against the FT_Stream API. */
  static
  T1_Error  Read_PFB_Tag( FT_Stream  stream,
                          T1_UShort* atag,
                          T1_ULong*  asize )
  {
    T1_UShort tag;
    T1_ULong  size;
    T1_Error  error;   /* written by the ACCESS_Frame macro */

    FT_TRACE2(( "Read_PFB_Tag : reading\n" ));

    if ( ACCESS_Frame( 6L ) ) return error;

    tag  = GET_UShort();
    size = GET_ULong();

    FORGET_Frame();

    *atag  = tag;
    *asize = (  (size        & 0xFF) << 24 ) |
             ( ((size >> 8)  & 0xFF) << 16 ) |
             ( ((size >> 16) & 0xFF) << 8 )  |
             ( ((size >> 24) & 0xFF) );

    FT_TRACE2(( "  tag  = %04x\n", tag    ));
    FT_TRACE4(( "  asze = %08x\n", size   ));
    FT_TRACE2(( "  size = %08x\n", *asize ));

    return T1_Err_Ok;
  }

  /* Enlarge the tokenizer's read buffer by up to READ_BUFFER_INCREMENT  */
  /* bytes and fill the new space from the stream. Fails with            */
  /* T1_Err_Invalid_File_Format when the stream is exhausted. The error  */
  /* is also recorded in tokzer->error.                                  */
  /* NOTE(review): relies on the REALLOC/FILE_Read macros assigning the  */
  /* local `error' on both success and failure (FreeType macro           */
  /* convention) -- otherwise `error' would be read uninitialized below. */
  static
  T1_Error  grow( T1_Tokenizer  tokzer )
  {
    T1_Error   error;
    T1_Long    left_bytes;
    FT_Memory  memory = tokzer->memory;   /* used implicitly by REALLOC */

    left_bytes = tokzer->max - tokzer->limit;

    if ( left_bytes > 0 )
    {
      FT_Stream stream = tokzer->stream;  /* used implicitly by FILE_Read */

      if ( left_bytes > READ_BUFFER_INCREMENT )
        left_bytes = READ_BUFFER_INCREMENT;

      FT_TRACE2(( "Growing tokenizer buffer by %d bytes\n", left_bytes ));

      (void)stream;  /* unused in non reentrant mode */

      if ( !REALLOC( tokzer->base, tokzer->limit,
                      tokzer->limit + left_bytes )                 &&
           !FILE_Read( tokzer->base + tokzer->limit, left_bytes ) )
        tokzer->limit += left_bytes;
    }
    else
    {
      FT_ERROR(( "Unexpected end of Type1 fragment !!\n" ));
      error = T1_Err_Invalid_File_Format;
    }

    tokzer->error = error;
    return error;
  }

  /* In-place eexec decryption of `length' bytes using the rolling       */
  /* 16-bit key `seed'. The key is advanced from the *ciphertext* byte   */
  /* before it is overwritten, hence the temporary `plain'. The          */
  /* constants 52845/22719 are the c1/c2 values of the Adobe Type 1      */
  /* eexec algorithm.                                                    */
  LOCAL_FUNC
  void  t1_decrypt( T1_Byte*   buffer,
                    T1_Int     length,
                    T1_UShort  seed )
  {
    while ( length > 0 )
    {
      T1_Byte  plain;

      plain     = (*buffer ^ (seed >> 8));
      seed      = (*buffer+seed)*52845+22719;
      *buffer++ = plain;
      length--;
    }
  }

 /*************************************************************************/
 /*                                                                       */
 /*  <Function> New_Tokenizer                                             */
 /*                                                                       */
 /*  <Description>                                                        */
 /*     Creates a new tokenizer from a given input stream. This function  */
 /*     automatically recognizes "pfa" or "pfb" files. The function       */
 /*     "Read_Token" can then be used to extract successive tokens from   */
 /*     the stream..                                                      */
 /*                                                                       */
 /*  <Input>                                                              */
 /*     stream  :: input stream                                           */
 /*                                                                       */
 /*  <Output>                                                             */
 /*     tokenizer :: handle to new tokenizer object..                     */
 /*                                                                       */
 /*  <Return>                                                             */
 /*     Type1 error code. 0 means success..                               */
 /*                                                                       */
 /*  <Note>                                                               */
 /*     This function copies the stream handle within the object. Callers */
 /*     should not discard "stream". This is done by the Done_Tokenizer   */
 /*     function..
*/ /*                                                                       */ LOCAL_FUNC T1_Error  New_Tokenizer( FT_Stream      stream,                          T1_Tokenizer*  tokenizer ) {   FT_Memory     memory = stream->memory;   T1_Tokenizer  tokzer;   T1_Error      error;   T1_UShort     tag;   T1_ULong      size;   T1_Byte*      tok_base;   T1_ULong      tok_limit;   T1_ULong      tok_max;   *tokenizer = 0;   /* allocate object */   if ( FILE_Seek( 0L )                     ||        ALLOC( tokzer, sizeof(*tokzer) ) )     return error;   tokzer->stream = stream;   tokzer->memory = stream->memory;   tokzer->in_pfb     = 0;   tokzer->in_private = 0;   tok_base  = 0;   tok_limit = 0;   tok_max   = stream->size;   error = Read_PFB_Tag( stream, &tag, &size );   if (error) goto Fail;   if ( tag != 0x8001 )   {     /* assume that it is a PFA file - an error will be produced later */     /* if a character with value > 127 is encountered..               */     /* rewind to start of file */     if ( FILE_Seek(0L) ) goto Fail;     size = stream->size;   }   else     tokzer->in_pfb = 1;   /* if it's a memory-based resource, set up pointer */   if ( !stream->read )   {     tok_base  = (T1_Byte*)stream->base + stream->pos;     tok_limit = size;     tok_max   = size;     /* check that the "size" field is valid */     if ( FILE_Skip(size) ) goto Fail;   }   else if ( tag == 0x8001 )   {     /* read segment in memory */     if ( ALLOC( tok_base, size ) )       goto Fail;     if ( FILE_Read( tok_base, size ) )     {       FREE( tok_base );       goto Fail;     }     tok_limit = size;     tok_max   = size;   }   tokzer->base   = tok_base;   tokzer->limit  = tok_limit;   tokzer->max    = tok_max;   tokzer->cursor = 0;   *tokenizer = tokzer;      /* Now check font format, we must see a '%!PS-AdobeFont-1' */   /* or a '%!FontType'                                       */   {     if ( 16 > tokzer->limit )       grow( tokzer );            if ( tokzer->limit <= 16 ||          ( 
strncmp( (const char*)tokzer->base, "%!PS-AdobeFont-1", 16 ) &&            strncmp( (const char*)tokzer->base, "%!FontType", 10 )       ) )     {       FT_TRACE2(( "Not a Type1 font\n" ));       error = T1_Err_Invalid_File_Format;       goto Fail;     }   }   return T1_Err_Ok; Fail:   FREE( tokzer );   return error; } /* return the value of an hexadecimal digit */ static int  hexa_value( char c ) {   unsigned int  d;   d = (unsigned int)(c-'0');   if ( d <= 9 ) return (int)d;   d = (unsigned int)(c-'a');   if ( d <= 5 ) return (int)(d+10);   d = (unsigned int)(c-'A');   if ( d <= 5 ) return (int)(d+10);   return -1; } /*************************************************************************/ /*                                                                       */ /*  <Function> Done_Tokenizer                                            */ /*                                                                       */ /*  <Description>                                                        */ /*     Closes a given tokenizer. This function will also close the       */ /*     stream embedded in the object..                                   */ /*                                                                       */ /*  <Input>                                                              */ /*     tokenizer :: target tokenizer object                              */ /*                                                                       */ /*  <Return>                                                             */ /*     Type1 error code. 0 means success..                               
*/
 /*                                                                       */
 LOCAL_FUNC
 T1_Error  Done_Tokenizer( T1_Tokenizer  tokenizer )
 {
   FT_Memory  memory = tokenizer->memory;   /* used implicitly by FREE */

   /* clear read buffer if needed (disk-based resources) -- the buffer */
   /* is owned by the tokenizer only when the private dict was copied  */
   /* to the heap, or when the stream has no memory base; otherwise it */
   /* points into the stream and must not be freed..                   */
   if ( tokenizer->in_private || !tokenizer->stream->base )
     FREE( tokenizer->base );

   FREE( tokenizer );
   return T1_Err_Ok;
 }

 /*************************************************************************/
 /*                                                                       */
 /*  <Function> Open_PrivateDict                                          */
 /*                                                                       */
 /*  <Description>                                                        */
 /*     This function must be called to set the tokenizer to the private  */
 /*     section of the Type1 file. It recognizes automatically the        */
 /*     kind of eexec encryption used (ascii or binary)..                 */
 /*                                                                       */
 /*  <Input>                                                              */
 /*     tokenizer :: target tokenizer object                              */
 /*                                                                       */
 /*     NOTE(review): an earlier version of this header documented a      */
 /*     "lenIV" parameter, but the signature below takes only the         */
 /*     tokenizer -- the stale entry has been dropped..                   */
 /*                                                                       */
 /*  <Return>                                                             */
 /*     Type1 error code. 0 means success..                               */
 /*                                                                       */
 LOCAL_FUNC
 T1_Error  Open_PrivateDict( T1_Tokenizer  tokenizer )
 {
   T1_Tokenizer  tokzer = tokenizer;
   FT_Stream     stream = tokzer->stream;
   FT_Memory     memory = tokzer->memory;
   T1_Error      error = 0;

   T1_UShort     tag;
   T1_ULong      size;

   /* NOTE(review): `private' is a C++ keyword; fine in C, but would   */
   /* break compilation of this file as C++..                          */
   T1_Byte*      private;

   /* are we already in the private dictionary ?
*/   if ( tokzer->in_private )     return 0;   if ( tokzer->in_pfb )   {     /* in the case of the PFB format, the private dictionary can be  */     /* made of several segments. We thus first read the number of    */     /* segments to compute the total size of the private dictionary  */     /* then re-read them into memory..                               */     T1_Long  start_pos    = FILE_Pos();     T1_ULong private_size = 0;     do     {       error = Read_PFB_Tag( stream, &tag, &size );       if (error || tag != 0x8002) break;       private_size += size;       if ( FILE_Skip(size) )         goto Fail;     }     while (1);     /* Check that we have a private dictionary there */     /* and allocate private dictionary buffer        */     if ( private_size == 0 )     {       FT_ERROR(( "T1.Open_Private: invalid private dictionary section\n" ));       error = T1_Err_Invalid_File_Format;       goto Fail;     }     if ( ALLOC( private, private_size ) )       goto Fail;     /* read all sections into buffer */     if ( FILE_Seek( start_pos ) )       goto Fail_Private;     private_size = 0;     do     {       error = Read_PFB_Tag( stream, &tag, &size );       if (error || tag != 0x8002) { error = 0; break; }       if ( FILE_Read( private + private_size, size ) )         goto Fail_Private;       private_size += size;     }     while (1);     tokzer->base   = private;     tokzer->cursor = 0;     tokzer->limit  = private_size;     tokzer->max    = private_size;   }   else   {     char*  base;     /* we're in a PFA file, read each token until we find "eexec" */     while ( tokzer->token.kind2 != key_eexec )     {       error = Read_Token( tokzer );       if (error) goto Fail;     }     /* now determine wether the private dictionary is encoded in binary */     /* or hexadecimal ASCII format..                                    */     /* we need to access the next 4 bytes (after the final \r following */     /* the 'eexec' keyword..) 
if they all are hexadecimal digits, then  */     /*we have a case of ASCII storage..                                 */     while ( tokzer->cursor+5 > tokzer->limit )     {       error = grow( tokzer );       if (error) goto Fail;     }

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?