
megaco_text_scanner.erl

OTP is short for the Open Telecom Platform.

Language: Erlang (ERL)
Page 1 of 2
%% ``The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
%% compliance with the License. You should have received a copy of the
%% Erlang Public License along with this software. If not, it can be
%% retrieved via the world wide web at http://www.erlang.org/.
%% 
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and limitations
%% under the License.
%% 
%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
%% AB. All Rights Reserved.''
%% 
%%     $Id$
%%----------------------------------------------------------------------
%% Purpose : Scanner for text encoded Megaco/H.248 messages
%%----------------------------------------------------------------------

-module('megaco_text_scanner').

-export([scan/1, skip_sep_chars/2]).

-include_lib("megaco/include/megaco.hrl").
-include_lib("megaco/src/engine/megaco_message_internal.hrl").
-include("megaco_text_tokens.hrl").

-define(LOWER1(Char),
        if
            Char >= $A, Char =< $Z ->
                Char - ($A - $a);
            true ->
                Char
        end).

%% This is used when we _know_ it to be upper case
-define(LOWER2(Char), Char - ($A - $a)).

scan(Bin) when binary(Bin) ->
    Chars = erlang:binary_to_list(Bin),
    tokens1(Chars, 1, []);
scan(Chars) when list(Chars) ->
    tokens1(Chars, 1, []).

%% As long as we dont know the version, we will loop in this function
tokens1(Chars, Line, Acc) ->
    case any_chars(Chars, Line) of
        {token, Token, [], LatestLine} ->
            %% We got to the end without actually getting a version token.
            Tokens = [{endOfMessage, LatestLine, endOfMessage}, Token | Acc],
            {error, no_version_found, lists:reverse(Tokens), Line};

        %% -- Version token for version 1 --
        {token, {'SafeChars',_,"!/1"} = Token, Rest, LatestLine} ->
            tokens2(Rest, LatestLine, 1, [Token | Acc]);
        {token, {'SafeChars',_,"megaco/1"} = Token, Rest, LatestLine} ->
            tokens2(Rest, LatestLine, 1, [Token | Acc]);

        %% -- Version token for version 2 --
        {token, {'SafeChars',_,"!/2"} = Token, Rest, LatestLine} ->
            tokens2(Rest, LatestLine, 2, [Token | Acc]);
        {token, {'SafeChars',_,"megaco/2"} = Token, Rest, LatestLine} ->
            tokens2(Rest, LatestLine, 2, [Token | Acc]);

        %% -- Version token for version 3 --
        {token, {'SafeChars',_,"!/3"} = Token, Rest, LatestLine} ->
            tokens2(Rest, LatestLine, 3, [Token | Acc]);
        {token, {'SafeChars',_,"megaco/3"} = Token, Rest, LatestLine} ->
            tokens2(Rest, LatestLine, 3, [Token | Acc]);

        %% -- Version token for version X --
        {token, {'SafeChars',_,[$!,$/| Vstr]} = Token, Rest, LatestLine} ->
            case guess_version(Vstr) of
                {ok, V} ->
                    tokens2(Rest, LatestLine, V, [Token | Acc]);
                {error, Reason} ->
                    {error, Reason, LatestLine}
            end;
        {token, {'SafeChars',_,[$m,$e,$g,$a,$c,$o,$/|Vstr]} = Token, Rest, LatestLine} ->
            case guess_version(Vstr) of
                {ok, V} ->
                    tokens2(Rest, LatestLine, V, [Token | Acc]);
                {error, Reason} ->
                    {error, Reason, LatestLine}
            end;

        %% -- Other tokens --
        {token, Token, Rest, LatestLine} ->
            tokens1(Rest, LatestLine, [Token | Acc]);

        {bad_token, Token, _Rest, _LatestLine} ->
            {error, {bad_token, [Token, Acc]}, Line}
    end.

tokens2(Chars, Line0, Version, Tokens0) ->
    case tokens2(Chars, Line0, Tokens0) of
        {ok, Tokens, Line} ->
            {ok, Tokens, Version, Line};
        Error ->
            Error
    end.

tokens2(Chars, Line, Acc) ->
%%     d("tokens2 -> entry with"
%%       "~n   Chars: ~s"
%%       "~n   Line:  ~p", [Chars, Line]),
    case any_chars(Chars, Line) of
        {token, Token, [], LatestLine} ->
%%          d("tokens2 -> Token: ~n~p", [Token]),
            Tokens = [{endOfMessage, LatestLine, endOfMessage}, Token | Acc],
            {ok, lists:reverse(Tokens), Line};
        {token, Token, Rest, LatestLine} ->
%%          d("tokens2 -> Token: ~n~p", [Token]),
            tokens2(Rest, LatestLine, [Token | Acc]);
        {bad_token, Token, _Rest, _LatestLine} ->
            {error, {bad_token, [Token, Acc]}, Line}
    end.

guess_version([C]) when (48 =< C) and (C =< 57) ->
    {ok, C-48};
guess_version(Str) when is_list(Str) ->
    case (catch list_to_integer(Str)) of
        I when is_integer(I) ->
            {ok, I};
        _ ->
            {error, {invalid_version, Str}}
    end.

%% Returns {token,     Token, Rest, LatestLine}
%% Returns {bad_token, Token, Rest, LatestLine}
any_chars([Char | Rest], Line) ->
%     Class = ?classify_char(Char),
%     d("any_chars -> ~w of class ~w", [Char, Class]),
%     case Class of
    case ?classify_char(Char) of
        safe_char_upper ->
            safe_chars(Rest, [Char], [?LOWER2(Char)], Line);
        safe_char ->
            safe_chars(Rest, [Char], [Char], Line);
        rest_char ->
            case Char of
                ?SemiColonToken ->
                    comment_chars(Rest, Line);
                _ ->
                    rest_chars(Rest, [Char], Line)
            end;
        double_quote ->
            quoted_chars(Rest, [], Line);
        white_space ->
            sep_chars(Rest, Line);
        end_of_line ->
            sep_chars(Rest, Line);
        bad_char ->
            %% {bad_token, {'SEP', Line, Char}, Rest, Line}
            {bad_token, {'AnyChars', Line, Char}, Rest, Line}
    end;
any_chars([] = All, Line) ->
    {token, {'SEP', Line, end_of_input}, All, Line}.

comment_chars([Char | Rest], Line) ->
    case ?classify_char(Char) of
        safe_char_upper ->
            comment_chars(Rest, Line);
        safe_char ->
            comment_chars(Rest, Line);
        rest_char ->
            comment_chars(Rest, Line);
        white_space ->
            comment_chars(Rest, Line);
        end_of_line ->
            sep_chars(Rest, Line);
        _ when Char == 22 ->
            comment_chars(Rest, Line);
        _ ->
            %% {bad_token, {'SEP', Line, Char}, Rest, Line}
            {bad_token, {'CommentChars', Line, Char}, Rest, Line}
    end;
comment_chars([] = All, Line) ->
    {token, {'SEP', Line}, All, Line}.

sep_chars([Char | Rest] = All, Line) ->
    case ?classify_char(Char) of
        safe_char_upper ->
            {token, {'SEP', Line}, All, Line};
        safe_char ->
            {token, {'SEP', Line}, All, Line};
        rest_char ->
            case Char of
                ?SemiColonToken ->
                    comment_chars(Rest, Line);
                _ ->
                    rest_chars(Rest, [Char], Line)
            end;
        white_space ->
            sep_chars(Rest, Line);
        end_of_line ->
            sep_chars(Rest, Line + 1);
        _ ->
            %% {bad_token, {'SEP', Line, Char}, Rest, Line}
            {bad_token, {'SepChars', Line, Char}, Rest, Line}
    end;
sep_chars([] = All, Line) ->
    {token, {'SEP', Line}, All, Line}.

rest_chars(Rest, [?ColonToken], Line) ->
    {token, {'COLON',   Line}, Rest, Line};
rest_chars(Rest, [AccChar], Line) ->
    TokenTag =
        case AccChar of
            ?EqualToken   -> 'EQUAL';
            ?NequalToken  -> 'NEQUAL';
            ?LesserToken  -> 'LESSER';
            ?GreaterToken -> 'GREATER';
            ?LbrktToken   -> 'LBRKT';
            ?RbrktToken   -> 'RBRKT';
            ?LsbrktToken  -> 'LSBRKT';
            ?RsbrktToken  -> 'RSBRKT';
            ?LparToken    -> 'LPAR';
            ?RparToken    -> 'RPAR';
            ?VbarToken    -> 'VBAR';
            ?CommaToken   -> 'COMMA'
        end,
    {Rest2, Line2} = skip_sep_chars(Rest, Line),
    {token, {TokenTag, Line}, Rest2, Line2}.

skip_sep_chars([Char | Rest] = All, Line) ->
    case ?classify_char(Char) of
        rest_char when Char == ?SemiColonToken ->
            skip_comment_chars(Rest, Line);
        white_space ->
            skip_sep_chars(Rest, Line);
        end_of_line ->
            skip_sep_chars(Rest, Line + 1);
        _ ->
            {All, Line}
    end;
skip_sep_chars([] = All, Line) ->
    {All, Line}.

skip_comment_chars([Char | Rest] = All, Line) ->
    case ?classify_char(Char) of
        safe_char_upper ->
            skip_comment_chars(Rest, Line);
        safe_char ->
            skip_comment_chars(Rest, Line);
        rest_char ->
            skip_comment_chars(Rest, Line);
        double_quote ->
            skip_comment_chars(Rest, Line);
        white_space ->
            skip_comment_chars(Rest, Line);
        end_of_line ->
            skip_sep_chars(Rest, Line + 1);
        _ ->
            {All, Line}
    end;
skip_comment_chars([] = All, Line) ->
    {All, Line}.

quoted_chars([Char | Rest], Acc, Line) ->
    case ?classify_char(Char) of
        safe_char_upper ->
            quoted_chars(Rest, [Char | Acc], Line);
        safe_char ->
            quoted_chars(Rest, [Char | Acc], Line);
        rest_char ->
            quoted_chars(Rest, [Char | Acc], Line);
        white_space ->
            quoted_chars(Rest, [Char | Acc], Line);
        double_quote ->
            {token, {'QuotedChars', Line, lists:reverse(Acc)}, Rest, Line};
        _ ->
            {bad_token, {'QuotedChars', Line, Char}, Rest, Line}
    end;
quoted_chars([] = All, _Acc, Line) ->
    {bad_token, {'QuotedChars', Line, end_of_input}, All, Line}.

safe_chars([Char | Rest] = All, Acc, LowerAcc, Line) ->
    case ?classify_char(Char) of
        safe_char_upper ->
            safe_chars(Rest, [Char | Acc], [?LOWER2(Char) | LowerAcc], Line);
        safe_char ->
            safe_chars(Rest, [Char | Acc], [Char | LowerAcc], Line);
        _ ->
            LowerSafeChars = lists:reverse(LowerAcc),
            TokenTag = select_token(LowerSafeChars),
            SafeChars = lists:reverse(Acc),
            case TokenTag of
                'MtpToken' ->
                    %% 'MtpToken' 'LBRKT' OctetString 'RBRKT'
                    special_chars(All, LowerSafeChars, Line, TokenTag);
                'LocalToken' ->
                    %% 'LocalToken' 'LBRKT' OctetString 'RBRKT'
                    special_chars(All, SafeChars, Line, TokenTag);
                'RemoteToken' ->
                    %% 'RemoteToken' 'LBRKT' OctetString 'RBRKT'
                    special_chars(All, SafeChars, Line, TokenTag);
                'DigitMapToken' ->
                    %% 'DigitMapToken'
                    %% 'DigitMapToken' 'EQUAL' Name
                    %% 'DigitMapToken' 'EQUAL' Name 'LBRKT' Value 'RBRKT'
                    %% 'DigitMapToken' 'EQUAL' 'LBRKT' Value 'RBRKT'
                    %% 'DigitMapToken' 'LBRKT' Value 'RBRKT'
                    special_chars(All, LowerSafeChars, Line, TokenTag);
                _ ->
                    {token, {TokenTag, Line, LowerSafeChars}, All, Line}
            end
    end;
safe_chars([] = All, _Acc, LowerAcc, Line) ->
    LowerSafeChars = lists:reverse(LowerAcc),
    TokenTag = select_token(LowerSafeChars),
    %%SafeChars = lists:reverse(Acc),
    {token, {TokenTag, Line, LowerSafeChars}, All, Line}.

collect_safe_chars([Char | Rest] = All, LowerAcc) ->
    case ?classify_char(Char) of
        safe_char_upper ->
            collect_safe_chars(Rest, [?LOWER2(Char) | LowerAcc]);
        safe_char ->
            collect_safe_chars(Rest, [Char | LowerAcc]);
        _ ->
            {All, lists:reverse(LowerAcc)}
    end;
collect_safe_chars([] = Rest, LowerAcc) ->
    {Rest, lists:reverse(LowerAcc)}.

special_chars(All, SafeChars, Line, TokenTag) ->
    {Rest, Line2} = skip_sep_chars(All, Line),
    case Rest of
        [?LbrktToken | Rest2] ->
            {token, {'OctetString', _, OctetString}, Rest4, Line4} =
                octet_string(Rest2, Line2),
            case Rest4 of
                [?RbrktToken | Rest6] ->
                    Token =
                        case TokenTag of
                            'MtpToken' ->
                                %% 'MtpToken' 'LBRKT' OctetString 'RBRKT'
                                {'MtpAddressToken', Line, OctetString};
                            'LocalToken' ->
                                %% 'LocalToken' 'LBRKT' OctetString 'RBRKT'
                                {'LocalDescriptorToken', Line, OctetString};
                            'RemoteToken' ->
                                %% 'RemoteToken' 'LBRKT' OctetString 'RBRKT'
                                {'RemoteDescriptorToken', Line, OctetString};
                            'DigitMapToken' ->
                                %% 'DigitMapToken' 'LBRKT' OctetString 'RBRKT'
                                DMV = digit_map_value(OctetString),
                                DMD = #'DigitMapDescriptor'{digitMapValue = DMV},
                                {'DigitMapDescriptorToken', Line, DMD}
                        end,
                    {token, Token, Rest6, Line4};
                _ when TokenTag == 'DigitMapToken' ->
                    %% 'DigitMapToken'
                    {token, {'DigitMapToken', Line, SafeChars}, All, Line};
                _ ->
                    {token, {'SafeChars', Line, SafeChars}, All, Line}
            end;
        [?EqualToken | Rest2] when TokenTag == 'DigitMapToken' ->
            {Rest3, Line3} = skip_sep_chars(Rest2, Line2),
            {Rest4, DigitMapName} = collect_safe_chars(Rest3, []),
            {Rest6, Line6, DMD} =
                if
                    DigitMapName == [] ->
                        {Rest3, Line3, #'DigitMapDescriptor'{}};
                    true ->
                        {Rest5, Line5} = skip_sep_chars(Rest4, Line3),
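
A minimal usage sketch follows, assuming the remainder of the module (page 2 of the listing) is compiled together with the part shown above and that the megaco include files it references are on the include path; the sample message string and variable names are illustrative only, not taken from the listing. scan/1 accepts either a binary or a character list and, once a version token such as "!/1" or "megaco/1" has been seen, returns {ok, Tokens, Version, LastLine}; failures come back as error tuples.

%% Hypothetical shell snippet (not part of the module above): scan a small
%% text-encoded Megaco/H.248 fragment and inspect the result.
Msg = <<"MEGACO/1 [124.124.124.222] Transaction = 9998 { }">>,
case megaco_text_scanner:scan(Msg) of
    {ok, Tokens, Version, LastLine} ->
        %% Tokens is the token list in message order, ending with the
        %% endOfMessage marker; Version comes from the version token.
        io:format("version ~p, ~p tokens, last line ~p~n",
                  [Version, length(Tokens), LastLine]);
    Error ->
        %% e.g. {error, no_version_found, Tokens, Line} or
        %%      {error, {bad_token, ...}, Line}, as returned by tokens1/tokens2
        io:format("scan failed: ~p~n", [Error])
end.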
