reportgenerator.java
        proc[1] = new Stat("Output Processing Time", outputProcessTime);
        proc[1].setUnits("seconds");
        proc[2] = new Stat("Log File Lines Analysed", logLines);
        proc[2].setUnits("lines");

        process.add(proc);
        report.addBlock(process);

        // finally write the string into the output file
        try
        {
            BufferedWriter out = new BufferedWriter(new FileWriter(output));
            out.write(report.render());
            out.close();
        }
        catch (IOException e)
        {
            System.out.println("Unable to write to output file " + output);
            System.exit(0);
        }

        return;
    }

    /**
     * a standard stats block preparation method for use when an aggregator
     * has to be put out in its entirety.  This method will not be able to
     * deal with complex cases, although it will perform sorting by value and
     * translations as per the map file if requested
     *
     * @param aggregator    the aggregator that should be converted
     * @param sort          should the resulting stats be sorted by value
     * @param translate     translate the stat name using the map file
     *
     * @return      a Statistics object containing all the relevant information
     */
    public static Statistics prepareStats(Map aggregator, boolean sort, boolean translate)
    {
        Stat[] stats = new Stat[aggregator.size()];

        if (aggregator.size() > 0)
        {
            Iterator keys = aggregator.keySet().iterator();
            int i = 0;

            while (keys.hasNext())
            {
                String key = (String) keys.next();
                int value = Integer.parseInt((String) aggregator.get(key));

                if (translate)
                {
                    stats[i] = new Stat(translate(key), value);
                }
                else
                {
                    stats[i] = new Stat(key, value);
                }

                i++;
            }

            if (sort)
            {
                Arrays.sort(stats);
            }
        }

        // add the results to the statistics object
        Statistics statistics = new Statistics();
        statistics.add(stats);

        return statistics;
    }

    /**
     * look the given text up in the action map table and return a translated
     * value if one exists.  If no translation exists the original text is
     * returned
     *
     * @param text  the text to be translated
     *
     * @return      a string containing either the translated text or the
     *              original text
     */
    public static String translate(String text)
    {
        if (actionMap.containsKey(text))
        {
            return (String) actionMap.get(text);
        }
        else
        {
            return text;
        }
    }

    /**
     * read in the action map file which converts log file line actions into
     * actions which are more understandable to humans
     *
     * @param map   the map file
     */
    public static void readMap(String map)
        throws IOException
    {
        FileReader fr = null;
        BufferedReader br = null;

        // read in the map file, printing a warning if none is found
        String record = null;
        try
        {
            fr = new FileReader(map);
            br = new BufferedReader(fr);
        }
        catch (IOException e)
        {
            System.err.println("Failed to read map file: log file actions will be displayed without translation");
            return;
        }

        // loop through the map file and read in the values
        while ((record = br.readLine()) != null)
        {
            Matcher matchReal = real.matcher(record);

            // if the line is real then read it in
            if (matchReal.matches())
            {
                actionMap.put(matchReal.group(1).trim(), matchReal.group(2).trim());
            }
        }
    }

    /**
     * set the passed parameters up as global class variables.  This has to
     * be done in a separate method because the API permits running from
     * the command line with args or calling the processReport method
     * statically from elsewhere
     *
     * @param myFormat  the format to output the report in (e.g. html)
     * @param myInput   the aggregation file to generate the report from
     * @param myOutput  the file to write the report to
     * @param myMap     the action map file to use for translations
     */
    public static void setParameters(String myFormat, String myInput, String myOutput, String myMap)
    {
        if (myFormat != null)
        {
            format = myFormat;
        }

        if (myInput != null)
        {
            input = myInput;
        }

        if (myOutput != null)
        {
            output = myOutput;
        }

        if (myMap != null)
        {
            map = myMap;
        }

        return;
    }

    /**
     * read the input file and populate all the class globals with the
     * contents.  The values that come from this file form the basis of the
     * analysis report
     *
     * @param input     the aggregator file
     */
    public static void readInput(String input)
        throws IOException, ParseException
    {
        FileReader fr = null;
        BufferedReader br = null;

        // read in the analysis information, throwing an error if we fail to
        // open the given file
        String record = null;
        try
        {
            fr = new FileReader(input);
            br = new BufferedReader(fr);
        }
        catch (IOException e)
        {
            System.out.println("Failed to read input file");
            System.exit(0);
        }

        // FIXME: although this works, it is not very elegant
        // loop through the aggregator file and read in the values
        while ((record = br.readLine()) != null)
        {
            // match real lines
            Matcher matchReal = real.matcher(record);

            // pre-prepare our input strings
            String section = null;
            String key = null;
            String value = null;

            // temporary string to hold the left hand side of the equation
            String left = null;

            // match the line or skip this record
            if (matchReal.matches())
            {
                // lift the values out of the matcher's result groups
                left = matchReal.group(1).trim();
                value = matchReal.group(2).trim();

                // now analyse the left hand side, splitting by ".", taking
                // the first token as the section and the remainder of the
                // string as the key if it exists
                StringTokenizer tokens = new StringTokenizer(left, ".");
                int numTokens = tokens.countTokens();
                if (tokens.hasMoreTokens())
                {
                    section = tokens.nextToken();
                    if (numTokens > 1)
                    {
                        key = left.substring(section.length() + 1);
                    }
                    else
                    {
                        key = "";
                    }
                }
            }
            else
            {
                continue;
            }

            // if the line is real, then we carry on

            // first initialise a date format object to do our date processing
            // if necessary
            SimpleDateFormat sdf = new SimpleDateFormat("dd'/'MM'/'yyyy");

            // read the analysis contents in
            if (section.equals("archive"))
            {
                archiveStats.put(key, value);
            }

            if (section.equals("action"))
            {
                actionAggregator.put(key, value);
            }

            if (section.equals("user"))
            {
                userAggregator.put(key, value);
            }

            if (section.equals("search"))
            {
                searchAggregator.put(key, value);
            }

            if (section.equals("item"))
            {
                itemAggregator.put(key, value);
            }

            // read the config details used to make this report in
            if (section.equals("user_email"))
            {
                userEmail = value;
            }

            if (section.equals("item_floor"))
            {
                itemFloor = Integer.parseInt(value);
            }

            if (section.equals("search_floor"))
            {
                searchFloor = Integer.parseInt(value);
            }

            if (section.equals("host_url"))
            {
                url = value;
            }

            if (section.equals("item_lookup"))
            {
                itemLookup = Integer.parseInt(value);
            }

            if (section.equals("avg_item_views"))
            {
                try
                {
                    avgItemViews = Integer.parseInt(value);
                }
                catch (NumberFormatException e)
                {
                    avgItemViews = 0;
                }
            }

            if (section.equals("server_name"))
            {
                serverName = value;
            }

            if (section.equals("service_name"))
            {
                name = value;
            }

            if (section.equals("start_date"))
            {
                startDate = sdf.parse(value);
            }

            if (section.equals("end_date"))
            {
                endDate = sdf.parse(value);
            }

            if (section.equals("analysis_process_time"))
            {
                processTime = Integer.parseInt(value);
            }

            if (section.equals("general_summary"))
            {
                generalSummary.add(value);
            }

            if (section.equals("log_lines"))
            {
                logLines = Integer.parseInt(value);
            }

            if (section.equals("warnings"))
            {
                warnings = Integer.parseInt(value);
            }
        }

        // close the inputs
        br.close();
        fr.close();
    }

    /**
     * get the information for the item with the given handle
     *
     * @param context   the DSpace context we are operating under
     * @param handle    the handle of the item being looked up, in the form
     *                  1234/567 and so forth
     *
     * @return      a string containing a reference (almost citation) to the
     *              article
     */
    public static String getItemInfo(Context context, String handle)
        throws SQLException
    {
        Item item = null;

        // ensure that the handle exists
        try
        {
            item = (Item) HandleManager.resolveToObject(context, handle);
        }
        catch (Exception e)
        {
            return null;
        }

        // if no handle that matches is found then also return null
        if (item == null)
        {
            return null;
        }

        // build the reference
        // FIXME: here we have blurred the line between content and
        // presentation and it should probably be un-blurred
        DCValue[] title = item.getDC("title", null, Item.ANY);
        DCValue[] author = item.getDC("contributor", "author", Item.ANY);

        StringBuffer authors = new StringBuffer();
        if (author.length > 0)
        {
            authors.append("(" + author[0].value);
        }
        if (author.length > 1)
        {
            authors.append(" et al");
        }
        if (author.length > 0)
        {
            authors.append(")");
        }

        String content = title[0].value + " " + authors.toString();

        return content;
    }

    /**
     * output the usage information to the terminal
     */
    public static void usage()
    {
        String usage = "Usage Information:\n" +
            "ReportGenerator [options [parameters]]\n" +
            "-format [output format]\n" +
            "\tRequired\n" +
            "\tSpecify the format that you would like the output in\n" +
            "\tOptions:\n" +
            "\t\thtml\n" +
            "-in [aggregation file]\n" +
            "\tRequired\n" +
            "\tSpecify the aggregation data file to display\n" +
            "-out [output file]\n" +
            "\tOptional\n" +
            "\tSpecify the file to output the report to\n" +
            "\tDefault uses [dspace log directory]/report\n" +
            "-map [map file]\n" +
            "\tOptional\n" +
            "\tSpecify the map file to translate log file actions into human readable actions\n" +
            "\tDefault uses [dspace config directory]/dstat.map\n" +
            "-help\n" +
            "\tdisplay this usage information\n";

        System.out.println(usage);
    }
}
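The sketch below shows one way the public static API above might be driven from a small standalone program: readMap() loads the dstat.map translations, and prepareStats() turns an aggregator into a sorted, translated Statistics block. It is a minimal illustration under stated assumptions, not part of the class itself: the package name org.dspace.app.statistics, the map file path, and the aggregator keys ("item_view", "search") are assumptions chosen for the example, and the aggregator values are Strings because prepareStats() parses them with Integer.parseInt() as seen in the listing.

// PrepareStatsSketch.java -- illustrative only; names and paths below are
// assumptions, not taken from the listing above.
import java.util.HashMap;
import java.util.Map;

import org.dspace.app.statistics.ReportGenerator;   // assumed package location
import org.dspace.app.statistics.Statistics;        // assumed package location

public class PrepareStatsSketch
{
    public static void main(String[] args) throws Exception
    {
        // load the action map first so translate() has translations to apply
        // (placeholder path for a real DSpace config directory)
        ReportGenerator.readMap("/dspace/config/dstat.map");

        // counts are stored as Strings because prepareStats() parses them itself
        Map aggregator = new HashMap();
        aggregator.put("item_view", "42");
        aggregator.put("search", "17");

        // sort the resulting stats by value and translate keys via the map file
        Statistics stats = ReportGenerator.prepareStats(aggregator, true, true);
        System.out.println("prepared stats block: " + stats);
    }
}

Judging from the usage() text, the class would normally be invoked from the command line with the -format, -in, -out and -map options (only -format and -in being required), although the main() method handling those flags is not part of this excerpt.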