
retr.c

Collection: "wget lets you download from the console interface"
Language: C
/* Return a printed representation of the download rate, as
   appropriate for the speed.  If PAD is non-zero, strings will be
   padded to the width of 7 characters (xxxx.xx).  */
char *
retr_rate (wgint bytes, double msecs, int pad)
{
  static char res[20];
  static const char *rate_names[] = {"B/s", "KB/s", "MB/s", "GB/s" };
  int units = 0;

  double dlrate = calc_rate (bytes, msecs, &units);
  sprintf (res, pad ? "%7.2f %s" : "%.2f %s", dlrate, rate_names[units]);

  return res;
}

/* Calculate the download rate and trim it as appropriate for the
   speed.  Appropriate means that if rate is greater than 1K/s,
   kilobytes are used, and if rate is greater than 1MB/s, megabytes
   are used.

   UNITS is zero for B/s, one for KB/s, two for MB/s, and three for
   GB/s.  */
double
calc_rate (wgint bytes, double msecs, int *units)
{
  double dlrate;

  assert (msecs >= 0);
  assert (bytes >= 0);

  if (msecs == 0)
    /* If elapsed time is exactly zero, it means we're under the
       resolution of the timer.  This can easily happen on systems
       that use time() for the timer.  Since the interval lies between
       0 and the timer's resolution, assume half the resolution.  */
    msecs = ptimer_resolution () / 2.0;

  dlrate = 1000.0 * bytes / msecs;
  if (dlrate < 1024.0)
    *units = 0;
  else if (dlrate < 1024.0 * 1024.0)
    *units = 1, dlrate /= 1024.0;
  else if (dlrate < 1024.0 * 1024.0 * 1024.0)
    *units = 2, dlrate /= (1024.0 * 1024.0);
  else
    /* Maybe someone will need this, one day. */
    *units = 3, dlrate /= (1024.0 * 1024.0 * 1024.0);

  return dlrate;
}
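For instance, 2,500,000 bytes transferred in 2,000 ms is 1,250,000 B/s, which the scaling above reduces to about 1.19 MB/s. Below is a minimal standalone sketch of the same unit-scaling rule; demo_rate is a hypothetical helper, not part of wget, and it omits the zero-interval guard that calc_rate gets from ptimer_resolution():

#include <stdio.h>

/* Hypothetical stand-alone illustration of the calc_rate scaling
   rule: divide the raw B/s figure by 1024 until it drops below 1024,
   tracking which unit we landed on.  */
static double
demo_rate (double bytes, double msecs, const char **unit)
{
  static const char *names[] = { "B/s", "KB/s", "MB/s", "GB/s" };
  double rate = 1000.0 * bytes / msecs;
  int u = 0;
  while (u < 3 && rate >= 1024.0)
    {
      rate /= 1024.0;
      ++u;
    }
  *unit = names[u];
  return rate;
}

int
main (void)
{
  const char *unit;
  double rate = demo_rate (2500000.0, 2000.0, &unit);
  printf ("%.2f %s\n", rate, unit);   /* prints "1.19 MB/s" */
  return 0;
}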
/* Maximum number of allowed redirections.  20 was chosen as a
   "reasonable" value, which is low enough to not cause havoc, yet
   high enough to guarantee that normal retrievals will not be hurt by
   the check.  */

#define MAX_REDIRECTIONS 20

#define SUSPEND_POST_DATA do {			\
  post_data_suspended = 1;			\
  saved_post_data = opt.post_data;		\
  saved_post_file_name = opt.post_file_name;	\
  opt.post_data = NULL;				\
  opt.post_file_name = NULL;			\
} while (0)

#define RESTORE_POST_DATA do {				\
  if (post_data_suspended)				\
    {							\
      opt.post_data = saved_post_data;			\
      opt.post_file_name = saved_post_file_name;	\
      post_data_suspended = 0;				\
    }							\
} while (0)

static char *getproxy PARAMS ((struct url *));

/* Retrieve the given URL.  Decides which loop to call -- HTTP, FTP,
   FTP proxy, etc.  */
/* #### This function should be rewritten so it doesn't return from
   multiple points. */
uerr_t
retrieve_url (const char *origurl, char **file, char **newloc,
	      const char *refurl, int *dt)
{
  uerr_t result;
  char *url;
  int location_changed, dummy;
  char *mynewloc, *proxy;
  struct url *u, *proxy_url;
  int up_error_code;		/* url parse error code */
  char *local_file;
  int redirection_count = 0;

  int post_data_suspended = 0;
  char *saved_post_data = NULL;
  char *saved_post_file_name = NULL;

  /* If dt is NULL, use local storage.  */
  if (!dt)
    {
      dt = &dummy;
      dummy = 0;
    }
  url = xstrdup (origurl);
  if (newloc)
    *newloc = NULL;
  if (file)
    *file = NULL;

  u = url_parse (url, &up_error_code);
  if (!u)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", url, url_error (up_error_code));
      xfree (url);
      return URLERROR;
    }

  if (!refurl)
    refurl = opt.referer;

 redirected:

  result = NOCONERROR;
  mynewloc = NULL;
  local_file = NULL;
  proxy_url = NULL;

  proxy = getproxy (u);
  if (proxy)
    {
      /* Parse the proxy URL.  */
      proxy_url = url_parse (proxy, &up_error_code);
      if (!proxy_url)
	{
	  logprintf (LOG_NOTQUIET, _("Error parsing proxy URL %s: %s.\n"),
		     proxy, url_error (up_error_code));
	  xfree (url);
	  RESTORE_POST_DATA;
	  return PROXERR;
	}
      if (proxy_url->scheme != SCHEME_HTTP && proxy_url->scheme != u->scheme)
	{
	  logprintf (LOG_NOTQUIET, _("Error in proxy URL %s: Must be HTTP.\n"),
		     proxy);
	  url_free (proxy_url);
	  xfree (url);
	  RESTORE_POST_DATA;
	  return PROXERR;
	}
    }

  if (u->scheme == SCHEME_HTTP
#ifdef HAVE_SSL
      || u->scheme == SCHEME_HTTPS
#endif
      || (proxy_url && proxy_url->scheme == SCHEME_HTTP))
    {
      result = http_loop (u, &mynewloc, &local_file, refurl, dt, proxy_url);
    }
  else if (u->scheme == SCHEME_FTP)
    {
      /* If this is a redirection, temporarily turn off opt.ftp_glob
	 and opt.recursive, both being undesirable when following
	 redirects.  */
      int oldrec = opt.recursive, oldglob = opt.ftp_glob;
      if (redirection_count)
	opt.recursive = opt.ftp_glob = 0;

      result = ftp_loop (u, dt, proxy_url);
      opt.recursive = oldrec;
      opt.ftp_glob = oldglob;

      /* There is a possibility of having HTTP being redirected to
	 FTP.  In these cases we must decide whether the text is HTML
	 according to the suffix.  The HTML suffixes are `.html',
	 `.htm' and a few others, case-insensitive.  */
      if (redirection_count && local_file && u->scheme == SCHEME_FTP)
	{
	  if (has_html_suffix_p (local_file))
	    *dt |= TEXTHTML;
	}
    }

  if (proxy_url)
    {
      url_free (proxy_url);
      proxy_url = NULL;
    }

  location_changed = (result == NEWLOCATION);
  if (location_changed)
    {
      char *construced_newloc;
      struct url *newloc_parsed;

      assert (mynewloc != NULL);

      if (local_file)
	xfree (local_file);

      /* The HTTP specs only allow absolute URLs to appear in
	 redirects, but a ton of boneheaded webservers and CGIs out
	 there break the rules and use relative URLs, and popular
	 browsers are lenient about this, so wget should be too. */
      construced_newloc = uri_merge (url, mynewloc);
      xfree (mynewloc);
      mynewloc = construced_newloc;

      /* Now, see if this new location makes sense. */
      newloc_parsed = url_parse (mynewloc, &up_error_code);
      if (!newloc_parsed)
	{
	  logprintf (LOG_NOTQUIET, "%s: %s.\n", escnonprint_uri (mynewloc),
		     url_error (up_error_code));
	  url_free (u);
	  xfree (url);
	  xfree (mynewloc);
	  RESTORE_POST_DATA;
	  return result;
	}

      /* Now mynewloc will become newloc_parsed->url, because if the
         Location contained relative paths like .././something, we
         don't want that propagating as url.  */
      xfree (mynewloc);
      mynewloc = xstrdup (newloc_parsed->url);

      /* Check for max. number of redirections.  */
      if (++redirection_count > MAX_REDIRECTIONS)
	{
	  logprintf (LOG_NOTQUIET, _("%d redirections exceeded.\n"),
		     MAX_REDIRECTIONS);
	  url_free (newloc_parsed);
	  url_free (u);
	  xfree (url);
	  xfree (mynewloc);
	  RESTORE_POST_DATA;
	  return WRONGCODE;
	}

      xfree (url);
      url = mynewloc;
      url_free (u);
      u = newloc_parsed;

      /* If we're being redirected from POST, we don't want to POST
	 again.  Many requests answer POST with a redirection to an
	 index page; that redirection is clearly a GET.  We "suspend"
	 POST data for the duration of the redirections, and restore
	 it when we're done.  */
      if (!post_data_suspended)
	SUSPEND_POST_DATA;

      goto redirected;
    }

  if (local_file)
    {
      if (*dt & RETROKF)
	{
	  register_download (u->url, local_file);
	  if (redirection_count && 0 != strcmp (origurl, u->url))
	    register_redirection (origurl, u->url);
	  if (*dt & TEXTHTML)
	    register_html (u->url, local_file);
	}
    }

  if (file)
    *file = local_file ? local_file : NULL;
  else
    xfree_null (local_file);

  url_free (u);

  if (redirection_count)
    {
      if (newloc)
	*newloc = url;
      else
	xfree (url);
    }
  else
    {
      if (newloc)
	*newloc = NULL;
      xfree (url);
    }

  RESTORE_POST_DATA;

  return result;
}
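A side note on the SUSPEND_POST_DATA and RESTORE_POST_DATA macros above: each is wrapped in do { ... } while (0) so that the multi-statement body expands to a single syntactic statement and stays safe inside an unbraced if/else. A minimal compilable sketch of why that wrapper matters (RESET_PAIR is a made-up macro for illustration, not part of wget):

#include <stdio.h>

/* Without the do/while wrapper, only the first assignment would be
   governed by the `if` below, and the dangling `else` would fail to
   parse at all.  */
#define RESET_PAIR(a, b) do { (a) = 0; (b) = 0; } while (0)

int
main (void)
{
  int x = 1, y = 2;
  if (x)
    RESET_PAIR (x, y);        /* expands to exactly one statement */
  else
    printf ("x was zero\n");
  printf ("%d %d\n", x, y);   /* prints "0 0" */
  return 0;
}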
/* Find the URLs in the file and call retrieve_url() for each of
   them.  If HTML is non-zero, treat the file as HTML, and construct
   the URLs accordingly.

   If opt.recursive is set, call retrieve_tree() for each file.  */
uerr_t
retrieve_from_file (const char *file, int html, int *count)
{
  uerr_t status;
  struct urlpos *url_list, *cur_url;

  url_list = (html ? get_urls_html (file, NULL, NULL)
	      : get_urls_file (file));
  status = RETROK;             /* Suppose everything is OK.  */
  *count = 0;                  /* Reset the URL count.  */

  for (cur_url = url_list; cur_url; cur_url = cur_url->next, ++*count)
    {
      char *filename = NULL, *new_file = NULL;
      int dt;

      if (cur_url->ignore_when_downloading)
	continue;

      if (opt.quota && total_downloaded_bytes > opt.quota)
	{
	  status = QUOTEXC;
	  break;
	}
      if ((opt.recursive || opt.page_requisites)
	  && cur_url->url->scheme != SCHEME_FTP)
	status = retrieve_tree (cur_url->url->url);
      else
	status = retrieve_url (cur_url->url->url, &filename, &new_file,
			       NULL, &dt);

      if (filename && opt.delete_after && file_exists_p (filename))
	{
	  DEBUGP (("Removing file due to --delete-after in retrieve_from_file():\n"));
	  logprintf (LOG_VERBOSE, _("Removing %s.\n"), filename);
	  if (unlink (filename))
	    logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno));
	  dt &= ~RETROKF;
	}

      xfree_null (new_file);
      xfree_null (filename);
    }

  /* Free the linked list of URL-s.  */
  free_urlpos (url_list);

  return status;
}

/* Print `giving up', or `retrying', depending on the impending
   action.  N1 and N2 are the attempt number and the attempt limit.  */
void
printwhat (int n1, int n2)
{
  logputs (LOG_VERBOSE, (n1 == n2) ? _("Giving up.\n\n") : _("Retrying.\n\n"));
}

/* If opt.wait or opt.waitretry are specified, and if certain
   conditions are met, sleep the appropriate number of seconds.  See
   the documentation of --wait and --waitretry for more information.

   COUNT is the count of current retrieval, beginning with 1. */
void
sleep_between_retrievals (int count)
{
  static int first_retrieval = 1;

  if (first_retrieval)
    {
      /* Don't sleep before the very first retrieval. */
      first_retrieval = 0;
      return;
    }

  if (opt.waitretry && count > 1)
    {
      /* If opt.waitretry is specified and this is a retry, wait for
	 COUNT-1 number of seconds, or for opt.waitretry seconds.  */
      if (count <= opt.waitretry)
	xsleep (count - 1.0);
      else
	xsleep (opt.waitretry);
    }
  else if (opt.wait)
    {
      if (!opt.random_wait || count > 1)
	/* If random-wait is not specified, or if we are sleeping
	   between retries of the same download, sleep the fixed
	   interval.  */
	xsleep (opt.wait);
      else
	{
	  /* Sleep a random amount of time averaging in opt.wait
	     seconds.  The sleeping amount ranges from 0 to
	     opt.wait*2, inclusive.  */
	  double waitsecs = 2 * opt.wait * random_float ();
	  DEBUGP (("sleep_between_retrievals: avg=%f,sleep=%f\n",
		   opt.wait, waitsecs));
	  xsleep (waitsecs);
	}
    }
}
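Since random_float() (defined elsewhere in wget) returns a value in [0, 1), waitsecs is uniform on [0, 2*opt.wait), so the sleeps average out to exactly opt.wait. A quick self-contained check of that average, with the C library's rand() standing in for random_float():

#include <stdio.h>
#include <stdlib.h>

/* Empirically confirm that 2 * wait * U, with U uniform on [0, 1],
   averages to `wait`.  rand()/RAND_MAX is a stand-in for wget's
   random_float() here.  */
int
main (void)
{
  const double wait = 5.0;      /* pretend --wait=5 was given */
  double sum = 0.0;
  int i, n = 1000000;

  srand (42);
  for (i = 0; i < n; i++)
    sum += 2.0 * wait * ((double) rand () / RAND_MAX);

  printf ("average sleep: %.3f (expected ~%.3f)\n", sum / n, wait);
  return 0;
}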
/* Free the linked list of urlpos.  */
void
free_urlpos (struct urlpos *l)
{
  while (l)
    {
      struct urlpos *next = l->next;
      if (l->url)
	url_free (l->url);
      xfree_null (l->local_name);
      xfree (l);
      l = next;
    }
}

/* Rotate FNAME opt.backups times */
void
rotate_backups (const char *fname)
{
  int maxlen = strlen (fname) + 1 + numdigit (opt.backups) + 1;
  char *from = (char *)alloca (maxlen);
  char *to = (char *)alloca (maxlen);
  struct_stat sb;
  int i;

  if (stat (fname, &sb) == 0)
    if (S_ISREG (sb.st_mode) == 0)
      return;

  for (i = opt.backups; i > 1; i--)
    {
      sprintf (from, "%s.%d", fname, i - 1);
      sprintf (to, "%s.%d", fname, i);
      rename (from, to);
    }

  sprintf (to, "%s.%d", fname, 1);
  rename (fname, to);
}

static int no_proxy_match PARAMS ((const char *, const char **));

/* Return the URL of the proxy appropriate for url U.  */
static char *
getproxy (struct url *u)
{
  char *proxy = NULL;
  char *rewritten_url;
  static char rewritten_storage[1024];

  if (!opt.use_proxy)
    return NULL;
  if (!no_proxy_match (u->host, (const char **)opt.no_proxy))
    return NULL;

  switch (u->scheme)
    {
    case SCHEME_HTTP:
      proxy = opt.http_proxy ? opt.http_proxy : getenv ("http_proxy");
      break;
#ifdef HAVE_SSL
    case SCHEME_HTTPS:
      proxy = opt.https_proxy ? opt.https_proxy : getenv ("https_proxy");
      break;
#endif
    case SCHEME_FTP:
      proxy = opt.ftp_proxy ? opt.ftp_proxy : getenv ("ftp_proxy");
      break;
    case SCHEME_INVALID:
      break;
    }
  if (!proxy || !*proxy)
    return NULL;

  /* Handle shorthands.  `rewritten_storage' is a kludge to allow
     getproxy() to return static storage. */
  rewritten_url = rewrite_shorthand_url (proxy);
  if (rewritten_url)
    {
      strncpy (rewritten_storage, rewritten_url, sizeof (rewritten_storage));
      rewritten_storage[sizeof (rewritten_storage) - 1] = '\0';
      proxy = rewritten_storage;
    }

  return proxy;
}

/* Should a host be accessed through proxy, concerning no_proxy?  */
static int
no_proxy_match (const char *host, const char **no_proxy)
{
  if (!no_proxy)
    return 1;
  else
    return !sufmatch (no_proxy, host);
}
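no_proxy_match() returns nonzero when the host should still go through the proxy; the actual suffix test lives in sufmatch(), which is defined elsewhere in wget. The following self-contained sketch shows one plausible suffix-matching rule (demo_sufmatch is hypothetical, not wget's implementation):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for wget's sufmatch(): return 1 if HOST ends
   with any suffix in the NULL-terminated list SUFFIXES.  */
static int
demo_sufmatch (const char **suffixes, const char *host)
{
  size_t hlen = strlen (host);
  for (; *suffixes; suffixes++)
    {
      size_t slen = strlen (*suffixes);
      if (slen <= hlen && !strcmp (host + hlen - slen, *suffixes))
	return 1;
    }
  return 0;
}

int
main (void)
{
  const char *no_proxy[] = { ".example.com", "localhost", NULL };
  printf ("%d\n", demo_sufmatch (no_proxy, "ftp.example.com"));  /* 1: bypass proxy */
  printf ("%d\n", demo_sufmatch (no_proxy, "www.gnu.org"));      /* 0: use proxy */
  return 0;
}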
