author     Dave Reisner <d@falconindy.com>  2011-02-08 21:05:53 -0500
committer  Dave Reisner <d@falconindy.com>  2011-03-09 15:22:32 -0500
commit     f2eac18a6ec62db3ec53744064e05416024c1b30
tree       7291505dff3663bf351a0cd37537e51cf010aaa5
parent     4ad4527d104c915efa912d3e1e3a543fad7aca34
download   pacman-f2eac18a6ec62db3ec53744064e05416024c1b30.tar.xz
Remove all traces of libfetch
Signed-off-by: Dave Reisner <d@falconindy.com>
-rw-r--r--  configure.ac        |  24
-rw-r--r--  lib/libalpm/alpm.c  |  13
-rw-r--r--  lib/libalpm/dload.c | 284
-rw-r--r--  lib/libalpm/dload.h |   2
-rw-r--r--  lib/libalpm/error.c |  19
5 files changed, 2 insertions(+), 340 deletions(-)
diff --git a/configure.ac b/configure.ac
index 0ae9cba8..88dee2f9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -93,11 +93,6 @@ AC_ARG_WITH(openssl,
 	AS_HELP_STRING([--with-openssl], [use OpenSSL crypto implementations instead of internal routines]),
 	[], [with_openssl=check])
 
-# Help line for libfetch
-AC_ARG_WITH(fetch,
-	AS_HELP_STRING([--with-fetch], [use libfetch as an internal downloader]),
-	[], [with_fetch=no])
-
 # Help line for libcurl
 AC_ARG_WITH(curl,
 	AS_HELP_STRING([--with-curl], [use libcurl as an internal downloader]),
@@ -166,25 +161,6 @@ AS_IF([test "x$with_curl" != "xno"],
 	AC_MSG_RESULT(no))
 AM_CONDITIONAL([HAVE_LIBCURL], [test "x$ac_cv_lib_curl_curl_easy_perform" = "xyes"])
 
-# Enable or disable usage of libfetch
-AC_MSG_CHECKING(whether to link with libfetch)
-AS_IF([test "x$with_fetch" != "xno"],
-	[AC_MSG_RESULT(yes)
-	AC_CHECK_LIB([fetch], [fetchParseURL], ,
-		[if test "x$with_fetch" != "xcheck"; then
-			AC_MSG_FAILURE([--with-fetch was given, but -lfetch was not found])
-		fi],
-		[-lcrypto -ldl])
-	# Check if libfetch supports connnection caching which we use
-	AS_IF([test "x$ac_cv_lib_fetch_fetchParseURL" = "xyes"],
-		[AC_CHECK_DECL(fetchConnectionCacheInit, ,
-			AC_MSG_ERROR([libfetch must be version 2.28 or greater]),
-			[#include <fetch.h>])
-		])
-	],
-	AC_MSG_RESULT(no))
-AM_CONDITIONAL([HAVE_LIBFETCH], [test "x$ac_cv_lib_fetch_fetchParseURL" = "xyes"])
-
 # Checks for header files.
 AC_CHECK_HEADERS([fcntl.h glob.h libintl.h locale.h mntent.h string.h \
 	sys/ioctl.h sys/mount.h sys/param.h sys/statvfs.h \
diff --git a/lib/libalpm/alpm.c b/lib/libalpm/alpm.c
index e5b47444..db2a63de 100644
--- a/lib/libalpm/alpm.c
+++ b/lib/libalpm/alpm.c
@@ -27,11 +27,6 @@
 #include <curl/curl.h>
 #endif
 
-/* connection caching setup */
-#ifdef HAVE_LIBFETCH
-#include <fetch.h>
-#endif
-
 /* libalpm */
 #include "alpm.h"
 #include "alpm_list.h"
@@ -69,10 +64,6 @@ int SYMEXPORT alpm_initialize(void)
 	bindtextdomain("libalpm", LOCALEDIR);
 #endif
 
-#ifdef HAVE_LIBFETCH
-	fetchConnectionCacheInit(5, 1);
-#endif
-
 #ifdef HAVE_LIBCURL
 	curl_global_init(CURL_GLOBAL_SSL);
 	handle->curl = curl_easy_init();
@@ -97,10 +88,6 @@ int SYMEXPORT alpm_release(void)
 	_alpm_handle_free(handle);
 	handle = NULL;
 
-#ifdef HAVE_LIBFETCH
-	fetchConnectionCacheClose();
-#endif
-
 #ifdef HAVE_LIBCURL
 	curl_global_cleanup();
 #endif
diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index 51dda5e2..25bb659b 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -39,10 +39,6 @@
 #include <curl/curl.h>
 #endif
 
-#ifdef HAVE_LIBFETCH
-#include <fetch.h>
-#endif
-
 /* libalpm */
 #include "dload.h"
 #include "alpm_list.h"
@@ -59,7 +55,7 @@ static char *get_filename(const char *url) {
 	return(filename);
 }
 
-#if defined(HAVE_LIBFETCH) || defined(HAVE_LIBCURL)
+#ifdef HAVE_LIBCURL
 static char *get_destfile(const char *path, const char *filename) {
 	char *destfile;
 	/* len = localpath len + filename len + null */
@@ -88,269 +84,7 @@ static void inthandler(int signum) {
 	dload_interrupted = 1;
 }
 
-#endif
-
-#ifdef HAVE_LIBFETCH
-static const char *fetch_gethost(struct url *fileurl)
-{
-	const char *host = _("disk");
-	if(strcmp(SCHEME_FILE, fileurl->scheme) != 0) {
-		host = fileurl->host;
-	}
-	return(host);
-}
-
-static int fetch_download_internal(const char *url, const char *localpath,
-		int force) {
-	FILE *localf = NULL;
-	struct stat st;
-	int ret = 0;
-	off_t dl_thisfile = 0;
-	ssize_t nread = 0;
-	char *tempfile, *destfile, *filename;
-	struct sigaction sig_pipe[2], sig_int[2];
-
-	off_t local_size = 0;
-	time_t local_time = 0;
-
-	struct url *fileurl;
-	struct url_stat ust;
-	fetchIO *dlf = NULL;
-
-	char buffer[PM_DLBUF_LEN];
-
-	filename = get_filename(url);
-	if(!filename) {
-		_alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url);
-		RET_ERR(PM_ERR_SERVER_BAD_URL, -1);
-	}
-
-	fileurl = fetchParseURL(url);
-	if(!fileurl) {
-		_alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url);
-		RET_ERR(PM_ERR_LIBFETCH, -1);
-	}
-
-	destfile = get_destfile(localpath, filename);
-	tempfile = get_tempfile(localpath, filename);
-
-	if(stat(tempfile, &st) == 0 && S_ISREG(st.st_mode) && st.st_size > 0) {
-		_alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation\n");
-		local_time = fileurl->last_modified = st.st_mtime;
-		local_size = fileurl->offset = (off_t)st.st_size;
-		dl_thisfile = st.st_size;
-		localf = fopen(tempfile, "ab");
-	} else if(!force && stat(destfile, &st) == 0 && S_ISREG(st.st_mode) && st.st_size > 0) {
-		_alpm_log(PM_LOG_DEBUG, "destfile found, using mtime only\n");
-		local_time = fileurl->last_modified = st.st_mtime;
-		local_size = /* no fu->off here */ (off_t)st.st_size;
-	} else {
-		_alpm_log(PM_LOG_DEBUG, "no file found matching criteria, starting from scratch\n");
-	}
-
-	/* pass the raw filename for passing to the callback function */
-	_alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", filename);
-
-	/* print proxy info for debug purposes */
-	_alpm_log(PM_LOG_DEBUG, "HTTP_PROXY: %s\n", getenv("HTTP_PROXY"));
-	_alpm_log(PM_LOG_DEBUG, "http_proxy: %s\n", getenv("http_proxy"));
-	_alpm_log(PM_LOG_DEBUG, "FTP_PROXY: %s\n", getenv("FTP_PROXY"));
-	_alpm_log(PM_LOG_DEBUG, "ftp_proxy: %s\n", getenv("ftp_proxy"));
-
-	/* 10s timeout */
-	fetchTimeout = 10;
-
-	/* ignore any SIGPIPE signals- these may occur if our FTP socket dies or
-	 * something along those lines. Store the old signal handler first. */
-	sig_pipe[NEW].sa_handler = SIG_IGN;
-	sigemptyset(&sig_pipe[NEW].sa_mask);
-	sig_pipe[NEW].sa_flags = 0;
-	sigaction(SIGPIPE, NULL, &sig_pipe[OLD]);
-	sigaction(SIGPIPE, &sig_pipe[NEW], NULL);
-
-	dload_interrupted = 0;
-	sig_int[NEW].sa_handler = &inthandler;
-	sigemptyset(&sig_int[NEW].sa_mask);
-	sig_int[NEW].sa_flags = 0;
-	sigaction(SIGINT, NULL, &sig_int[OLD]);
-	sigaction(SIGINT, &sig_int[NEW], NULL);
-
-	/* NOTE: libfetch does not reset the error code, be sure to do it before
-	 * calls into the library */
-
-	/* TODO: if we call fetchStat() and get a redirect (disabling automagic
-	 * redirect following), we should repeat the file locator stuff and get a new
-	 * filename rather than only base if off the first URL, and then verify
-	 * get_filename() didn't return ''. Of course, libfetch might not even allow
-	 * us to even get that URL...FS#22645. This would allow us to download things
-	 * without totally puking like
-	 * http://www.archlinux.org/packages/community/x86_64/exim/download/ */
-
-	/* find out the remote size *and* mtime in one go. there is a lot of
-	 * trouble in trying to do both size and "if-modified-since" logic in a
-	 * non-stat request, so avoid it. */
-	fetchLastErrCode = 0;
-	if(fetchStat(fileurl, &ust, "") == -1) {
-		pm_errno = PM_ERR_LIBFETCH;
-		_alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"),
-				filename, fetch_gethost(fileurl), fetchLastErrString);
-		ret = -1;
-		goto cleanup;
-	}
-	check_stop();
-
-	_alpm_log(PM_LOG_DEBUG, "ust.mtime: %ld local_time: %ld compare: %ld\n",
-			ust.mtime, local_time, local_time - ust.mtime);
-	_alpm_log(PM_LOG_DEBUG, "ust.size: %jd local_size: %jd compare: %jd\n",
-			(intmax_t)ust.size, (intmax_t)local_size, (intmax_t)(local_size - ust.size));
-	if(!force && ust.mtime && ust.mtime == local_time
-			&& ust.size && ust.size == local_size) {
-		/* the remote time and size values agreed with what we have, so move on
-		 * because there is nothing more to do. */
-		_alpm_log(PM_LOG_DEBUG, "files are identical, skipping %s\n", filename);
-		ret = 1;
-		goto cleanup;
-	}
-	if(!ust.mtime || ust.mtime != local_time) {
-		_alpm_log(PM_LOG_DEBUG, "mtimes were different or unavailable, downloading %s from beginning\n", filename);
-		fileurl->offset = 0;
-	}
-
-	fetchLastErrCode = 0;
-	dlf = fetchGet(fileurl, "");
-	check_stop();
-
-	if(fetchLastErrCode != 0 || dlf == NULL) {
-		pm_errno = PM_ERR_LIBFETCH;
-		_alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"),
-				filename, fetch_gethost(fileurl), fetchLastErrString);
-		ret = -1;
-		goto cleanup;
-	} else {
-		_alpm_log(PM_LOG_DEBUG, "connected to %s successfully\n", fileurl->host);
-	}
-
-	if(localf && fileurl->offset == 0) {
-		_alpm_log(PM_LOG_WARNING, _("resuming download of %s not possible; starting over\n"), filename);
-		fclose(localf);
-		localf = NULL;
-	} else if(fileurl->offset) {
-		_alpm_log(PM_LOG_DEBUG, "resuming download at position %jd\n", (intmax_t)fileurl->offset);
-	}
-
-
-	if(localf == NULL) {
-		_alpm_rmrf(tempfile);
-		fileurl->offset = (off_t)0;
-		dl_thisfile = 0;
-		localf = fopen(tempfile, "wb");
-		if(localf == NULL) { /* still null? */
-			pm_errno = PM_ERR_RETRIEVE;
-			_alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"),
-					tempfile, strerror(errno));
-			ret = -1;
-			goto cleanup;
-		}
-	}
-
-	/* Progress 0 - initialize */
-	if(handle->dlcb) {
-		handle->dlcb(filename, 0, ust.size);
-	}
-
-	while((nread = fetchIO_read(dlf, buffer, PM_DLBUF_LEN)) > 0) {
-		check_stop();
-		size_t nwritten = 0;
-		nwritten = fwrite(buffer, 1, (size_t)nread, localf);
-		if((nwritten != (size_t)nread) || ferror(localf)) {
-			pm_errno = PM_ERR_RETRIEVE;
-			_alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"),
-					tempfile, strerror(errno));
-			ret = -1;
-			goto cleanup;
-		}
-		dl_thisfile += nread;
-
-		if(handle->dlcb) {
-			handle->dlcb(filename, dl_thisfile, ust.size);
-		}
-	}
-
-	/* did the transfer complete normally? */
-	if (nread == -1) {
-		/* not PM_ERR_LIBFETCH here because libfetch error string might be empty */
-		pm_errno = PM_ERR_RETRIEVE;
-		_alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s\n"),
-				filename, fetch_gethost(fileurl));
-		ret = -1;
-		goto cleanup;
-	}
-
-	if (ust.size != -1 && dl_thisfile < ust.size) {
-		pm_errno = PM_ERR_RETRIEVE;
-		_alpm_log(PM_LOG_ERROR, _("%s appears to be truncated: %jd/%jd bytes\n"),
-				filename, (intmax_t)dl_thisfile, (intmax_t)ust.size);
-		ret = -1;
-		goto cleanup;
-	}
-
-	/* probably safer to close the file descriptors now before renaming the file,
-	 * for example to make sure the buffers are flushed.
-	 */
-	fclose(localf);
-	localf = NULL;
-	fetchIO_close(dlf);
-	dlf = NULL;
-
-	/* set the times on the file to the same as that of the remote file */
-	if(ust.mtime) {
-		struct timeval tv[2];
-		memset(&tv, 0, sizeof(tv));
-		tv[0].tv_sec = ust.atime;
-		tv[1].tv_sec = ust.mtime;
-		utimes(tempfile, tv);
-	}
-	if(rename(tempfile, destfile)) {
-		_alpm_log(PM_LOG_ERROR, _("could not rename %s to %s (%s)\n"),
-				tempfile, destfile, strerror(errno));
-		ret = -1;
-	}
-	ret = 0;
-
-cleanup:
-	FREE(tempfile);
-	FREE(destfile);
-	if(localf != NULL) {
-		/* if we still had a local file open, we got interrupted. set the mtimes on
-		 * the file accordingly. */
-		fflush(localf);
-		if(ust.mtime) {
-			struct timeval tv[2];
-			memset(&tv, 0, sizeof(tv));
-			tv[0].tv_sec = ust.atime;
-			tv[1].tv_sec = ust.mtime;
-			futimes(fileno(localf), tv);
-		}
-		fclose(localf);
-	}
-	if(dlf != NULL) {
-		fetchIO_close(dlf);
-	}
-	fetchFreeURL(fileurl);
-
-	/* restore the old signal handlers */
-	sigaction(SIGINT, &sig_int[OLD], NULL);
-	sigaction(SIGPIPE, &sig_pipe[OLD], NULL);
-	/* if we were interrupted, trip the old handler */
-	if(dload_interrupted) {
-		raise(SIGINT);
-	}
-
-	return(ret);
-}
-#endif
 
-#ifdef HAVE_LIBCURL
 static int curl_progress(void *filename, double dltotal, double dlnow,
 		double ultotal, double ulnow)
 {
@@ -561,21 +295,7 @@ cleanup:
 static int download(const char *url, const char *localpath,
 		int force) {
 	if(handle->fetchcb == NULL) {
-#if defined(HAVE_LIBFETCH) && defined(HAVE_LIBCURL)
-		const char *pmdownloader = getenv("PACMANDL");
-		if(!pmdownloader || strcmp(pmdownloader, "curl") == 0) {
-			printf(">> using libcurl as internal downloader\n");
-			return(curl_download_internal(url, localpath, force));
-		} else if(strcmp(pmdownloader, "fetch") == 0) {
-			printf(">> using libfetch as internal downloader\n");
-			return(fetch_download_internal(url, localpath, force));
-		} else {
-			_alpm_log(PM_LOG_ERROR, "PACMANDL unset or invalid! Use `curl' or `fetch'\n");
-			return(-1);
-		}
-#elif HAVE_LIBFETCH
-		return(fetch_download_internal(url, localpath, force));
-#elif HAVE_LIBCURL
+#ifdef HAVE_LIBCURL
 		return(curl_download_internal(url, localpath, force));
 #else
 		RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1);
diff --git a/lib/libalpm/dload.h b/lib/libalpm/dload.h
index 63266491..e8d99b23 100644
--- a/lib/libalpm/dload.h
+++ b/lib/libalpm/dload.h
@@ -25,8 +25,6 @@
 
 #include <time.h>
 
-#define PM_DLBUF_LEN (1024 * 16)
-
 int _alpm_download_single_file(const char *filename, alpm_list_t *servers,
 		const char *localpath, int force);
 
diff --git a/lib/libalpm/error.c b/lib/libalpm/error.c
index 19c7d92c..cf98cc75 100644
--- a/lib/libalpm/error.c
+++ b/lib/libalpm/error.c
@@ -20,22 +20,10 @@
 
 #include "config.h"
 
-/* TODO: needed for the libfetch stuff, unfortunately- we should kill it */
-#include <stdio.h>
-/* the following two are needed for FreeBSD's libfetch */
-#include <limits.h> /* PATH_MAX */
-#if defined(HAVE_SYS_PARAM_H)
-#include <sys/param.h> /* MAXHOSTNAMELEN */
-#endif
-
 #ifdef HAVE_LIBCURL
 #include <curl/curl.h>
 #endif
 
-#ifdef HAVE_LIBFETCH
-#include <fetch.h> /* fetchLastErrString */
-#endif
-
 /* libalpm */
 #include "util.h"
 #include "alpm.h"
@@ -152,13 +140,6 @@ const char SYMEXPORT *alpm_strerror(int err)
 			 * requires the archive struct, so we can't. Just use a generic
 			 * error string instead. */
 			return _("libarchive error");
-		case PM_ERR_LIBFETCH:
-#ifdef HAVE_LIBFETCH
-			return fetchLastErrString;
-#else
-			/* obviously shouldn't get here... */
-			return _("download library error");
-#endif
 		case PM_ERR_LIBCURL:
 #ifdef HAVE_LIBCURL
 			return(curl_easy_strerror(handle->curlerr));
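
With this change, curl_download_internal() in dload.c is the only internal downloader left: the PACMANDL environment switch disappears along with the libfetch branch, and download() simply calls the curl path whenever HAVE_LIBCURL is defined. That function is not shown in the hunks above; the following is only a rough sketch of the equivalent libcurl calls covering what the deleted fetch_download_internal() handled by hand -- resuming from a partial file, a 10-second timeout, and carrying the remote mtime onto the local file. sketch_download() is an illustrative name, not libalpm code, and error handling is pared down.

/* Illustrative only -- not the actual curl_download_internal() from dload.c.
 * Assumes curl_global_init(CURL_GLOBAL_SSL) has already been called, as
 * alpm_initialize() does in the hunk above. */
#include <stdio.h>
#include <sys/stat.h>
#include <utime.h>
#include <curl/curl.h>

static int sketch_download(const char *url, const char *dest)
{
	struct stat st;
	curl_off_t offset = 0;
	long remote_mtime = -1;
	FILE *out;
	CURLcode res;
	CURL *curl = curl_easy_init();

	if(curl == NULL) {
		return -1;
	}

	/* resume if a partial file already exists, like the old tempfile logic */
	if(stat(dest, &st) == 0 && S_ISREG(st.st_mode)) {
		offset = (curl_off_t)st.st_size;
	}
	out = fopen(dest, offset ? "ab" : "wb");
	if(out == NULL) {
		curl_easy_cleanup(curl);
		return -1;
	}

	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, out);       /* default callback fwrite()s here */
	curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, offset);
	curl_easy_setopt(curl, CURLOPT_FILETIME, 1L);         /* ask the server for the mtime */
	curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
	curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10L);  /* matches the old fetchTimeout = 10 */

	res = curl_easy_perform(curl);
	fclose(out);

	if(res == CURLE_OK &&
			curl_easy_getinfo(curl, CURLINFO_FILETIME, &remote_mtime) == CURLE_OK &&
			remote_mtime != -1) {
		/* keep the server's timestamp, as the removed code did with utimes() */
		struct utimbuf tbuf = { remote_mtime, remote_mtime };
		utime(dest, &tbuf);
	} else if(res != CURLE_OK) {
		fprintf(stderr, "download failed: %s\n", curl_easy_strerror(res));
	}

	curl_easy_cleanup(curl);
	return (res == CURLE_OK) ? 0 : -1;
}

The curl_progress() callback visible in the context lines of dload.c is the libcurl counterpart of the progress reporting the removed loop drove through handle->dlcb, and the real code still downloads to a temporary file and rename()s it into place rather than writing the destination directly as this sketch does.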