Close issue #33: set a default cache directory

Fufu Fang 2019-04-30 08:05:46 +01:00
parent 99b530ee15
commit 283520136c
9 changed files with 110 additions and 29 deletions

View File

@@ -1,4 +1,4 @@
VERSION=1.1.4
VERSION=1.1.5
CFLAGS+= -g -O2 -Wall -Wextra -Wshadow\
-D_FILE_OFFSET_BITS=64 -DVERSION=\"$(VERSION)\" \

View File

@@ -23,26 +23,27 @@ the filesystem is visiting.
Useful options:
-f Run HTTPDirFS in foreground
-u --username HTTP authentication username
-p --password HTTP authentication password
-P --proxy Proxy for libcurl, for more details refer to
https://curl.haxx.se/libcurl/c/CURLOPT_PROXY.html
--proxy-username Username for the proxy
--proxy-password Password for the proxy
--cache Set a cache folder, by default this is disabled
--cache Enable cache, by default this is disabled
--cache-location Set a custom cache location, by default it is
located in ${XDG_CACHE_HOME}/httpdirfs
--dl-seg-size The size of each download segment in MB,
defaults to 8MB.
--max-seg-count The maximum number of download segments a file
can have. By default it is set to 1048576. This
means the maximum memory usage per file is 1MB
memory. This allows caching file up to 8TB in
can have. By default it is set to 128*1024. This
means the maximum memory usage per file is 128KB.
This allows caching files up to 1TB in size,
assuming you are using the default segment size
(see the worked example after this list).
--max-conns The maximum number of network connections that
libcurl is allowed to make, defaults to 10.
--retry-wait The waiting interval in seconds before retrying an
HTTP request after encountering an error,
defaults to 5 seconds.
--user-agent The user agent string, defaults to "HTTPDirFS".
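For context, here is a worked example of the arithmetic behind those defaults. It is a standalone sketch: the 8MB segment size and 128*1024 segment count are the documented defaults above (DATA_BLK_SZ and MAX_SEGBC in the code), while the assumption of roughly one byte of per-file bookkeeping per segment is inferred from the 128KB figure, not taken from the implementation.

#include <stdio.h>

int main(void)
{
    long long dl_seg_size = 8LL * 1024 * 1024;   /* --dl-seg-size default: 8MB */
    long long max_seg_count = 128LL * 1024;      /* --max-seg-count default: 128*1024 */

    /* assumed: ~1 byte of bookkeeping per segment */
    long long per_file_overhead = max_seg_count;
    long long max_file_size = max_seg_count * dl_seg_size;

    printf("per-file overhead: %lldKB\n", per_file_overhead / 1024);                 /* 128KB */
    printf("largest cacheable file: %lldGB\n", max_file_size / (1024 * 1024 * 1024)); /* 1024GB = 1TB */
    return 0;
}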

View File

@@ -52,18 +52,74 @@ static char *META_DIR;
*/
static char *DATA_DIR;
void CacheSystem_init(const char *path)
/**
* \brief Calculate cache system directory
*/
static char *CacheSystem_calc_dir(const char *url)
{
char *xdg_cache_home = getenv("XDG_CACHE_HOME");
if (!xdg_cache_home) {
char *home = getenv("HOME");
char *xdg_cache_home_default = "/.cache";
xdg_cache_home = path_append(home, xdg_cache_home_default);
}
if (mkdir(xdg_cache_home, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)
&& (errno != EEXIST)) {
fprintf(stderr, "CacheSystem_calc_dir(): mkdir(): %s\n",
strerror(errno));
}
char *cache_dir_root = path_append(xdg_cache_home, "/httpdirfs/");
if (mkdir(cache_dir_root, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)
&& (errno != EEXIST)) {
fprintf(stderr, "CacheSystem_calc_dir(): mkdir(): %s\n",
strerror(errno));
}
char *fn = path_append(cache_dir_root, "/CACHEDIR.TAG");
FILE *fp = fopen(fn, "w");
if (fp) {
fprintf(fp,
"Signature: 8a477f597d28d172789f06886806bc55\n\
# This file is a cache directory tag created by httpdirfs.\n\
# For information about cache directory tags, see:\n\
# http://www.brynosaurus.com/cachedir/\n");
if (ferror(fp)) {
fprintf(stderr,
"CacheSystem_calc_dir(): fwrite(): encountered error!\n");
}
if (fclose(fp)) {
fprintf(stderr, "CacheSystem_calc_dir(): fclose(%s): %s\n", fn,
strerror(errno));
}
} else {
fprintf(stderr, "CacheSystem_calc_dir(): fopen(%s): %s\n", fn,
strerror(errno));
}
free(fn);
CURL* c = curl_easy_init();
char *escaped_url = curl_easy_escape(c, url, 0);
char *full_path = path_append(cache_dir_root, escaped_url);
if (mkdir(full_path, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH)
&& (errno != EEXIST)) {
fprintf(stderr, "CacheSystem_calc_dir(): mkdir(): %s\n",
strerror(errno));
}
free(cache_dir_root);
curl_free(escaped_url);
curl_easy_cleanup(c);
return full_path;
}
void CacheSystem_init(const char *path, int url_supplied)
{
if (url_supplied) {
path = CacheSystem_calc_dir(path);
}
fprintf(stderr, "CacheSystem_init(): directory: %s\n", path);
DIR* dir;
/*
* Check if the top-level cache directory exists; if not, exit the
* program. We don't want to create a folder unintentionally.
*/
dir = opendir(path);
if (dir) {
closedir(dir);
} else {
if (!dir) {
fprintf(stderr,
"CacheSystem_init(): opendir(): %s\n", strerror(errno));
exit(EXIT_FAILURE);
@@ -208,7 +264,7 @@ static int Meta_write(Cache *cf)
/* Error checking for fwrite */
if (ferror(fp)) {
fprintf(stderr,
"Meta_write(): fwrite(): encountered error (from ferror)!\n");
"Meta_write(): fwrite(): encountered error!\n");
return -1;
}
@@ -297,7 +353,7 @@ static long Data_read(Cache *cf, uint8_t *buf, off_t len, off_t offset)
if (ferror(cf->dfp)) {
/* filesystem error */
fprintf(stderr,
"Data_read(): fread(): encountered error (from ferror)!\n");
"Data_read(): fread(): encountered error!\n");
}
}
@@ -338,7 +394,7 @@ static long Data_write(Cache *cf, const uint8_t *buf, off_t len,
if (ferror(cf->dfp)) {
/* filesystem error */
fprintf(stderr,
"Data_write(): fwrite(): encountered error (from ferror)!\n");
"Data_write(): fwrite(): encountered error!\n");
}
}

View File

@@ -58,7 +58,6 @@ extern int DATA_BLK_SZ;
*/
extern int MAX_SEGBC;
/**
* \brief initialise the cache system directories
* \details This function basically sets up the following variables:
@@ -68,7 +67,7 @@ extern int MAX_SEGBC;
* If these directories do not exist, they will be created.
* \note Called by network_init()
*/
void CacheSystem_init(const char *dir);
void CacheSystem_init(const char *path, int url_supplied);
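As a usage sketch, the two calling modes mirror the call sites added to network_init() later in this commit; the wrapper function name here is made up for illustration, and cache_dir may be NULL when --cache-location was not given.

#include "cache.h"   /* for CacheSystem_init() */

static void cache_setup_sketch(const char *cache_dir, const char *url)
{
    if (cache_dir) {
        CacheSystem_init(cache_dir, 0);   /* use the user-supplied directory as-is */
    } else {
        CacheSystem_init(url, 1);         /* derive ${XDG_CACHE_HOME}/httpdirfs/<escaped URL>/ from the URL */
    }
}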
/**
* \brief Create directories under the cache directory structure, if they do

View File

@@ -195,10 +195,12 @@ static void LinkTable_fill(LinkTable *linktbl)
strncpy(this_link->f_url, url, MAX_PATH_LEN);
free(url);
char *unescaped_linkname;
unescaped_linkname = curl_easy_unescape(NULL, this_link->linkname,
CURL* c = curl_easy_init();
unescaped_linkname = curl_easy_unescape(c, this_link->linkname,
0, NULL);
strncpy(this_link->linkname, unescaped_linkname, MAX_FILENAME_LEN);
curl_free(unescaped_linkname);
curl_easy_cleanup(c);
Link_get_stat(this_link);
}
/* Block until the LinkTable is filled up */
@@ -313,7 +315,8 @@ HTTP %ld\n", url, http_resp);
int skip_fill = 0;
char *unescaped_path;
unescaped_path = curl_easy_unescape(NULL, url + ROOT_LINK_OFFSET, 0, NULL);
CURL* c = curl_easy_init();
unescaped_path = curl_easy_unescape(c, url + ROOT_LINK_OFFSET, 0, NULL);
if (CACHE_SYSTEM_INIT) {
CacheDir_create(unescaped_path);
LinkTable *disk_linktbl;
@@ -344,6 +347,7 @@ HTTP %ld\n", url, http_resp);
}
curl_free(unescaped_path);
curl_easy_cleanup(c);
LinkTable_print(linktbl);
return linktbl;

View File

@@ -128,12 +128,13 @@ parse_arg_list(int argc, char **argv, char ***fuse_argv, int *fuse_argc)
{"proxy", required_argument, NULL, 'P'}, /* 5 */
{"proxy-username", required_argument, NULL, 'L'}, /* 6 */
{"proxy-password", required_argument, NULL, 'L'}, /* 7 */
{"cache", required_argument, NULL, 'L'}, /* 8 */
{"cache", no_argument, NULL, 'L'}, /* 8 */
{"dl-seg-size", required_argument, NULL, 'L'}, /* 9 */
{"max-seg-count", required_argument, NULL, 'L'}, /* 10 */
{"max-conns", required_argument, NULL, 'L'}, /* 11 */
{"user-agent", required_argument, NULL, 'L'}, /* 12 */
{"retry-wait", required_argument, NULL, 'L'}, /* 13 */
{"cache-location", required_argument, NULL, 'L'}, /* 14 */
{0, 0, 0, 0}
};
while ((c =
@@ -181,7 +182,7 @@ parse_arg_list(int argc, char **argv, char ***fuse_argv, int *fuse_argc)
NETWORK_CONFIG.proxy_pass = strdup(optarg);
break;
case 8:
CacheSystem_init(optarg);
NETWORK_CONFIG.cache_enabled = 1;
break;
case 9:
DATA_BLK_SZ = atoi(optarg) * 1024 * 1024;
@@ -198,6 +199,9 @@ parse_arg_list(int argc, char **argv, char ***fuse_argv, int *fuse_argc)
case 13:
HTTP_429_WAIT = atoi(optarg);
break;
case 14:
NETWORK_CONFIG.cache_dir = strdup(optarg);
break;
default:
fprintf(stderr, "Error: Invalid option\n");
add_arg(fuse_argv, fuse_argc, "--help");
@@ -250,13 +254,15 @@ static void print_http_options()
https://curl.haxx.se/libcurl/c/CURLOPT_PROXY.html\n\
--proxy-username Username for the proxy\n\
--proxy-password Password for the proxy\n\
--cache Set a cache folder, by default this is disabled\n\
--cache Enable cache, by default this is disabled\n\
--cache-location Set a custom cache location, by default it is \n\
located in ${XDG_CACHE_HOME}/httpdirfs \n\
--dl-seg-size The size of each download segment in MB,\n\
defaults to 8MB.\n\
--max-seg-count The maximum number of download segments a file\n\
can have. By default it is set to 1048576. This\n\
means the maximum memory usage per file is 1MB\n\
memory. This allows caching file up to 8TB in\n\
can have. By default it is set to 128*1024. This\n\
means the maximum memory usage per file is 128KB.\n\
This allows caching files up to 1TB in\n\
size, assuming you are using the default segment\n\
size.\n\
--max-conns The maximum number of network connections that\n\

View File

@@ -1,5 +1,7 @@
#include "network.h"
#include "cache.h"
#include <openssl/crypto.h>
#include <errno.h>
@@ -217,6 +219,8 @@ void network_config_init()
NETWORK_CONFIG.proxy_pass = NULL;
NETWORK_CONFIG.max_conns = DEFAULT_NETWORK_MAX_CONNS;
NETWORK_CONFIG.user_agent = "HTTPDirFS";
NETWORK_CONFIG.cache_enabled = 0;
NETWORK_CONFIG.cache_dir = NULL;
}
LinkTable *network_init(const char *url)
@@ -288,6 +292,15 @@ LinkTable *network_init(const char *url)
ROOT_LINK_OFFSET += 1;
}
/* ----------- Enable cache system --------------------*/
if (NETWORK_CONFIG.cache_enabled) {
if (NETWORK_CONFIG.cache_dir) {
CacheSystem_init(NETWORK_CONFIG.cache_dir, 0);
} else {
CacheSystem_init(url, 1);
}
}
/* ----------- Create the root link table --------------*/
ROOT_LINK_TBL = LinkTable_new(url);
return ROOT_LINK_TBL;

View File

@@ -35,6 +35,8 @@ typedef struct {
long max_conns;
char *user_agent;
int http_429_wait;
char *cache_dir;
int cache_enabled;
} NetworkConfigStruct;
/** \brief The waiting time after getting HTTP 429 */

View File

@@ -22,7 +22,7 @@
* \details This function appends a path with the next level, while taking the
* trailing slash of the upper level into account.
*
* Please free the char * pointer after use.
* Please free the char * after use.
*/
char *path_append(const char *path, const char *filename);
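A minimal usage sketch for this helper, mirroring how cache.c calls it in this commit: the example path is illustrative, the header name is a placeholder for the file shown above, and the joined result is inferred from the description rather than verified against the implementation.

#include <stdlib.h>
#include "util.h"   /* placeholder name for the header containing path_append() */

int main(void)
{
    char *cache_root = path_append("/home/user/.cache", "/httpdirfs/");
    /* assumed result: "/home/user/.cache/httpdirfs/", with no doubled slash even
       if the first argument already ends in one */
    free(cache_root);   /* the returned buffer is heap-allocated */
    return 0;
}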