This repository has been archived by the owner on Jul 2, 2021. It is now read-only.

Miscellaneous fixes #333

Merged: 7 commits, May 17, 2017

43 changes: 28 additions & 15 deletions src/crypto.c
@@ -92,40 +92,53 @@ int double_ripemd160sha256_as_string(uint8_t *data, uint64_t data_size,
 int generate_bucket_key(const char *mnemonic, const char *bucket_id,
                         char **bucket_key)
 {
+    int status = 0;
     char *seed = calloc(128 + 1, sizeof(char));
     if (!seed) {
-        return 1;
+        status = 1;
+        goto cleanup;
     }
     mnemonic_to_seed(mnemonic, "", &seed);
     seed[128] = '\0';
-    if (get_deterministic_key(seed, 128, bucket_id, bucket_key)) {
-        return 1;
-    }
 
-    memset_zero(seed, 128 + 1);
-    free(seed);
+    status = get_deterministic_key(seed, 128, bucket_id, bucket_key);
 
-    return 0;
+cleanup:
+
+    if (seed) {
+        memset_zero(seed, 128 + 1);
+        free(seed);
+    }
+
+    return status;
 }
 
 int generate_file_key(const char *mnemonic, const char *bucket_id,
                       const char *index, char **file_key)
 {
+    int status = 0;
     char *bucket_key = calloc(DETERMINISTIC_KEY_SIZE + 1, sizeof(char));
     if (!bucket_key) {
-        return 1;
+        status = 1;
+        goto cleanup;
     }
-    if (generate_bucket_key(mnemonic, bucket_id, &bucket_key)) {
-        return 1;
+
+    status = generate_bucket_key(mnemonic, bucket_id, &bucket_key);
+    if (status) {
+        goto cleanup;
     }
     bucket_key[DETERMINISTIC_KEY_SIZE] = '\0';
 
     get_deterministic_key(bucket_key, 64, index, file_key);
 
-    memset_zero(bucket_key, DETERMINISTIC_KEY_SIZE + 1);
-    free(bucket_key);
+cleanup:
 
-    return 0;
+    if (bucket_key) {
+        memset_zero(bucket_key, DETERMINISTIC_KEY_SIZE + 1);
+        free(bucket_key);
+    }
+
+    return status;
 }
 
 int get_deterministic_key(const char *key, int key_len,
@@ -145,7 +158,7 @@ int get_deterministic_key(const char *key, int key_len,
     // Convert input to hexdata
     uint8_t *sha512input_as_hex = str2hex(input_len, sha512input);
     if (!sha512input_as_hex) {
-        return 1;
+        return 2;
     }
 
     // Sha512 of hexdata
@@ -155,7 +168,7 @@ int get_deterministic_key(const char *key, int key_len,
     // Convert Sha512 hex to character array
     char *sha512_str = hex2str(SHA512_DIGEST_SIZE, sha512_digest);
     if (!sha512_str) {
-        return 1;
+        return 2;
    }
 
     //First 64 bytes of sha512
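With these crypto.c changes, generate_bucket_key() and generate_file_key() release their temporary buffers through a single cleanup label and propagate the return value of get_deterministic_key(), which now returns 2 for a failed hex conversion instead of the generic 1 used for allocation failures. The sketch below is not part of the diff; it only illustrates how a caller might act on those codes. The make_file_key() wrapper and the pre-allocated output buffer are assumptions on my part, not library API.

/* Hypothetical caller sketch, not part of this PR. Assumes the declarations
 * from src/crypto.h and the return-code convention introduced above:
 * 0 = success, 1 = allocation failure, 2 = hex-decode failure. */
#include <stdio.h>
#include <stdlib.h>

int generate_file_key(const char *mnemonic, const char *bucket_id,
                      const char *index, char **file_key);

static int make_file_key(const char *mnemonic, const char *bucket_id,
                         const char *index, char **file_key)
{
    /* Assumption: the caller pre-allocates the output buffer, mirroring how
     * generate_file_key() itself allocates bucket_key before handing it to
     * generate_bucket_key(). 64 stands in for DETERMINISTIC_KEY_SIZE. */
    *file_key = calloc(64 + 1, sizeof(char));
    if (!*file_key) {
        return 1;
    }

    int status = generate_file_key(mnemonic, bucket_id, index, file_key);
    if (status == 2) {
        fprintf(stderr, "hex decode failed\n");   /* maps to STORJ_HEX_DECODE_ERROR */
    } else if (status != 0) {
        fprintf(stderr, "allocation failed\n");   /* maps to STORJ_MEMORY_ERROR */
    }

    if (status != 0) {
        free(*file_key);
        *file_key = NULL;
    }
    return status;
}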
52 changes: 40 additions & 12 deletions src/downloader.c
@@ -271,7 +271,6 @@ static void request_replace_pointer(uv_work_t *work)
                           "Request replace pointer error: %i", request_status);
     }
 
-
     req->status_code = status_code;
 
     if (!req->response) {
@@ -454,6 +453,9 @@ static void append_pointers_to_state(storj_download_state_t *state,
     int length = json_object_array_length(res);
 
     if (length == 0) {
+        state->log->debug(state->env->log_options,
+                          state->handle,
+                          "Finished requesting pointers");
         state->pointers_completed = true;
     } else if (length > 0) {
 
@@ -496,33 +498,42 @@ static void append_pointers_to_state(storj_download_state_t *state,
 static void after_request_pointers(uv_work_t *work, int status)
 {
     json_request_download_t *req = work->data;
+    storj_download_state_t *state = req->state;
 
-    req->state->pending_work_count--;
-    req->state->requesting_pointers = false;
+    state->pending_work_count--;
+    state->requesting_pointers = false;
 
+    state->log->debug(state->env->log_options, state->handle,
+                      "Finished request pointers - JSON Response %s",
+                      json_object_to_json_string(req->response));
+
     free_bucket_token(req->state);
 
     if (status != 0) {
-        req->state->error_status = STORJ_BRIDGE_POINTER_ERROR;
+        state->error_status = STORJ_BRIDGE_POINTER_ERROR;
     } else if (req->status_code != 200) {
         if (req->status_code > 0 && req->status_code < 500) {
-            req->state->error_status = STORJ_BRIDGE_POINTER_ERROR;
+            state->error_status = STORJ_BRIDGE_POINTER_ERROR;
         } else {
-            req->state->pointer_fail_count += 1;
+            state->pointer_fail_count += 1;
         }
 
-        if (req->state->pointer_fail_count >= STORJ_MAX_POINTER_TRIES) {
-            req->state->pointer_fail_count = 0;
-            req->state->error_status = STORJ_BRIDGE_POINTER_ERROR;
+        state->log->debug(state->env->log_options, state->handle,
+                          "Request pointers fail count: %i",
+                          state->pointer_fail_count);
+
+        if (state->pointer_fail_count >= STORJ_MAX_POINTER_TRIES) {
+            state->pointer_fail_count = 0;
+            state->error_status = STORJ_BRIDGE_POINTER_ERROR;
         }
 
     } else if (!json_object_is_type(req->response, json_type_array)) {
-        req->state->error_status = STORJ_BRIDGE_JSON_ERROR;
+        state->error_status = STORJ_BRIDGE_JSON_ERROR;
     } else {
-        append_pointers_to_state(req->state, req->response);
+        append_pointers_to_state(state, req->response);
     }
 
-    queue_next_work(req->state);
+    queue_next_work(state);
 
     json_object_put(req->response);
     free(req->path);
@@ -538,6 +549,11 @@ static void after_request_replace_pointer(uv_work_t *work, int status)
     state->pending_work_count--;
     state->requesting_pointers = false;
 
+    state->log->debug(state->env->log_options, state->handle,
+                      "Finished request replace pointer %i - JSON Response: %s",
+                      req->pointer_index,
+                      json_object_to_json_string(req->response));
+
     free_bucket_token(req->state);
 
     if (status != 0) {
@@ -549,10 +565,17 @@
         if (req->status_code > 0 && req->status_code < 500) {
             state->pointers[req->pointer_index].status = POINTER_MISSING;
         } else {
+            // Update status so that it will be retried
+            state->pointers[req->pointer_index].status = POINTER_ERROR_REPORTED;
             state->pointer_fail_count += 1;
         }
 
+        state->log->debug(state->env->log_options, state->handle,
+                          "Request replace pointer fail count: %i",
+                          state->pointer_fail_count);
+
         if (state->pointer_fail_count >= STORJ_MAX_POINTER_TRIES) {
+            // Skip retrying mark as missing
             state->pointer_fail_count = 0;
             state->pointers[req->pointer_index].status = POINTER_MISSING;
         }
@@ -1772,6 +1795,11 @@ static void queue_recover_shards(storj_download_state_t *state)
         if (pointer->status != POINTER_MISSING &&
             pointer->status != POINTER_DOWNLOADED) {
             is_ready = false;
+            state->log->debug(state->env->log_options,
+                              state->handle,
+                              "Pointer %i not ready with status: %i\n",
+                              i, pointer->status);
+
         }
     }
 
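Most of the downloader changes are readability and observability: a local state alias replaces repeated req->state dereferences, and debug logging is added around pointer requests, pointer replacement, and shard recovery. The retry accounting itself is unchanged. The sketch below is generic C, not libstorj API; it only restates the bounded-retry pattern these callbacks use, with MAX_TRIES standing in for STORJ_MAX_POINTER_TRIES.

/* Generic sketch of the bounded-retry pattern used by the pointer callbacks
 * above; the names here are illustrative, not libstorj API. */
#include <stdbool.h>

#define MAX_TRIES 2   /* stands in for STORJ_MAX_POINTER_TRIES */

struct retry_state {
    int fail_count;
    bool failed;
};

static void record_attempt(struct retry_state *rs, int http_status)
{
    if (http_status == 200) {
        rs->fail_count = 0;               /* success: nothing to retry */
        return;
    }
    if (http_status > 0 && http_status < 500) {
        rs->failed = true;                /* client-side or permanent error: give up */
        return;
    }
    rs->fail_count += 1;                  /* transient error: count it and retry */
    if (rs->fail_count >= MAX_TRIES) {
        rs->fail_count = 0;
        rs->failed = true;                /* too many attempts: report failure */
    }
}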
2 changes: 1 addition & 1 deletion src/downloader.h
@@ -17,7 +17,7 @@
 #define STORJ_DOWNLOAD_WRITESYNC_CONCURRENCY 4
 #define STORJ_DEFAULT_MIRRORS 5
 #define STORJ_MAX_REPORT_TRIES 2
-#define STORJ_MAX_TOKEN_TRIES 3
+#define STORJ_MAX_TOKEN_TRIES 6
 #define STORJ_MAX_POINTER_TRIES 2
 #define STORJ_MAX_INFO_TRIES 6
 
2 changes: 2 additions & 0 deletions src/storj.c
@@ -1037,6 +1037,8 @@ char *storj_strerror(int error_code)
             return "Memory mapped file unmap error";
         case STORJ_QUEUE_ERROR:
             return "Queue error";
+        case STORJ_HEX_DECODE_ERROR:
+            return "Unable to decode hex string";
         case STORJ_TRANSFER_OK:
             return "No errors";
         default:
3 changes: 3 additions & 0 deletions src/storj.h
@@ -88,6 +88,9 @@ extern "C" {
 #define STORJ_META_ENCRYPTION_ERROR 6000
 #define STORJ_META_DECRYPTION_ERROR 6001
 
+// Miscellaneous errors
+#define STORJ_HEX_DECODE_ERROR 7000
+
 // Exchange report codes
 #define STORJ_REPORT_SUCCESS 1000
 #define STORJ_REPORT_FAILURE 1100
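The new STORJ_HEX_DECODE_ERROR code (7000) pairs with the storj_strerror() case added above in src/storj.c. A minimal usage sketch, assuming storj.h is on the include path:

#include <stdio.h>
#include "storj.h"

int main(void)
{
    /* Prints "Unable to decode hex string", per the new storj_strerror() case. */
    printf("%s\n", storj_strerror(STORJ_HEX_DECODE_ERROR));
    return 0;
}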
19 changes: 13 additions & 6 deletions src/uploader.c
@@ -2108,7 +2108,7 @@ static void verify_bucket_id_callback(uv_work_t *work_req, int status)
     if (req->status_code == 200) {
         state->bucket_verified = true;
         goto clean_variables;
-    } else if (req->status_code == 404) {
+    } else if (req->status_code == 404 || req->status_code == 400) {
         state->log->error(state->env->log_options, state->handle,
                           "Bucket [%s] doesn't exist", state->bucket_id);
         state->error_status = STORJ_BRIDGE_BUCKET_NOTFOUND_ERROR;
@@ -2528,11 +2528,18 @@ static void prepare_upload_state(uv_work_t *work)
         goto cleanup;
     }
 
-    if (generate_file_key(state->env->encrypt_options->mnemonic,
-                          state->bucket_id,
-                          index_as_str,
-                          &key_as_str)) {
-        state->error_status = STORJ_MEMORY_ERROR;
+    int key_status = generate_file_key(state->env->encrypt_options->mnemonic,
+                                       state->bucket_id,
+                                       index_as_str,
+                                       &key_as_str);
+    if (key_status) {
+        switch (key_status) {
+            case 2:
+                state->error_status = STORJ_HEX_DECODE_ERROR;
+                break;
+            default:
+                state->error_status = STORJ_MEMORY_ERROR;
+        }
         goto cleanup;
     }
 
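With this change, prepare_upload_state() inspects generate_file_key()'s return code instead of reporting every failure as STORJ_MEMORY_ERROR. The helper below is hypothetical and not part of the PR; it only isolates the mapping that the inline switch above performs.

/* Hypothetical helper, not part of this PR: maps the key-generation return
 * codes used above onto storj error statuses. */
#include "storj.h"

static int key_status_to_error(int key_status)
{
    switch (key_status) {
        case 0:
            return 0;                       /* success: nothing to report */
        case 2:
            return STORJ_HEX_DECODE_ERROR;  /* str2hex()/hex2str() failed */
        default:
            return STORJ_MEMORY_ERROR;      /* allocation failure */
    }
}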