Skip to content
This repository has been archived by the owner on Dec 26, 2022. It is now read-only.

Commit

Permalink
feat(db): Implement importer and listener for permanode
Browse files Browse the repository at this point in the history
The listener subscribes newly confirmed transactions from IRI and adds inserting tasks into the task queue of thread pool.

The importer reads confirmed transactions from historical transaction files dumped from IRI and adds inserting tasks into the task queue of thread pool.

Each worker in the thread pool establishes a session connected to ScyllaDB cluster and takes inserting tasks.
  • Loading branch information
YingHan-Chen committed May 21, 2020
1 parent 87d7459 commit 7475196
Show file tree
Hide file tree
Showing 8 changed files with 714 additions and 4 deletions.
8 changes: 7 additions & 1 deletion common/ta_errors.h
Original file line number Diff line number Diff line change
Expand Up @@ -250,7 +250,13 @@ typedef enum {
SC_STORAGE_INVALID_INPUT = 0x03 | SC_MODULE_STORAGE | SC_SEVERITY_MAJOR,
/**< Invalid input parameter, e.g., null pointer */
SC_STORAGE_CASSANDRA_QUERY_FAIL = 0x04 | SC_MODULE_STORAGE | SC_SEVERITY_MAJOR,
/**< Failed to execute Cassandra query */
/**< Failed to execute Cassandra query */
SC_STORAGE_SYNC_ERROR = 0x05 | SC_MODULE_STORAGE | SC_SEVERITY_MAJOR,
/**< Failed to synchronize latest confirmed transactions from IRI */
SC_STORAGE_THPOOL_ADD_REQUEST_FAIL = 0x06 | SC_MODULE_STORAGE | SC_SEVERITY_MAJOR,
/**< Failed to add requests to permanode thread pool. (request queue full) */
SC_STORAGE_PTHREAD_ERROR = 0x07 | SC_MODULE_STORAGE | SC_SEVERITY_MAJOR,
/**< Failed when calling pthread library */

// Core module
SC_CORE_OOM = 0x01 | SC_MODULE_CORE | SC_SEVERITY_FATAL,
Expand Down
32 changes: 31 additions & 1 deletion docs/permanode.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,34 @@ The ScyllaDB backend in `tangle-accelerator` supports the following APIs:
- find_transactions_approvees
- get_inclusion_status

See [docs/build.md] for more information about enabling the external storage.
Read [docs/build.md] for more information about enabling the external storage.

## Listener

The listener subscribes to newly confirmed transactions from IRI and adds insertion tasks to the task queue of the thread pool.

Here are configurations and CLI options you need to specify:

* `--iri_host`: Listening IRI host for ZMQ events and querying trytes.
* `--db_host`: Connecting ScyllaDB host name.
* `--thread_num`: Number of worker threads in the thread pool that handle received transactions.

Build command:

`bazel build //storage:scylladb_listener`

## Importer

The importer reads confirmed transactions from historical transaction files dumped from IRI and adds insertion tasks to the task queue of the thread pool.

The historical transaction files must consist of lines with the format: `TRANSACTION_HASH,TRYTES,SNAPSHOT_INDEX`

Here are configurations and CLI options you need to specify:

* `--db_host`: Connecting ScyllaDB host name.
* `--file`: A file consisting of historical transaction file paths, one path per line.
* `--thread_num`: Number of worker threads in the thread pool that handle received transactions.

Build command:

`bazel build //storage:scylladb_importer`
32 changes: 32 additions & 0 deletions storage/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,28 @@ cc_library(
deps = [
":scylladb_identity",
":scylladb_permanode",
":scylladb_permanode_thpool",
],
)

# Standalone CLI binary that replays historical transaction dump files
# into a ScyllaDB cluster through the shared :storage library.
cc_binary(
    name = "scylladb_importer",
    srcs = ["scylladb_importer.c"],
    deps = [
        ":storage",
    ],
)

# Standalone CLI binary that subscribes to IRI ZMQ events and inserts newly
# confirmed transactions into ScyllaDB; needs libzmq at link time.
cc_binary(
    name = "scylladb_listener",
    srcs = ["scylladb_listener.c"],
    linkopts = [
        "-lzmq",  # ZMQ subscription to IRI events
    ],
    deps = [
        ":storage",
        "//accelerator:ta_config",
        "@entangled//cclient/api",
    ],
)

Expand Down Expand Up @@ -33,6 +55,16 @@ cc_library(
],
)

# Thread pool used by the importer/listener: workers hold ScyllaDB sessions
# and drain a shared queue of insertion tasks (pthread-based).
cc_library(
    name = "scylladb_permanode_thpool",
    srcs = ["scylladb_permanode_thpool.c"],
    hdrs = ["scylladb_permanode_thpool.h"],
    linkopts = ["-lpthread"],
    deps = [
        ":scylladb_permanode",
    ],
)

cc_library(
name = "scylladb_permanode",
srcs = ["scylladb_permanode.c"],
Expand Down
165 changes: 165 additions & 0 deletions storage/scylladb_importer.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
/*
* Copyright (C) 2020 BiiLabs Co., Ltd. and Contributors
* All Rights Reserved.
* This is free software; you can redistribute it and/or modify it under the
* terms of the MIT license. A copy of the license can be found in the file
* "LICENSE" at the root of this distribution.
*/

#include <getopt.h>
#include "ta_storage.h"

#define logger_id scylladb_logger_id

/** Context handed to the importer thread. */
typedef struct {
  pthread_mutex_t thread_mutex; /**< Mutex used when waiting on the pool's `finish_request` condition */
  db_permanode_pool_t* pool;    /**< Shared worker pool whose task queue receives the insertion requests */
  char* file_path;              /**< Path to the file listing historical dump files (owned `strdup` copy) */
} db_importer_thread_t;

/**
 * @brief Initialize the importer thread context.
 *
 * Takes an owned copy of @p file_list; the caller is responsible for freeing
 * `thread_data->file_path` once the importer thread has finished.
 *
 * @param[out] thread_data Context to initialize
 * @param[in] pool Shared permanode worker pool (stored, not copied)
 * @param[in] file_list Path to the file listing historical dump files
 * @return #SC_OK on success, #SC_STORAGE_INVALID_INPUT on NULL arguments or
 *         allocation failure, #SC_STORAGE_PTHREAD_ERROR on mutex failure
 */
static status_t init_importer_data(db_importer_thread_t* thread_data, db_permanode_pool_t* pool, char* file_list) {
  if (thread_data == NULL || file_list == NULL) {
    return SC_STORAGE_INVALID_INPUT;
  }
  if (pthread_mutex_init(&thread_data->thread_mutex, NULL) != 0) {
    ta_log_error("Failed to initialize importer mutex\n");
    return SC_STORAGE_PTHREAD_ERROR;
  }
  thread_data->pool = pool;
  thread_data->file_path = strdup(file_list);
  if (thread_data->file_path == NULL) {
    // NOTE(review): strdup failure is out-of-memory; no storage OOM code is
    // visible in ta_errors.h, so the closest available code is used here.
    pthread_mutex_destroy(&thread_data->thread_mutex);
    return SC_STORAGE_INVALID_INPUT;
  }
  return SC_OK;
}

/**
 * @brief Importer thread entry: read historical dump files and enqueue insertions.
 *
 * Opens the list file at `thread_data->file_path`; each line names one
 * historical dump file. Every line of a dump file must hold one record of the
 * form `TRANSACTION_HASH,TRYTES,SNAPSHOT_INDEX` terminated by '\n'. Records
 * are pushed onto the permanode pool queue; when the queue is full the thread
 * blocks on the pool's `finish_request` condition and retries.
 *
 * @param[in] data Pointer to an initialized #db_importer_thread_t
 * @return Always NULL; success or failure is reported through the logger.
 */
static void* importer_handler(void* data) {
#define TRANSACTION_BUFFER_SIZE \
  (NUM_FLEX_TRITS_HASH + 1 + NUM_TRYTES_SERIALIZED_TRANSACTION + 12)  // 12 is for snapshot_index
#define MAX_FILE_PATH 256

  status_t ret = SC_OK;
  db_importer_thread_t* thread_data = (db_importer_thread_t*)data;
  FILE* list_file = NULL;
  char file_name_buffer[MAX_FILE_PATH];

  // NOTE(review): this thread's private mutex is paired with the pool-owned
  // condition variable `finish_request`. POSIX requires all waiters and
  // signalers of a condition variable to use the same mutex — confirm the
  // signaling side in scylladb_permanode_thpool.c.
  pthread_mutex_lock(&thread_data->thread_mutex);

  if ((list_file = fopen(thread_data->file_path, "r")) == NULL) {
    /* The specified list file does not exist or cannot be read */
    ret = SC_CONF_FOPEN_ERROR;
    ta_log_error("Failed to open file %s\n", thread_data->file_path);
    goto exit;
  }

  while (fgets(file_name_buffer, MAX_FILE_PATH, list_file) != NULL) {
    char input_buffer[TRANSACTION_BUFFER_SIZE];
    FILE* file = NULL;

    // Strip the trailing newline only when present, so a path that exactly
    // fills the buffer is not silently truncated by one character.
    size_t name_len = strlen(file_name_buffer);
    if (name_len > 0 && file_name_buffer[name_len - 1] == '\n') {
      file_name_buffer[--name_len] = 0;
    }
    if (name_len == 0) {
      ta_log_warning("Empty file name\n");
      continue;
    }

    if ((file = fopen(file_name_buffer, "r")) == NULL) {
      /* The listed dump file does not exist or cannot be read */
      ret = SC_CONF_FOPEN_ERROR;
      ta_log_error("Failed to open file %s\n", file_name_buffer);
      goto exit;
    }
    ta_log_info("%s %s\n", "starting to import file : ", file_name_buffer);
    int cnt = 1;
    int cnt_base1000 = 0;
    while (fgets(input_buffer, TRANSACTION_BUFFER_SIZE, file) != NULL) {
      if (cnt % 1000 == 0) {
        // Progress report roughly every 1000 records
        ta_log_info("Import %d K transactions\n", ++cnt_base1000);
        cnt = 0;
      }
      if (input_buffer[strlen(input_buffer) - 1] != '\n') {
        // Record longer than the buffer: report it and drain the rest of the
        // line so its remainder is not misread as the next record.
        ret = SC_STORAGE_INVALID_INPUT;
        ta_log_error("%s\n", "Historical dump file format error");
        int ch;
        while ((ch = fgetc(file)) != EOF && ch != '\n') {
        }
        continue;
      }

      // Retry until the request fits in the pool queue; workers signal
      // `finish_request` each time a task completes.
      do {
        ret = db_permanode_thpool_add((tryte_t*)input_buffer, (tryte_t*)input_buffer + NUM_FLEX_TRITS_HASH + 1,
                                      thread_data->pool);
        if (ret != SC_OK) {
          pthread_cond_wait(&thread_data->pool->finish_request, &thread_data->thread_mutex);
        }
      } while (ret != SC_OK);

      cnt++;
    }
    fclose(file);  // previously leaked: one stream per listed dump file

    ta_log_info("Successfully import file : %s\n", file_name_buffer);
  }

exit:
  if (list_file != NULL) {
    fclose(list_file);  // previously leaked
  }
  pthread_mutex_unlock(&thread_data->thread_mutex);
  if (ret == SC_OK) {
    ta_log_info("%s %s\n", "Successfully import file : ", thread_data->file_path);
  } else {
    ta_log_error("Failed to import file : %s\n", thread_data->file_path);
  }
  return NULL;
}

int main(int argc, char* argv[]) {
int thread_num = 1;
pthread_t* worker_threads; /* thread's structures */
pthread_t importer_thread;
db_worker_thread_t* worker_data;
db_importer_thread_t importer_data;
db_permanode_pool_t pool;

char* db_host = "localhost";
char* file_path = NULL;
const struct option longOpt[] = {{"db_host", required_argument, NULL, 's'},
{"file", required_argument, NULL, 'f'},
{"thread_num", required_argument, NULL, 't'},
{NULL, 0, NULL, 0}};
/* Parse the command line options */
while (1) {
int cmdOpt;
int optIdx;
cmdOpt = getopt_long(argc, argv, "sft:", longOpt, &optIdx);
if (cmdOpt == -1) break;

/* Invalid option */
if (cmdOpt == '?') break;

if (cmdOpt == 's') {
db_host = optarg;
}
if (cmdOpt == 'f') {
file_path = optarg;
}
if (cmdOpt == 't') {
thread_num = atoi(optarg);
}
}
if (file_path == NULL) {
ta_log_error("No specified import file list\n");
return EXIT_FAILURE;
}
if (ta_logger_init() != SC_OK) {
ta_log_error("Failed to init logger\n");
return EXIT_FAILURE;
}
scylladb_logger_init();
worker_threads = malloc(thread_num * sizeof(pthread_t));
worker_data = malloc(thread_num * sizeof(db_worker_thread_t));

db_permanode_thpool_init(&pool);
/* create the request-handling threads */
for (int i = 0; i < thread_num; i++) {
db_permanode_thpool_init_worker(worker_data + i, &pool, db_host);
pthread_create(&worker_threads[i], NULL, db_permanode_worker_handler, (void*)&worker_data[i]);
}
init_importer_data(&importer_data, &pool, file_path);
pthread_create(&importer_thread, NULL, (void*)importer_handler, (void*)&importer_data);

pthread_join(importer_thread, NULL);

db_permanode_tpool_wait(&pool);
free(worker_data);
free(worker_threads);

scylladb_logger_release();

return 0;
}
Loading

0 comments on commit 7475196

Please sign in to comment.