crypto: fix constant randomx exceptions in large page allocator

If allocating large pages fails, we now remember the failure and don't try again,
rather than hitting the same exception on every call. This has the obvious drawback
of forgoing large pages for the rest of the mining session after a single failure.
moneromooo-monero 2020-12-05 21:05:21 +00:00
parent 9aab19f349
commit 06e6c8bf4a
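
The fix is a sticky failure flag: attempt the large-page path once, and on failure remember it so every later call goes straight to the fallback instead of throwing again. A minimal self-contained sketch of the pattern in plain C (hypothetical names throughout — this is not the monero code, and try_alloc_large_pages stands in for whatever platform allocation actually fails):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sticky failure flag, playing the role of rx_dataset_nolp. */
    static int large_pages_failed;

    /* Hypothetical large-page allocator; stubbed to always fail so the
     * fallback path below is exercised. */
    static void *try_alloc_large_pages(size_t len)
    {
        (void)len;
        return NULL;
    }

    /* Try large pages once; after the first failure, skip the doomed
     * attempt and use ordinary pages on every later call. */
    static void *alloc_buffer(size_t len)
    {
        if (!large_pages_failed) {
            void *p = try_alloc_large_pages(len);
            if (p != NULL)
                return p;
            large_pages_failed = 1;   /* remember the failure, don't retry */
        }
        return malloc(len);           /* fall back to ordinary pages */
    }

    int main(void)
    {
        void *a = alloc_buffer(1 << 20);  /* tries large pages, falls back */
        void *b = alloc_buffer(1 << 20);  /* skips the large-page attempt */
        printf("failed=%d %p %p\n", large_pages_failed, a, b);
        free(a);
        free(b);
        return 0;
    }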

@@ -63,6 +63,7 @@ static rx_state rx_s[2] = {{CTHR_MUTEX_INIT,{0},0,0},{CTHR_MUTEX_INIT,{0},0,0}};
 static randomx_dataset *rx_dataset;
 static int rx_dataset_nomem;
+static int rx_dataset_nolp;
 static uint64_t rx_dataset_height;
 static THREADV randomx_vm *rx_vm = NULL;
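
Note that the new flag is an ordinary shared static, like rx_dataset_nomem, not THREADV like rx_vm: once any thread records a large-page failure, every thread skips the large-page attempt.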
@@ -316,10 +317,11 @@ void rx_slow_hash(const uint64_t mainheight, const uint64_t seedheight, const ch
       }
       CTHR_MUTEX_UNLOCK(rx_dataset_mutex);
     }
-    if (!(disabled_flags() & RANDOMX_FLAG_LARGE_PAGES)) {
+    if (!(disabled_flags() & RANDOMX_FLAG_LARGE_PAGES) && !rx_dataset_nolp) {
       rx_vm = randomx_create_vm(flags | RANDOMX_FLAG_LARGE_PAGES, rx_sp->rs_cache, rx_dataset);
       if(rx_vm == NULL) { //large pages failed
         mdebug(RX_LOGCAT, "Couldn't use largePages for RandomX VM");
+        rx_dataset_nolp = 1;
       }
     }
     if (rx_vm == NULL)
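
For context, the if (rx_vm == NULL) line closing the hunk begins the pre-existing fallback that creates the VM without RANDOMX_FLAG_LARGE_PAGES. A condensed view of the control flow after this patch (the fallback call is inferred from that trailing context line; the real function may differ in detail):

    if (!(disabled_flags() & RANDOMX_FLAG_LARGE_PAGES) && !rx_dataset_nolp) {
        /* First choice: VM backed by large pages. */
        rx_vm = randomx_create_vm(flags | RANDOMX_FLAG_LARGE_PAGES, rx_sp->rs_cache, rx_dataset);
        if (rx_vm == NULL)
            rx_dataset_nolp = 1;   /* sticky: skip large pages from now on */
    }
    if (rx_vm == NULL)             /* fallback: ordinary pages */
        rx_vm = randomx_create_vm(flags, rx_sp->rs_cache, rx_dataset);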
@@ -370,5 +372,6 @@ void rx_stop_mining(void) {
     randomx_release_dataset(rd);
   }
   rx_dataset_nomem = 0;
+  rx_dataset_nolp = 0;
   CTHR_MUTEX_UNLOCK(rx_dataset_mutex);
 }
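
Clearing rx_dataset_nolp alongside rx_dataset_nomem ties the sticky flag to the mining session: after rx_stop_mining, the next call into rx_slow_hash is free to attempt large pages again rather than being locked out for the life of the process.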