Mirror of https://github.com/lnbits/lnbits-legend.git, synced 2025-02-25 07:07:48 +01:00
Added hashrate endpoint
This commit is contained in:
parent 1280d3ba74, commit d67efc320d

3 changed files with 130 additions and 44 deletions
@@ -1,7 +1,73 @@

import httpx
import textwrap

from .number_prefixer import *


# A helper function to get a nicely formatted dict for the text
def get_text_item_dict(text: str, font_size: int, x_pos: int = None, y_pos: int = None):
    # Get line width by font size
    line_width = 60
    if font_size <= 12:
        line_width = 75
    elif font_size <= 15:
        line_width = 58
    elif font_size <= 20:
        line_width = 40
    elif font_size <= 40:
        line_width = 30
    else:
        line_width = 20

    # wrap the text
    wrapper = textwrap.TextWrapper(width=line_width)
    word_list = wrapper.wrap(text=text)
    # logger.debug("number of chars = {0}".format(len(text)))

    multilineText = '\n'.join(word_list)
    # logger.debug("number of lines = {0}".format(len(word_list)))

    # logger.debug('multilineText')
    # logger.debug(multilineText)

    text = {
        "value": multilineText,
        "size": font_size
    }
    if x_pos is None and y_pos is None:
        text['position'] = 'center'
    else:
        text['x'] = x_pos
        text['y'] = y_pos
    return text
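For reference, a minimal sketch of what this helper returns; the outputs below follow directly from the code above ("Current Hashrate" is short enough to stay on one wrapped line):

    item = get_text_item_dict("Current Hashrate", 20)
    # -> {'value': 'Current Hashrate', 'size': 20, 'position': 'center'}

    item = get_text_item_dict("Current Hashrate", 20, x_pos=10, y_pos=40)
    # -> {'value': 'Current Hashrate', 'size': 20, 'x': 10, 'y': 40}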

# format a number for nice display output
def format_number(number):
    return "{:,}".format(round(number))


async def get_mempool_recommended_fees(gerty):
    if isinstance(gerty.mempool_endpoint, str):
        async with httpx.AsyncClient() as client:
            r = await client.get(gerty.mempool_endpoint + "/api/v1/fees/recommended")
            return r.json()
    return None  # mempool_endpoint not configured
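For context, mempool.space's /api/v1/fees/recommended endpoint answers with a small object of fee-rate estimates in sat/vB, so r.json() yields something like (values illustrative, not live):

    {'fastestFee': 12, 'halfHourFee': 8, 'hourFee': 5, 'economyFee': 2, 'minimumFee': 1}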
async def api_get_mining_stat(stat_slug: str, gerty):
    stat = ""
    if isinstance(gerty.mempool_endpoint, str):
        async with httpx.AsyncClient() as client:
            if stat_slug == "mining_current_hash_rate":
                r = await client.get(gerty.mempool_endpoint + "/api/v1/mining/hashrate/3d")
                data = r.json()
                stat = data['currentHashrate']
    return stat


async def get_mining_stat(stat_slug: str, gerty):
    text = []
    if stat_slug == "mining_current_hash_rate":
        stat = await api_get_mining_stat(stat_slug, gerty)
        stat = "{0}hash".format(si_format(stat, 6, True, " "))
        text.append(get_text_item_dict("Current Hashrate", 20))
        text.append(get_text_item_dict(stat, 40))
    return text
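Putting the pieces together, a sketch of the screen payload this produces, assuming gerty is a configured Gerty settings object and the API reports roughly 235 EH/s:

    text = await get_mining_stat("mining_current_hash_rate", gerty)
    # -> [{'value': 'Current Hashrate', 'size': 20, 'position': 'center'},
    #     {'value': '235.000 exahash', 'size': 40, 'position': 'center'}]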
lnbits/extensions/gerty/number_prefixer.py (new file, 62 lines)
@@ -0,0 +1,62 @@

import math


def si_classifier(val):
    suffixes = {
        24: {'long_suffix': 'yotta', 'short_suffix': 'Y', 'scalar': 10**24},
        21: {'long_suffix': 'zetta', 'short_suffix': 'Z', 'scalar': 10**21},
        18: {'long_suffix': 'exa', 'short_suffix': 'E', 'scalar': 10**18},
        15: {'long_suffix': 'peta', 'short_suffix': 'P', 'scalar': 10**15},
        12: {'long_suffix': 'tera', 'short_suffix': 'T', 'scalar': 10**12},
        9: {'long_suffix': 'giga', 'short_suffix': 'G', 'scalar': 10**9},
        6: {'long_suffix': 'mega', 'short_suffix': 'M', 'scalar': 10**6},
        3: {'long_suffix': 'kilo', 'short_suffix': 'k', 'scalar': 10**3},
        0: {'long_suffix': '', 'short_suffix': '', 'scalar': 10**0},
        -3: {'long_suffix': 'milli', 'short_suffix': 'm', 'scalar': 10**-3},
        -6: {'long_suffix': 'micro', 'short_suffix': 'µ', 'scalar': 10**-6},
        -9: {'long_suffix': 'nano', 'short_suffix': 'n', 'scalar': 10**-9},
        -12: {'long_suffix': 'pico', 'short_suffix': 'p', 'scalar': 10**-12},
        -15: {'long_suffix': 'femto', 'short_suffix': 'f', 'scalar': 10**-15},
        -18: {'long_suffix': 'atto', 'short_suffix': 'a', 'scalar': 10**-18},
        -21: {'long_suffix': 'zepto', 'short_suffix': 'z', 'scalar': 10**-21},
        -24: {'long_suffix': 'yocto', 'short_suffix': 'y', 'scalar': 10**-24}
    }
    # round the exponent down to the nearest multiple of 3 (val must be non-zero)
    exponent = int(math.floor(math.log10(abs(val)) / 3.0) * 3)
    return suffixes.get(exponent, None)


def si_formatter(value):
    '''
    Return a triple of scaled value, short suffix, long suffix, or None if
    the value cannot be classified.
    '''
    classifier = si_classifier(value)
    if classifier is None:
        # Don't know how to classify this value
        return None

    scaled = value / classifier['scalar']
    return (scaled, classifier['short_suffix'], classifier['long_suffix'])


def si_format(value, precision=4, long_form=False, separator=''):
    '''
    "SI prefix" formatted string: return a string with the given precision
    and an appropriate order-of-3-magnitudes suffix, e.g.:
        si_format(1001.0) => '1.001k'
        si_format(0.00000000123, long_form=True, separator=' ') => '1.230 nano'
    '''
    scaled_triple = si_formatter(value)

    if scaled_triple is None:
        # Don't know how to format this value
        return value

    scaled, short_suffix, long_suffix = scaled_triple
    suffix = long_suffix if long_form else short_suffix

    # keep the total number of significant digits constant
    if abs(scaled) < 10:
        precision = precision - 1
    elif abs(scaled) < 100:
        precision = precision - 2
    else:
        precision = precision - 3

    return '{scaled:.{precision}f}{separator}{suffix}'.format(
        scaled=scaled, precision=precision, separator=separator, suffix=suffix)
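A few illustrative calls, computed from the functions above:

    si_classifier(1500)  # -> {'long_suffix': 'kilo', 'short_suffix': 'k', 'scalar': 1000}
    si_format(1500)      # -> '1.500k'
    si_format(0.00000000123, long_form=True, separator=' ')  # -> '1.230 nano'

Note that si_classifier calls math.log10(abs(val)), so passing zero raises a ValueError; callers are expected to supply non-zero values.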

@@ -1,7 +1,6 @@

import math
from http import HTTPStatus
import json
import textwrap
import httpx
import random
import os

@@ -196,7 +195,7 @@ async def get_screen_text(screen_num: int, screens_list: dict, gerty):

    elif screen_slug == "mempool_tx_count":
        areas.append(await get_mempool_stat(screen_slug, gerty))
    elif screen_slug == "mining_current_hash_rate":
        areas.append(await get_placeholder_text())
        areas.append(await get_mining_stat(screen_slug, gerty))
    elif screen_slug == "mining_current_difficulty":
        areas.append(await get_placeholder_text())
    elif screen_slug == "lightning_channel_count":
@@ -309,42 +308,7 @@ async def get_exchange_rate(gerty):

    return text


# A helper function to get a nicely formatted dict for the text
def get_text_item_dict(text: str, font_size: int, x_pos: int = None, y_pos: int = None):
    # Get line width by font size
    line_width = 60
    if font_size <= 12:
        line_width = 75
    elif font_size <= 15:
        line_width = 58
    elif font_size <= 20:
        line_width = 40
    elif font_size <= 40:
        line_width = 30
    else:
        line_width = 20

    # wrap the text
    wrapper = textwrap.TextWrapper(width=line_width)
    word_list = wrapper.wrap(text=text)
    # logger.debug("number of chars = {0}".format(len(text)))

    multilineText = '\n'.join(word_list)
    # logger.debug("number of lines = {0}".format(len(word_list)))

    # logger.debug('multilineText')
    # logger.debug(multilineText)

    text = {
        "value": multilineText,
        "size": font_size
    }
    if x_pos is None and y_pos is None:
        text['position'] = 'center'
    else:
        text['x'] = x_pos
        text['y'] = y_pos
    return text


async def get_onchain_stat(stat_slug: str, gerty):
@@ -393,7 +357,6 @@ async def get_block_height(gerty):

            return r.json()


async def get_mempool_stat(stat_slug: str, gerty):
    text = []
    if isinstance(gerty.mempool_endpoint, str):
@@ -452,11 +415,6 @@ def get_date_suffix(dayNumber):

    else:
        return ["st", "nd", "rd"][dayNumber % 10 - 1]

# format a number for nice display output
def format_number(number):
    return "{:,}".format(round(number))


def get_time_remaining(seconds, granularity=2):

    intervals = (
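From the visible else branch, the suffix lookup behaves like this (a sketch; the elided condition above it presumably routes the 'th' days such as 4 or 11 elsewhere):

    get_date_suffix(1)   # -> 'st'
    get_date_suffix(22)  # -> 'nd'
    get_date_suffix(23)  # -> 'rd'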