Run `Black` code formatter
commit 960ab5b7b7 (parent 5c3acd8d37)
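The change below is mechanical reformatting only; no behavioral change is intended. Judging from the output (single-quoted strings are left untouched and lines wrap near 120 characters), Black was most likely run with a longer line length and string normalization disabled. The exact invocation is not recorded in the commit, so the flags below are an inference, not a fact from the source:

    # assumed invocation, reconstructed from the formatting visible in the diff
    black --line-length 120 --skip-string-normalization .

The same settings can equivalently live in pyproject.toml under [tool.black] as line-length = 120 and skip-string-normalization = true.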
@@ -63,9 +63,7 @@ def run_migrations_online():
     )

     with connectable.connect() as connection:
-        context.configure(
-            connection=connection, target_metadata=target_metadata
-        )
+        context.configure(connection=connection, target_metadata=target_metadata)

         with context.begin_transaction():
             context.run_migrations()
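A pattern that repeats through every file below: when a call no longer fits on one line, Black explodes it to one element per line and leaves a trailing comma (its "magic trailing comma"), which also keeps the call exploded on later runs. For example, from the hive/conf.py hunks below:

    # before
    add('--sync-to-s3', type=strtobool, env_var='SYNC_TO_S3', help='alternative healthcheck for background sync service', default=False)

    # after
    add(
        '--sync-to-s3',
        type=strtobool,
        env_var='SYNC_TO_S3',
        help='alternative healthcheck for background sync service',
        default=False,
    )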

hive/cli.py (65 lines changed)
@@ -16,12 +16,12 @@ def setup_logging(conf):
     timestamp = conf.get('log_timestamp')
     epoch = conf.get('log_epoch')
     if timestamp and epoch:
-        datefmt='%Y-%m-%d %H:%M:%S'
+        datefmt = '%Y-%m-%d %H:%M:%S'
         timezone = time.strftime('%z')
         fmt = f'%(asctime)s.%(msecs)03d{timezone} %(created).6f %(levelname)s - %(name)s - %(message)s'
         logging.basicConfig(format=fmt, datefmt=datefmt)
     elif timestamp:
-        datefmt='%Y-%m-%d %H:%M:%S'
+        datefmt = '%Y-%m-%d %H:%M:%S'
         timezone = time.strftime('%z')
         fmt = f'%(asctime)s.%(msecs)03d{timezone} %(levelname)s - %(name)s - %(message)s'
         logging.basicConfig(format=fmt, datefmt=datefmt)
@@ -36,58 +36,63 @@ def setup_logging(conf):
 def run():
     """Run the service specified in the `--mode` argument."""
     with Conf() as conf:
-      conf.init_argparse()
-      mode = conf.mode()
-      PrometheusClient( conf.get('prometheus_port') )
+        conf.init_argparse()
+        mode = conf.mode()
+        PrometheusClient(conf.get('prometheus_port'))

-      setup_logging(conf)
+        setup_logging(conf)

-      if mode == 'completion':
-        conf.generate_completion()
-        return
+        if mode == 'completion':
+            conf.generate_completion()
+            return

-      #Calculation of number of maximum connection and closing a database
-      #In next step the database will be opened with correct number of connections
-      Db.set_max_connections(conf.db())
-      conf.disconnect()
+        # Calculation of number of maximum connection and closing a database
+        # In next step the database will be opened with correct number of connections
+        Db.set_max_connections(conf.db())
+        conf.disconnect()

-      Db.set_shared_instance(conf.db())
+        Db.set_shared_instance(conf.db())

-      pid_file_name = conf.pid_file()
-      if pid_file_name is not None:
-        fh = open(pid_file_name, 'w')
-        if fh is None:
-          print("Cannot write into specified pid_file: %s", pid_file_name)
-        else:
-          pid = os.getpid()
-          fh.write(str(pid))
-          fh.close()
+        pid_file_name = conf.pid_file()
+        if pid_file_name is not None:
+            fh = open(pid_file_name, 'w')
+            if fh is None:
+                print("Cannot write into specified pid_file: %s", pid_file_name)
+            else:
+                pid = os.getpid()
+                fh.write(str(pid))
+                fh.close()

-      if conf.get('test_profile'):
-        from hive.utils.profiler import Profiler
-
-        with Profiler():
-          launch_mode(mode, conf)
-      else:
-        launch_mode(mode, conf)
+        if conf.get('test_profile'):
+            from hive.utils.profiler import Profiler
+            with Profiler():
+                launch_mode(mode, conf)
+        else:
+            launch_mode(mode, conf)


 def launch_mode(mode, conf):
     """Launch a routine as indicated by `mode`."""
     if mode == 'server':
         from hive.server.serve import run_server

         run_server(conf=conf)

     elif mode == 'sync':
         from hive.indexer.sync import Sync

         with Sync(conf=conf) as sync:
-          sync.run()
+            sync.run()

     elif mode == 'status':
         from hive.db.db_state import DbState

         print(DbState.status())

     else:
         raise Exception(f"unknown run mode {mode}")


 if __name__ == '__main__':
     run()

hive/conf.py (189 lines changed)
@@ -11,13 +11,15 @@ from hive.utils.stats import DbStats

 log = logging.getLogger(__name__)

+
 def _sanitized_conf(parser):
     """Formats parser config, redacting database url password."""
     out = parser.format_values()
     return re.sub(r'(?<=:)\w+(?=@)', '<redacted>', out)

-class Conf():
-    """ Manages sync/server configuration via args, ENVs, and hive.conf. """
+
+class Conf:
+    """Manages sync/server configuration via args, ENVs, and hive.conf."""

     def __init__(self):
         self._args = None
@@ -29,10 +31,8 @@ class Conf():
     def init_argparse(self, strict=True, **kwargs):
         """Read hive config (CLI arg > ENV var > config)"""

-        #pylint: disable=line-too-long
-        parser = configargparse.get_arg_parser(
-            default_config_files=['./hive.conf'],
-            **kwargs)
+        # pylint: disable=line-too-long
+        parser = configargparse.get_arg_parser(default_config_files=['./hive.conf'], **kwargs)
         add = parser.add

         # runmodes: sync, server, status
@@ -40,47 +40,143 @@ class Conf():

         # common
         add('--database-url', env_var='DATABASE_URL', required=False, help='database connection url', default='')
-        add('--steemd-url', env_var='STEEMD_URL', required=False, help='steemd/jussi endpoint', default='{"default" : "https://api.hive.blog"}')
-        add('--muted-accounts-url', env_var='MUTED_ACCOUNTS_URL', required=False, help='url to flat list of muted accounts', default='https://raw.githubusercontent.com/hivevectordefense/irredeemables/master/full.txt')
-        add('--blacklist-api-url', env_var='BLACKLIST_API_URL', required=False, help='url to access blacklist api', default='https://blacklist.usehive.com')
+        add(
+            '--steemd-url',
+            env_var='STEEMD_URL',
+            required=False,
+            help='steemd/jussi endpoint',
+            default='{"default" : "https://api.hive.blog"}',
+        )
+        add(
+            '--muted-accounts-url',
+            env_var='MUTED_ACCOUNTS_URL',
+            required=False,
+            help='url to flat list of muted accounts',
+            default='https://raw.githubusercontent.com/hivevectordefense/irredeemables/master/full.txt',
+        )
+        add(
+            '--blacklist-api-url',
+            env_var='BLACKLIST_API_URL',
+            required=False,
+            help='url to access blacklist api',
+            default='https://blacklist.usehive.com',
+        )

         # server
         add('--http-server-port', type=int, env_var='HTTP_SERVER_PORT', default=8080)
-        add('--prometheus-port', type=int, env_var='PROMETHEUS_PORT', required=False, help='if specified, runs prometheus deamon on specified port, which provide statistic and performance data')
+        add(
+            '--prometheus-port',
+            type=int,
+            env_var='PROMETHEUS_PORT',
+            required=False,
+            help='if specified, runs prometheus deamon on specified port, which provide statistic and performance data',
+        )

         # sync
         add('--max-workers', type=int, env_var='MAX_WORKERS', help='max workers for batch requests', default=6)
         add('--max-batch', type=int, env_var='MAX_BATCH', help='max chunk size for batch requests', default=35)
-        add('--max-retries', type=int, env_var='MAX_RETRIES', help='max number of retries after request failure is accepted; default -1 means no limit', default=-1)
+        add(
+            '--max-retries',
+            type=int,
+            env_var='MAX_RETRIES',
+            help='max number of retries after request failure is accepted; default -1 means no limit',
+            default=-1,
+        )
         add('--trail-blocks', type=int, env_var='TRAIL_BLOCKS', help='number of blocks to trail head by', default=2)
-        add('--sync-to-s3', type=strtobool, env_var='SYNC_TO_S3', help='alternative healthcheck for background sync service', default=False)
-        add('--hived-database-url', env_var='HIVED_DATABASE_URL', required=False, help='Hived blocks database connection url', default='')
+        add(
+            '--sync-to-s3',
+            type=strtobool,
+            env_var='SYNC_TO_S3',
+            help='alternative healthcheck for background sync service',
+            default=False,
+        )
+        add(
+            '--hived-database-url',
+            env_var='HIVED_DATABASE_URL',
+            required=False,
+            help='Hived blocks database connection url',
+            default='',
+        )

         # test/debug
         add('--log-level', env_var='LOG_LEVEL', default='INFO')
-        add('--test-disable-sync', type=strtobool, env_var='TEST_DISABLE_SYNC', help='(debug) skip sync and sweep; jump to block streaming', default=False)
-        add('--test-max-block', type=int, env_var='TEST_MAX_BLOCK', help='(debug) only sync to given block, for running sync test', default=None)
-        add('--test-skip-ais-phase', env_var='TEST_SKIP_AIS_PHASE', help='(debug) Allows to skip After-Initial-Sync phase. Useful to go into live sync or exit if TEST_MAX_BLOCK is used', action='store_true')
+        add(
+            '--test-disable-sync',
+            type=strtobool,
+            env_var='TEST_DISABLE_SYNC',
+            help='(debug) skip sync and sweep; jump to block streaming',
+            default=False,
+        )
+        add(
+            '--test-max-block',
+            type=int,
+            env_var='TEST_MAX_BLOCK',
+            help='(debug) only sync to given block, for running sync test',
+            default=None,
+        )
+        add(
+            '--test-skip-ais-phase',
+            env_var='TEST_SKIP_AIS_PHASE',
+            help='(debug) Allows to skip After-Initial-Sync phase. Useful to go into live sync or exit if TEST_MAX_BLOCK is used',
+            action='store_true',
+        )
         add('--test-profile', type=strtobool, env_var='TEST_PROFILE', help='(debug) profile execution', default=False)
-        add('--log-request-times', env_var='LOG_REQUEST_TIMES', help='(debug) allows to generate log containing request processing times', action='store_true')
-        add('--log-virtual-op-calls', env_var='LOG_VIRTUAL_OP_CALLS', help='(debug) log virtual op calls and responses', default=False)
-        add('--mock-block-data-path', type=str, nargs='+', env_var='MOCK_BLOCK_DATA_PATH', help='(debug/testing) load additional data from block data file')
-        add('--mock-vops-data-path', type=str, env_var='MOCK_VOPS_DATA_PATH', help='(debug/testing) load additional data from virtual operations data file')
+        add(
+            '--log-request-times',
+            env_var='LOG_REQUEST_TIMES',
+            help='(debug) allows to generate log containing request processing times',
+            action='store_true',
+        )
+        add(
+            '--log-virtual-op-calls',
+            env_var='LOG_VIRTUAL_OP_CALLS',
+            help='(debug) log virtual op calls and responses',
+            default=False,
+        )
+        add(
+            '--mock-block-data-path',
+            type=str,
+            nargs='+',
+            env_var='MOCK_BLOCK_DATA_PATH',
+            help='(debug/testing) load additional data from block data file',
+        )
+        add(
+            '--mock-vops-data-path',
+            type=str,
+            env_var='MOCK_VOPS_DATA_PATH',
+            help='(debug/testing) load additional data from virtual operations data file',
+        )
         add('--community-start-block', type=int, env_var='COMMUNITY_START_BLOCK', default=37500000)
-        add('--log_explain_queries', type=strtobool, env_var='LOG_EXPLAIN_QUERIES', help='(debug) Adds to log output of EXPLAIN ANALYZE for specific queries - only for db super user', default=False)
+        add(
+            '--log_explain_queries',
+            type=strtobool,
+            env_var='LOG_EXPLAIN_QUERIES',
+            help='(debug) Adds to log output of EXPLAIN ANALYZE for specific queries - only for db super user',
+            default=False,
+        )

         # logging
         add('--log-timestamp', help='Output timestamp in log', action='store_true')
         add('--log-epoch', help='Output unix epoch in log', action='store_true')
         add('--log-mask-sensitive-data', help='Mask sensitive data, e.g. passwords', action='store_true')

-        add('--pid-file', type=str, env_var='PID_FILE', help='Allows to dump current process pid into specified file', default=None)
+        add(
+            '--pid-file',
+            type=str,
+            env_var='PID_FILE',
+            help='Allows to dump current process pid into specified file',
+            default=None,
+        )

-        add('--auto-http-server-port', nargs='+', type=int, help='Hivemind will listen on first available port from this range')
+        add(
+            '--auto-http-server-port',
+            nargs='+',
+            type=int,
+            help='Hivemind will listen on first available port from this range',
+        )

         # needed for e.g. tests - other args may be present
-        args = (parser.parse_args() if strict
-                else parser.parse_known_args()[0])
+        args = parser.parse_args() if strict else parser.parse_known_args()[0]

         self._args = vars(args)
         self.arguments = parser._actions
@@ -104,6 +200,7 @@ class Conf():
         # Print command line args, but on continuous integration server
         # hide db connection string.
         from sys import argv
+
         if self.get('log_mask_sensitive_data'):
             my_args = []
             upcoming_connection_string = False
@@ -120,12 +217,12 @@ class Conf():
         root.info("Used command line args: %s", " ".join(argv[1:]))

         # uncomment for full list of program args
-        #args_list = ["--" + k + " " + str(v) for k,v in vars(args).items()]
-        #root.info("Full command line args: %s", " ".join(args_list))
+        # args_list = ["--" + k + " " + str(v) for k,v in vars(args).items()]
+        # root.info("Full command line args: %s", " ".join(args_list))

         if self.mode() == 'server':
-            #DbStats.SLOW_QUERY_MS = 750
-            DbStats.SLOW_QUERY_MS = 200 # TODO
+            # DbStats.SLOW_QUERY_MS = 750
+            DbStats.SLOW_QUERY_MS = 200  # TODO

     def __enter__(self):
         return self
@@ -141,21 +238,24 @@ class Conf():
         """Get a SteemClient instance, lazily initialized"""
         if not self._steem:
             from json import loads
+
             self._steem = SteemClient(
                 url=loads(self.get('steemd_url')),
                 max_batch=self.get('max_batch'),
                 max_workers=self.get('max_workers'),
-                max_retries=self.get('max_retries'))
+                max_retries=self.get('max_retries'),
+            )
         return self._steem

     def db(self):
         """Get a configured instance of Db."""
         if self._db is None:
             url = self.get('database_url')
-            enable_autoexplain = self.get( 'log_explain_queries' )
-            assert url, ('--database-url (or DATABASE_URL env) not specified; '
-                         'e.g. postgresql://user:pass@localhost:5432/hive')
-            self._db = Db(url, "root db creation", enable_autoexplain )
+            enable_autoexplain = self.get('log_explain_queries')
+            assert url, (
+                '--database-url (or DATABASE_URL env) not specified; ' 'e.g. postgresql://user:pass@localhost:5432/hive'
+            )
+            self._db = Db(url, "root db creation", enable_autoexplain)
             log.info("The database created...")

         return self._db
@@ -188,16 +288,17 @@ class Conf():
             arguments.extend(arg.option_strings)
         arguments = " ".join(arguments)
         with open('hive-completion.bash', 'w') as file:
-            file.writelines([
-                "#!/bin/bash\n",
-                "# to run type: source hive-completion.bash\n\n",
-                "# if you want to have completion everywhere, execute theese commands\n",
-                "# ln $PWD/hive-completion.bash $HOME/.local/\n",
-                '# echo "source $HOME/.local/hive-completion.bash" >> $HOME/.bashrc\n',
-                "# source $HOME/.bashrc\n\n"
-                f'complete -f -W "{arguments}" hive\n',
-                "\n"
-            ])
+            file.writelines(
+                [
+                    "#!/bin/bash\n",
+                    "# to run type: source hive-completion.bash\n\n",
+                    "# if you want to have completion everywhere, execute theese commands\n",
+                    "# ln $PWD/hive-completion.bash $HOME/.local/\n",
+                    '# echo "source $HOME/.local/hive-completion.bash" >> $HOME/.bashrc\n',
+                    "# source $HOME/.bashrc\n\n" f'complete -f -W "{arguments}" hive\n',
+                    "\n",
+                ]
+            )

     def disconnect(self):
         if self._db is not None:

@@ -14,12 +14,13 @@ logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)

 log = logging.getLogger(__name__)

+
 class Db:
     """RDBMS adapter for hive. Handles connecting and querying."""

     _instance = None

-    #maximum number of connections that is required so as to execute some tasks concurrently
+    # maximum number of connections that is required so as to execute some tasks concurrently
     necessary_connections = 15
     max_connections = 1

@@ -40,18 +41,23 @@ class Db:
         assert db is not None, "Database has to be initialized"
         cls.max_connections = db.query_one("SELECT setting::int FROM pg_settings WHERE name = 'max_connections'")
         if cls.necessary_connections > cls.max_connections:
-            log.info(f"A database offers only {cls.max_connections} connections, but it's required {cls.necessary_connections} connections")
+            log.info(
+                f"A database offers only {cls.max_connections} connections, but it's required {cls.necessary_connections} connections"
+            )
         else:
-            log.info(f"A database offers maximum connections: {cls.max_connections}. Required {cls.necessary_connections} connections.")
+            log.info(
+                f"A database offers maximum connections: {cls.max_connections}. Required {cls.necessary_connections} connections."
+            )

-    def __init__(self, url, name, enable_autoexplain = False):
+    def __init__(self, url, name, enable_autoexplain=False):
         """Initialize an instance.

         No work is performed here. Some modues might initialize an
         instance before config is loaded.
         """
-        assert url, ('--database-url (or DATABASE_URL env) not specified; '
-                     'e.g. postgresql://user:pass@localhost:5432/hive')
+        assert url, (
+            '--database-url (or DATABASE_URL env) not specified; ' 'e.g. postgresql://user:pass@localhost:5432/hive'
+        )
         self._url = url
         self._conn = []
         self._engine = None
@@ -60,7 +66,7 @@ class Db:

         self.name = name

-        self._conn.append( { "connection" : self.engine().connect(), "name" : name } )
+        self._conn.append({"connection": self.engine().connect(), "name": name})
         # Since we need to manage transactions ourselves, yet the
         # core behavior of DBAPI (per PEP-0249) is that a transaction
         # is always in progress, this COMMIT is a workaround to get
@@ -68,9 +74,9 @@ class Db:
         self._basic_connection = self.get_connection(0)
         self._basic_connection.execute(sqlalchemy.text("COMMIT"))

-        self.__autoexplain = None;
+        self.__autoexplain = None
         if enable_autoexplain:
-            self.__autoexplain = AutoExplainWrapper( self )
+            self.__autoexplain = AutoExplainWrapper(self)

     def clone(self, name):
         cloned = Db(self._url, name, self.__autoexplain)
@@ -99,7 +105,7 @@ class Db:
                 self._engine.dispose()
                 self._engine = None
             else:
-              log.info("SQL engine was already disposed")
+                log.info("SQL engine was already disposed")
         except Exception as ex:
             log.exception(f"Error during database closing: {ex}")
             raise ex
@@ -114,14 +120,15 @@ class Db:
         if self._engine is None:
             self._engine = sqlalchemy.create_engine(
                 self._url,
-                isolation_level="READ UNCOMMITTED", # only supported in mysql
+                isolation_level="READ UNCOMMITTED",  # only supported in mysql
                 pool_size=self.max_connections,
                 pool_recycle=3600,
-                echo=False)
+                echo=False,
+            )
         return self._engine

     def get_new_connection(self, name):
-        self._conn.append( { "connection" : self.engine().connect(), "name" : name } )
+        self._conn.append({"connection": self.engine().connect(), "name": name})
         return self.get_connection(len(self._conn) - 1)

     def get_dialect(self):
@@ -133,9 +140,9 @@ class Db:

     def explain(self):
         if self.__autoexplain:
-            return self.__autoexplain;
+            return self.__autoexplain

-        return self;
+        return self

     def query(self, sql, **kwargs):
         """Perform a (*non-`SELECT`*) write query."""
@@ -211,7 +218,7 @@ class Db:

         fields = list(values.keys())
         cols = ', '.join([k for k in fields])
-        params = ', '.join([':'+k for k in fields])
+        params = ', '.join([':' + k for k in fields])
         sql = "INSERT INTO %s (%s) VALUES (%s)"
         sql = sql % (table, cols, params)

@@ -225,23 +232,23 @@ class Db:
         values = OrderedDict(values)
         fields = list(values.keys())

-        update = ', '.join([k+" = :"+k for k in fields if k not in pks])
-        where = ' AND '.join([k+" = :"+k for k in fields if k in pks])
+        update = ', '.join([k + " = :" + k for k in fields if k not in pks])
+        where = ' AND '.join([k + " = :" + k for k in fields if k in pks])
         sql = "UPDATE %s SET %s WHERE %s"
         sql = sql % (table, update, where)

         return (sql, values)

     def _sql_text(self, sql, is_prepared):
-      # if sql in self._prep_sql:
-      #     query = self._prep_sql[sql]
-      # else:
-      #     query = sqlalchemy.text(sql).execution_options(autocommit=False)
-      #     self._prep_sql[sql] = query
+        # if sql in self._prep_sql:
+        #     query = self._prep_sql[sql]
+        # else:
+        #     query = sqlalchemy.text(sql).execution_options(autocommit=False)
+        #     self._prep_sql[sql] = query
         if is_prepared:
-          query = sql
+            query = sql
         else:
-          query = sqlalchemy.text(sql)
+            query = sqlalchemy.text(sql)
         return query

     def _query(self, sql, is_prepared, **kwargs):
@@ -264,8 +271,7 @@ class Db:
             Stats.log_db(sql, perf() - start)
             return result
         except Exception as e:
-            log.warning("[SQL-ERR] %s in query %s (%s)",
-                        e.__class__.__name__, sql, kwargs)
+            log.warning("[SQL-ERR] %s in query %s (%s)", e.__class__.__name__, sql, kwargs)
             raise e

     @staticmethod
@@ -274,7 +280,6 @@ class Db:
         action = sql.strip()[0:6].strip()
         if action == 'SELECT':
             return False
-        if action in ['DELETE', 'UPDATE', 'INSERT', 'COMMIT', 'START',
-                      'ALTER', 'TRUNCA', 'CREATE', 'DROP I', 'DROP T']:
+        if action in ['DELETE', 'UPDATE', 'INSERT', 'COMMIT', 'START', 'ALTER', 'TRUNCA', 'CREATE', 'DROP I', 'DROP T']:
             return True
         raise Exception(f"unknown action: {sql}")

@@ -3,82 +3,84 @@ import logging

 log = logging.getLogger(__name__)

-class PostgresClientLogSeverity( enum.Enum ):
+
+class PostgresClientLogSeverity(enum.Enum):
     debug5 = 1
     debug4 = 2
     debug3 = 3
     debug2 = 4
     debug1 = 5
-    log     = 6
+    log = 6
     notice = 7
     warning = 8
-    error   = 9
+    error = 9


 class AutoExplainController:
-    def __init__( self, _db ):
+    def __init__(self, _db):
         """
         Prepere the db for using autoexplain
         """
         self.__wrapped_db = _db
-        self.__wrapped_db.query_no_return( "LOAD 'auto_explain'" )
+        self.__wrapped_db.query_no_return("LOAD 'auto_explain'")

-    def __enter__( self ):
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_nested_statements=on" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_min_duration=0" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_analyze=on" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_buffers=on" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_verbose=on" )
+    def __enter__(self):
+        self.__wrapped_db.query_no_return("SET auto_explain.log_nested_statements=on")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_min_duration=0")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_analyze=on")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_buffers=on")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_verbose=on")

-        self.__previous_psql_client_log_level = self.__wrapped_db.query_one( "SHOW client_min_messages" )
-        if PostgresClientLogSeverity[ self.__previous_psql_client_log_level ].value > PostgresClientLogSeverity.log.value:
-            self.__wrapped_db.query_no_return( "SET client_min_messages=log" )
+        self.__previous_psql_client_log_level = self.__wrapped_db.query_one("SHOW client_min_messages")
+        if PostgresClientLogSeverity[self.__previous_psql_client_log_level].value > PostgresClientLogSeverity.log.value:
+            self.__wrapped_db.query_no_return("SET client_min_messages=log")

-        self.__previous_log_level = logging.getLogger('sqlalchemy.dialects').getEffectiveLevel();
+        self.__previous_log_level = logging.getLogger('sqlalchemy.dialects').getEffectiveLevel()
         if self.__previous_log_level > getattr(logging, 'INFO'):
             logging.getLogger('sqlalchemy.dialects').setLevel(logging.INFO)

-    def __exit__( self, exc_type, exc_value, traceback ):
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_nested_statements=off" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_min_duration=-1" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_analyze=off" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_buffers=off" )
-        self.__wrapped_db.query_no_return( "SET auto_explain.log_verbose=off" )
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.__wrapped_db.query_no_return("SET auto_explain.log_nested_statements=off")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_min_duration=-1")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_analyze=off")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_buffers=off")
+        self.__wrapped_db.query_no_return("SET auto_explain.log_verbose=off")

-        if PostgresClientLogSeverity[ self.__previous_psql_client_log_level ].value > PostgresClientLogSeverity.log.value:
-            self.__wrapped_db.query_no_return( f"SET client_min_messages={self.__previous_psql_client_log_level}" )
+        if PostgresClientLogSeverity[self.__previous_psql_client_log_level].value > PostgresClientLogSeverity.log.value:
+            self.__wrapped_db.query_no_return(f"SET client_min_messages={self.__previous_psql_client_log_level}")

         if self.__previous_log_level > getattr(logging, 'INFO'):
             logging.getLogger('sqlalchemy.dialects').setLevel(self.__previous_log_level)


 class AutoExplainWrapper:
-    def __init__( self, _db ):
+    def __init__(self, _db):
         self.__wrapped_db = _db

-    def query( self, sql, **kwargs ):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            return self.__wrapped_db.query( sql, **kwargs )
+    def query(self, sql, **kwargs):
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            return self.__wrapped_db.query(sql, **kwargs)

-    def query_no_return( self, sql, **kwargs ):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            self.__wrapped_db.query_no_return( sql, **kwargs )
+    def query_no_return(self, sql, **kwargs):
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            self.__wrapped_db.query_no_return(sql, **kwargs)

     def query_all(self, sql, **kwargs):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            return self.__wrapped_db.query_all( sql, **kwargs )
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            return self.__wrapped_db.query_all(sql, **kwargs)

     def query_row(self, sql, **kwargs):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            return self.__wrapped_db.query_row( sql, **kwargs )
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            return self.__wrapped_db.query_row(sql, **kwargs)

     def query_col(self, sql, **kwargs):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            return self.__wrapped_db.query_col( sql, **kwargs )
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            return self.__wrapped_db.query_col(sql, **kwargs)

     def query_one(self, sql, **kwargs):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            return self.__wrapped_db.query_one( sql, **kwargs )
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            return self.__wrapped_db.query_one(sql, **kwargs)

     def batch_queries(self, queries, trx):
-        with AutoExplainController( self.__wrapped_db ) as auto_explain:
-            self.__wrapped_db.batch_queries( queries, trx )
+        with AutoExplainController(self.__wrapped_db) as auto_explain:
+            self.__wrapped_db.batch_queries(queries, trx)

@@ -1,6 +1,6 @@
 """Hive db state manager. Check if schema loaded, init synced, etc."""

-#pylint: disable=too-many-lines
+# pylint: disable=too-many-lines

 import time
 from time import perf_counter
@@ -8,8 +8,7 @@ from time import perf_counter
 import logging
 import sqlalchemy

-from hive.db.schema import (setup, set_logged_table_attribute, build_metadata,
-                            build_metadata_community, teardown)
+from hive.db.schema import setup, set_logged_table_attribute, build_metadata, build_metadata_community, teardown
 from hive.db.adapter import Db

 from concurrent.futures import ThreadPoolExecutor, as_completed
@@ -23,7 +22,8 @@ from hive.utils.stats import FinalOperationStatusManager as FOSM

 log = logging.getLogger(__name__)

-SYNCED_BLOCK_LIMIT = 7*24*1200 # 7 days
+SYNCED_BLOCK_LIMIT = 7 * 24 * 1200  # 7 days
+

 class DbState:
     """Manages database state: sync status, migrations, etc."""
@@ -90,20 +90,16 @@ class DbState:
     def _disableable_indexes(cls):
         to_locate = [
             'hive_blocks_created_at_idx',
-
             'hive_feed_cache_block_num_idx',
             'hive_feed_cache_created_at_idx',
             'hive_feed_cache_post_id_idx',
-
-            'hive_follows_ix5a', # (following, state, created_at, follower)
-            'hive_follows_ix5b', # (follower, state, created_at, following)
+            'hive_follows_ix5a',  # (following, state, created_at, follower)
+            'hive_follows_ix5b',  # (follower, state, created_at, following)
             'hive_follows_block_num_idx',
             'hive_follows_created_at_idx',
-
             'hive_posts_parent_id_id_idx',
             'hive_posts_depth_idx',
             'hive_posts_root_id_id_idx',
-
             'hive_posts_community_id_id_idx',
             'hive_posts_payout_at_idx',
             'hive_posts_payout_idx',
@@ -119,25 +115,18 @@ class DbState:
             'hive_posts_tags_ids_idx',
             'hive_posts_author_id_created_at_id_idx',
             'hive_posts_author_id_id_idx',
-
-
             'hive_posts_api_helper_author_s_permlink_idx',
-
             'hive_votes_voter_id_last_update_idx',
             'hive_votes_block_num_idx',
-
             'hive_subscriptions_block_num_idx',
             'hive_subscriptions_community_idx',
             'hive_communities_block_num_idx',
             'hive_reblogs_created_at_idx',
-
             'hive_votes_voter_id_post_id_idx',
             'hive_votes_post_id_voter_id_idx',
-
             'hive_reputation_data_block_num_idx',
-
             'hive_notification_cache_block_num_idx',
-            'hive_notification_cache_dst_score_idx'
+            'hive_notification_cache_dst_score_idx',
         ]

         to_return = {}
@@ -148,8 +137,8 @@ class DbState:
                 continue
             to_locate.remove(index.name)
             if table not in to_return:
-                to_return[ table ] = []
-            to_return[ table ].append(index)
+                to_return[table] = []
+            to_return[table].append(index)

         # ensure we found all the items we expected
         assert not to_locate, f"indexes not located: {to_locate}"
@@ -167,7 +156,7 @@ class DbState:
     @classmethod
     def _execute_query(cls, db, query):
         time_start = perf_counter()

-
+
         current_work_mem = cls.update_work_mem('2GB')
         log.info("[INIT] Attempting to execute query: `%s'...", query)
@@ -192,7 +181,6 @@ class DbState:
         time_end = perf_counter()
         log.info("[INIT] Query `%s' done in %.4fs", query, time_end - time_start)

-
     @classmethod
     def processing_indexes_per_table(cls, db, table_name, indexes, is_pre_process, drop, create):
         log.info("[INIT] Begin %s-initial sync hooks for table %s", "pre" if is_pre_process else "post", table_name)
@@ -235,7 +223,13 @@ class DbState:

         methods = []
         for _key_table, indexes in _indexes.items():
-            methods.append( (_key_table.name, cls.processing_indexes_per_table, [cls.db(), _key_table.name, indexes, is_pre_process, drop, create]) )
+            methods.append(
+                (
+                    _key_table.name,
+                    cls.processing_indexes_per_table,
+                    [cls.db(), _key_table.name, indexes, is_pre_process, drop, create],
+                )
+            )

         cls.process_tasks_in_threads("[INIT] %i threads finished creating indexes.", methods)

@@ -243,7 +237,9 @@ class DbState:

         log.info(f"=== {action} INDEXES ===")
         threads_time = FOSM.log_current(f"Total {action} indexes time")
-        log.info(f"Elapsed time: {real_time :.4f}s. Calculated elapsed time: {threads_time :.4f}s. Difference: {real_time - threads_time :.4f}s")
+        log.info(
+            f"Elapsed time: {real_time :.4f}s. Calculated elapsed time: {threads_time :.4f}s. Difference: {real_time - threads_time :.4f}s"
+        )
         FOSM.clear()
         log.info(f"=== {action} INDEXES ===")

@@ -260,15 +256,16 @@ class DbState:
             log.info("[INIT] Skipping pre-initial sync hooks")
             return

-        #is_pre_process, drop, create
-        cls.processing_indexes( True, True, False )
+        # is_pre_process, drop, create
+        cls.processing_indexes(True, True, False)

         from hive.db.schema import drop_fk, set_logged_table_attribute

         log.info("Dropping FKs")
         drop_fk(cls.db())

         # intentionally disabled since it needs a lot of WAL disk space when switching back to LOGGED
-        #set_logged_table_attribute(cls.db(), False)
+        # set_logged_table_attribute(cls.db(), False)

         log.info("[INIT] Finish pre-initial sync hooks")
+
@@ -291,7 +288,7 @@ class DbState:
     @classmethod
     def _finish_hive_posts(cls, db, massive_sync_preconditions, last_imported_block, current_imported_block):
         with AutoDbDisposer(db, "finish_hive_posts") as db_mgr:
-            #UPDATE: `abs_rshares`, `vote_rshares`, `sc_hot`, ,`sc_trend`, `total_votes`, `net_votes`
+            # UPDATE: `abs_rshares`, `vote_rshares`, `sc_hot`, ,`sc_trend`, `total_votes`, `net_votes`
             time_start = perf_counter()
             sql = f"""
                 SELECT update_posts_rshares({last_imported_block}, {current_imported_block});
@@ -301,7 +298,7 @@ class DbState:

             time_start = perf_counter()

-            #UPDATE: `children`
+            # UPDATE: `children`
             if massive_sync_preconditions:
                 # Update count of all child posts (what was hold during initial sync)
                 cls._execute_query(db_mgr.db, "select update_all_hive_posts_children_count()")
@@ -311,7 +308,7 @@ class DbState:
             cls._execute_query(db_mgr.db, sql)
             log.info("[INIT] update_hive_posts_children_count executed in %.4fs", perf_counter() - time_start)

-            #UPDATE: `root_id`
+            # UPDATE: `root_id`
             # Update root_id all root posts
             time_start = perf_counter()
             sql = f"""
@@ -358,7 +355,9 @@ class DbState:

     @classmethod
     def _finish_account_reputations(cls, db, last_imported_block, current_imported_block):
-        log.info(f"Performing update_account_reputations on block rangge: {last_imported_block}:{current_imported_block}")
+        log.info(
+            f"Performing update_account_reputations on block rangge: {last_imported_block}:{current_imported_block}"
+        )

         with AutoDbDisposer(db, "finish_account_reputations") as db_mgr:
             time_start = perf_counter()
@@ -415,18 +414,20 @@ class DbState:
     def process_tasks_in_threads(cls, info, methods):
         futures = []
         pool = ThreadPoolExecutor(max_workers=Db.max_connections)
-        futures = {pool.submit(cls.time_collector, method, args): (description) for (description, method, args) in methods}
+        futures = {
+            pool.submit(cls.time_collector, method, args): (description) for (description, method, args) in methods
+        }

         completedThreads = 0
         for future in as_completed(futures):
-          description = futures[future]
-          completedThreads = completedThreads + 1
-          try:
-            elapsedTime = future.result()
-            FOSM.final_stat(description, elapsedTime)
-          except Exception as exc:
-            log.error(f'{description!r} generated an exception: {exc}')
-            raise exc
+            description = futures[future]
+            completedThreads = completedThreads + 1
+            try:
+                elapsedTime = future.result()
+                FOSM.final_stat(description, elapsedTime)
+            except Exception as exc:
+                log.error(f'{description!r} generated an exception: {exc}')
+                raise exc

         pool.shutdown()
         log.info(info, completedThreads)
@@ -438,27 +439,53 @@ class DbState:
         log.info("#############################################################################")

         methods = []
-        methods.append( ('hive_posts', cls._finish_hive_posts, [cls.db(), massive_sync_preconditions, last_imported_block, current_imported_block]) )
-        methods.append( ('hive_feed_cache', cls._finish_hive_feed_cache, [cls.db(), last_imported_block, current_imported_block]) )
-        methods.append( ('hive_mentions', cls._finish_hive_mentions, [cls.db(), last_imported_block, current_imported_block]) )
-        methods.append( ('payout_stats_view', cls._finish_payout_stats_view, []) )
-        methods.append( ('communities_posts_and_rank', cls._finish_communities_posts_and_rank, [cls.db()]) )
-        methods.append( ('blocks_consistency_flag', cls._finish_blocks_consistency_flag, [cls.db(), last_imported_block, current_imported_block]) )
+        methods.append(
+            (
+                'hive_posts',
+                cls._finish_hive_posts,
+                [cls.db(), massive_sync_preconditions, last_imported_block, current_imported_block],
+            )
+        )
+        methods.append(
+            ('hive_feed_cache', cls._finish_hive_feed_cache, [cls.db(), last_imported_block, current_imported_block])
+        )
+        methods.append(
+            ('hive_mentions', cls._finish_hive_mentions, [cls.db(), last_imported_block, current_imported_block])
+        )
+        methods.append(('payout_stats_view', cls._finish_payout_stats_view, []))
+        methods.append(('communities_posts_and_rank', cls._finish_communities_posts_and_rank, [cls.db()]))
+        methods.append(
+            (
+                'blocks_consistency_flag',
+                cls._finish_blocks_consistency_flag,
+                [cls.db(), last_imported_block, current_imported_block],
+            )
+        )
         cls.process_tasks_in_threads("[INIT] %i threads finished filling tables. Part nr 0", methods)

         methods = []
-        #Notifications are dependent on many tables, therefore it's necessary to calculate it at the end
-        methods.append( ('notification_cache', cls._finish_notification_cache, [cls.db()]) )
-        #hive_posts_api_helper is dependent on `hive_posts/root_id` filling
-        methods.append( ('hive_posts_api_helper', cls._finish_hive_posts_api_helper, [cls.db(), last_imported_block, current_imported_block]) )
-        methods.append( ('follow_count', cls._finish_follow_count, [cls.db(), last_imported_block, current_imported_block]) )
+        # Notifications are dependent on many tables, therefore it's necessary to calculate it at the end
+        methods.append(('notification_cache', cls._finish_notification_cache, [cls.db()]))
+        # hive_posts_api_helper is dependent on `hive_posts/root_id` filling
+        methods.append(
+            (
+                'hive_posts_api_helper',
+                cls._finish_hive_posts_api_helper,
+                [cls.db(), last_imported_block, current_imported_block],
+            )
+        )
+        methods.append(
+            ('follow_count', cls._finish_follow_count, [cls.db(), last_imported_block, current_imported_block])
+        )
         cls.process_tasks_in_threads("[INIT] %i threads finished filling tables. Part nr 1", methods)

         real_time = FOSM.stop(start_time)

         log.info("=== FILLING FINAL DATA INTO TABLES ===")
         threads_time = FOSM.log_current("Total final operations time")
-        log.info(f"Elapsed time: {real_time :.4f}s. Calculated elapsed time: {threads_time :.4f}s. Difference: {real_time - threads_time :.4f}s")
+        log.info(
+            f"Elapsed time: {real_time :.4f}s. Calculated elapsed time: {threads_time :.4f}s. Difference: {real_time - threads_time :.4f}s"
+        )
         FOSM.clear()
         log.info("=== FILLING FINAL DATA INTO TABLES ===")

@@ -473,9 +500,11 @@ class DbState:

         last_imported_block = DbState.db().query_one("SELECT block_num FROM hive_state LIMIT 1")

-        log.info("[INIT] Current imported block: %s. Last imported block: %s.", current_imported_block, last_imported_block)
+        log.info(
+            "[INIT] Current imported block: %s. Last imported block: %s.", current_imported_block, last_imported_block
+        )
         if last_imported_block > current_imported_block:
-          last_imported_block = current_imported_block
+            last_imported_block = current_imported_block

         synced_blocks = current_imported_block - last_imported_block

@@ -487,47 +516,46 @@ class DbState:
             force_index_rebuild = True
             massive_sync_preconditions = True

-        #is_pre_process, drop, create
+        # is_pre_process, drop, create
         log.info("Creating indexes: started")
-        cls.processing_indexes( False, force_index_rebuild, True )
+        cls.processing_indexes(False, force_index_rebuild, True)
         log.info("Creating indexes: finished")

         # Update statistics and execution plans after index creation.
         if massive_sync_preconditions:
-            cls._execute_query(cls.db(),"VACUUM (VERBOSE,ANALYZE)")
+            cls._execute_query(cls.db(), "VACUUM (VERBOSE,ANALYZE)")

-        #all post-updates are executed in different threads: one thread per one table
+        # all post-updates are executed in different threads: one thread per one table
         log.info("Filling tables with final values: started")
         cls._finish_all_tables(massive_sync_preconditions, last_imported_block, current_imported_block)
         log.info("Filling tables with final values: finished")

         # Update a block num immediately
-        cls.db().query_no_return("UPDATE hive_state SET block_num = :block_num", block_num = current_imported_block)
+        cls.db().query_no_return("UPDATE hive_state SET block_num = :block_num", block_num=current_imported_block)

         if massive_sync_preconditions:
             from hive.db.schema import create_fk, set_logged_table_attribute

             # intentionally disabled since it needs a lot of WAL disk space when switching back to LOGGED
-            #set_logged_table_attribute(cls.db(), True)
+            # set_logged_table_attribute(cls.db(), True)

             log.info("Recreating foreign keys")
             create_fk(cls.db())
             log.info("Foreign keys were recreated")

-            cls._execute_query(cls.db(),"VACUUM (VERBOSE,ANALYZE)")
+            cls._execute_query(cls.db(), "VACUUM (VERBOSE,ANALYZE)")

         end_time = perf_counter()
         log.info("[INIT] After initial sync actions done in %.4fs", end_time - start_time)

     @staticmethod
     def status():
         """Basic health status: head block/time, current age (secs)."""
-        sql = ("SELECT num, created_at, extract(epoch from created_at) ts "
-               "FROM hive_blocks ORDER BY num DESC LIMIT 1")
+        sql = "SELECT num, created_at, extract(epoch from created_at) ts " "FROM hive_blocks ORDER BY num DESC LIMIT 1"
         row = DbState.db().query_row(sql)
-        return dict(db_head_block=row['num'],
-                    db_head_time=str(row['created_at']),
-                    db_head_age=int(time.time() - row['ts']))
+        return dict(
+            db_head_block=row['num'], db_head_time=str(row['created_at']), db_head_age=int(time.time() - row['ts'])
+        )

     @classmethod
     def _is_schema_loaded(cls):
@@ -535,9 +563,13 @@ class DbState:
         # check if database has been initialized (i.e. schema loaded)
         _engine_name = cls.db().engine_name()
         if _engine_name == 'postgresql':
-            return bool(cls.db().query_one("""
+            return bool(
+                cls.db().query_one(
+                    """
                 SELECT 1 FROM pg_catalog.pg_tables WHERE schemaname = 'public'
-            """))
+            """
+                )
+            )
         if _engine_name == 'mysql':
             return bool(cls.db().query_one('SHOW TABLES'))
         raise Exception(f"unknown db engine {_engine_name}")
@@ -549,4 +581,3 @@ class DbState:
         If empty, it indicates that the initial sync has not finished.
         """
         return not cls.db().query_one("SELECT 1 FROM hive_feed_cache LIMIT 1")
-

@@ -9,16 +9,19 @@ from sqlalchemy.types import TEXT
 from sqlalchemy.types import BOOLEAN

 import logging

 log = logging.getLogger(__name__)

-#pylint: disable=line-too-long, too-many-lines, bad-whitespace
+# pylint: disable=line-too-long, too-many-lines, bad-whitespace
+

 def build_metadata():
     """Build schema def with SqlAlchemy"""
     metadata = sa.MetaData()

     sa.Table(
-        'hive_blocks', metadata,
+        'hive_blocks',
+        metadata,
         sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),
         sa.Column('hash', CHAR(40), nullable=False),
         sa.Column('prev', CHAR(40)),
@@ -26,51 +29,49 @@ def build_metadata():
         sa.Column('ops', sa.Integer, server_default='0', nullable=False),
         sa.Column('created_at', sa.DateTime, nullable=False),
         sa.Column('completed', sa.Boolean, nullable=False, server_default='0'),
-
         sa.UniqueConstraint('hash', name='hive_blocks_ux1'),
         sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),
         sa.Index('hive_blocks_created_at_idx', 'created_at'),
-        sa.Index('hive_blocks_completed_idx', 'completed')
+        sa.Index('hive_blocks_completed_idx', 'completed'),
     )

     sa.Table(
-        'hive_accounts', metadata,
+        'hive_accounts',
+        metadata,
         sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('name', VARCHAR(16, collation='C'), nullable=False),
         sa.Column('created_at', sa.DateTime, nullable=False),
-        #sa.Column('block_num', sa.Integer, nullable=False),
+        # sa.Column('block_num', sa.Integer, nullable=False),
         sa.Column('reputation', sa.BigInteger, nullable=False, server_default='0'),
         sa.Column('is_implicit', sa.Boolean, nullable=False, server_default='1'),
         sa.Column('followers', sa.Integer, nullable=False, server_default='0'),
         sa.Column('following', sa.Integer, nullable=False, server_default='0'),
-
         sa.Column('rank', sa.Integer, nullable=False, server_default='0'),
-
         sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
         sa.Column('posting_json_metadata', sa.Text),
         sa.Column('json_metadata', sa.Text),
-
         sa.UniqueConstraint('name', name='hive_accounts_ux1'),
-        sa.Index('hive_accounts_reputation_id_idx', sa.text('reputation DESC, id'))
+        sa.Index('hive_accounts_reputation_id_idx', sa.text('reputation DESC, id')),
     )

     sa.Table(
-        'hive_reputation_data', metadata,
+        'hive_reputation_data',
+        metadata,
         sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('author_id', sa.Integer, nullable=False),
         sa.Column('voter_id', sa.Integer, nullable=False),
         sa.Column('permlink', sa.String(255, collation='C'), nullable=False),
         sa.Column('rshares', sa.BigInteger, nullable=False),
         sa.Column('block_num', sa.Integer, nullable=False),
-
         sa.Index('hive_reputation_data_author_permlink_voter_idx', 'author_id', 'permlink', 'voter_id'),
-        sa.Index('hive_reputation_data_block_num_idx', 'block_num')
+        sa.Index('hive_reputation_data_block_num_idx', 'block_num'),
     )

     sa.Table(
-        'hive_posts', metadata,
+        'hive_posts',
+        metadata,
         sa.Column('id', sa.Integer, primary_key=True),
-        sa.Column('root_id', sa.Integer, nullable=False), # records having initially set 0 will be updated to their id
+        sa.Column('root_id', sa.Integer, nullable=False),  # records having initially set 0 will be updated to their id
         sa.Column('parent_id', sa.Integer, nullable=False),
         sa.Column('author_id', sa.Integer, nullable=False),
         sa.Column('permlink_id', sa.Integer, nullable=False),
@@ -83,9 +84,7 @@ def build_metadata():
         sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),
         sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),
         sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
-
         sa.Column('children', sa.Integer, nullable=False, server_default='0'),
-
         # core stats/indexes
         sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
         sa.Column('pending_payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),
@@ -93,24 +92,19 @@ def build_metadata():
         sa.Column('last_payout_at', sa.DateTime, nullable=False, server_default='1970-01-01'),
         sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1970-01-01'),
         sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),
-
         # ui flags/filters
         sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),
         sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),
         sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),
         sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),
-
         # important indexes
         sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),
         sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),
-
         sa.Column('total_payout_value', sa.String(30), nullable=False, server_default='0.000 HBD'),
         sa.Column('author_rewards', sa.BigInteger, nullable=False, server_default='0'),
-
         sa.Column('author_rewards_hive', sa.BigInteger, nullable=False, server_default='0'),
         sa.Column('author_rewards_hbd', sa.BigInteger, nullable=False, server_default='0'),
         sa.Column('author_rewards_vests', sa.BigInteger, nullable=False, server_default='0'),
-
         sa.Column('abs_rshares', sa.Numeric, nullable=False, server_default='0'),
         sa.Column('vote_rshares', sa.Numeric, nullable=False, server_default='0'),
         sa.Column('total_vote_weight', sa.Numeric, nullable=False, server_default='0'),
@@ -119,70 +113,97 @@ def build_metadata():
         sa.Column('active', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
         sa.Column('cashout_time', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
         sa.Column('percent_hbd', sa.Integer, nullable=False, server_default='10000'),
-
         sa.Column('curator_payout_value', sa.String(30), nullable=False, server_default='0.000 HBD'),
-        sa.Column('max_accepted_payout', sa.String(30), nullable=False, server_default='1000000.000 HBD'),
+        sa.Column('max_accepted_payout', sa.String(30), nullable=False, server_default='1000000.000 HBD'),
         sa.Column('allow_votes', BOOLEAN, nullable=False, server_default='1'),
         sa.Column('allow_curation_rewards', BOOLEAN, nullable=False, server_default='1'),
         sa.Column('beneficiaries', sa.JSON, nullable=False, server_default='[]'),
-        sa.Column('block_num', sa.Integer, nullable=False ),
-        sa.Column('block_num_created', sa.Integer, nullable=False ),
-        sa.Column('tags_ids', sa.ARRAY(sa.Integer), nullable=True ),
-
+        sa.Column('block_num', sa.Integer, nullable=False),
+        sa.Column('block_num_created', sa.Integer, nullable=False),
+        sa.Column('tags_ids', sa.ARRAY(sa.Integer), nullable=True),
         sa.ForeignKeyConstraint(['author_id'], ['hive_accounts.id'], name='hive_posts_fk1'),
         sa.ForeignKeyConstraint(['root_id'], ['hive_posts.id'], name='hive_posts_fk2'),
         sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),
         sa.UniqueConstraint('author_id', 'permlink_id', 'counter_deleted', name='hive_posts_ux1'),
-
         sa.Index('hive_posts_depth_idx', 'depth'),
-
-        sa.Index('hive_posts_root_id_id_idx', 'root_id','id'),
-
-        sa.Index('hive_posts_parent_id_id_idx', sa.text('parent_id, id DESC'), postgresql_where=sql_text("counter_deleted = 0")),
+        sa.Index('hive_posts_root_id_id_idx', 'root_id', 'id'),
+        sa.Index(
+            'hive_posts_parent_id_id_idx',
+            sa.text('parent_id, id DESC'),
+            postgresql_where=sql_text("counter_deleted = 0"),
+        ),
         sa.Index('hive_posts_community_id_id_idx', 'community_id', sa.text('id DESC')),
-
         sa.Index('hive_posts_payout_at_idx', 'payout_at'),
         sa.Index('hive_posts_payout_idx', 'payout'),
-        sa.Index('hive_posts_promoted_id_idx', 'promoted', 'id', postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0")),
-        sa.Index('hive_posts_sc_trend_id_idx', 'sc_trend', 'id', postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0 AND depth = 0")),
-        sa.Index('hive_posts_sc_hot_id_idx', 'sc_hot', 'id', postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0 AND depth = 0")),
+        sa.Index(
+            'hive_posts_promoted_id_idx',
+            'promoted',
+            'id',
+            postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0"),
+        ),
+        sa.Index(
+            'hive_posts_sc_trend_id_idx',
+            'sc_trend',
+            'id',
+            postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0 AND depth = 0"),
+        ),
+        sa.Index(
+            'hive_posts_sc_hot_id_idx',
+            'sc_hot',
+            'id',
+            postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0 AND depth = 0"),
+        ),
         sa.Index('hive_posts_author_id_created_at_id_idx', sa.text('author_id DESC, created_at DESC, id')),
         sa.Index('hive_posts_author_id_id_idx', 'author_id', 'id', postgresql_where=sql_text('depth = 0')),
         sa.Index('hive_posts_block_num_idx', 'block_num'),
         sa.Index('hive_posts_block_num_created_idx', 'block_num_created'),
         sa.Index('hive_posts_cashout_time_id_idx', 'cashout_time', 'id'),
         sa.Index('hive_posts_updated_at_idx', sa.text('updated_at DESC')),
-        sa.Index('hive_posts_payout_plus_pending_payout_id_idx', sa.text('(payout+pending_payout), id'), postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0")),
-        sa.Index('hive_posts_category_id_payout_plus_pending_payout_depth_idx', sa.text('category_id, (payout+pending_payout), depth'), postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0")),
-        sa.Index('hive_posts_tags_ids_idx', 'tags_ids', postgresql_using="gin", postgresql_ops={'tags_ids': 'gin__int_ops'})
-    )
+        sa.Index(
+            'hive_posts_payout_plus_pending_payout_id_idx',
+            sa.text('(payout+pending_payout), id'),
+            postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0"),
+        ),
+        sa.Index(
+            'hive_posts_category_id_payout_plus_pending_payout_depth_idx',
+            sa.text('category_id, (payout+pending_payout), depth'),
+            postgresql_where=sql_text("NOT is_paidout AND counter_deleted = 0"),
+        ),
+        sa.Index(
+            'hive_posts_tags_ids_idx', 'tags_ids', postgresql_using="gin", postgresql_ops={'tags_ids': 'gin__int_ops'}
+        ),
+    )

     sa.Table(
-        'hive_post_data', metadata,
+        'hive_post_data',
+        metadata,
         sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
         sa.Column('title', VARCHAR(512), nullable=False, server_default=''),
-        sa.Column('preview', VARCHAR(1024), nullable=False, server_default=''), # first 1k of 'body'
-        sa.Column('img_url', VARCHAR(1024), nullable=False, server_default=''), # first 'image' from 'json'
+        sa.Column('preview', VARCHAR(1024), nullable=False, server_default=''),  # first 1k of 'body'
+        sa.Column('img_url', VARCHAR(1024), nullable=False, server_default=''),  # first 'image' from 'json'
         sa.Column('body', TEXT, nullable=False, server_default=''),
-        sa.Column('json', TEXT, nullable=False, server_default='')
+        sa.Column('json', TEXT, nullable=False, server_default=''),
     )

     sa.Table(
-        'hive_permlink_data', metadata,
+        'hive_permlink_data',
+        metadata,
         sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('permlink', sa.String(255, collation='C'), nullable=False),
-        sa.UniqueConstraint('permlink', name='hive_permlink_data_permlink')
+        sa.UniqueConstraint('permlink', name='hive_permlink_data_permlink'),
     )

     sa.Table(
-        'hive_category_data', metadata,
+        'hive_category_data',
+        metadata,
         sa.Column('id', sa.Integer, primary_key=True),
         sa.Column('category', sa.String(255, collation='C'), nullable=False),
-        sa.UniqueConstraint('category', name='hive_category_data_category')
+        sa.UniqueConstraint('category', name='hive_category_data_category'),
     )

     sa.Table(
-        'hive_votes', metadata,
+        'hive_votes',
+        metadata,
         sa.Column('id', sa.BigInteger, primary_key=True),
         sa.Column('post_id', sa.Integer, nullable=False),
         sa.Column('voter_id', sa.Integer, nullable=False),
@@ -193,35 +214,45 @@ def build_metadata():
sa.Column('vote_percent', sa.Integer, server_default='0'),
sa.Column('last_update', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),
sa.Column('num_changes', sa.Integer, server_default='0'),
sa.Column('block_num', sa.Integer, nullable=False ),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('is_effective', BOOLEAN, nullable=False, server_default='0'),

sa.UniqueConstraint('voter_id', 'author_id', 'permlink_id', name='hive_votes_voter_id_author_id_permlink_id_uk'),

sa.UniqueConstraint(
'voter_id', 'author_id', 'permlink_id', name='hive_votes_voter_id_author_id_permlink_id_uk'
),
sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_votes_fk1'),
sa.ForeignKeyConstraint(['voter_id'], ['hive_accounts.id'], name='hive_votes_fk2'),
sa.ForeignKeyConstraint(['author_id'], ['hive_accounts.id'], name='hive_votes_fk3'),
sa.ForeignKeyConstraint(['permlink_id'], ['hive_permlink_data.id'], name='hive_votes_fk4'),
sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_votes_fk5'),

sa.Index('hive_votes_voter_id_post_id_idx', 'voter_id', 'post_id'), # probably this index is redundant to hive_votes_voter_id_last_update_idx because of starting voter_id.
sa.Index('hive_votes_voter_id_last_update_idx', 'voter_id', 'last_update'), # this index is critical for hive_accounts_info_view performance
sa.Index(
'hive_votes_voter_id_post_id_idx', 'voter_id', 'post_id'
), # probably this index is redundant to hive_votes_voter_id_last_update_idx because of starting voter_id.
sa.Index(
'hive_votes_voter_id_last_update_idx', 'voter_id', 'last_update'
), # this index is critical for hive_accounts_info_view performance
sa.Index('hive_votes_post_id_voter_id_idx', 'post_id', 'voter_id'),
sa.Index('hive_votes_block_num_idx', 'block_num'), # this is also important for hive_accounts_info_view

sa.Index('hive_votes_post_id_block_num_rshares_vote_is_effective_idx', 'post_id', 'block_num', 'rshares', 'is_effective') # this index is needed by update_posts_rshares procedure.
sa.Index('hive_votes_block_num_idx', 'block_num'), # this is also important for hive_accounts_info_view
sa.Index(
'hive_votes_post_id_block_num_rshares_vote_is_effective_idx',
'post_id',
'block_num',
'rshares',
'is_effective',
), # this index is needed by update_posts_rshares procedure.
)

sa.Table(
'hive_tag_data', metadata,
'hive_tag_data',
metadata,
sa.Column('id', sa.Integer, nullable=False, primary_key=True),
sa.Column('tag', VARCHAR(64, collation='C'), nullable=False, server_default=''),
sa.UniqueConstraint('tag', name='hive_tag_data_ux1')
sa.UniqueConstraint('tag', name='hive_tag_data_ux1'),
)

sa.Table(
'hive_follows', metadata,
sa.Column('id', sa.Integer, primary_key=True ),
'hive_follows',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('follower', sa.Integer, nullable=False),
sa.Column('following', sa.Integer, nullable=False),
sa.Column('state', SMALLINT, nullable=False, server_default='1'),
@@ -229,9 +260,8 @@ def build_metadata():
sa.Column('blacklisted', sa.Boolean, nullable=False, server_default='0'),
sa.Column('follow_blacklists', sa.Boolean, nullable=False, server_default='0'),
sa.Column('follow_muted', BOOLEAN, nullable=False, server_default='0'),
sa.Column('block_num', sa.Integer, nullable=False ),

sa.UniqueConstraint('following', 'follower', name='hive_follows_ux1'), # core
sa.Column('block_num', sa.Integer, nullable=False),
sa.UniqueConstraint('following', 'follower', name='hive_follows_ux1'), # core
sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_follows_fk1'),
sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),
sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),
@@ -240,24 +270,25 @@ def build_metadata():
)

sa.Table(
'hive_reblogs', metadata,
sa.Column('id', sa.Integer, primary_key=True ),
'hive_reblogs',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('blogger_id', sa.Integer, nullable=False),
sa.Column('post_id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('block_num', sa.Integer, nullable=False ),

sa.Column('block_num', sa.Integer, nullable=False),
sa.ForeignKeyConstraint(['blogger_id'], ['hive_accounts.id'], name='hive_reblogs_fk1'),
sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),
sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_reblogs_fk3'),
sa.UniqueConstraint('blogger_id', 'post_id', name='hive_reblogs_ux1'), # core
sa.UniqueConstraint('blogger_id', 'post_id', name='hive_reblogs_ux1'), # core
sa.Index('hive_reblogs_post_id', 'post_id'),
sa.Index('hive_reblogs_block_num_idx', 'block_num'),
sa.Index('hive_reblogs_created_at_idx', 'created_at')
sa.Index('hive_reblogs_created_at_idx', 'created_at'),
)

sa.Table(
'hive_payments', metadata,
'hive_payments',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('tx_idx', SMALLINT, nullable=False),
@@ -266,7 +297,6 @@ def build_metadata():
sa.Column('to_account', sa.Integer, nullable=False),
sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),
sa.Column('token', VARCHAR(5), nullable=False),

sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),
sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),
sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),
@@ -276,21 +306,22 @@ def build_metadata():
)

sa.Table(
'hive_feed_cache', metadata,
'hive_feed_cache',
metadata,
sa.Column('post_id', sa.Integer, nullable=False),
sa.Column('account_id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('block_num', sa.Integer, nullable=False),
sa.PrimaryKeyConstraint('account_id', 'post_id', name='hive_feed_cache_pk'),
sa.ForeignKeyConstraint(['block_num'], ['hive_blocks.num'], name='hive_feed_cache_fk1'),

sa.Index('hive_feed_cache_block_num_idx', 'block_num'),
sa.Index('hive_feed_cache_created_at_idx', 'created_at'),
sa.Index('hive_feed_cache_post_id_idx', 'post_id')
sa.Index('hive_feed_cache_post_id_idx', 'post_id'),
)

sa.Table(
'hive_state', metadata,
'hive_state',
metadata,
sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('db_version', sa.Integer, nullable=False),
sa.Column('steem_per_mvest', sa.types.DECIMAL(14, 6), nullable=False),
@@ -300,127 +331,148 @@ def build_metadata():
)

sa.Table(
'hive_posts_api_helper', metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement = False),
sa.Column('author_s_permlink', VARCHAR(275, collation='C'), nullable=False), # concatenation of author '/' permlink
sa.Index('hive_posts_api_helper_author_s_permlink_idx', 'author_s_permlink')
'hive_posts_api_helper',
metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column(
'author_s_permlink', VARCHAR(275, collation='C'), nullable=False
), # concatenation of author '/' permlink
sa.Index('hive_posts_api_helper_author_s_permlink_idx', 'author_s_permlink'),
)

sa.Table(
'hive_mentions', metadata,
'hive_mentions',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('post_id', sa.Integer, nullable=False),
sa.Column('account_id', sa.Integer, nullable=False),
sa.Column('block_num', sa.Integer, nullable=False),

sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_mentions_fk1'),
sa.ForeignKeyConstraint(['account_id'], ['hive_accounts.id'], name='hive_mentions_fk2'),

sa.Index('hive_mentions_account_id_idx', 'account_id'),
sa.UniqueConstraint('post_id', 'account_id', 'block_num', name='hive_mentions_ux1')
sa.UniqueConstraint('post_id', 'account_id', 'block_num', name='hive_mentions_ux1'),
)

metadata = build_metadata_community(metadata)

return metadata


def build_metadata_community(metadata=None):
"""Build community schema defs"""
if not metadata:
metadata = sa.MetaData()

sa.Table(
'hive_communities', metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('type_id', SMALLINT, nullable=False),
sa.Column('lang', CHAR(2), nullable=False, server_default='en'),
sa.Column('name', VARCHAR(16, collation='C'), nullable=False),
sa.Column('title', sa.String(32), nullable=False, server_default=''),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('sum_pending', sa.Integer, nullable=False, server_default='0'),
sa.Column('num_pending', sa.Integer, nullable=False, server_default='0'),
sa.Column('num_authors', sa.Integer, nullable=False, server_default='0'),
sa.Column('rank', sa.Integer, nullable=False, server_default='0'),
sa.Column('subscribers', sa.Integer, nullable=False, server_default='0'),
sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),
sa.Column('about', sa.String(120), nullable=False, server_default=''),
sa.Column('primary_tag', sa.String(32), nullable=False, server_default=''),
sa.Column('category', sa.String(32), nullable=False, server_default=''),
sa.Column('avatar_url', sa.String(1024), nullable=False, server_default=''),
'hive_communities',
metadata,
sa.Column('id', sa.Integer, primary_key=True, autoincrement=False),
sa.Column('type_id', SMALLINT, nullable=False),
sa.Column('lang', CHAR(2), nullable=False, server_default='en'),
sa.Column('name', VARCHAR(16, collation='C'), nullable=False),
sa.Column('title', sa.String(32), nullable=False, server_default=''),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('sum_pending', sa.Integer, nullable=False, server_default='0'),
sa.Column('num_pending', sa.Integer, nullable=False, server_default='0'),
sa.Column('num_authors', sa.Integer, nullable=False, server_default='0'),
sa.Column('rank', sa.Integer, nullable=False, server_default='0'),
sa.Column('subscribers', sa.Integer, nullable=False, server_default='0'),
sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),
sa.Column('about', sa.String(120), nullable=False, server_default=''),
sa.Column('primary_tag', sa.String(32), nullable=False, server_default=''),
sa.Column('category', sa.String(32), nullable=False, server_default=''),
sa.Column('avatar_url', sa.String(1024), nullable=False, server_default=''),
sa.Column('description', sa.String(5000), nullable=False, server_default=''),
sa.Column('flag_text', sa.String(5000), nullable=False, server_default=''),
sa.Column('settings', TEXT, nullable=False, server_default='{}'),
sa.Column('block_num', sa.Integer, nullable=False ),

sa.Column('flag_text', sa.String(5000), nullable=False, server_default=''),
sa.Column('settings', TEXT, nullable=False, server_default='{}'),
sa.Column('block_num', sa.Integer, nullable=False),
sa.UniqueConstraint('name', name='hive_communities_ux1'),
sa.Index('hive_communities_ix1', 'rank', 'id'),
sa.Index('hive_communities_block_num_idx', 'block_num')
sa.Index('hive_communities_block_num_idx', 'block_num'),
)

sa.Table(
'hive_roles', metadata,
sa.Column('account_id', sa.Integer, nullable=False),
sa.Column('community_id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('role_id', SMALLINT, nullable=False, server_default='0'),
sa.Column('title', sa.String(140), nullable=False, server_default=''),

'hive_roles',
metadata,
sa.Column('account_id', sa.Integer, nullable=False),
sa.Column('community_id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('role_id', SMALLINT, nullable=False, server_default='0'),
sa.Column('title', sa.String(140), nullable=False, server_default=''),
sa.PrimaryKeyConstraint('account_id', 'community_id', name='hive_roles_pk'),
sa.Index('hive_roles_ix1', 'community_id', 'account_id', 'role_id'),
)

sa.Table(
'hive_subscriptions', metadata,
'hive_subscriptions',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('account_id', sa.Integer, nullable=False),
sa.Column('community_id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('block_num', sa.Integer, nullable=False ),

sa.Column('account_id', sa.Integer, nullable=False),
sa.Column('community_id', sa.Integer, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('block_num', sa.Integer, nullable=False),
sa.UniqueConstraint('account_id', 'community_id', name='hive_subscriptions_ux1'),
sa.Index('hive_subscriptions_community_idx', 'community_id'),
sa.Index('hive_subscriptions_block_num_idx', 'block_num')
sa.Index('hive_subscriptions_block_num_idx', 'block_num'),
)

sa.Table(
'hive_notifs', metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('type_id', SMALLINT, nullable=False),
sa.Column('score', SMALLINT, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('src_id', sa.Integer, nullable=True),
sa.Column('dst_id', sa.Integer, nullable=True),
sa.Column('post_id', sa.Integer, nullable=True),
sa.Column('community_id', sa.Integer, nullable=True),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('payload', sa.Text, nullable=True),

sa.Index('hive_notifs_ix1', 'dst_id', 'id', postgresql_where=sql_text("dst_id IS NOT NULL")),
sa.Index('hive_notifs_ix2', 'community_id', 'id', postgresql_where=sql_text("community_id IS NOT NULL")),
sa.Index('hive_notifs_ix3', 'community_id', 'type_id', 'id', postgresql_where=sql_text("community_id IS NOT NULL")),
sa.Index('hive_notifs_ix4', 'community_id', 'post_id', 'type_id', 'id', postgresql_where=sql_text("community_id IS NOT NULL AND post_id IS NOT NULL")),
sa.Index('hive_notifs_ix5', 'post_id', 'type_id', 'dst_id', 'src_id', postgresql_where=sql_text("post_id IS NOT NULL AND type_id IN (16,17)")), # filter: dedupe
sa.Index('hive_notifs_ix6', 'dst_id', 'created_at', 'score', 'id', postgresql_where=sql_text("dst_id IS NOT NULL")), # unread
'hive_notifs',
metadata,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('type_id', SMALLINT, nullable=False),
sa.Column('score', SMALLINT, nullable=False),
sa.Column('created_at', sa.DateTime, nullable=False),
sa.Column('src_id', sa.Integer, nullable=True),
sa.Column('dst_id', sa.Integer, nullable=True),
sa.Column('post_id', sa.Integer, nullable=True),
sa.Column('community_id', sa.Integer, nullable=True),
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('payload', sa.Text, nullable=True),
sa.Index('hive_notifs_ix1', 'dst_id', 'id', postgresql_where=sql_text("dst_id IS NOT NULL")),
sa.Index('hive_notifs_ix2', 'community_id', 'id', postgresql_where=sql_text("community_id IS NOT NULL")),
sa.Index(
'hive_notifs_ix3', 'community_id', 'type_id', 'id', postgresql_where=sql_text("community_id IS NOT NULL")
),
sa.Index(
'hive_notifs_ix4',
'community_id',
'post_id',
'type_id',
'id',
postgresql_where=sql_text("community_id IS NOT NULL AND post_id IS NOT NULL"),
),
sa.Index(
'hive_notifs_ix5',
'post_id',
'type_id',
'dst_id',
'src_id',
postgresql_where=sql_text("post_id IS NOT NULL AND type_id IN (16,17)"),
), # filter: dedupe
sa.Index(
'hive_notifs_ix6', 'dst_id', 'created_at', 'score', 'id', postgresql_where=sql_text("dst_id IS NOT NULL")
), # unread
)

sa.Table('hive_notification_cache', metadata,
sa.Table(
'hive_notification_cache',
metadata,
sa.Column('id', sa.BigInteger, primary_key=True),
sa.Column('block_num', sa.Integer, nullable = False),
sa.Column('type_id', sa.Integer, nullable = False),
sa.Column('dst', sa.Integer, nullable=True), # dst account id except persistent notifs from hive_notifs
sa.Column('src', sa.Integer, nullable=True), # src account id
sa.Column('dst_post_id', sa.Integer, nullable=True), # destination post id
sa.Column('block_num', sa.Integer, nullable=False),
sa.Column('type_id', sa.Integer, nullable=False),
sa.Column('dst', sa.Integer, nullable=True), # dst account id except persistent notifs from hive_notifs
sa.Column('src', sa.Integer, nullable=True), # src account id
sa.Column('dst_post_id', sa.Integer, nullable=True), # destination post id
sa.Column('post_id', sa.Integer, nullable=True),
sa.Column('created_at', sa.DateTime, nullable=False), # notification creation time
sa.Column('created_at', sa.DateTime, nullable=False), # notification creation time
sa.Column('score', sa.Integer, nullable=False),
sa.Column('community_title', sa.String(32), nullable=True),
sa.Column('community', sa.String(16), nullable=True),
sa.Column('payload', sa.String, nullable=True),

sa.Index('hive_notification_cache_block_num_idx', 'block_num'),
sa.Index('hive_notification_cache_dst_score_idx', 'dst', 'score', postgresql_where=sql_text("dst IS NOT NULL"))

sa.Index('hive_notification_cache_dst_score_idx', 'dst', 'score', postgresql_where=sql_text("dst IS NOT NULL")),
)

return metadata
@@ -430,6 +482,7 @@ def teardown(db):
"""Drop all tables"""
build_metadata().drop_all(db.engine())


def drop_fk(db):
db.query_no_return("START TRANSACTION")
for table in build_metadata().sorted_tables:
@@ -438,9 +491,11 @@ def drop_fk(db):
db.query_no_return(sql)
db.query_no_return("COMMIT")


def create_fk(db):
from sqlalchemy.schema import AddConstraint
from sqlalchemy import text

connection = db.get_new_connection('create_fk')
connection.execute(text("START TRANSACTION"))
for table in build_metadata().sorted_tables:
@@ -448,11 +503,12 @@ def create_fk(db):
connection.execute(AddConstraint(fk.constraint))
connection.execute(text("COMMIT"))


def setup(db):
"""Creates all tables and seed data"""

sql = """SELECT * FROM pg_extension WHERE extname='intarray'"""
assert db.query_row( sql ), "The database requires created 'intarray' extension"
assert db.query_row(sql), "The database requires created 'intarray' extension"

# initialize schema
build_metadata().create_all(db.engine())
@@ -467,17 +523,14 @@ def setup(db):
sqls = [
"INSERT INTO hive_state (block_num, db_version, steem_per_mvest, usd_per_steem, sbd_per_steem, dgpo) VALUES (0, 0, 0, 0, 0, '')",
"INSERT INTO hive_blocks (num, hash, created_at, completed) VALUES (0, '0000000000000000000000000000000000000000', '2016-03-24 16:04:57', true)",

"INSERT INTO hive_permlink_data (id, permlink) VALUES (0, '')",
"INSERT INTO hive_category_data (id, category) VALUES (0, '')",
"INSERT INTO hive_tag_data (id, tag) VALUES (0, '')",
"INSERT INTO hive_accounts (id, name, created_at) VALUES (0, '', '1970-01-01T00:00:00')",

"INSERT INTO hive_accounts (name, created_at) VALUES ('miners', '2016-03-24 16:05:00')",
"INSERT INTO hive_accounts (name, created_at) VALUES ('null', '2016-03-24 16:05:00')",
"INSERT INTO hive_accounts (name, created_at) VALUES ('temp', '2016-03-24 16:05:00')",
"INSERT INTO hive_accounts (name, created_at) VALUES ('initminer', '2016-03-24 16:05:00')",

"""
INSERT INTO
public.hive_posts(id, root_id, parent_id, author_id, permlink_id, category_id,
@@ -485,7 +538,8 @@ def setup(db):
)
VALUES
(0, 0, 0, 0, 0, 0, 0, now(), 0, 0, 0);
"""]
""",
]
for sql in sqls:
db.query(sql)

@@ -554,75 +608,76 @@ def setup(db):
# get_discussion definition moved to bridge_get_discussion.sql

sql_scripts = [
"utility_functions.sql",
"hive_accounts_view.sql",
"hive_accounts_info_view.sql",
"hive_posts_base_view.sql",
"hive_posts_view.sql",
"hive_votes_view.sql",
"hive_muted_accounts_view.sql",
"hive_muted_accounts_by_id_view.sql",
"hive_blacklisted_accounts_by_observer_view.sql",
"get_post_view_by_id.sql",
"hive_post_operations.sql",
"head_block_time.sql",
"update_feed_cache.sql",
"payout_stats_view.sql",
"update_hive_posts_mentions.sql",
"mutes.sql",
"bridge_get_ranked_post_type.sql",
"bridge_get_ranked_post_for_communities.sql",
"bridge_get_ranked_post_for_observer_communities.sql",
"bridge_get_ranked_post_for_tag.sql",
"bridge_get_ranked_post_for_all.sql",
"calculate_account_reputations.sql",
"update_communities_rank.sql",
"delete_hive_posts_mentions.sql",
"notifications_view.sql",
"notifications_api.sql",
"bridge_get_account_posts_by_comments.sql",
"bridge_get_account_posts_by_payout.sql",
"bridge_get_account_posts_by_posts.sql",
"bridge_get_account_posts_by_replies.sql",
"bridge_get_relationship_between_accounts.sql",
"bridge_get_post.sql",
"bridge_get_discussion.sql",
"condenser_api_post_type.sql",
"condenser_api_post_ex_type.sql",
"condenser_get_blog.sql",
"condenser_get_content.sql",
"condenser_tags.sql",
"condenser_follows.sql",
"hot_and_trends.sql",
"update_hive_posts_children_count.sql",
"update_hive_posts_api_helper.sql",
"database_api_list_comments.sql",
"database_api_list_votes.sql",
"update_posts_rshares.sql",
"update_hive_post_root_id.sql",
"condenser_get_by_account_comments.sql",
"condenser_get_by_blog_without_reblog.sql",
"bridge_get_by_feed_with_reblog.sql",
"condenser_get_by_blog.sql",
"bridge_get_account_posts_by_blog.sql",
"condenser_get_names_by_reblogged.sql",
"condenser_get_account_reputations.sql",
"bridge_get_community.sql",
"bridge_get_community_context.sql",
"bridge_list_all_subscriptions.sql",
"bridge_list_communities.sql",
"bridge_list_community_roles.sql",
"bridge_list_pop_communities.sql",
"bridge_list_subscribers.sql",
"update_follow_count.sql",
"delete_reblog_feed_cache.sql",
"follows.sql",
"is_superuser.sql",
"update_hive_blocks_consistency_flag.sql",
"update_table_statistics.sql",
"upgrade/update_db_patchlevel.sql" #Additionally execute db patchlevel import to mark (already done) upgrade changes and avoid its reevaluation during next upgrade.
"utility_functions.sql",
"hive_accounts_view.sql",
"hive_accounts_info_view.sql",
"hive_posts_base_view.sql",
"hive_posts_view.sql",
"hive_votes_view.sql",
"hive_muted_accounts_view.sql",
"hive_muted_accounts_by_id_view.sql",
"hive_blacklisted_accounts_by_observer_view.sql",
"get_post_view_by_id.sql",
"hive_post_operations.sql",
"head_block_time.sql",
"update_feed_cache.sql",
"payout_stats_view.sql",
"update_hive_posts_mentions.sql",
"mutes.sql",
"bridge_get_ranked_post_type.sql",
"bridge_get_ranked_post_for_communities.sql",
"bridge_get_ranked_post_for_observer_communities.sql",
"bridge_get_ranked_post_for_tag.sql",
"bridge_get_ranked_post_for_all.sql",
"calculate_account_reputations.sql",
"update_communities_rank.sql",
"delete_hive_posts_mentions.sql",
"notifications_view.sql",
"notifications_api.sql",
"bridge_get_account_posts_by_comments.sql",
"bridge_get_account_posts_by_payout.sql",
"bridge_get_account_posts_by_posts.sql",
"bridge_get_account_posts_by_replies.sql",
"bridge_get_relationship_between_accounts.sql",
"bridge_get_post.sql",
"bridge_get_discussion.sql",
"condenser_api_post_type.sql",
"condenser_api_post_ex_type.sql",
"condenser_get_blog.sql",
"condenser_get_content.sql",
"condenser_tags.sql",
"condenser_follows.sql",
"hot_and_trends.sql",
"update_hive_posts_children_count.sql",
"update_hive_posts_api_helper.sql",
"database_api_list_comments.sql",
"database_api_list_votes.sql",
"update_posts_rshares.sql",
"update_hive_post_root_id.sql",
"condenser_get_by_account_comments.sql",
"condenser_get_by_blog_without_reblog.sql",
"bridge_get_by_feed_with_reblog.sql",
"condenser_get_by_blog.sql",
"bridge_get_account_posts_by_blog.sql",
"condenser_get_names_by_reblogged.sql",
"condenser_get_account_reputations.sql",
"bridge_get_community.sql",
"bridge_get_community_context.sql",
"bridge_list_all_subscriptions.sql",
"bridge_list_communities.sql",
"bridge_list_community_roles.sql",
"bridge_list_pop_communities.sql",
"bridge_list_subscribers.sql",
"update_follow_count.sql",
"delete_reblog_feed_cache.sql",
"follows.sql",
"is_superuser.sql",
"update_hive_blocks_consistency_flag.sql",
"update_table_statistics.sql",
"upgrade/update_db_patchlevel.sql", # Additionally execute db patchlevel import to mark (already done) upgrade changes and avoid its reevaluation during next upgrade.
]
from os.path import dirname, realpath

dir_path = dirname(realpath(__file__))
for script in sql_scripts:
execute_sql_script(db.query_no_return, f"{dir_path}/sql_scripts/{script}")
@@ -636,8 +691,8 @@ def setup(db):
"""

from hive.version import GIT_REVISION
db.query_no_return(sql.format(GIT_REVISION))

db.query_no_return(sql.format(GIT_REVISION))


def reset_autovac(db):
@@ -646,14 +701,14 @@ def reset_autovac(db):
We use a scale factor of 0 and specify exact threshold tuple counts,
per-table, in the format (autovacuum_threshold, autoanalyze_threshold)."""

autovac_config = { # vacuum analyze
'hive_accounts': (50000, 100000),
'hive_posts': (2500, 10000),
'hive_follows': (5000, 5000),
'hive_feed_cache': (5000, 5000),
'hive_blocks': (5000, 25000),
'hive_reblogs': (5000, 5000),
'hive_payments': (5000, 5000),
autovac_config = { # vacuum analyze
'hive_accounts': (50000, 100000),
'hive_posts': (2500, 10000),
'hive_follows': (5000, 5000),
'hive_feed_cache': (5000, 5000),
'hive_blocks': (5000, 25000),
'hive_reblogs': (5000, 5000),
'hive_payments': (5000, 5000),
}

for table, (n_vacuum, n_analyze) in autovac_config.items():
@@ -667,17 +722,13 @@ def reset_autovac(db):
def set_fillfactor(db):
"""Initializes/resets FILLFACTOR for tables which are intesively updated"""

fillfactor_config = {
'hive_posts': 70,
'hive_post_data': 70,
'hive_votes': 70,
'hive_reputation_data': 50
}
fillfactor_config = {'hive_posts': 70, 'hive_post_data': 70, 'hive_votes': 70, 'hive_reputation_data': 50}

for table, fillfactor in fillfactor_config.items():
sql = """ALTER TABLE {} SET (FILLFACTOR = {})"""
db.query(sql.format(table, fillfactor))


def set_logged_table_attribute(db, logged):
"""Initializes/resets LOGGED/UNLOGGED attribute for tables which are intesively updated"""
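A note on the hunk above, from the editor rather than the commit: FILLFACTOR below 100 reserves free space in every heap page, so PostgreSQL can keep frequent UPDATEs on the same page instead of migrating rows. A minimal sketch of the statement `set_fillfactor()` emits, with the helper name invented here for illustration:

# Editorial sketch (not part of the commit): mirrors the SQL template shown
# in the diff above. FILLFACTOR = 70 leaves ~30% of each page free for updates.
def fillfactor_statement(table: str, fillfactor: int) -> str:
    """Build the ALTER TABLE statement used to tune a table's fillfactor."""
    return f"ALTER TABLE {table} SET (FILLFACTOR = {fillfactor})"

assert fillfactor_statement('hive_posts', 70) == "ALTER TABLE hive_posts SET (FILLFACTOR = 70)"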
@@ -687,7 +738,7 @@ def set_logged_table_attribute(db, logged):
'hive_posts',
'hive_post_data',
'hive_votes',
'hive_reputation_data'
'hive_reputation_data',
]

for table in logged_config:
@@ -695,18 +746,19 @@ def set_logged_table_attribute(db, logged):
sql = """ALTER TABLE {} SET {}"""
db.query_no_return(sql.format(table, 'LOGGED' if logged else 'UNLOGGED'))

def execute_sql_script(query_executor, path_to_script):
""" Load and execute sql script from file
Params:
query_executor - callable to execute query with
path_to_script - path to script
Returns:
depending on query_executor

Example:
print(execute_sql_script(db.query_row, "./test.sql"))
where test_sql: SELECT * FROM hive_state WHERE block_num = 0;
will return something like: (0, 18, Decimal('0.000000'), Decimal('0.000000'), Decimal('0.000000'), '')
def execute_sql_script(query_executor, path_to_script):
"""Load and execute sql script from file
Params:
query_executor - callable to execute query with
path_to_script - path to script
Returns:
depending on query_executor

Example:
print(execute_sql_script(db.query_row, "./test.sql"))
where test_sql: SELECT * FROM hive_state WHERE block_num = 0;
will return something like: (0, 18, Decimal('0.000000'), Decimal('0.000000'), Decimal('0.000000'), '')
"""
try:
sql_script = None

@@ -12,6 +12,7 @@ log = logging.getLogger(__name__)

DB = Db.instance()


class Accounts(DbAdapterHolder):
"""Manages account id map, dirty queue, and `hive_accounts` table."""
@@ -37,7 +38,7 @@ class Accounts(DbAdapterHolder):
raise RuntimeError("Fatal error")

key = update_operation['account']
( _posting_json_metadata, _json_metadata ) = get_profile_str( update_operation )
(_posting_json_metadata, _json_metadata) = get_profile_str(update_operation)

if key in cls._updates_data:
if allow_change_posting:
@@ -46,7 +47,11 @@ class Accounts(DbAdapterHolder):

cls._updates_data[key]['json_metadata'] = _json_metadata
else:
cls._updates_data[key] = { 'allow_change_posting' : allow_change_posting, 'posting_json_metadata' : _posting_json_metadata, 'json_metadata' : _json_metadata }
cls._updates_data[key] = {
'allow_change_posting': allow_change_posting,
'posting_json_metadata': _posting_json_metadata,
'json_metadata': _json_metadata,
}

@classmethod
def load_ids(cls):
@@ -64,11 +69,16 @@ class Accounts(DbAdapterHolder):
"""Return default notification score based on rank."""
_id = cls.get_id(name)
rank = cls._ranks[_id] if _id in cls._ranks else 1000000
if rank < 200: return 70 # 0.02% 100k
if rank < 1000: return 60 # 0.1% 10k
if rank < 6500: return 50 # 0.5% 1k
if rank < 25000: return 40 # 2.0% 100
if rank < 100000: return 30 # 8.0% 15
if rank < 200:
return 70 # 0.02% 100k
if rank < 1000:
return 60 # 0.1% 10k
if rank < 6500:
return 50 # 0.5% 1k
if rank < 25000:
return 40 # 2.0% 100
if rank < 100000:
return 30 # 8.0% 15
return 20

@classmethod
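For reference, the hunk above only reflows an existing rank-to-score ladder: lower account rank yields a higher default notification score, and the inline comments give each tier's approximate population share. A minimal sketch of the same mapping as a data-driven table, with names invented here for illustration:

# Editorial sketch (not part of the commit): the tiers mirror the thresholds
# shown in the diff above; anything ranked 100000 or worse gets the floor score.
_SCORE_TIERS = ((200, 70), (1000, 60), (6500, 50), (25000, 40), (100000, 30))

def default_score_for_rank(rank: int) -> int:
    """Return the default notification score for an account rank."""
    for threshold, score in _SCORE_TIERS:
        if rank < threshold:
            return score
    return 20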
@@ -84,7 +94,6 @@ class Accounts(DbAdapterHolder):
assert isinstance(name, str), "account name should be string"
return cls._ids.get(name, None)


@classmethod
def exists(cls, names):
"""Check if an account name exists."""
@@ -94,14 +103,14 @@ class Accounts(DbAdapterHolder):

@classmethod
def check_names(cls, names):
""" Check which names from name list does not exists in the database """
"""Check which names from name list does not exists in the database"""
assert isinstance(names, list), "Expecting list as argument"
return [name for name in names if name not in cls._ids]

@classmethod
def get_json_data(cls, source ):
def get_json_data(cls, source):
"""json-data preprocessing."""
return escape_characters( source )
return escape_characters(source)

@classmethod
def register(cls, name, op_details, block_date, block_num):
@@ -120,7 +129,7 @@ class Accounts(DbAdapterHolder):
if cls.exists(name):
return True

( _posting_json_metadata, _json_metadata ) = get_profile_str( op_details )
(_posting_json_metadata, _json_metadata) = get_profile_str(op_details)

sql = f"""
INSERT INTO hive_accounts (name, created_at, posting_json_metadata, json_metadata )
@@ -128,13 +137,14 @@ class Accounts(DbAdapterHolder):
RETURNING id
"""

new_id = DB.query_one( sql )
new_id = DB.query_one(sql)
if new_id is None:
return False
return False
cls._ids[name] = new_id

# post-insert: pass to communities to check for new registrations
from hive.indexer.community import Community

if block_num > Community.start_block:
Community.register(name, block_date, block_num)

@@ -142,7 +152,7 @@ class Accounts(DbAdapterHolder):

@classmethod
def flush(cls):
""" Flush json_metadatafrom cache to database """
"""Flush json_metadatafrom cache to database"""

cls.inside_flush = True
n = 0
@@ -182,7 +192,9 @@ class Accounts(DbAdapterHolder):
values_limit = 1000

for name, data in cls._updates_data.items():
values.append(f"({data['allow_change_posting']}, {cls.get_json_data(data['posting_json_metadata'])}, {cls.get_json_data(data['json_metadata'])}, '{name}')")
values.append(
f"({data['allow_change_posting']}, {cls.get_json_data(data['posting_json_metadata'])}, {cls.get_json_data(data['json_metadata'])}, '{name}')"
)

if len(values) >= values_limit:
values_str = ','.join(values)

@@ -1,7 +1,8 @@
class AutoDbDisposer(object):
"""Manages whole lifecycle of a database.
Object of this class should be created by `with` context.
Object of this class should be created by `with` context.
"""

def __init__(self, db, name):
self.db = db.clone(name)

@@ -10,4 +11,4 @@ class AutoDbDisposer(object):

def __exit__(self, exc_type, value, traceback):
if self.db is not None:
self.db.close()
self.db.close()

@@ -6,6 +6,7 @@ import queue

log = logging.getLogger(__name__)


class VirtualOperationType(Enum):
AuthorReward = 1
CommentReward = 2
@@ -13,7 +14,7 @@ class VirtualOperationType(Enum):
CommentPayoutUpdate = 4
IneffectiveDeleteComment = 5

def from_name( operation_name ):
def from_name(operation_name):
if operation_name == 'author_reward_operation':
return VirtualOperationType.AuthorReward
if operation_name == 'comment_reward_operation':
@@ -43,7 +44,7 @@ class OperationType(Enum):
Transfer = 12
CustomJson = 13

def from_name( operation_name ):
def from_name(operation_name):
if operation_name == 'pow_operation':
return OperationType.Pow
if operation_name == 'pow2_operation':
@@ -74,7 +75,6 @@ class OperationType(Enum):
return None



class Block(ABC):
"""Represents one block of the chain"""

@@ -110,6 +110,7 @@ class Block(ABC):
def get_next_transaction(self):
pass


class Operation(ABC):
@abstractmethod
def get_type(self):
@@ -119,6 +120,7 @@ class Operation(ABC):
def get_body(self):
pass


class Transaction(ABC):
@abstractmethod
def get_id(self):
@@ -128,10 +130,11 @@ class Transaction(ABC):
def get_next_operation(self):
pass

class BlockWrapper( Block ):
def __init__(self, wrapped_block ):

class BlockWrapper(Block):
def __init__(self, wrapped_block):
"""
wrapped_block - block which is wrapped
wrapped_block - block which is wrapped
"""
assert wrapped_block
self.wrapped_block = wrapped_block
@@ -160,11 +163,12 @@ class BlockWrapper( Block ):
def get_next_transaction(self):
return self.wrapped_block.get_next_transaction()


class BlocksProviderBase(ABC):
def __init__(self, breaker, exception_reporter):
"""
breaker - callable, returns true when sync can continue, false when break was requested
exception_reporter - callable, use to inform about undesire exception in a synchronizaton thread
breaker - callable, returns true when sync can continue, false when break was requested
exception_reporter - callable, use to inform about undesire exception in a synchronizaton thread
"""
assert breaker
assert exception_reporter
@@ -172,10 +176,10 @@ class BlocksProviderBase(ABC):
self._breaker = breaker
self._exception_reporter = exception_reporter

self._blocks_queue_size = 1500
self._blocks_data_queue_size = 1500
self._blocks_queue_size = 1500
self._blocks_data_queue_size = 1500

self._operations_queue_size = 1500
self._operations_queue_size = 1500

def report_exception():
self._exception_reporter()
@@ -190,15 +194,15 @@ class BlocksProviderBase(ABC):
"""Returns lists of blocks"""
pass

def _get_from_queue( self, data_queue, number_of_elements ):
def _get_from_queue(self, data_queue, number_of_elements):
"""Tool function to get elements from queue"""
ret = []
for element in range( number_of_elements ):
for element in range(number_of_elements):
if not self._breaker():
break
while self._breaker():
try:
ret.append( data_queue.get(True, 1) )
ret.append(data_queue.get(True, 1))
data_queue.task_done()
except queue.Empty:
continue

@@ -29,6 +29,7 @@ log = logging.getLogger(__name__)

DB = Db.instance()


def time_collector(f):
startTime = FSM.start()
result = f()
@@ -36,8 +37,10 @@ def time_collector(f):

return (result, elapsedTime)


class Blocks:
"""Processes blocks, dispatches work, manages `hive_blocks` table."""

blocks_to_flush = []
_head_block_date = None
_current_block_date = None
@@ -45,14 +48,14 @@ class Blocks:
_is_initial_sync = False

_concurrent_flush = [
('Posts', Posts.flush, Posts),
('PostDataCache', PostDataCache.flush, PostDataCache),
('Reputations', Reputations.flush, Reputations),
('Votes', Votes.flush, Votes),
('Follow', Follow.flush, Follow),
('Reblog', Reblog.flush, Reblog),
('Notify', Notify.flush, Notify),
('Accounts', Accounts.flush, Accounts)
('Posts', Posts.flush, Posts),
('PostDataCache', PostDataCache.flush, PostDataCache),
('Reputations', Reputations.flush, Reputations),
('Votes', Votes.flush, Votes),
('Follow', Follow.flush, Follow),
('Reblog', Reblog.flush, Reblog),
('Notify', Notify.flush, Notify),
('Accounts', Accounts.flush, Accounts),
]

def __init__(cls):
@@ -112,14 +115,20 @@ class Blocks:
# after HF17 all posts are paid after 7 days which means it is safe to assume that
# posts created at or before LIB - 7days will be paidout at the end of massive sync
cls._last_safe_cashout_block = lib - 7 * 24 * 1200
log.info( "End-of-sync LIB is set to %d, last block that guarantees cashout at end of sync is %d", lib, cls._last_safe_cashout_block )
log.info(
"End-of-sync LIB is set to %d, last block that guarantees cashout at end of sync is %d",
lib,
cls._last_safe_cashout_block,
)

@classmethod
def flush_data_in_n_threads(cls):
completedThreads = 0

pool = ThreadPoolExecutor(max_workers = len(cls._concurrent_flush))
flush_futures = {pool.submit(time_collector, f): (description, c) for (description, f, c) in cls._concurrent_flush}
pool = ThreadPoolExecutor(max_workers=len(cls._concurrent_flush))
flush_futures = {
pool.submit(time_collector, f): (description, c) for (description, f, c) in cls._concurrent_flush
}
for future in concurrent.futures.as_completed(flush_futures):
(description, c) = flush_futures[future]
completedThreads = completedThreads + 1
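A note on the arithmetic at the top of the hunk above, from the editor rather than the commit: `7 * 24 * 1200` counts the blocks produced in seven days at the chain's 3-second block interval (3600 / 3 = 1200 blocks per hour). A minimal sketch of the same computation, with names invented here for illustration:

# Editorial sketch (not part of the commit): reproduces the LIB - 7 days cutoff.
BLOCKS_PER_HOUR = 1200  # one block every 3 seconds
SEVEN_DAYS_IN_BLOCKS = 7 * 24 * BLOCKS_PER_HOUR  # 201600 blocks

def last_safe_cashout_block(lib: int) -> int:
    """Newest block whose posts are guaranteed paid out by the end of massive sync."""
    return lib - SEVEN_DAYS_IN_BLOCKS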
@@ -130,8 +139,8 @@ class Blocks:

FSM.flush_stat(description, elapsedTime, n)

# if n > 0:
# log.info('%r flush generated %d records' % (description, n))
# if n > 0:
# log.info('%r flush generated %d records' % (description, n))
except Exception as exc:
log.error(f'{description!r} generated an exception: {exc}')
raise exc
@@ -142,11 +151,11 @@ class Blocks:
@classmethod
def flush_data_in_1_thread(cls):
for description, f, c in cls._concurrent_flush:
try:
f()
except Exception as exc:
log.error(f'{description!r} generated an exception: {exc}')
raise exc
try:
f()
except Exception as exc:
log.error(f'{description!r} generated an exception: {exc}')
raise exc

@classmethod
def process_blocks(cls, blocks):
@@ -166,6 +175,7 @@ class Blocks:
# deltas in memory and update follow/er counts in bulk.

flush_time = FSM.start()

def register_time(f_time, name, pushed):
assert pushed is not None
FSM.flush_stat(name, FSM.stop(f_time), pushed)
@@ -190,23 +200,24 @@ class Blocks:
cls.flush_data_in_1_thread()
if first_block > -1:
log.info("[PROCESS MULTI] Tables updating in live synchronization")
cls.on_live_blocks_processed( first_block, last_num )
cls.on_live_blocks_processed(first_block, last_num)

DB.query("COMMIT")

if is_initial_sync:
log.info("[PROCESS MULTI] Flushing data in N threads")
cls.flush_data_in_n_threads()
log.info("[PROCESS MULTI] Flushing data in N threads")
cls.flush_data_in_n_threads()

log.info(f"[PROCESS MULTI] {len(blocks)} blocks in {OPSM.stop(time_start) :.4f}s")

@staticmethod
def prepare_vops(comment_payout_ops, block, date, block_num, is_safe_cashout):
def get_empty_ops():
return { VirtualOperationType.AuthorReward:None
, VirtualOperationType.CommentReward:None
, VirtualOperationType.EffectiveCommentVote:None
, VirtualOperationType.CommentPayoutUpdate:None
return {
VirtualOperationType.AuthorReward: None,
VirtualOperationType.CommentReward: None,
VirtualOperationType.EffectiveCommentVote: None,
VirtualOperationType.CommentPayoutUpdate: None,
}

ineffective_deleted_ops = {}
@@ -227,7 +238,7 @@ class Blocks:
if key not in comment_payout_ops:
comment_payout_ops[key] = get_empty_ops()

comment_payout_ops[key][op_type] = ( op_value, date )
comment_payout_ops[key][op_type] = (op_value, date)

elif op_type == VirtualOperationType.CommentReward:
if key not in comment_payout_ops:
@@ -235,7 +246,7 @@ class Blocks:

comment_payout_ops[key][VirtualOperationType.EffectiveCommentVote] = None

comment_payout_ops[key][op_type] = ( op_value, date )
comment_payout_ops[key][op_type] = (op_value, date)

elif op_type == VirtualOperationType.EffectiveCommentVote:
Reputations.process_vote(block_num, op_value)
@@ -245,7 +256,7 @@ class Blocks:
# (we don't touch reputation - yet - because it affects a lot of test patterns)
if block_num < 905693:
op_value["rshares"] *= 1000000
Votes.effective_comment_vote_op( op_value )
Votes.effective_comment_vote_op(op_value)

# skip effective votes for those posts that will become paidout before massive sync ends (both
# total_vote_weight and pending_payout carried by this vop become zero when post is paid) - note
@@ -254,13 +265,13 @@ class Blocks:
if key not in comment_payout_ops:
comment_payout_ops[key] = get_empty_ops()

comment_payout_ops[key][op_type] = ( op_value, date )
comment_payout_ops[key][op_type] = (op_value, date)

elif op_type == VirtualOperationType.CommentPayoutUpdate:
if key not in comment_payout_ops:
comment_payout_ops[key] = get_empty_ops()

comment_payout_ops[key][op_type] = ( op_value, date )
comment_payout_ops[key][op_type] = (op_value, date)

elif op_type == VirtualOperationType.IneffectiveDeleteComment:
ineffective_deleted_ops[key] = {}
@@ -269,11 +280,10 @@ class Blocks:

return ineffective_deleted_ops


@classmethod
def _process(cls, block):
"""Process a single block. Assumes a trx is open."""
#pylint: disable=too-many-branches
# pylint: disable=too-many-branches
assert issubclass(type(block), Block)
num = cls._push(block)
cls._current_block_date = block.get_date()
@@ -286,7 +296,9 @@ class Blocks:
if cls._head_block_date is None:
cls._head_block_date = cls._current_block_date

ineffective_deleted_ops = Blocks.prepare_vops(Posts.comment_payout_ops, block, cls._current_block_date, num, num <= cls._last_safe_cashout_block)
ineffective_deleted_ops = Blocks.prepare_vops(
Posts.comment_payout_ops, block, cls._current_block_date, num, num <= cls._last_safe_cashout_block
)

json_ops = []
for transaction in block.get_next_transaction():
@@ -325,14 +337,16 @@ class Blocks:
op_details = op
potentially_new_account = True

if potentially_new_account and not Accounts.register(account_name, op_details, cls._head_block_date, num):
if potentially_new_account and not Accounts.register(
account_name, op_details, cls._head_block_date, num
):
log.error(f"Failed to register account {account_name} from operation: {op}")

# account metadata updates
if op_type == OperationType.AccountUpdate:
Accounts.update_op( op, False )
Accounts.update_op(op, False)
elif op_type == OperationType.AccountUpdate2:
Accounts.update_op( op, True )
Accounts.update_op(op, True)

# post ops
elif op_type == OperationType.Comment:
@@ -349,7 +363,7 @@ class Blocks:
# misc ops
elif op_type == OperationType.Transfer:
Payments.op_transfer(op, transaction.get_id(), num, cls._head_block_date)
elif op_type == OperationType.CustomJson: # follow/reblog/community ops
elif op_type == OperationType.CustomJson: # follow/reblog/community ops
CustomOp.process_op(op, num, cls._head_block_date)

OPSM.op_stats(str(op_type), OPSM.stop(start))
@@ -373,19 +387,22 @@ class Blocks:
hive_block = cls._get(cursor)
steem_hash = steem.get_block(cursor)['block_id']
match = hive_block['hash'] == steem_hash
log.info("[INIT] fork check. block %d: %s vs %s --- %s",
hive_block['num'], hive_block['hash'],
steem_hash, 'ok' if match else 'invalid')
log.info(
"[INIT] fork check. block %d: %s vs %s --- %s",
hive_block['num'],
hive_block['hash'],
steem_hash,
'ok' if match else 'invalid',
)
if match:
break
to_pop.append(hive_block)
cursor -= 1

if hive_head == cursor:
return # no fork!
return # no fork!

log.error("[FORK] depth is %d; popping blocks %d - %d",
hive_head - cursor, cursor + 1, hive_head)
log.error("[FORK] depth is %d; popping blocks %d - %d", hive_head - cursor, cursor + 1, hive_head)

# we should not attempt to recover from fork until it's safe
fork_limit = steem.last_irreversible()
@@ -403,13 +420,16 @@ class Blocks:
@classmethod
def _push(cls, block):
"""Insert a row in `hive_blocks`."""
cls.blocks_to_flush.append({
'num': block.get_num(),
'hash': block.get_hash(),
'prev': block.get_previous_block_hash(),
'txs': block.get_number_of_transactions(),
'ops': block.get_number_of_operations(),
'date': block.get_date()})
cls.blocks_to_flush.append(
{
'num': block.get_num(),
'hash': block.get_hash(),
'prev': block.get_previous_block_hash(),
'txs': block.get_number_of_transactions(),
'ops': block.get_number_of_operations(),
'date': block.get_date(),
}
)
return block.get_num()

@classmethod
@@ -421,7 +441,9 @@ class Blocks:
"""
values = []
for block in cls.blocks_to_flush:
values.append(f"({block['num']}, '{block['hash']}', '{block['prev']}', {block['txs']}, {block['ops']}, '{block['date']}', {False})")
values.append(
f"({block['num']}, '{block['hash']}', '{block['prev']}', {block['txs']}, {block['ops']}, '{block['date']}', {False})"
)
query = query + ",".join(values)
DB.query_prepared(query)
values.clear()
@@ -488,9 +510,9 @@ class Blocks:

@classmethod
@time_it
def on_live_blocks_processed( cls, first_block, last_block ):
def on_live_blocks_processed(cls, first_block, last_block):
"""Is invoked when processing of block range is done and received
informations from hived are already stored in db
informations from hived are already stored in db
"""
is_hour_action = last_block % 1200 == 0

@@ -504,7 +526,7 @@ class Blocks:
f"SELECT update_notification_cache({first_block}, {last_block}, {is_hour_action})",
f"SELECT update_follow_count({first_block}, {last_block})",
f"SELECT update_account_reputations({first_block}, {last_block}, False)",
f"SELECT update_hive_blocks_consistency_flag({first_block}, {last_block})"
f"SELECT update_hive_blocks_consistency_flag({first_block}, {last_block})",
]

for query in queries:
@@ -515,10 +537,8 @@ class Blocks:
@classmethod
def is_consistency(cls):
"""Check if all tuples in `hive_blocks` are written correctly.
If any record has `completed` == false, it indicates that the database was closed incorrectly or a rollback failed.
If any record has `completed` == false, it indicates that the database was closed incorrectly or a rollback failed.
"""
not_completed_blocks = DB.query_one("SELECT count(*) FROM hive_blocks WHERE completed = false LIMIT 1")
log.info("[INIT] Number of not completed blocks: %s.", not_completed_blocks)
return not_completed_blocks == 0

@@ -1,6 +1,6 @@
"""[WIP] Process community ops."""

#pylint: disable=too-many-lines
# pylint: disable=too-many-lines

import logging
import re
@@ -16,8 +16,10 @@ log = logging.getLogger(__name__)

DB = Db.instance()


class Role(IntEnum):
"""Labels for `role_id` field."""

muted = -2
guest = 0
member = 2
@ -25,20 +27,24 @@ class Role(IntEnum):
|
|||
admin = 6
|
||||
owner = 8
|
||||
|
||||
|
||||
TYPE_TOPIC = 1
|
||||
TYPE_JOURNAL = 2
|
||||
TYPE_COUNCIL = 3
|
||||
|
||||
# https://en.wikipedia.org/wiki/ISO_639-1
|
||||
LANGS = ("ab,aa,af,ak,sq,am,ar,an,hy,as,av,ae,ay,az,bm,ba,eu,be,bn,bh,bi,"
|
||||
"bs,br,bg,my,ca,ch,ce,ny,zh,cv,kw,co,cr,hr,cs,da,dv,nl,dz,en,eo,"
|
||||
"et,ee,fo,fj,fi,fr,ff,gl,ka,de,el,gn,gu,ht,ha,he,hz,hi,ho,hu,ia,"
|
||||
"id,ie,ga,ig,ik,io,is,it,iu,ja,jv,kl,kn,kr,ks,kk,km,ki,rw,ky,kv,"
|
||||
"kg,ko,ku,kj,la,lb,lg,li,ln,lo,lt,lu,lv,gv,mk,mg,ms,ml,mt,mi,mr,"
|
||||
"mh,mn,na,nv,nd,ne,ng,nb,nn,no,ii,nr,oc,oj,cu,om,or,os,pa,pi,fa,"
|
||||
"pl,ps,pt,qu,rm,rn,ro,ru,sa,sc,sd,se,sm,sg,sr,gd,sn,si,sk,sl,so,"
|
||||
"st,es,su,sw,ss,sv,ta,te,tg,th,ti,bo,tk,tl,tn,to,tr,ts,tt,tw,ty,"
|
||||
"ug,uk,ur,uz,ve,vi,vo,wa,cy,wo,fy,xh,yi,yo,za").split(',')
|
||||
LANGS = (
|
||||
"ab,aa,af,ak,sq,am,ar,an,hy,as,av,ae,ay,az,bm,ba,eu,be,bn,bh,bi,"
|
||||
"bs,br,bg,my,ca,ch,ce,ny,zh,cv,kw,co,cr,hr,cs,da,dv,nl,dz,en,eo,"
|
||||
"et,ee,fo,fj,fi,fr,ff,gl,ka,de,el,gn,gu,ht,ha,he,hz,hi,ho,hu,ia,"
|
||||
"id,ie,ga,ig,ik,io,is,it,iu,ja,jv,kl,kn,kr,ks,kk,km,ki,rw,ky,kv,"
|
||||
"kg,ko,ku,kj,la,lb,lg,li,ln,lo,lt,lu,lv,gv,mk,mg,ms,ml,mt,mi,mr,"
|
||||
"mh,mn,na,nv,nd,ne,ng,nb,nn,no,ii,nr,oc,oj,cu,om,or,os,pa,pi,fa,"
|
||||
"pl,ps,pt,qu,rm,rn,ro,ru,sa,sc,sd,se,sm,sg,sr,gd,sn,si,sk,sl,so,"
|
||||
"st,es,su,sw,ss,sv,ta,te,tg,th,ti,bo,tk,tl,tn,to,tr,ts,tt,tw,ty,"
|
||||
"ug,uk,ur,uz,ve,vi,vo,wa,cy,wo,fy,xh,yi,yo,za"
|
||||
).split(',')
|
||||
|
||||
|
||||
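One detail worth keeping in mind when reading the reformatted `LANGS` constant: the fragments are adjacent string literals, which Python concatenates at compile time, so the parenthesized block is still a single comma-separated string before `.split(',')`. A minimal sketch:

    codes = (
        "ab,aa,"
        "af"
    ).split(',')
    assert codes == ['ab', 'aa', 'af']  # implicit concatenation happens before split
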
def _valid_url_proto(url):
    assert url

@ -46,6 +52,7 @@ def _valid_url_proto(url):
    assert len(url) < 1024, 'url must be shorter than 1024 characters'
    return url[0:7] == 'http://' or url[0:8] == 'https://'


def assert_keys_match(keys, expected, allow_missing=True):
    """Compare a set of input keys to expected keys."""
    if not allow_missing:

@ -54,10 +61,12 @@ def assert_keys_match(keys, expected, allow_missing=True):
    extra = keys - expected
    assert not extra, f'extraneous keys: {extra}'


def process_json_community_op(actor, op_json, date, block_num):
    """Validates a community op and applies state changes to the db."""
    CommunityOp.process_if_valid(actor, op_json, date, block_num)


def read_key_bool(op, key):
    """Reads a key from dict, ensuring valid bool if present."""
    if key in op:

@ -65,6 +74,7 @@ def read_key_bool(op, key):
        return op[key]
    return None


def read_key_str(op, key, maxlen=None, fmt=None, allow_blank=False):
    """Reads a key from a dict, ensuring non-blank str if present."""
    if key not in op:

@ -83,6 +93,7 @@ def read_key_str(op, key, maxlen=None, fmt=None, allow_blank=False):

    return op[key]


def read_key_dict(obj, key):
    """Given a dict, read `key`, ensuring result is a dict."""
    assert key in obj, f'key `{key}` not found'

@ -110,7 +121,7 @@ class Community:
        This method checks for any valid community names and inserts them.
        """

        #if not re.match(r'^hive-[123]\d{4,6}$', name):
        # if not re.match(r'^hive-[123]\d{4,6}$', name):
        if not re.match(r'^hive-[1]\d{4,6}$', name):
            return
        type_id = int(name[5])

@ -124,8 +135,7 @@ class Community:
        # insert owner
        sql = """INSERT INTO hive_roles (community_id, account_id, role_id, created_at)
                 VALUES (:community_id, :account_id, :role_id, :date)"""
        DB.query(sql, community_id=_id, account_id=_id,
                 role_id=Role.owner.value, date=block_date)
        DB.query(sql, community_id=_id, account_id=_id, role_id=Role.owner.value, date=block_date)

    @classmethod
    def validated_id(cls, name):

@ -140,7 +150,7 @@ class Community:

    @classmethod
    def validated_name(cls, name):
        if (check_community(name)):
        if check_community(name):
            return name
        return None


@ -171,26 +181,33 @@ class Community:
    @classmethod
    def get_all_muted(cls, community_id):
        """Return a list of all muted accounts."""
        return DB.query_col("""SELECT name FROM hive_accounts
        return DB.query_col(
            """SELECT name FROM hive_accounts
                WHERE id IN (SELECT account_id FROM hive_roles
                              WHERE community_id = :community_id
                                AND role_id < 0)""",
            community_id=community_id)
            community_id=community_id,
        )

    @classmethod
    def get_user_role(cls, community_id, account_id):
        """Get user role within a specific community."""

        return DB.query_one("""SELECT role_id FROM hive_roles
        return (
            DB.query_one(
                """SELECT role_id FROM hive_roles
                    WHERE community_id = :community_id
                      AND account_id = :account_id
                    LIMIT 1""",
            community_id=community_id,
            account_id=account_id) or Role.guest.value
                community_id=community_id,
                account_id=account_id,
            )
            or Role.guest.value
        )
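In the reformatted `get_user_role`, Black wraps the whole `query_one(...) or Role.guest.value` expression in parentheses rather than splitting it. The `or` fallback works because a missing row comes back as None; a condensed sketch of the idiom (names illustrative):

    def role_or_guest(row_value):
        # None (no matching row) falls through to the default; a stored 0 would too,
        # which is benign here since 0 is already Role.guest.value.
        return row_value or 0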

    @classmethod
    def is_post_valid(cls, community_id, comment_op: dict):
        """ Given a new post/comment, check if valid as per community rules
        """Given a new post/comment, check if valid as per community rules

        For a comment to be valid, these conditions apply:
        - Author is not muted in this community

@ -213,23 +230,25 @@ class Community:
            return role >= Role.member
        elif type_id == TYPE_COUNCIL:
            return role >= Role.member
        return role >= Role.guest # or at least not muted
        return role >= Role.guest  # or at least not muted


class CommunityOp:
    """Handles validating and processing of community custom_json ops."""
    #pylint: disable=too-many-instance-attributes

    # pylint: disable=too-many-instance-attributes

    SCHEMA = {
        'updateProps':    ['community', 'props'],
        'setRole':        ['community', 'account', 'role'],
        'setUserTitle':   ['community', 'account', 'title'],
        'mutePost':       ['community', 'account', 'permlink', 'notes'],
        'unmutePost':     ['community', 'account', 'permlink', 'notes'],
        'pinPost':        ['community', 'account', 'permlink'],
        'unpinPost':      ['community', 'account', 'permlink'],
        'flagPost':       ['community', 'account', 'permlink', 'notes'],
        'subscribe':      ['community'],
        'unsubscribe':    ['community'],
        'updateProps': ['community', 'props'],
        'setRole': ['community', 'account', 'role'],
        'setUserTitle': ['community', 'account', 'title'],
        'mutePost': ['community', 'account', 'permlink', 'notes'],
        'unmutePost': ['community', 'account', 'permlink', 'notes'],
        'pinPost': ['community', 'account', 'permlink'],
        'unpinPost': ['community', 'account', 'permlink'],
        'flagPost': ['community', 'account', 'permlink', 'notes'],
        'subscribe': ['community'],
        'unsubscribe': ['community'],
    }

    def __init__(self, actor, date, block_num):

@ -290,8 +309,7 @@ class CommunityOp:
        except AssertionError as e:
            payload = str(e)
            log.info("validation failed with message: '%s'", payload)
            Notify(block_num=self.block_num, type_id='error', dst_id=self.actor_id,
                   when=self.date, payload=payload)
            Notify(block_num=self.block_num, type_id='error', dst_id=self.actor_id, when=self.date, payload=payload)

        return self.valid


@ -312,65 +330,94 @@ class CommunityOp:
            role_id=self.role_id,
            notes=self.notes,
            title=self.title,
            block_num=self.block_num
            block_num=self.block_num,
        )

        # Community-level commands
        if action == 'updateProps':
            bind = ', '.join([k+" = :"+k for k in list(self.props.keys())])
            DB.query(f"UPDATE hive_communities SET {bind} WHERE id = :id",
                     id=self.community_id, **self.props)
            bind = ', '.join([k + " = :" + k for k in list(self.props.keys())])
            DB.query(f"UPDATE hive_communities SET {bind} WHERE id = :id", id=self.community_id, **self.props)
            self._notify('set_props', payload=json.dumps(read_key_dict(self.op, 'props')))

        elif action == 'subscribe':
            DB.query("""INSERT INTO hive_subscriptions
            DB.query(
                """INSERT INTO hive_subscriptions
                       (account_id, community_id, created_at, block_num)
                VALUES (:actor_id, :community_id, :date, :block_num)""", **params)
            DB.query("""UPDATE hive_communities
                VALUES (:actor_id, :community_id, :date, :block_num)""",
                **params,
            )
            DB.query(
                """UPDATE hive_communities
                   SET subscribers = subscribers + 1
                 WHERE id = :community_id""", **params)
                 WHERE id = :community_id""",
                **params,
            )
        elif action == 'unsubscribe':
            DB.query("""DELETE FROM hive_subscriptions
            DB.query(
                """DELETE FROM hive_subscriptions
                 WHERE account_id = :actor_id
                   AND community_id = :community_id""", **params)
            DB.query("""UPDATE hive_communities
                   AND community_id = :community_id""",
                **params,
            )
            DB.query(
                """UPDATE hive_communities
                   SET subscribers = subscribers - 1
                 WHERE id = :community_id""", **params)
                 WHERE id = :community_id""",
                **params,
            )

        # Account-level actions
        elif action == 'setRole':
            DB.query("""INSERT INTO hive_roles
            DB.query(
                """INSERT INTO hive_roles
                       (account_id, community_id, role_id, created_at)
                VALUES (:account_id, :community_id, :role_id, :date)
                    ON CONFLICT (account_id, community_id)
                    DO UPDATE SET role_id = :role_id """, **params)
                    DO UPDATE SET role_id = :role_id """,
                **params,
            )
            self._notify('set_role', payload=Role(self.role_id).name)
        elif action == 'setUserTitle':
            DB.query("""INSERT INTO hive_roles
            DB.query(
                """INSERT INTO hive_roles
                       (account_id, community_id, title, created_at)
                VALUES (:account_id, :community_id, :title, :date)
                    ON CONFLICT (account_id, community_id)
                    DO UPDATE SET title = :title""", **params)
                    DO UPDATE SET title = :title""",
                **params,
            )
            self._notify('set_label', payload=self.title)

        # Post-level actions
        elif action == 'mutePost':
            DB.query("""UPDATE hive_posts SET is_muted = '1'
                 WHERE id = :post_id""", **params)
            DB.query(
                """UPDATE hive_posts SET is_muted = '1'
                 WHERE id = :post_id""",
                **params,
            )
            self._notify('mute_post', payload=self.notes)

        elif action == 'unmutePost':
            DB.query("""UPDATE hive_posts SET is_muted = '0'
                 WHERE id = :post_id""", **params)
            DB.query(
                """UPDATE hive_posts SET is_muted = '0'
                 WHERE id = :post_id""",
                **params,
            )
            self._notify('unmute_post', payload=self.notes)

        elif action == 'pinPost':
            DB.query("""UPDATE hive_posts SET is_pinned = '1'
                 WHERE id = :post_id""", **params)
            DB.query(
                """UPDATE hive_posts SET is_pinned = '1'
                 WHERE id = :post_id""",
                **params,
            )
            self._notify('pin_post', payload=self.notes)
        elif action == 'unpinPost':
            DB.query("""UPDATE hive_posts SET is_pinned = '0'
                 WHERE id = :post_id""", **params)
            DB.query(
                """UPDATE hive_posts SET is_pinned = '0'
                 WHERE id = :post_id""",
                **params,
            )
            self._notify('unpin_post', payload=self.notes)
        elif action == 'flagPost':
            self._notify('flag_post', payload=self.notes)

@ -386,10 +433,17 @@ class CommunityOp:
        if not self._subscribed(self.account_id):
            score = 15

        Notify(block_num=self.block_num, type_id=op, src_id=self.actor_id, dst_id=dst_id,
               post_id=self.post_id, when=self.date,
               community_id=self.community_id,
               score=score, **kwargs)
        Notify(
            block_num=self.block_num,
            type_id=op,
            src_id=self.actor_id,
            dst_id=dst_id,
            post_id=self.post_id,
            when=self.date,
            community_id=self.community_id,
            score=score,
            **kwargs,
        )

    def _validate_raw_op(self, raw_op):
        assert isinstance(raw_op, list), 'op json must be list'

@ -403,13 +457,20 @@ class CommunityOp:
        """Validate structure; read and validate keys."""
        schema = self.SCHEMA[self.action]
        assert_keys_match(self.op.keys(), schema, allow_missing=False)
        if 'community' in schema: self._read_community()
        if 'account' in schema: self._read_account()
        if 'permlink' in schema: self._read_permlink()
        if 'role' in schema: self._read_role()
        if 'notes' in schema: self._read_notes()
        if 'title' in schema: self._read_title()
        if 'props' in schema: self._read_props()
        if 'community' in schema:
            self._read_community()
        if 'account' in schema:
            self._read_account()
        if 'permlink' in schema:
            self._read_permlink()
        if 'role' in schema:
            self._read_role()
        if 'notes' in schema:
            self._read_notes()
        if 'title' in schema:
            self._read_title()
        if 'props' in schema:
            self._read_props()
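Black expands each one-line `if cond: stmt` into a two-line suite, which accounts for most of the growth in this hunk. For what it is worth, the same reader dispatch could be written table-driven; a sketch, illustrative only and not part of the commit:

    def _read_fields(self, schema):
        # Dispatch the same _read_* helpers from a tuple instead of an if-chain.
        for field in ('community', 'account', 'permlink', 'role', 'notes', 'title', 'props'):
            if field in schema:
                getattr(self, f'_read_{field}')()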
    def _read_community(self):
        _name = read_key_str(self.op, 'community', 16)

@ -470,8 +531,7 @@ class CommunityOp:
    def _read_props(self):
        # TODO: assert props changed?
        props = read_key_dict(self.op, 'props')
        valid = ['title', 'about', 'lang', 'is_nsfw',
                 'description', 'flag_text', 'settings']
        valid = ['title', 'about', 'lang', 'is_nsfw', 'description', 'flag_text', 'settings']
        assert_keys_match(props.keys(), valid, allow_missing=True)

        out = {}

@ -499,7 +559,6 @@ class CommunityOp:
        assert out, 'props were blank'
        self.props = out


    def _validate_permissions(self):
        community_id = self.community_id
        action = self.action

@ -544,8 +603,7 @@ class CommunityOp:
        sql = """SELECT 1 FROM hive_subscriptions
                  WHERE community_id = :community_id
                    AND account_id = :account_id"""
        return bool(DB.query_one(
            sql, community_id=self.community_id, account_id=account_id))
        return bool(DB.query_one(sql, community_id=self.community_id, account_id=account_id))

    def _muted(self):
        """Check post's muted status."""

@ -566,13 +624,18 @@ class CommunityOp:
    def _flagged(self):
        """Check user's flag status."""
        from hive.indexer.notify import NotifyType

        sql = """SELECT 1 FROM hive_notifs
                  WHERE community_id = :community_id
                    AND post_id = :post_id
                    AND type_id = :type_id
                    AND src_id = :src_id"""
        return bool(DB.query_one(sql,
                                 community_id=self.community_id,
                                 post_id=self.post_id,
                                 type_id=NotifyType['flag_post'],
                                 src_id=self.actor_id))
        return bool(
            DB.query_one(
                sql,
                community_id=self.community_id,
                post_id=self.post_id,
                type_id=NotifyType['flag_post'],
                src_id=self.actor_id,
            )
        )


@ -19,6 +19,7 @@ DB = Db.instance()

log = logging.getLogger(__name__)


def _get_auth(op):
    """get account name submitting a custom_json op.


@ -33,16 +34,17 @@ def _get_auth(op):
        return None
    return op['required_posting_auths'][0]


class CustomOp:
    """Processes custom ops and dispatches updates."""

    @classmethod
    def process_op(cls, op, block_num, block_date):
        opName = str(op['id']) + ( '-ignored' if op['id'] not in ['follow', 'community', 'notify', 'reblog'] else '' )
        opName = str(op['id']) + ('-ignored' if op['id'] not in ['follow', 'community', 'notify', 'reblog'] else '')

        account = _get_auth(op)
        if not account:
            return
            return

        op_json = load_json_key(op, 'json')
        if op['id'] == 'follow':

@ -77,7 +79,11 @@ class CustomOp:
        else:
            date = valid_date(explicit_date)
            if date > block_date:
                log.warning("setLastRead::date: `%s' exceeds head block time. Correcting to head block time: `%s'", date, block_date)
                log.warning(
                    "setLastRead::date: `%s' exceeds head block time. Correcting to head block time: `%s'",
                    date,
                    block_date,
                )
                date = block_date

        Notify.set_lastread(account, date)
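
The guard in this hunk clamps a client-supplied timestamp so it never runs ahead of the head block time; aside from the warning log, it is equivalent to a one-line clamp. A minimal sketch, assuming both values are comparable datetimes:

    date = min(date, block_date)  # never let a user-supplied date pass the head block time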

@ -1,6 +1,8 @@
import logging

log = logging.getLogger(__name__)


class DbLiveContextHolder(object):
    _live_context = False


@ -12,6 +14,7 @@ class DbLiveContextHolder(object):
    def is_live_context(cls):
        return cls._live_context


class DbAdapterHolder(object):
    db = None

@ -12,26 +12,28 @@ from hive.utils.normalize import escape_characters

log = logging.getLogger(__name__)


class Action(enum.IntEnum):
    Nothing = 0 # cancel existing Blog/Ignore
    Blog = 1 # follow
    Ignore = 2 # mute
    Nothing = 0  # cancel existing Blog/Ignore
    Blog = 1  # follow
    Ignore = 2  # mute
    Blacklist = 3
    Follow_blacklist = 4
    Unblacklist = 5 # cancel existing Blacklist
    Unfollow_blacklist = 6 # cancel existing Follow_blacklist
    Unblacklist = 5  # cancel existing Blacklist
    Unfollow_blacklist = 6  # cancel existing Follow_blacklist
    Follow_muted = 7
    Unfollow_muted = 8 # cancel existing Follow_muted
    Reset_blacklist = 9 # cancel all existing records of Blacklist type
    Reset_following_list = 10 # cancel all existing records of Blog type
    Reset_muted_list = 11 # cancel all existing records of Ignore type
    Reset_follow_blacklist = 12 # cancel all existing records of Follow_blacklist type
    Reset_follow_muted_list = 13 # cancel all existing records of Follow_muted type
    Reset_all_lists = 14 # cancel all existing records of ??? types
    Unfollow_muted = 8  # cancel existing Follow_muted
    Reset_blacklist = 9  # cancel all existing records of Blacklist type
    Reset_following_list = 10  # cancel all existing records of Blog type
    Reset_muted_list = 11  # cancel all existing records of Ignore type
    Reset_follow_blacklist = 12  # cancel all existing records of Follow_blacklist type
    Reset_follow_muted_list = 13  # cancel all existing records of Follow_muted type
    Reset_all_lists = 14  # cancel all existing records of ??? types


class Follow(DbAdapterHolder):
    """Handles processing of incoming follow ups and flushing to db."""

    follow_items_to_flush = dict()
    list_resets_to_flush = []


@ -42,28 +44,33 @@ class Follow(DbAdapterHolder):
        data['idx'] = cls.idx
        data['blacklisted'] = False
        data['block_num'] = op['block_num']

    @classmethod
    def _reset_following_list(cls, data, op):
        if data['state'] == 1:
            data['idx'] = cls.idx
            data['state'] = 0
            data['block_num'] = op['block_num']

    @classmethod
    def _reset_muted_list(cls, data, op):
        if data['state'] == 2:
            data['idx'] = cls.idx
            data['state'] = 0
            data['block_num'] = op['block_num']

    @classmethod
    def _reset_follow_blacklist(cls, data, op):
        data['idx'] = cls.idx
        data['follow_blacklists'] = False
        data['block_num'] = op['block_num']

    @classmethod
    def _reset_follow_muted_list(cls, data, op):
        data['idx'] = cls.idx
        data['follow_muted'] = False
        data['block_num'] = op['block_num']

    @classmethod
    def _reset_all_lists(cls, data, op):
        data['idx'] = cls.idx

@ -74,8 +81,17 @@ class Follow(DbAdapterHolder):
        data['block_num'] = op['block_num']

    @classmethod
    def _follow_single(cls, follower, following, at, block_num,
                       new_state=None, new_blacklisted=None, new_follow_blacklists=None, new_follow_muted=None):
    def _follow_single(
        cls,
        follower,
        following,
        at,
        block_num,
        new_state=None,
        new_blacklisted=None,
        new_follow_blacklists=None,
        new_follow_muted=None,
    ):
        # add or update single record in flush cache
        k = f'{follower}/{following}'
        if k not in cls.follow_items_to_flush:

@ -89,7 +105,7 @@ class Follow(DbAdapterHolder):
                follow_blacklists=new_follow_blacklists if new_follow_blacklists is not None else 'NULL',
                follow_muted=new_follow_muted if new_follow_muted is not None else 'NULL',
                at=at,
                block_num=block_num
                block_num=block_num,
            )
        else:
            # follow item already in cache - just overwrite previous value where applicable

@ -131,24 +147,36 @@ class Follow(DbAdapterHolder):
        add_null_muted = False
        if state == Action.Reset_blacklist:
            reset_list = Follow._reset_blacklist
            cls.list_resets_to_flush.append(dict(follower=follower, reset_call='follow_reset_blacklist', block_num=block_num))
            cls.list_resets_to_flush.append(
                dict(follower=follower, reset_call='follow_reset_blacklist', block_num=block_num)
            )
        elif state == Action.Reset_following_list:
            reset_list = Follow._reset_following_list
            cls.list_resets_to_flush.append(dict(follower=follower, reset_call='follow_reset_following_list', block_num=block_num))
            cls.list_resets_to_flush.append(
                dict(follower=follower, reset_call='follow_reset_following_list', block_num=block_num)
            )
        elif state == Action.Reset_muted_list:
            reset_list = Follow._reset_muted_list
            cls.list_resets_to_flush.append(dict(follower=follower, reset_call='follow_reset_muted_list', block_num=block_num))
            cls.list_resets_to_flush.append(
                dict(follower=follower, reset_call='follow_reset_muted_list', block_num=block_num)
            )
        elif state == Action.Reset_follow_blacklist:
            reset_list = Follow._reset_follow_blacklist
            cls.list_resets_to_flush.append(dict(follower=follower, reset_call='follow_reset_follow_blacklist', block_num=block_num))
            cls.list_resets_to_flush.append(
                dict(follower=follower, reset_call='follow_reset_follow_blacklist', block_num=block_num)
            )
            add_null_blacklist = True
        elif state == Action.Reset_follow_muted_list:
            reset_list = Follow._reset_follow_muted_list
            cls.list_resets_to_flush.append(dict(follower=follower, reset_call='follow_reset_follow_muted_list', block_num=block_num))
            cls.list_resets_to_flush.append(
                dict(follower=follower, reset_call='follow_reset_follow_muted_list', block_num=block_num)
            )
            add_null_muted = True
        elif state == Action.Reset_all_lists:
            reset_list = Follow._reset_all_lists
            cls.list_resets_to_flush.append(dict(follower=follower, reset_call='follow_reset_all_lists', block_num=block_num))
            cls.list_resets_to_flush.append(
                dict(follower=follower, reset_call='follow_reset_all_lists', block_num=block_num)
            )
            add_null_blacklist = True
            add_null_muted = True
        else:

@ -163,25 +191,39 @@ class Follow(DbAdapterHolder):
            # since 'null' account can't have its blacklist/mute list, following such list is only used
            # as an indicator for frontend to no longer bother user with proposition of following predefined
            # lists (since that user is already choosing his own lists)
            cls._follow_single(follower, escape_characters('null'), op['at'], op['block_num'], None, None, add_null_blacklist, add_null_muted)
            cls._follow_single(
                follower,
                escape_characters('null'),
                op['at'],
                op['block_num'],
                None,
                None,
                add_null_blacklist,
                add_null_muted,
            )
        else:
            # set new state/flags to be applied to each pair with changing 'following'
            new_state = state if state in (Action.Nothing, Action.Blog, Action.Ignore) else None
            new_blacklisted = true_false_none(state, Action.Blacklist, Action.Unblacklist)
            new_follow_blacklists = true_false_none(state, Action.Follow_blacklist, Action.Unfollow_blacklist)
            new_follow_muted = true_false_none(state, Action.Follow_muted, Action.Unfollow_muted)


            for following in op['following']:
                cls._follow_single(follower, following, op['at'], block_num,
                                   new_state, new_blacklisted, new_follow_blacklists, new_follow_muted)
                cls._follow_single(
                    follower,
                    following,
                    op['at'],
                    block_num,
                    new_state,
                    new_blacklisted,
                    new_follow_blacklists,
                    new_follow_muted,
                )

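`true_false_none` itself is not shown in this diff; a plausible definition consistent with its call sites above, offered as an assumption rather than the repo's actual code:

    def true_false_none(state, set_action, clear_action):
        # True when the op sets the flag, False when it clears it,
        # None when the flag should be left untouched.
        if state == set_action:
            return True
        if state == clear_action:
            return False
        return None
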
    @classmethod
    def _validated_op(cls, account, op, date):
        """Validate and normalize the operation."""
        if (not 'what' in op
                or not isinstance(op['what'], list)
                or not 'follower' in op
                or not 'following' in op):
        if not 'what' in op or not isinstance(op['what'], list) or not 'follower' in op or not 'following' in op:
            log.info("follow_op %s ignored due to basic errors", op)
            return None

@ -191,13 +233,24 @@ class Follow(DbAdapterHolder):
        # only if we wanted to immediately remove empty records)
        # we could add aliases for '' - 'unfollow' and 'unignore'/'unmute'
        # we could add alias for 'ignore' - 'mute'
        defs = {'': Action.Nothing, 'blog': Action.Blog, 'follow': Action.Blog, 'ignore': Action.Ignore,
                'blacklist': Action.Blacklist, 'follow_blacklist': Action.Follow_blacklist,
                'unblacklist': Action.Unblacklist, 'unfollow_blacklist': Action.Unfollow_blacklist,
                'follow_muted': Action.Follow_muted, 'unfollow_muted': Action.Unfollow_muted,
                'reset_blacklist' : Action.Reset_blacklist, 'reset_following_list': Action.Reset_following_list,
                'reset_muted_list': Action.Reset_muted_list, 'reset_follow_blacklist': Action.Reset_follow_blacklist,
                'reset_follow_muted_list': Action.Reset_follow_muted_list, 'reset_all_lists': Action.Reset_all_lists}
        defs = {
            '': Action.Nothing,
            'blog': Action.Blog,
            'follow': Action.Blog,
            'ignore': Action.Ignore,
            'blacklist': Action.Blacklist,
            'follow_blacklist': Action.Follow_blacklist,
            'unblacklist': Action.Unblacklist,
            'unfollow_blacklist': Action.Unfollow_blacklist,
            'follow_muted': Action.Follow_muted,
            'unfollow_muted': Action.Unfollow_muted,
            'reset_blacklist': Action.Reset_blacklist,
            'reset_following_list': Action.Reset_following_list,
            'reset_muted_list': Action.Reset_muted_list,
            'reset_follow_blacklist': Action.Reset_follow_blacklist,
            'reset_follow_muted_list': Action.Reset_follow_muted_list,
            'reset_all_lists': Action.Reset_all_lists,
        }
        if not isinstance(what, str) or what not in defs:
            log.info("follow_op %s ignored due to unknown type of follow", op)
            return None

@ -211,7 +264,11 @@ class Follow(DbAdapterHolder):
        op['following'] = op['following'] if isinstance(op['following'], list) else [op['following']]

        # if following name does not exist do not process it: basically equal to drop op for single following entry
        op['following'] = [following for following in op['following'] if following and Accounts.exists(following) and following != op['follower']]
        op['following'] = [
            following
            for following in op['following']
            if following and Accounts.exists(following) and following != op['follower']
        ]
        # ABW: note that since you could make 'following' list empty anyway by supplying nonexisting account
        # there was no point in excluding follow_op with provided empty list/empty string - such call actually
        # makes sense for state > 8 when 'following' is ignored

@ -220,17 +277,19 @@ class Follow(DbAdapterHolder):
            log.info("follow_op %s is void due to effectively empty list of following", op)
            return None

        return dict(follower=escape_characters(op['follower']),
                    following=[escape_characters(following) for following in op['following']],
                    state=state,
                    at=date)
        return dict(
            follower=escape_characters(op['follower']),
            following=[escape_characters(following) for following in op['following']],
            state=state,
            at=date,
        )

    @classmethod
    def flush(cls):
        n = 0
        if cls.follow_items_to_flush or cls.list_resets_to_flush:
            cls.beginTx()

            sql = "SELECT {}({}::VARCHAR, {}::INT)"
            for reset_list in cls.list_resets_to_flush:
                query = sql.format(reset_list['reset_call'], reset_list['follower'], reset_list['block_num'])

@ -285,7 +344,9 @@ class Follow(DbAdapterHolder):
            count = 0

            for _, follow_item in cls.follow_items_to_flush.items():
                values.append(f"({follow_item['idx']}, {follow_item['follower']}, {follow_item['following']}, '{follow_item['at']}'::timestamp, {follow_item['state']}::smallint, {follow_item['blacklisted']}::boolean, {follow_item['follow_blacklists']}::boolean, {follow_item['follow_muted']}::boolean, {follow_item['block_num']})")
                values.append(
                    f"({follow_item['idx']}, {follow_item['follower']}, {follow_item['following']}, '{follow_item['at']}'::timestamp, {follow_item['state']}::smallint, {follow_item['blacklisted']}::boolean, {follow_item['follow_blacklists']}::boolean, {follow_item['follow_muted']}::boolean, {follow_item['block_num']})"
                )
                count = count + 1
                if count >= limit:
                    query = str(sql).format(",".join(values))

@ -7,8 +7,9 @@ import logging

log = logging.getLogger(__name__)


class VirtualOperationHiveDb(Operation):
    def __init__( self, operation_type, operation_body ):
    def __init__(self, operation_type, operation_body):
        self._operation_type = operation_type
        self._operation_body = operation_body

@ -16,11 +17,12 @@ class VirtualOperationHiveDb(Operation):
        return self._operation_type

    def get_body(self):
        body = json.loads( str( self._operation_body ) )
        return body[ 'value' ]
        body = json.loads(str(self._operation_body))
        return body['value']


class OperationHiveDb(Operation):
    def __init__( self, operation_type, operation_body ):
    def __init__(self, operation_type, operation_body):
        self._operation_type = operation_type
        self._operation_body = operation_body

@ -29,9 +31,10 @@ class OperationHiveDb(Operation):

    def get_body(self):
        body = json.loads(self._operation_body)
        return body[ 'value' ]
        return body['value']

class TransactionHiveDb( Transaction ):


class TransactionHiveDb(Transaction):
    def __init__(self, block_num, operations, firts_operation_idx, operation_id_to_enum):
        self._block_num = block_num
        self._operations = operations

@ -39,38 +42,39 @@ class TransactionHiveDb( Transaction ):
        self._operation_id_to_enum = operation_id_to_enum

    def get_id(self):
        return 0 # it is a fake transactions which returns all operations
        return 0  # it is a fake transaction which returns all operations

    def get_next_operation(self):
        if self._first_operation_idx is None:
            return None

        for op_idx in range(self._first_operation_idx, len(self._operations)):
            assert self._operations[ op_idx ][ 'block_num' ] >= self._block_num
            assert self._operations[op_idx]['block_num'] >= self._block_num

            if self._operations[ op_idx ][ 'block_num' ] > self._block_num:
            if self._operations[op_idx]['block_num'] > self._block_num:
                break

            operation_type = self._operation_id_to_enum( self._operations[ op_idx ][ 'operation_type_id' ] )
            if ( type( operation_type ) != OperationType ):
            operation_type = self._operation_id_to_enum(self._operations[op_idx]['operation_type_id'])
            if type(operation_type) != OperationType:
                continue

            operation = OperationHiveDb( operation_type, self._operations[ op_idx ][ 'body' ] )
            operation = OperationHiveDb(operation_type, self._operations[op_idx]['body'])
            yield operation


class BlockHiveDb( Block ):
    def __init__(self
                 , num
                 , date
                 , hash
                 , previous_block_hash
                 , number_of_transactions
                 , number_of_operations
                 , operations
                 , first_operation_idx
                 , opertion_id_to_enum
                 ):
class BlockHiveDb(Block):
    def __init__(
        self,
        num,
        date,
        hash,
        previous_block_hash,
        number_of_transactions,
        number_of_operations,
        operations,
        first_operation_idx,
        opertion_id_to_enum,
    ):

        self._num = num
        self._date = date

@ -90,14 +94,14 @@ class BlockHiveDb( Block ):
            return None

        for virtual_op_idx in range(self._first_operation_idx, len(self._operations)):
            if self._operations[ virtual_op_idx ][ 'block_num' ] > self.get_num():
            if self._operations[virtual_op_idx]['block_num'] > self.get_num():
                break

            operation_type = self._operation_id_to_enum( self._operations[ virtual_op_idx ][ 'operation_type_id' ] )
            if ( type( operation_type ) != VirtualOperationType ):
            operation_type = self._operation_id_to_enum(self._operations[virtual_op_idx]['operation_type_id'])
            if type(operation_type) != VirtualOperationType:
                continue

            virtual_op = VirtualOperationHiveDb(operation_type, self._operations[ virtual_op_idx ][ 'body' ])
            virtual_op = VirtualOperationHiveDb(operation_type, self._operations[virtual_op_idx]['body'])
            yield virtual_op

    def get_date(self):

@ -118,5 +122,7 @@ class BlockHiveDb( Block ):
    def get_next_transaction(self):
        if self._first_operation_idx is None:
            return None
        trans = TransactionHiveDb( self.get_num(), self._operations, self._first_operation_idx, self._operation_id_to_enum )
        trans = TransactionHiveDb(
            self.get_num(), self._operations, self._first_operation_idx, self._operation_id_to_enum
        )
        yield trans

@ -25,17 +25,28 @@ blocks_query = """SELECT * FROM enum_blocks4hivemind( :first, :last )"""

number_of_blocks_query = """SELECT num as num FROM hive_blocks ORDER BY num DESC LIMIT 1"""


class BlocksDataFromDbProvider:
    """Starts threads which take operations for a range of blocks"""

    def __init__(self, sql_query, db, blocks_per_request, start_block, max_block, breaker, exception_reporter, external_thread_pool = None):
    def __init__(
        self,
        sql_query,
        db,
        blocks_per_request,
        start_block,
        max_block,
        breaker,
        exception_reporter,
        external_thread_pool=None,
    ):
        """
            db - database
            start_block - block from which the processing starts
            max_block - last to get block's number
            breaker - callable object which returns true if processing must be continues
            exception_reporter - callable, invoke it when an exception occurs in a thread
            external_thread_pool - thread pool controlled outside the class
        db - database
        start_block - block from which the processing starts
        max_block - number of the last block to get
        breaker - callable object which returns true if processing must be continued
        exception_reporter - callable, invoke it when an exception occurs in a thread
        external_thread_pool - thread pool controlled outside the class
        """

        assert breaker

@ -46,7 +57,7 @@ class BlocksDataFromDbProvider:
        self._breaker = breaker
        self._exception_reporter = exception_reporter
        self._start_block = start_block
        self._max_block = max_block # to inlude upperbound in results
        self._max_block = max_block  # to include upperbound in results
        self._db = db
        if external_thread_pool:
            self._thread_pool = external_thread_pool

@ -55,18 +66,18 @@ class BlocksDataFromDbProvider:
        self._blocks_per_request = blocks_per_request
        self._sql_query = sql_query

    def thread_body_get_data( self, queue_for_data ):
    def thread_body_get_data(self, queue_for_data):
        try:
            for block in range ( self._start_block, self._max_block, self._blocks_per_request ):
            for block in range(self._start_block, self._max_block, self._blocks_per_request):
                if not self._breaker():
                    break;
                    break

                data_rows = self._db.query_all( self._sql_query, first=block, last=min( [ block + self._blocks_per_request, self._max_block ] ))
                data_rows = self._db.query_all(
                    self._sql_query, first=block, last=min([block + self._blocks_per_request, self._max_block])
                )
                while self._breaker():
                    try:
                        queue_for_data.put( data_rows, True, 1 )
                        queue_for_data.put(data_rows, True, 1)
                        break
                    except queue.Full:
                        continue
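The put/except-Full/retry loop above is this codebase's recurring idiom for pushing onto a bounded queue while staying responsive to shutdown: the short timeout gives the producer a chance to re-check `breaker()` instead of blocking forever on a full queue. A condensed, self-contained sketch of the same pattern, with illustrative names:

    import queue

    def put_interruptible(q, item, keep_running, timeout=1):
        # Retry a blocking put with a short timeout so the producer notices
        # keep_running() turning false instead of hanging on a full queue.
        while keep_running():
            try:
                q.put(item, True, timeout)
                return True
            except queue.Full:
                continue
        return False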
@ -75,20 +86,25 @@ class BlocksDataFromDbProvider:
            raise

    def start(self, queue_for_data):
        future = self._thread_pool.submit( self.thread_body_get_data, queue_for_data )
        future = self._thread_pool.submit(self.thread_body_get_data, queue_for_data)
        return future


class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):
    _vop_types_dictionary = {}
    _op_types_dictionary = {}

    class Databases:
        def __init__(self, conf):
            self._db_root = Db( conf.get('hived_database_url'), "MassiveBlocksProvider.Root", conf.get( 'log_explain_queries' ) )
            self._db_operations = Db( conf.get('hived_database_url'), "MassiveBlocksProvider.OperationsData", conf.get( 'log_explain_queries' ) )
            self._db_blocks_data = Db( conf.get('hived_database_url'), "MassiveBlocksProvider.BlocksData", conf.get( 'log_explain_queries' ) )
            self._db_root = Db(
                conf.get('hived_database_url'), "MassiveBlocksProvider.Root", conf.get('log_explain_queries')
            )
            self._db_operations = Db(
                conf.get('hived_database_url'), "MassiveBlocksProvider.OperationsData", conf.get('log_explain_queries')
            )
            self._db_blocks_data = Db(
                conf.get('hived_database_url'), "MassiveBlocksProvider.BlocksData", conf.get('log_explain_queries')
            )

            assert self._db_root
            assert self._db_operations
@ -108,20 +124,20 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):
        def get_blocks_data(self):
            return self._db_blocks_data

    def __init__(
        self
        , databases
        , number_of_blocks_in_batch
        , lbound
        , ubound
        , breaker
        , exception_reporter
        , external_thread_pool = None ):
        self,
        databases,
        number_of_blocks_in_batch,
        lbound,
        ubound,
        breaker,
        exception_reporter,
        external_thread_pool=None,
    ):
        """
            databases - object Databases with opened databases
            lbound - start blocks
            ubound - last block
        databases - object Databases with opened databases
        lbound - start block
        ubound - last block
        """
        assert lbound <= ubound
        assert lbound >= 0

@ -132,12 +148,14 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):
        self._lbound = lbound
        self._ubound = ubound
        self._blocks_per_query = number_of_blocks_in_batch
        self._first_block_to_get = lbound
        self._blocks_queue = queue.Queue( maxsize=self._blocks_queue_size )
        self._operations_queue = queue.Queue( maxsize=self._operations_queue_size )
        self._blocks_data_queue = queue.Queue( maxsize=self._blocks_data_queue_size )
        self._first_block_to_get = lbound
        self._blocks_queue = queue.Queue(maxsize=self._blocks_queue_size)
        self._operations_queue = queue.Queue(maxsize=self._operations_queue_size)
        self._blocks_data_queue = queue.Queue(maxsize=self._blocks_data_queue_size)

        self._last_block_num_in_db = self._db.query_one( """SELECT num as num FROM hive_blocks ORDER BY num DESC LIMIT 1""" )
        self._last_block_num_in_db = self._db.query_one(
            """SELECT num as num FROM hive_blocks ORDER BY num DESC LIMIT 1"""
        )
        assert self._last_block_num_in_db is not None

        if external_thread_pool:
@ -148,52 +166,60 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):

        # read all blocks from db, rest of blocks ( ubound - self._last_block_num_in_db ) are supposed to be mocks
        self._operations_provider = BlocksDataFromDbProvider(
            operations_query
            , databases.get_operations()
            , self._blocks_per_query
            , self._lbound
            , self._last_block_num_in_db + 1 # ubound
            , breaker
            , exception_reporter
            , self._thread_pool
            operations_query,
            databases.get_operations(),
            self._blocks_per_query,
            self._lbound,
            self._last_block_num_in_db + 1,  # ubound
            breaker,
            exception_reporter,
            self._thread_pool,
        )

        self._blocks_data_provider = BlocksDataFromDbProvider(
            blocks_query
            , databases.get_blocks_data()
            , self._blocks_per_query
            , self._lbound
            , self._last_block_num_in_db + 1 # ubound
            , breaker
            , exception_reporter
            , self._thread_pool
            blocks_query,
            databases.get_blocks_data(),
            self._blocks_per_query,
            self._lbound,
            self._last_block_num_in_db + 1,  # ubound
            breaker,
            exception_reporter,
            self._thread_pool,
        )

        if not MassiveBlocksDataProviderHiveDb._vop_types_dictionary:
            virtual_operations_types_ids = self._db.query_all( "SELECT id, name FROM hive_operation_types WHERE is_virtual = true" )
            virtual_operations_types_ids = self._db.query_all(
                "SELECT id, name FROM hive_operation_types WHERE is_virtual = true"
            )
            for id, name in virtual_operations_types_ids:
                MassiveBlocksDataProviderHiveDb._vop_types_dictionary[ id ] = VirtualOperationType.from_name( name[len('hive::protocol::'):] )
                MassiveBlocksDataProviderHiveDb._vop_types_dictionary[id] = VirtualOperationType.from_name(
                    name[len('hive::protocol::') :]
                )

        if not MassiveBlocksDataProviderHiveDb._op_types_dictionary:
            operations_types_ids = self._db.query_all( "SELECT id, name FROM hive_operation_types WHERE is_virtual = false" )
            operations_types_ids = self._db.query_all(
                "SELECT id, name FROM hive_operation_types WHERE is_virtual = false"
            )
            for id, name in operations_types_ids:
                MassiveBlocksDataProviderHiveDb._op_types_dictionary[ id ] = OperationType.from_name( name[len('hive::protocol::'):] )
                MassiveBlocksDataProviderHiveDb._op_types_dictionary[id] = OperationType.from_name(
                    name[len('hive::protocol::') :]
                )

    def _id_to_virtual_type(id):
        if id in MassiveBlocksDataProviderHiveDb._vop_types_dictionary:
            return MassiveBlocksDataProviderHiveDb._vop_types_dictionary[ id ]
            return MassiveBlocksDataProviderHiveDb._vop_types_dictionary[id]

    def _id_to_operation_type(id):
        if id in MassiveBlocksDataProviderHiveDb._op_types_dictionary:
            return MassiveBlocksDataProviderHiveDb._op_types_dictionary[ id ]
            return MassiveBlocksDataProviderHiveDb._op_types_dictionary[id]

    def _operation_id_to_enum( id ):
        vop = MassiveBlocksDataProviderHiveDb._id_to_virtual_type( id )
    def _operation_id_to_enum(id):
        vop = MassiveBlocksDataProviderHiveDb._id_to_virtual_type(id)
        if vop:
            return vop
        return MassiveBlocksDataProviderHiveDb._id_to_operation_type( id )
        return MassiveBlocksDataProviderHiveDb._id_to_operation_type(id)

    def _get_mocked_block( self, block_num, always_create ):
    def _get_mocked_block(self, block_num, always_create):
        # normally it should create mocked block only when block mock or vops are added,
        # but there is a situation when we ask for mock blocks after the database head,
        # we need to always return at least empty block otherwise live sync streamer
@ -202,25 +228,25 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):
        # NOTE: it affects only situation when mocks are loaded, otherwise mock provider methods
        # do not return block data
        vops = {}
        MockVopsProvider.add_mock_vops(vops, block_num, block_num+1)
        MockVopsProvider.add_mock_vops(vops, block_num, block_num + 1)

        block_mock = MockBlockProvider.get_block_data(block_num, bool(vops) or always_create )
        block_mock = MockBlockProvider.get_block_data(block_num, bool(vops) or always_create)
        if not block_mock:
            return None

        if vops:
            vops = vops[block_num][ 'ops' ]
        return BlockFromRpc( block_mock, vops )
            vops = vops[block_num]['ops']
        return BlockFromRpc(block_mock, vops)

    def _get_mocks_after_db_blocks(self, first_mock_block_num):
        for block_proposition in range( first_mock_block_num, self._ubound ):
        for block_proposition in range(first_mock_block_num, self._ubound):
            if not self._breaker():
                return
            mocked_block = self._get_mocked_block( block_proposition, True )
            mocked_block = self._get_mocked_block(block_proposition, True)

            while self._breaker():
                try:
                    self._blocks_queue.put( mocked_block, True, 1 )
                    self._blocks_queue.put(mocked_block, True, 1)
                    break
                except queue.Full:
                    continue
@ -231,64 +257,65 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):

            # only mocked blocks are possible
            if self._lbound > self._last_block_num_in_db:
                self._get_mocks_after_db_blocks( self._lbound )
                self._get_mocks_after_db_blocks(self._lbound)
                return

            while self._breaker():
                blocks_data = self._get_from_queue(self._blocks_data_queue, 1)
                operations = self._get_from_queue(self._operations_queue, 1)

                if not self._breaker():
                    break

                assert len(blocks_data) == 1, "Always one element should be returned"
                assert len(operations) == 1, "Always one element should be returned"

                operations = operations[ 0 ]
                operations = operations[0]

                block_operation_idx = 0
                for block_data in blocks_data[0]:
                    new_block = BlockHiveDb(
                        block_data[ 'num' ]
                        , block_data[ 'date' ]
                        , block_data[ 'hash' ]
                        , block_data[ 'prev' ]
                        , block_data[ 'tx_number' ]
                        , block_data[ 'op_number' ]
                        , None
                        , None
                        , MassiveBlocksDataProviderHiveDb._operation_id_to_enum
                    )
                        block_data['num'],
                        block_data['date'],
                        block_data['hash'],
                        block_data['prev'],
                        block_data['tx_number'],
                        block_data['op_number'],
                        None,
                        None,
                        MassiveBlocksDataProviderHiveDb._operation_id_to_enum,
                    )

                    for idx in range( block_operation_idx, len(operations) ):
                    for idx in range(block_operation_idx, len(operations)):
                        # find the block's first operation in the list
                        if operations[ idx ]['block_num'] == block_data[ 'num' ]:
                        if operations[idx]['block_num'] == block_data['num']:
                            new_block = BlockHiveDb(
                                block_data[ 'num' ]
                                , block_data[ 'date' ]
                                , block_data[ 'hash' ]
                                , block_data[ 'prev' ]
                                , block_data[ 'tx_number' ]
                                , block_data[ 'op_number' ]
                                , operations
                                , idx
                                , MassiveBlocksDataProviderHiveDb._operation_id_to_enum
                                )
                                block_data['num'],
                                block_data['date'],
                                block_data['hash'],
                                block_data['prev'],
                                block_data['tx_number'],
                                block_data['op_number'],
                                operations,
                                idx,
                                MassiveBlocksDataProviderHiveDb._operation_id_to_enum,
                            )
                            block_operation_idx = idx
                            break;
                        if operations[ block_operation_idx ]['block_num'] > block_data[ 'num' ]:
                            break;
                            break
                        if operations[block_operation_idx]['block_num'] > block_data['num']:
                            break

                    mocked_block = self._get_mocked_block( new_block.get_num(), False )
                    mocked_block = self._get_mocked_block(new_block.get_num(), False)
                    # live sync with mocks needs this, otherwise stream will wait almost forever for a block
                    MockBlockProvider.set_last_real_block_num_date( new_block.get_num(), new_block.get_date(), new_block.get_hash() )
                    MockBlockProvider.set_last_real_block_num_date(
                        new_block.get_num(), new_block.get_date(), new_block.get_hash()
                    )
                    if mocked_block:
                        new_block = ExtendedByMockBlockAdapter( new_block, mocked_block )
                        new_block = ExtendedByMockBlockAdapter(new_block, mocked_block)

                    while self._breaker():
                        try:
                            self._blocks_queue.put( new_block, True, 1 )
                            self._blocks_queue.put(new_block, True, 1)
                            currently_received_block += 1
                            if currently_received_block >= (self._ubound - 1):
                                return

@ -298,7 +325,7 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):

            # we reach last block in db, now only mocked blocks are possible
            if new_block.get_num() >= self._last_block_num_in_db:
                self._get_mocks_after_db_blocks( new_block.get_num() + 1 )
                self._get_mocks_after_db_blocks(new_block.get_num() + 1)
                return
        except:
            self._exception_reporter()

@ -309,29 +336,29 @@ class MassiveBlocksDataProviderHiveDb(BlocksProviderBase):
        You can pass the thread pool to provider during its creation to control its lifetime
        outside the provider"""

        return ThreadPoolExecutor( max_workers = MassiveBlocksDataProviderHiveDb.get_number_of_threads() )
        return ThreadPoolExecutor(max_workers=MassiveBlocksDataProviderHiveDb.get_number_of_threads())

    def get_number_of_threads():
        return 3 # block data + operations + collect thread
        return 3  # block data + operations + collect thread

    def start(self):
        futures = []
        futures.append( self._operations_provider.start( self._operations_queue ) )
        futures.append( self._blocks_data_provider.start( self._blocks_data_queue ) )
        futures.append(self._operations_provider.start(self._operations_queue))
        futures.append(self._blocks_data_provider.start(self._blocks_data_queue))

        futures.append( self._thread_pool.submit( self._thread_get_block ) )
        futures.append(self._thread_pool.submit(self._thread_get_block))
        return futures

    def get( self, number_of_blocks ):
    def get(self, number_of_blocks):
        """Returns blocks and vops data for next number_of_blocks"""
        blocks = []
        wait_blocks_time = WSM.start()

        if self._blocks_queue.qsize() < number_of_blocks and self._breaker():
            log.info(f"Awaiting any blocks to process... {self._blocks_queue.qsize()}")
            log.info(f"Awaiting any blocks to process... {self._blocks_queue.qsize()}")

        if not self._blocks_queue.empty() or self._breaker():
            blocks = self._get_from_queue( self._blocks_queue, number_of_blocks )
            blocks = self._get_from_queue(self._blocks_queue, number_of_blocks)

        WSM.wait_stat('block_consumer_block', WSM.stop(wait_blocks_time))

@ -4,9 +4,10 @@ import logging

log = logging.getLogger(__name__)


class VirtualOperationFromRpc(Operation):
    def __init__( self, operation_name, operation_body ):
        self._operation_type = VirtualOperationType.from_name( operation_name )
    def __init__(self, operation_name, operation_body):
        self._operation_type = VirtualOperationType.from_name(operation_name)
        self._operation_body = operation_body

    def get_type(self):

@ -17,8 +18,8 @@ class VirtualOperationFromRpc(Operation):


class OperationFromRpc(Operation):
    def __init__( self, operation_name, operation_body ):
        self._operation_type = OperationType.from_name( operation_name )
    def __init__(self, operation_name, operation_body):
        self._operation_type = OperationType.from_name(operation_name)
        self._operation_body = operation_body

    def get_type(self):

@ -28,7 +29,7 @@ class OperationFromRpc(Operation):
        return self._operation_body


class TransactionFromRpc(Transaction ):
class TransactionFromRpc(Transaction):
    def __init__(self, id, transaction):
        self._id = id
        self._transaction = transaction

@ -38,12 +39,13 @@ class TransactionFromRpc(Transaction ):

    def get_next_operation(self):
        for raw_operation in self._transaction['operations']:
            operation = OperationFromRpc( raw_operation[ 'type' ], raw_operation['value'] )
            operation = OperationFromRpc(raw_operation['type'], raw_operation['value'])
            if not operation.get_type():
                continue
            yield operation
            yield operation

class BlockFromRpc( Block ):


class BlockFromRpc(Block):
    def __init__(self, block_data, virtual_ops):
        """
        block_data - raw format of the blocks

@ -74,12 +76,11 @@ class BlockFromRpc( Block ):

    def get_next_vop(self):
        for vop in self._virtual_ops:
            vop_object = VirtualOperationFromRpc( vop[ 'type' ], vop[ 'value' ] )
            vop_object = VirtualOperationFromRpc(vop['type'], vop['value'])
            if not vop_object.get_type():
                continue
            yield vop_object


    def get_next_transaction(self):
        for tx_idx, tx in enumerate(self._blocks_data['transactions']):
            yield TransactionFromRpc( tx_idx, tx )
            yield TransactionFromRpc(tx_idx, tx)
@ -1,4 +1,3 @@
|
|||
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
import logging
|
||||
import queue
|
||||
|
@ -9,18 +8,29 @@ from hive.indexer.mock_block_provider import MockBlockProvider
|
|||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BlocksProvider:
|
||||
"""Starts threads which request node for blocks, and collect responses to one queue"""
|
||||
|
||||
def __init__(cls, http_client, number_of_threads, blocks_per_request, start_block, max_block, breaker, exception_reporter, external_thread_pool = None):
|
||||
def __init__(
|
||||
cls,
|
||||
http_client,
|
||||
number_of_threads,
|
||||
blocks_per_request,
|
||||
start_block,
|
||||
max_block,
|
||||
breaker,
|
||||
exception_reporter,
|
||||
external_thread_pool=None,
|
||||
):
|
||||
"""
|
||||
http_client - object which will ask the node for blocks
|
||||
number_of_threads - how many threads will be used to ask for blocks
|
||||
start_block - block from which the processing starts
|
||||
max_block - last to get block's number
|
||||
breaker - callable object which returns true if processing must be continues
|
||||
exception_reporter - callable, invoke to report an undesire exception in a thread
|
||||
external_thread_pool - thread pool controlled outside the class
|
||||
http_client - object which will ask the node for blocks
|
||||
number_of_threads - how many threads will be used to ask for blocks
|
||||
start_block - block from which the processing starts
|
||||
max_block - last to get block's number
|
||||
breaker - callable object which returns true if processing must be continues
|
||||
exception_reporter - callable, invoke to report an undesire exception in a thread
|
||||
external_thread_pool - thread pool controlled outside the class
|
||||
"""
|
||||
|
||||
assert number_of_threads > 0
|
||||
|
@ -34,43 +44,49 @@ class BlocksProvider:
cls._breaker = breaker
cls._exception_reporter = exception_reporter
cls._start_block = start_block
cls._max_block = max_block # to include upperbound in results
cls._max_block = max_block  # to include upperbound in results
cls._http_client = http_client
if external_thread_pool:
assert type(external_thread_pool) == ThreadPoolExecutor
cls._thread_pool = external_thread_pool
assert type(external_thread_pool) == ThreadPoolExecutor
cls._thread_pool = external_thread_pool
else:
cls._thread_pool = ThreadPoolExecutor( BlocksProvider.get_number_of_threads( number_of_threads ) )
cls._thread_pool = ThreadPoolExecutor(BlocksProvider.get_number_of_threads(number_of_threads))
cls._number_of_threads = number_of_threads
cls._blocks_per_request = blocks_per_request

# prepare queues and threads
for i in range( 0, number_of_threads):
cls._responses_queues.append( queue.Queue( maxsize = 50 ) )
for i in range(0, number_of_threads):
cls._responses_queues.append(queue.Queue(maxsize=50))

def get_number_of_threads( number_of_threads ):
def get_number_of_threads(number_of_threads):
"""Return the total number of threads used when collecting blocks with the given number of worker threads
number_of_threads - how many threads will ask for blocks
number_of_threads - how many threads will ask for blocks
"""
return number_of_threads + 1 # +1 because of a thread for collecting blocks from threads
return number_of_threads + 1  # +1 because of a thread for collecting blocks from threads

def thread_body_get_block( cls, blocks_shift ):
def thread_body_get_block(cls, blocks_shift):
try:
for block in range ( cls._start_block + blocks_shift * cls._blocks_per_request, cls._max_block, cls._number_of_threads * cls._blocks_per_request ):
for block in range(
cls._start_block + blocks_shift * cls._blocks_per_request,
cls._max_block,
cls._number_of_threads * cls._blocks_per_request,
):
if not cls._breaker():
return;
return

results = []
number_of_expected_blocks = 1

query_param = [{'block_num': i} for i in range( block, min( [ block + cls._blocks_per_request, cls._max_block ] ))]
query_param = [
{'block_num': i} for i in range(block, min([block + cls._blocks_per_request, cls._max_block]))
]
number_of_expected_blocks = len(query_param)
results = cls._http_client.exec( 'get_block', query_param, True )
results = cls._http_client.exec('get_block', query_param, True)

if results:
while cls._breaker():
try:
cls._responses_queues[ blocks_shift ].put( results, True, 1 )
cls._responses_queues[blocks_shift].put(results, True, 1)
break
except queue.Full:
continue
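Aside: the `range(...)` reformatted above encodes how worker threads partition the block range: thread `i` starts at `start_block + i * blocks_per_request` and advances by `number_of_threads * blocks_per_request`, so batches interleave without overlap. A small standalone sketch of that arithmetic, with made-up numbers:

    def batches_for_thread(shift, start_block, max_block, threads, per_request):
        # Each entry is the first block of one batch fetched by this thread.
        return list(range(start_block + shift * per_request, max_block, threads * per_request))

    # With 2 threads and 3 blocks per request over blocks 1..17:
    print(batches_for_thread(0, 1, 18, 2, 3))  # [1, 7, 13]
    print(batches_for_thread(1, 1, 18, 2, 3))  # [4, 10, 16]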
@ -78,37 +94,43 @@ class BlocksProvider:
cls._exception_reporter()
raise

def thread_body_blocks_collector( cls, queue_for_blocks ):
def thread_body_blocks_collector(cls, queue_for_blocks):
try:
currently_received_block = cls._start_block - 1;
currently_received_block = cls._start_block - 1
while cls._breaker():
# take in order all blocks from threads queues
for blocks_queue in range ( 0, cls._number_of_threads ):
for blocks_queue in range(0, cls._number_of_threads):
if not cls._breaker():
return;
return
while cls._breaker():
try:
blocks = cls._responses_queues[ blocks_queue ].get( True, 1 )
cls._responses_queues[ blocks_queue ].task_done()
#split blocks range
blocks = cls._responses_queues[blocks_queue].get(True, 1)
cls._responses_queues[blocks_queue].task_done()
# split blocks range

for block in blocks:
if 'block' in block:
MockBlockProvider.set_last_real_block_num_date(currently_received_block+1, block['block']['timestamp'], block['block']['block_id']);
MockBlockProvider.set_last_real_block_num_date(
currently_received_block + 1,
block['block']['timestamp'],
block['block']['block_id'],
)

block_mock = MockBlockProvider.get_block_data(currently_received_block+1, True)
block_mock = MockBlockProvider.get_block_data(currently_received_block + 1, True)

if block_mock is not None:
if 'block' in block:
block["block"]["transactions"].extend( block_mock["transactions"] )
block["block"]["transactions"].extend(block_mock["transactions"])
else:
block["block"] = block_mock
log.warning(f"Pure mock block: id {block_mock['block_id']}, previous {block_mock['previous']}")
log.warning(
f"Pure mock block: id {block_mock['block_id']}, previous {block_mock['previous']}"
)
block_for_queue = None if not 'block' in block else block['block']

while cls._breaker():
try:
queue_for_blocks.put( block_for_queue, True, 1 )
queue_for_blocks.put(block_for_queue, True, 1)
currently_received_block += 1
if currently_received_block >= (cls._max_block - 1):
return
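Aside: both loops above use the same breaker-aware handoff: blocking queue operations run with a 1-second timeout inside `while breaker():`, so a shutdown request is noticed even when a queue stays full or empty. A standalone sketch of the pattern — the `breaker` here is a simple event flag, not the real callable:

    import queue
    import threading

    stop = threading.Event()
    breaker = lambda: not stop.is_set()
    q = queue.Queue(maxsize=50)

    def put_with_breaker(item):
        # Retry the bounded put until it succeeds or shutdown is requested.
        while breaker():
            try:
                q.put(item, True, 1)
                return True
            except queue.Full:
                continue
        return False

    put_with_breaker({'block_num': 1})
    stop.set()  # after this, a blocked put_with_breaker gives up within ~1 second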
@ -125,9 +147,9 @@ class BlocksProvider:
def start(cls, queue_for_blocks):
futures = []
for future_number in range(0, cls._number_of_threads):
future = cls._thread_pool.submit( cls.thread_body_get_block, future_number )
futures.append( future )
future = cls._thread_pool.submit(cls.thread_body_get_block, future_number)
futures.append(future)

future = cls._thread_pool.submit( cls.thread_body_blocks_collector, queue_for_blocks )
futures.append( future )
future = cls._thread_pool.submit(cls.thread_body_blocks_collector, queue_for_blocks)
futures.append(future)
return futures
@ -12,30 +12,32 @@ import queue
log = logging.getLogger(__name__)

class MassiveBlocksDataProviderHiveRpc(BlocksProviderBase):
def __init__(
self
, conf
, node_client
, blocks_get_threads
, vops_get_threads
, number_of_blocks_data_in_one_batch
, lbound
, ubound
, breaker
, exception_reporter
, external_thread_pool = None):
self,
conf,
node_client,
blocks_get_threads,
vops_get_threads,
number_of_blocks_data_in_one_batch,
lbound,
ubound,
breaker,
exception_reporter,
external_thread_pool=None,
):
"""
conf - configuration
node_client - SteemClient
blocks_get_threads - number of threads which get blocks from node
vops_get_threads - number of threads which get virtual operations from node
number_of_blocks_data_in_one_batch - number of blocks which will be requested from the node in one HTTP call
lbound - first block to get
ubound - last block to get
breaker - callable, returns False when processing must be stopped
exception_reporter - callable, invoked to report an undesired exception in a thread
external_thread_pool - thread pool controlled outside the class
conf - configuration
node_client - SteemClient
blocks_get_threads - number of threads which get blocks from node
vops_get_threads - number of threads which get virtual operations from node
number_of_blocks_data_in_one_batch - number of blocks which will be requested from the node in one HTTP call
lbound - first block to get
ubound - last block to get
breaker - callable, returns False when processing must be stopped
exception_reporter - callable, invoked to report an undesired exception in a thread
external_thread_pool - thread pool controlled outside the class
"""

BlocksProviderBase.__init__(self, breaker, exception_reporter)
@ -45,80 +47,79 @@ class MassiveBlocksDataProviderHiveRpc(BlocksProviderBase):
assert type(external_thread_pool) == ThreadPoolExecutor
thread_pool = external_thread_pool
else:
thread_pool = MassiveBlocksDataProviderHiveRpc.create_thread_pool( blocks_get_threads, vops_get_threads )
thread_pool = MassiveBlocksDataProviderHiveRpc.create_thread_pool(blocks_get_threads, vops_get_threads)

self.blocks_provider = BlocksProvider(
node_client._client["get_block"] if "get_block" in node_client._client else node_client._client["default"]
, blocks_get_threads
, number_of_blocks_data_in_one_batch
, lbound
, ubound
, breaker
, exception_reporter
, thread_pool
node_client._client["get_block"] if "get_block" in node_client._client else node_client._client["default"],
blocks_get_threads,
number_of_blocks_data_in_one_batch,
lbound,
ubound,
breaker,
exception_reporter,
thread_pool,
)

self.vops_provider = VopsProvider(
conf
, node_client
, vops_get_threads
, number_of_blocks_data_in_one_batch
, lbound
, ubound
, breaker
, exception_reporter
, thread_pool
conf,
node_client,
vops_get_threads,
number_of_blocks_data_in_one_batch,
lbound,
ubound,
breaker,
exception_reporter,
thread_pool,
)

self.vops_queue = queue.Queue( maxsize=self._operations_queue_size )
self.blocks_queue = queue.Queue( maxsize=self._blocks_data_queue_size )
self.vops_queue = queue.Queue(maxsize=self._operations_queue_size)
self.blocks_queue = queue.Queue(maxsize=self._blocks_data_queue_size)

def create_thread_pool( threads_for_blocks, threads_for_vops ):
def create_thread_pool(threads_for_blocks, threads_for_vops):
"""Creates an initialized thread pool with the number of threads required by the provider.
You can pass the thread pool to the provider during its creation to control its lifetime
outside the provider"""

return ThreadPoolExecutor(
BlocksProvider.get_number_of_threads( threads_for_blocks )
+ VopsProvider.get_number_of_threads( threads_for_vops )
)
BlocksProvider.get_number_of_threads(threads_for_blocks)
+ VopsProvider.get_number_of_threads(threads_for_vops)
)

def get( self, number_of_blocks ):
def get(self, number_of_blocks):
"""Returns blocks and vops data for next number_of_blocks"""
vops_and_blocks = { 'vops': [], 'blocks': [] }
vops_and_blocks = {'vops': [], 'blocks': []}

log.info(f"vops_queue.qsize: {self.vops_queue.qsize()} blocks_queue.qsize: {self.blocks_queue.qsize()}")

wait_vops_time = WSM.start()
if self.vops_queue.qsize() < number_of_blocks and self._breaker():
log.info("Awaiting any vops to process...")
log.info("Awaiting any vops to process...")

if not self.vops_queue.empty() or self._breaker():
vops = self._get_from_queue( self.vops_queue, number_of_blocks )
vops = self._get_from_queue(self.vops_queue, number_of_blocks)

if self._breaker():
assert len( vops ) == number_of_blocks
vops_and_blocks[ 'vops' ] = vops
assert len(vops) == number_of_blocks
vops_and_blocks['vops'] = vops
WSM.wait_stat('block_consumer_vop', WSM.stop(wait_vops_time))

wait_blocks_time = WSM.start()
if ( self.blocks_queue.qsize() < number_of_blocks ) and self._breaker():
if (self.blocks_queue.qsize() < number_of_blocks) and self._breaker():
log.info("Awaiting any block to process...")

if not self.blocks_queue.empty() or self._breaker():
vops_and_blocks[ 'blocks' ] = self._get_from_queue( self.blocks_queue, number_of_blocks )
vops_and_blocks['blocks'] = self._get_from_queue(self.blocks_queue, number_of_blocks)
WSM.wait_stat('block_consumer_block', WSM.stop(wait_blocks_time))

result = []
for vop_nr in range( len(vops_and_blocks['blocks']) ):
if vops_and_blocks[ 'blocks' ][ vop_nr ] is not None:
result.append( BlockFromRpc( vops_and_blocks[ 'blocks' ][ vop_nr ], vops_and_blocks[ 'vops' ][ vop_nr ] ) )
for vop_nr in range(len(vops_and_blocks['blocks'])):
if vops_and_blocks['blocks'][vop_nr] is not None:
result.append(BlockFromRpc(vops_and_blocks['blocks'][vop_nr], vops_and_blocks['vops'][vop_nr]))

return result

def start(self):
futures = self.blocks_provider.start( self.blocks_queue )
futures.extend( self.vops_provider.start( self.vops_queue ) )
futures = self.blocks_provider.start(self.blocks_queue)
futures.extend(self.vops_provider.start(self.vops_queue))

return futures
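Aside: `get()` above relies on the two queues being index-aligned — the N-th entry popped from the vops queue belongs to the N-th block popped from the blocks queue, and `None` marks a block that could not be fetched. A toy sketch of that zip-and-skip step, using plain tuples instead of `BlockFromRpc`:

    def pair_blocks_with_vops(blocks, vops):
        # Both lists are index-aligned; skip slots where the block is missing.
        assert len(blocks) == len(vops)
        return [(b, v) for b, v in zip(blocks, vops) if b is not None]

    blocks = [{'block_id': 'a'}, None, {'block_id': 'c'}]
    vops = [[], [{'type': 'x'}], []]
    print(pair_blocks_with_vops(blocks, vops))  # pairs for blocks 'a' and 'c' only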
@ -1,4 +1,3 @@
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
import queue

@ -9,19 +8,31 @@ from hive.indexer.mock_block_provider import MockBlockProvider
log = logging.getLogger(__name__)

class VopsProvider:
"""Starts threads which request virtual operations from the node and collect the responses into one queue"""

def __init__(cls, conf, client, number_of_threads, blocks_per_request, start_block, max_block, breaker, exception_reporter, external_thread_pool = None):
def __init__(
cls,
conf,
client,
number_of_threads,
blocks_per_request,
start_block,
max_block,
breaker,
exception_reporter,
external_thread_pool=None,
):
"""
conf - configuration
steem client - object which will ask the node for virtual operations
number_of_threads - how many threads will be used to ask for virtual operations
start_block - block from which the processing starts
max_block - number of the last block to get
breaker - callable object which returns True if processing should continue
exception_reporter - callable, invoked to report an undesired exception in a thread
external_thread_pool - thread pool controlled outside the class
conf - configuration
steem client - object which will ask the node for virtual operations
number_of_threads - how many threads will be used to ask for virtual operations
start_block - block from which the processing starts
max_block - number of the last block to get
breaker - callable object which returns True if processing should continue
exception_reporter - callable, invoked to report an undesired exception in a thread
external_thread_pool - thread pool controlled outside the class
"""

assert conf
@ -37,41 +48,47 @@ class VopsProvider:
cls._breaker = breaker
cls._exception_reporter = exception_reporter
cls._start_block = start_block
cls._max_block = max_block # to include upperbound in results
cls._max_block = max_block  # to include upperbound in results
cls._client = client
if external_thread_pool:
assert type(external_thread_pool) == ThreadPoolExecutor
cls._thread_pool = external_thread_pool
assert type(external_thread_pool) == ThreadPoolExecutor
cls._thread_pool = external_thread_pool
else:
cls._thread_pool = ThreadPoolExecutor( VopsProvider.get_number_of_threads( number_of_threads ) )
cls._thread_pool = ThreadPoolExecutor(VopsProvider.get_number_of_threads(number_of_threads))
cls._number_of_threads = number_of_threads
cls._blocks_per_request = blocks_per_request
cls.currently_received_block = cls._start_block - 1
cls.currently_received_block = cls._start_block - 1

# prepare queues and threads
for i in range( 0, number_of_threads):
cls._responses_queues.append( queue.Queue( maxsize = 50 ) )
for i in range(0, number_of_threads):
cls._responses_queues.append(queue.Queue(maxsize=50))

def get_number_of_threads( number_of_threads ):
def get_number_of_threads(number_of_threads):
"""Return the total number of threads used when collecting virtual operations with the given number of worker threads
number_of_threads - how many threads will ask for vops
number_of_threads - how many threads will ask for vops
"""
return number_of_threads + 1 # +1 because of a thread for collecting blocks from threads
return number_of_threads + 1  # +1 because of a thread for collecting blocks from threads

@staticmethod
def get_virtual_operation_for_blocks(client, conf, start_block_num, number_of_blocks):
return client.enum_virtual_ops(conf, start_block_num, start_block_num + number_of_blocks)

def thread_body_get_block( cls, blocks_shift ):
def thread_body_get_block(cls, blocks_shift):
try:
for block in range ( cls._start_block + blocks_shift * cls._blocks_per_request, cls._max_block + cls._blocks_per_request, cls._number_of_threads * cls._blocks_per_request ):
for block in range(
cls._start_block + blocks_shift * cls._blocks_per_request,
cls._max_block + cls._blocks_per_request,
cls._number_of_threads * cls._blocks_per_request,
):
if not cls._breaker():
return;
return

results = VopsProvider.get_virtual_operation_for_blocks(cls._client, cls._conf, block, cls._blocks_per_request)
results = VopsProvider.get_virtual_operation_for_blocks(
cls._client, cls._conf, block, cls._blocks_per_request
)
while cls._breaker():
try:
cls._responses_queues[ blocks_shift ].put( results, True, 1 )
cls._responses_queues[blocks_shift].put(results, True, 1)
break
except queue.Full:
continue
@ -80,10 +97,10 @@ class VopsProvider:
raise

def _fill_queue_with_no_vops(cls, queue_for_vops, number_of_no_vops):
for vop in range( 0, number_of_no_vops):
for vop in range(0, number_of_no_vops):
while cls._breaker():
try:
queue_for_vops.put( [], True, 1 )
queue_for_vops.put([], True, 1)
cls.currently_received_block += 1
if cls.currently_received_block >= (cls._max_block - 1):
return True
@ -92,29 +109,31 @@ class VopsProvider:
continue
return False

def thread_body_blocks_collector( cls, queue_for_vops ):
def thread_body_blocks_collector(cls, queue_for_vops):
try:
while cls._breaker():
# take in order all vops from threads queues
for vops_queue in range ( 0, cls._number_of_threads ):
for vops_queue in range(0, cls._number_of_threads):
if not cls._breaker():
return;
return
while cls._breaker():
try:
vops = cls._responses_queues[ vops_queue ].get( True, 1)
cls._responses_queues[ vops_queue ].task_done()
#split blocks range
vops = cls._responses_queues[vops_queue].get(True, 1)
cls._responses_queues[vops_queue].task_done()
# split blocks range
if not vops:
if cls._fill_queue_with_no_vops( queue_for_vops, cls._blocks_per_request ):
return;
if cls._fill_queue_with_no_vops(queue_for_vops, cls._blocks_per_request):
return
else:
for block in vops:
if cls._fill_queue_with_no_vops( queue_for_vops, block - ( cls.currently_received_block + 1 ) ):
return;
vop = vops[ block ]
if cls._fill_queue_with_no_vops(
queue_for_vops, block - (cls.currently_received_block + 1)
):
return
vop = vops[block]
while cls._breaker():
try:
queue_for_vops.put( vop[ 'ops' ], True, 1 )
queue_for_vops.put(vop['ops'], True, 1)
cls.currently_received_block += 1
if cls.currently_received_block >= (cls._max_block - 1):
return
@ -131,9 +150,9 @@ class VopsProvider:
def start(cls, queue_for_vops):
futures = []
for future_number in range(0, cls._number_of_threads):
future = cls._thread_pool.submit( cls.thread_body_get_block, future_number )
futures.append( future )
future = cls._thread_pool.submit(cls.thread_body_get_block, future_number)
futures.append(future)

future = cls._thread_pool.submit( cls.thread_body_blocks_collector, queue_for_vops )
futures.append( future )
future = cls._thread_pool.submit(cls.thread_body_blocks_collector, queue_for_vops)
futures.append(future)
return futures
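Aside: the collector above has to cope with `enum_virtual_ops` returning results only for blocks that actually have virtual operations, so `_fill_queue_with_no_vops` pushes empty lists for the gaps to keep the output queue dense and in block order. A standalone sketch of that densification step, dict in, list out:

    def densify_vops(sparse, first_block, last_block):
        # sparse maps block_num -> ops; emit one list per block, empty where absent.
        return [sparse.get(num, {'ops': []})['ops'] for num in range(first_block, last_block + 1)]

    sparse = {12: {'ops': ['producer_reward']}, 15: {'ops': ['author_reward']}}
    print(densify_vops(sparse, 11, 16))
    # [[], ['producer_reward'], [], [], ['author_reward'], []]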
@ -1,39 +1,40 @@
from hive.indexer.block import Block, Transaction

class ExtendedByMockBlockAdapter(Block):
def __init__( self, block, extended_block ):
assert issubclass(type(block), Block)
assert issubclass(type(extended_block), Block)
def __init__(self, block, extended_block):
assert issubclass(type(block), Block)
assert issubclass(type(extended_block), Block)

self._wrapped_block = block
self._extended_block = extended_block
self._wrapped_block = block
self._extended_block = extended_block

def get_num(self):
return self._wrapped_block.get_num()
def get_num(self):
return self._wrapped_block.get_num()

def get_next_vop(self):
for vop in self._wrapped_block.get_next_vop():
yield vop
for vop in self._extended_block.get_next_vop():
yield vop
def get_next_vop(self):
for vop in self._wrapped_block.get_next_vop():
yield vop
for vop in self._extended_block.get_next_vop():
yield vop

def get_date(self):
return self._wrapped_block.get_date()
def get_date(self):
return self._wrapped_block.get_date()

def get_hash(self):
return self._wrapped_block.get_hash()
def get_hash(self):
return self._wrapped_block.get_hash()

def get_previous_block_hash(self):
return self._wrapped_block.get_previous_block_hash()
def get_previous_block_hash(self):
return self._wrapped_block.get_previous_block_hash()

def get_number_of_transactions(self):
return self._wrapped_block.get_number_of_transactions() + self._extended_block.get_number_of_transactions()
def get_number_of_transactions(self):
return self._wrapped_block.get_number_of_transactions() + self._extended_block.get_number_of_transactions()

def get_number_of_operations(self):
return self._wrapped_block.get_number_of_operations() + self._extended_block.get_number_of_operations()
def get_number_of_operations(self):
return self._wrapped_block.get_number_of_operations() + self._extended_block.get_number_of_operations()

def get_next_transaction(self):
for transaction in self._wrapped_block.get_next_transaction():
yield transaction
for transaction in self._extended_block.get_next_transaction():
yield transaction
def get_next_transaction(self):
for transaction in self._wrapped_block.get_next_transaction():
yield transaction
for transaction in self._extended_block.get_next_transaction():
yield transaction
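Aside: `ExtendedByMockBlockAdapter` is a plain composition wrapper: scalar queries delegate to the wrapped block, while the iterating methods chain the wrapped block and the extension block. A minimal sketch of the chaining idea using `itertools.chain`, over a deliberately simplified interface:

    from itertools import chain

    class Chained:
        def __init__(self, base, extra):
            self._base = base
            self._extra = extra

        def items(self):
            # Equivalent to yielding from base then extra, as the adapter does.
            yield from chain(self._base, self._extra)

    real_txs = ['tx1', 'tx2']
    mock_txs = ['mock_tx']
    print(list(Chained(real_txs, mock_txs).items()))  # ['tx1', 'tx2', 'mock_tx']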
@ -7,8 +7,9 @@ from hive.indexer.mock_data_provider import MockDataProvider, MockDataProviderEx
log = logging.getLogger(__name__)

class MockBlockProvider(MockDataProvider):
""" Data provider for test ops """
"""Data provider for test ops"""

min_block = 0
max_block = 0

@ -20,17 +21,22 @@ class MockBlockProvider(MockDataProvider):
@classmethod
def set_last_real_block_num_date(cls, block_num, block_date, block_id):
if cls.last_real_block_num > block_num:
log.error( f"Incoming block has lower number than previous one: old {cls.last_real_block_num}, new {block_num}" )
log.error(
f"Incoming block has lower number than previous one: old {cls.last_real_block_num}, new {block_num}"
)
cls.last_real_block_num = int(block_num)
cls.last_real_block_id = block_id
new_date = dateutil.parser.isoparse(block_date)
if cls.last_real_block_time > new_date:
log.error( f"Incoming block has older timestamp than previous one: old {cls.last_real_block_time}, new {new_date}" )
log.error(
f"Incoming block has older timestamp than previous one: old {cls.last_real_block_time}, new {new_date}"
)
cls.last_real_block_time = new_date

@classmethod
def add_block_data_from_file(cls, file_name):
from json import load

data = {}
with open(file_name, "r") as src:
data = load(src)
@ -52,13 +58,17 @@ class MockBlockProvider(MockDataProvider):
# supplement data with defaults here because they depend on last_real_block_...
assert 'transactions' in cls.block_data[block_num]
assert 'transactions' in block_content
cls.block_data[block_num]['transactions'] = cls.block_data[block_num]['transactions'] + block_content['transactions']
cls.block_data[block_num]['transactions'] = (
cls.block_data[block_num]['transactions'] + block_content['transactions']
)
else:
cls.block_data[block_num] = dict(block_content)

@classmethod
def get_block_data(cls, block_num, make_on_empty=False):
if len(cls.block_data) == 0:  # this means there are no mocks, so none should be returned (even with make_on_empty)
if (
len(cls.block_data) == 0
):  # this means there are no mocks, so none should be returned (even with make_on_empty)
return None

data = cls.block_data.get(block_num, None)

@ -87,24 +97,28 @@ class MockBlockProvider(MockDataProvider):
@classmethod
def make_block_timestamp(cls, block_num):
block_delta = block_num - cls.last_real_block_num
time_delta = datetime.timedelta(days=0, seconds=block_delta*3, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0)
time_delta = datetime.timedelta(
days=0, seconds=block_delta * 3, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0
)
ret_time = cls.last_real_block_time + time_delta
return ret_time.replace(microsecond=0).isoformat()

@classmethod
def make_empty_block(cls, block_num, witness="initminer"):
fake_block = dict({
"previous": cls.make_block_id(block_num - 1),
"timestamp": cls.make_block_timestamp(block_num),
"witness": witness,
"transaction_merkle_root": "0000000000000000000000000000000000000000",
"extensions": [],
"witness_signature": "",
"transactions": [],
"block_id": cls.make_block_id(block_num),
"signing_key": "",
"transaction_ids": []
})
fake_block = dict(
{
"previous": cls.make_block_id(block_num - 1),
"timestamp": cls.make_block_timestamp(block_num),
"witness": witness,
"transaction_merkle_root": "0000000000000000000000000000000000000000",
"extensions": [],
"witness_signature": "",
"transactions": [],
"block_id": cls.make_block_id(block_num),
"signing_key": "",
"transaction_ids": [],
}
)
# supply enough blocks to fill block queue with empty blocks only
# throw exception if there is no more data to serve
if cls.min_block < block_num < cls.max_block + 3:
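Aside: `make_block_timestamp` above extrapolates from the last real block at the chain's fixed 3-second block interval. The same arithmetic in isolation, with invented values for the example:

    import datetime

    def extrapolate_timestamp(last_real_num, last_real_time, block_num):
        # Each block is 3 seconds after its predecessor.
        delta = datetime.timedelta(seconds=(block_num - last_real_num) * 3)
        return (last_real_time + delta).replace(microsecond=0).isoformat()

    t0 = datetime.datetime(2016, 7, 1, 0, 0, 0)
    print(extrapolate_timestamp(100, t0, 103))  # 2016-07-01T00:00:09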
@ -6,11 +6,14 @@ from json import dumps
log = logging.getLogger(__name__)

class MockDataProviderException(Exception):
pass

class MockDataProvider():
""" Data provider for test operations """

class MockDataProvider:
"""Data provider for test operations"""

block_data = {}

@classmethod

@ -20,6 +23,7 @@ class MockDataProvider():
@classmethod
def add_block_data_from_directory(cls, dir_name):
from fnmatch import fnmatch

pattern = "*.json"
for path, _, files in os.walk(dir_name):
for name in files:
@ -1,16 +1,16 @@
""" Data provider for test vops """
from hive.indexer.mock_data_provider import MockDataProvider

class MockVopsProvider(MockDataProvider):
""" Data provider for test vops """
block_data = {
'ops' : {},
'ops_by_block' : {}
}
"""Data provider for test vops"""

block_data = {'ops': {}, 'ops_by_block': {}}

@classmethod
def add_block_data_from_file(cls, file_name):
from json import load

data = {}
with open(file_name, "r") as src:
data = load(src)

@ -67,6 +67,6 @@ class MockVopsProvider(MockDataProvider):
ret[block_num]['ops'].extend(mock_vops['ops'])
else:
if 'ops' in mock_vops:
ret[block_num] = {"ops" : mock_vops['ops']}
ret[block_num] = {"ops": mock_vops['ops']}
if 'ops_by_block' in mock_vops:
ret[block_num] = {"ops" : mock_vops['ops_by_block']}
ret[block_num] = {"ops": mock_vops['ops_by_block']}
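Aside: the branch above normalizes the two shapes mock vops files can take (`ops` and `ops_by_block`) into one per-block dict keyed by `ops`. A standalone sketch of that normalization; note the sketch uses `elif` where the original uses two independent `if`s, so in the original `ops_by_block` wins when both keys are present:

    def merge_mock_vops(ret, block_num, mock_vops):
        # Extend an existing entry, otherwise create one from whichever key is present.
        if block_num in ret:
            ret[block_num]['ops'].extend(mock_vops.get('ops', []))
        elif 'ops' in mock_vops:
            ret[block_num] = {'ops': mock_vops['ops']}
        elif 'ops_by_block' in mock_vops:
            ret[block_num] = {'ops': mock_vops['ops_by_block']}
        return ret

    print(merge_mock_vops({}, 42, {'ops': ['transfer']}))  # {42: {'ops': ['transfer']}}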
@ -5,13 +5,16 @@ import logging
from hive.db.adapter import Db
from hive.indexer.db_adapter_holder import DbAdapterHolder
from hive.utils.normalize import escape_characters
#pylint: disable=too-many-lines,line-too-long

# pylint: disable=too-many-lines,line-too-long

log = logging.getLogger(__name__)
DB = Db.instance()

class NotifyType(IntEnum):
"""Labels for notify `type_id` field."""

# active
new_community = 1
set_role = 2

@ -33,25 +36,38 @@ class NotifyType(IntEnum):
vote = 17

# inactive
#vote_comment = 16
# vote_comment = 16

#update_account = 19
#receive = 20
#send = 21
# update_account = 19
# receive = 20
# send = 21

# reward = 22
# power_up = 23
# power_down = 24
# message = 25

#reward = 22
#power_up = 23
#power_down = 24
#message = 25

class Notify(DbAdapterHolder):
"""Handles writing notifications/messages."""

# pylint: disable=too-many-instance-attributes,too-many-arguments
DEFAULT_SCORE = 35
_notifies = []

def __init__(self, block_num, type_id, when=None, src_id=None, dst_id=None, community_id=None,
post_id=None, payload=None, score=None, **kwargs):
def __init__(
self,
block_num,
type_id,
when=None,
src_id=None,
dst_id=None,
community_id=None,
post_id=None,
payload=None,
score=None,
**kwargs,
):
"""Create a notification."""

assert type_id, 'op is blank :('

@ -76,7 +92,7 @@ class Notify(DbAdapterHolder):
# for HF24 we started saving notifications from block 44300000
# about 90 days before release day
if block_num > 44300000:
Notify._notifies.append( self )
Notify._notifies.append(self)

@classmethod
def set_lastread(cls, account, date):

@ -87,20 +103,22 @@ class Notify(DbAdapterHolder):
def to_db_values(self):
"""Generate a db row."""
return "( {}, {}, {}, '{}'::timestamp, {}, {}, {}, {}, {} )".format(
self.block_num
, self.enum.value
, self.score
, self.when if self.when else "NULL"
, self.src_id if self.src_id else "NULL"
, self.dst_id if self.dst_id else "NULL"
, self.post_id if self.post_id else "NULL"
, self.community_id if self.community_id else "NULL"
, escape_characters(str(self.payload)) if self.payload else "NULL")
self.block_num,
self.enum.value,
self.score,
self.when if self.when else "NULL",
self.src_id if self.src_id else "NULL",
self.dst_id if self.dst_id else "NULL",
self.post_id if self.post_id else "NULL",
self.community_id if self.community_id else "NULL",
escape_characters(str(self.payload)) if self.payload else "NULL",
)

@classmethod
def flush(cls):
"""Store buffered notifs"""
def execute_query( sql, values ):

def execute_query(sql, values):
values_str = ','.join(values)
actual_query = sql.format(values_str)
cls.db.query_prepared(actual_query)

@ -121,7 +139,7 @@ class Notify(DbAdapterHolder):
values_limit = 1000

for notify in Notify._notifies:
values.append( f"{notify.to_db_values()}" )
values.append(f"{notify.to_db_values()}")

if len(values) >= values_limit:
execute_query(sql, values)
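Aside: `flush` above batches buffered notifications into a multi-row `INSERT ... VALUES`, executing every 1000 rows to bound statement size. A toy sketch of that accumulate-and-flush loop, with `run` standing in for the real DB call:

    def flush_in_batches(rows, run, limit=1000):
        # Collect rendered value tuples and execute whenever the batch is full.
        sql = "INSERT INTO notifs VALUES {}"
        values = []
        for row in rows:
            values.append(row)
            if len(values) >= limit:
                run(sql.format(','.join(values)))
                values.clear()
        if values:  # trailing partial batch
            run(sql.format(','.join(values)))

    flush_in_batches(["(1,2)", "(3,4)", "(5,6)"], print, limit=2)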
@ -12,9 +12,11 @@ log = logging.getLogger(__name__)
DB = Db.instance()

class Payments:
"""Handles payments to update post promotion values."""
#pylint: disable=too-few-public-methods

# pylint: disable=too-few-public-methods

@classmethod
def op_transfer(cls, op, tx_idx, num, date):

@ -26,8 +28,7 @@ class Payments:
record, author_id, permlink = result

# add payment record and return post id
sql = \
"""
sql = """
INSERT INTO hive_payments(block_num, tx_idx, post_id, from_account, to_account, amount, token) SELECT
bn, tx, hp.id, fa, ta, am, tkn
FROM

@ -43,55 +44,61 @@ ON hp.author_id=vv.auth_id AND hp.permlink_id=vv.hpd_id
RETURNING post_id
"""

post_id = DB.query_one(sql,
_block_num=record['block_num'],
_tx_idx=record['tx_idx'],
_permlink=permlink,
_author_id=author_id,
_from_account=record['from_account'],
_to_account=record['to_account'],
_amount=record['amount'],
_token=record['token']
post_id = DB.query_one(
sql,
_block_num=record['block_num'],
_tx_idx=record['tx_idx'],
_permlink=permlink,
_author_id=author_id,
_from_account=record['from_account'],
_to_account=record['to_account'],
_amount=record['amount'],
_token=record['token'],
)

amount = record['amount']
if not isinstance(amount, float):
amount = float(amount)
amount = float(amount)

if amount != 0.0 and post_id is not None:
# update post record
sql = "UPDATE hive_posts SET promoted = promoted + :val WHERE id = :id"
DB.query(sql, val=amount, id=post_id)
# update post record
sql = "UPDATE hive_posts SET promoted = promoted + :val WHERE id = :id"
DB.query(sql, val=amount, id=post_id)

@classmethod
def _validated(cls, op, tx_idx, num, date):
"""Validate and normalize the transfer op."""
# pylint: disable=unused-argument
if op['to'] != 'null':
return # only care about payments to null
return  # only care about payments to null

amount, token = parse_amount(op['amount'])
if token != 'HBD':
return # only care about HBD payments
return  # only care about HBD payments

url = op['memo']
if not cls._validate_url(url):
log.debug("invalid url: %s", url)
return # invalid url
return  # invalid url

author, permlink = cls._split_url(url)
author_id = Accounts.get_id_noexept(author)
if not author_id:
return

return [{'id': None,
return [
{
'id': None,
'block_num': num,
'tx_idx': tx_idx,
'from_account': Accounts.get_id(op['from']),
'to_account': Accounts.get_id(op['to']),
'amount': amount,
'token': token}, author_id, permlink]
'token': token,
},
author_id,
permlink,
]

@staticmethod
def _validate_url(url):
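Aside: `_validated` above is a funnel of early returns — only HBD transfers sent to the `null` account whose memo is a valid post URL survive. A compact sketch of the same gatekeeping; the parsing here is deliberately trivialized (the real code uses `parse_amount`, `_validate_url`, and account-table lookups):

    def validate_promotion(op):
        # Returns (amount, author, permlink) or None when the transfer is irrelevant.
        if op['to'] != 'null':
            return None  # promotions burn funds by paying the null account
        amount, token = op['amount'].split(' ')
        if token != 'HBD':
            return None
        memo = op['memo']
        if not memo.startswith('@') or '/' not in memo:
            return None  # simplified check; expected form: @author/permlink
        author, permlink = memo[1:].split('/', 1)
        return float(amount), author, permlink

    print(validate_promotion({'to': 'null', 'amount': '1.000 HBD', 'memo': '@alice/my-post'}))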
@ -5,19 +5,20 @@ from hive.indexer.db_adapter_holder import DbAdapterHolder
log = logging.getLogger(__name__)

class PostDataCache(DbAdapterHolder):
""" Provides cache for DB operations on post data table in order to speed up initial sync """
_data = {}

class PostDataCache(DbAdapterHolder):
"""Provides cache for DB operations on post data table in order to speed up initial sync"""

_data = {}

@classmethod
def is_cached(cls, pid):
""" Check if data is cached """
"""Check if data is cached"""
return pid in cls._data

@classmethod
def add_data(cls, pid, post_data, is_new_post):
""" Add data to cache """
"""Add data to cache"""
if not cls.is_cached(pid):
cls._data[pid] = post_data
cls._data[pid]['is_new_post'] = is_new_post

@ -29,20 +30,20 @@ class PostDataCache(DbAdapterHolder):

@classmethod
def get_post_body(cls, pid):
""" Returns body of given post from collected cache or from underlying DB storage. """
"""Returns body of given post from collected cache or from underlying DB storage."""
try:
post_data = cls._data[pid]
except KeyError:
sql = """
SELECT hpd.body FROM hive_post_data hpd WHERE hpd.id = :post_id;
"""
row = cls.db.query_row(sql, post_id = pid)
row = cls.db.query_row(sql, post_id=pid)
post_data = dict(row)
return post_data['body']

@classmethod
def flush(cls, print_query = False):
""" Flush data from cache to db """
def flush(cls, print_query=False):
"""Flush data from cache to db"""
if cls._data:
values_insert = []
values_update = []

@ -74,7 +75,7 @@ class PostDataCache(DbAdapterHolder):
if print_query:
log.info(f"Executing query:\n{sql}")
cls.db.query_prepared(sql)
values_insert.clear();
values_insert.clear()

if len(values_update) > 0:
sql = """
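Aside: `get_post_body` above is a read-through lookup — try the in-memory dict first, fall back to the database only on a `KeyError`. The same shape in isolation, with `fetch_from_db` as a stand-in for the SQL query:

    _cache = {}

    def fetch_from_db(pid):
        # Placeholder for the real `SELECT hpd.body FROM hive_post_data ...` query.
        return {'body': f'body of post {pid} from DB'}

    def get_post_body(pid):
        try:
            post_data = _cache[pid]
        except KeyError:
            post_data = fetch_from_db(pid)
        return post_data['body']

    _cache[1] = {'body': 'cached body'}
    print(get_post_body(1))  # served from cache
    print(get_post_body(2))  # falls through to the DB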
@ -24,6 +24,7 @@ from hive.utils.normalize import sbd_amount, legacy_amount, safe_img_url, escape
log = logging.getLogger(__name__)
DB = Db.instance()

class Posts(DbAdapterHolder):
"""Handles critical/core post ops and data."""

@ -68,15 +69,24 @@ class Posts(DbAdapterHolder):
if md and 'tags' in md and isinstance(md['tags'], list):
for tag in md['tags']:
if tag and isinstance(tag, str):
tags.append(tag) # No escaping needed due to used sqlalchemy formatting features
tags.append(tag)  # No escaping needed due to used sqlalchemy formatting features

sql = """
SELECT is_new_post, id, author_id, permlink_id, post_category, parent_id, community_id, is_valid, is_muted, depth
FROM process_hive_post_operation((:author)::varchar, (:permlink)::varchar, (:parent_author)::varchar, (:parent_permlink)::varchar, (:date)::timestamp, (:community_support_start_block)::integer, (:block_num)::integer, (:tags)::VARCHAR[]);
"""

row = DB.query_row(sql, author=op['author'], permlink=op['permlink'], parent_author=op['parent_author'],
parent_permlink=op['parent_permlink'], date=block_date, community_support_start_block=Community.start_block, block_num=op['block_num'], tags=tags)
row = DB.query_row(
sql,
author=op['author'],
permlink=op['permlink'],
parent_author=op['parent_author'],
parent_permlink=op['parent_permlink'],
date=block_date,
community_support_start_block=Community.start_block,
block_num=op['block_num'],
tags=tags,
)

if not row:
log.error(f"Failed to process comment_op: {op}")

@ -97,10 +107,12 @@ class Posts(DbAdapterHolder):
is_new_post = result['is_new_post']
if is_new_post:
# add content data to hive_post_data
post_data = dict(title=op['title'] if op['title'] else '',
img_url=img_url if img_url else '',
body=op['body'] if op['body'] else '',
json=op['json_metadata'] if op['json_metadata'] else '')
post_data = dict(
title=op['title'] if op['title'] else '',
img_url=img_url if img_url else '',
body=op['body'] if op['body'] else '',
json=op['json_metadata'] if op['json_metadata'] else '',
)
else:
# edit case. Now we need to (potentially) apply patch to the post body.
# empty new body means no body edit, not clear (same with other data)

@ -111,14 +123,20 @@ class Posts(DbAdapterHolder):
new_img = img_url if img_url else '' if new_json else None
post_data = dict(title=new_title, img_url=new_img, body=new_body, json=new_json)

# log.info("Adding author: {} permlink: {}".format(op['author'], op['permlink']))
# log.info("Adding author: {} permlink: {}".format(op['author'], op['permlink']))
PostDataCache.add_data(result['id'], post_data, is_new_post)

if not DbState.is_initial_sync():
if error:
author_id = result['author_id']
Notify(block_num=op['block_num'], type_id='error', dst_id=author_id, when=block_date,
post_id=result['id'], payload=error)
Notify(
block_num=op['block_num'],
type_id='error',
dst_id=author_id,
when=block_date,
post_id=result['id'],
payload=error,
)

@classmethod
def flush_into_db(cls):

@ -197,107 +215,105 @@ class Posts(DbAdapterHolder):
""" Process comment payment operations """
for k, v in cls.comment_payout_ops.items():
author = None
permlink = None
author = None
permlink = None

# author payouts
author_rewards = 0
author_rewards_hive = None
author_rewards_hbd = None
author_rewards_vests = None
author_rewards = 0
author_rewards_hive = None
author_rewards_hbd = None
author_rewards_vests = None

# total payout for comment
#comment_author_reward = None
#curators_vesting_payout = None
total_payout_value = None;
curator_payout_value = None;
#beneficiary_payout_value = None;
# comment_author_reward = None
# curators_vesting_payout = None
total_payout_value = None
curator_payout_value = None
# beneficiary_payout_value = None;

payout = None
pending_payout = None
payout = None
pending_payout = None

payout_at = None
last_payout_at = None
cashout_time = None
payout_at = None
last_payout_at = None
cashout_time = None

is_paidout = None
is_paidout = None

total_vote_weight = None
total_vote_weight = None

# [final] payout indicator - by default all rewards are zero, but might be overwritten by other operations
# ABW: prior to some early HF that was not necessarily final payout since those were discussion driven so new comment/vote could trigger new cashout window, see e.g.
# soulsistashakti/re-emily-cook-let-me-introduce-myself-my-name-is-emily-cook-and-i-m-the-producer-and-presenter-of-a-monthly-film-show-film-focus-20160701t012330329z
# it emits that "final" operation at blocks: 2889020, 3053237, 3172559 and 4028469
if v[ VirtualOperationType.CommentPayoutUpdate ] is not None:
value, date = v[ VirtualOperationType.CommentPayoutUpdate ]
if author is None:
author = value['author']
permlink = value['permlink']
is_paidout = True
payout_at = date
last_payout_at = date
cashout_time = "infinity"
if v[VirtualOperationType.CommentPayoutUpdate] is not None:
value, date = v[VirtualOperationType.CommentPayoutUpdate]
if author is None:
author = value['author']
permlink = value['permlink']
is_paidout = True
payout_at = date
last_payout_at = date
cashout_time = "infinity"

pending_payout = 0
total_vote_weight = 0
pending_payout = 0
total_vote_weight = 0

# author rewards in current (final or nonfinal) payout (always comes with comment_reward_operation)
if v[ VirtualOperationType.AuthorReward ] is not None:
value, date = v[ VirtualOperationType.AuthorReward ]
if author is None:
author = value['author']
permlink = value['permlink']
author_rewards_hive = value['hive_payout']['amount']
author_rewards_hbd = value['hbd_payout']['amount']
author_rewards_vests = value['vesting_payout']['amount']
#curators_vesting_payout = value['curators_vesting_payout']['amount']
if v[VirtualOperationType.AuthorReward] is not None:
value, date = v[VirtualOperationType.AuthorReward]
if author is None:
author = value['author']
permlink = value['permlink']
author_rewards_hive = value['hive_payout']['amount']
author_rewards_hbd = value['hbd_payout']['amount']
author_rewards_vests = value['vesting_payout']['amount']
# curators_vesting_payout = value['curators_vesting_payout']['amount']

# summary of comment rewards in current (final or nonfinal) payout (always comes with author_reward_operation)
if v[ VirtualOperationType.CommentReward ] is not None:
value, date = v[ VirtualOperationType.CommentReward ]
if author is None:
author = value['author']
permlink = value['permlink']
#comment_author_reward = value['payout']
author_rewards = value['author_rewards']
total_payout_value = value['total_payout_value']
curator_payout_value = value['curator_payout_value']
#beneficiary_payout_value = value['beneficiary_payout_value']
if v[VirtualOperationType.CommentReward] is not None:
value, date = v[VirtualOperationType.CommentReward]
if author is None:
author = value['author']
permlink = value['permlink']
# comment_author_reward = value['payout']
author_rewards = value['author_rewards']
total_payout_value = value['total_payout_value']
curator_payout_value = value['curator_payout_value']
# beneficiary_payout_value = value['beneficiary_payout_value']

payout = sum([ sbd_amount(total_payout_value), sbd_amount(curator_payout_value) ])
pending_payout = 0
last_payout_at = date
payout = sum([sbd_amount(total_payout_value), sbd_amount(curator_payout_value)])
pending_payout = 0
last_payout_at = date

# estimated pending_payout from vote (if exists with actual payout the value comes from vote cast after payout)
if v[ VirtualOperationType.EffectiveCommentVote ] is not None:
value, date = v[ VirtualOperationType.EffectiveCommentVote ]
if author is None:
author = value['author']
permlink = value['permlink']
pending_payout = sbd_amount( value['pending_payout'] )
total_vote_weight = value['total_vote_weight']

cls._comment_payout_ops.append("('{}', {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
author,
escape_characters(permlink),
"NULL" if ( total_payout_value is None ) else ( "'{}'".format( legacy_amount(total_payout_value) ) ),
"NULL" if ( curator_payout_value is None ) else ( "'{}'".format( legacy_amount(curator_payout_value) ) ),
author_rewards,
"NULL" if ( author_rewards_hive is None ) else author_rewards_hive,
"NULL" if ( author_rewards_hbd is None ) else author_rewards_hbd,
"NULL" if ( author_rewards_vests is None ) else author_rewards_vests,
"NULL" if ( payout is None ) else payout,
"NULL" if ( pending_payout is None ) else pending_payout,
"NULL" if ( payout_at is None ) else ( f"'{payout_at}'::timestamp" ),
"NULL" if ( last_payout_at is None ) else ( f"'{last_payout_at}'::timestamp" ),
"NULL" if ( cashout_time is None ) else ( f"'{cashout_time}'::timestamp" ),
"NULL" if ( is_paidout is None ) else is_paidout,
"NULL" if ( total_vote_weight is None ) else total_vote_weight ))
if v[VirtualOperationType.EffectiveCommentVote] is not None:
value, date = v[VirtualOperationType.EffectiveCommentVote]
if author is None:
author = value['author']
permlink = value['permlink']
pending_payout = sbd_amount(value['pending_payout'])
total_vote_weight = value['total_vote_weight']

cls._comment_payout_ops.append(
"('{}', {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
author,
escape_characters(permlink),
"NULL" if (total_payout_value is None) else ("'{}'".format(legacy_amount(total_payout_value))),
"NULL" if (curator_payout_value is None) else ("'{}'".format(legacy_amount(curator_payout_value))),
author_rewards,
"NULL" if (author_rewards_hive is None) else author_rewards_hive,
"NULL" if (author_rewards_hbd is None) else author_rewards_hbd,
"NULL" if (author_rewards_vests is None) else author_rewards_vests,
"NULL" if (payout is None) else payout,
"NULL" if (pending_payout is None) else pending_payout,
"NULL" if (payout_at is None) else (f"'{payout_at}'::timestamp"),
"NULL" if (last_payout_at is None) else (f"'{last_payout_at}'::timestamp"),
"NULL" if (cashout_time is None) else (f"'{cashout_time}'::timestamp"),
"NULL" if (is_paidout is None) else is_paidout,
"NULL" if (total_vote_weight is None) else total_vote_weight,
)
)

n = len(cls.comment_payout_ops)
cls.comment_payout_ops.clear()

@ -305,7 +321,7 @@ class Posts(DbAdapterHolder):

@classmethod
def update_child_count(cls, child_id, op='+'):
""" Increase/decrease child count by 1 """
"""Increase/decrease child count by 1"""
sql = """
UPDATE
hive_posts

@ -332,8 +348,10 @@ class Posts(DbAdapterHolder):

@classmethod
def comment_options_op(cls, op):
""" Process comment_options_operation """
max_accepted_payout = legacy_amount(op['max_accepted_payout']) if 'max_accepted_payout' in op else '1000000.000 HBD'
"""Process comment_options_operation"""
max_accepted_payout = (
legacy_amount(op['max_accepted_payout']) if 'max_accepted_payout' in op else '1000000.000 HBD'
)
allow_votes = op['allow_votes'] if 'allow_votes' in op else True
allow_curation_rewards = op['allow_curation_rewards'] if 'allow_curation_rewards' in op else True
percent_hbd = op['percent_hbd'] if 'percent_hbd' in op else 10000

@ -355,15 +373,26 @@ class Posts(DbAdapterHolder):
hp.author_id = (SELECT id FROM hive_accounts WHERE name = :author) AND
hp.permlink_id = (SELECT id FROM hive_permlink_data WHERE permlink = :permlink)
"""
DB.query(sql, author=op['author'], permlink=op['permlink'], max_accepted_payout=max_accepted_payout,
percent_hbd=percent_hbd, allow_votes=allow_votes, allow_curation_rewards=allow_curation_rewards,
beneficiaries=dumps(beneficiaries))
DB.query(
sql,
author=op['author'],
permlink=op['permlink'],
max_accepted_payout=max_accepted_payout,
percent_hbd=percent_hbd,
allow_votes=allow_votes,
allow_curation_rewards=allow_curation_rewards,
beneficiaries=dumps(beneficiaries),
)

@classmethod
def delete(cls, op, block_date):
"""Marks a post record as being deleted."""
sql = "SELECT delete_hive_post((:author)::varchar, (:permlink)::varchar, (:block_num)::int, (:date)::timestamp);"
DB.query_no_return(sql, author=op['author'], permlink = op['permlink'], block_num=op['block_num'], date=block_date)
sql = (
"SELECT delete_hive_post((:author)::varchar, (:permlink)::varchar, (:block_num)::int, (:date)::timestamp);"
)
DB.query_no_return(
sql, author=op['author'], permlink=op['permlink'], block_num=op['block_num'], date=block_date
)
# all votes for that post that are still not pushed to DB have to be removed, since the same author/permlink
# is now free to be taken by new post and we don't want those votes to match new post
Votes.drop_votes_of_deleted_comment(op)

@ -373,7 +402,7 @@ class Posts(DbAdapterHolder):
error = None
if community_id and is_valid and not Community.is_post_valid(community_id, op):
error = 'not authorized'
#is_valid = False # TODO: reserved for future blacklist status?
# is_valid = False  # TODO: reserved for future blacklist status?
is_muted = True
return error

@ -388,14 +417,14 @@ class Posts(DbAdapterHolder):
if patch is not None and len(patch):
old_body = PostDataCache.get_post_body(id)
new_body, _ = dmp.patch_apply(patch, old_body)
#new_utf8_body = new_body.decode('utf-8')
#new_body = new_utf8_body
# new_utf8_body = new_body.decode('utf-8')
# new_body = new_utf8_body
else:
new_body = new_body_def
except ValueError as e:
# log.info("Merging a body post id: {} caused an ValueError exception {}".format(id, e))
# log.info("New body definition: {}".format(new_body_def))
# log.info("Old body definition: {}".format(old_body))
# log.info("Merging a body post id: {} caused an ValueError exception {}".format(id, e))
# log.info("New body definition: {}".format(new_body_def))
# log.info("Old body definition: {}".format(old_body))
new_body = new_body_def
except Exception as ex:
log.info(f"Merging a body post id: {id} caused an unknown exception {ex}")

@ -405,7 +434,6 @@ class Posts(DbAdapterHolder):

return new_body

@classmethod
def flush(cls):
return cls.comment_payout_op() + cls.flush_into_db()
return cls.comment_payout_op() + cls.flush_into_db()
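Aside: the long `.format()` call above renders one SQL VALUES tuple per post, substituting the literal `NULL` for every payout field no virtual operation filled in, and quoting timestamps with a `::timestamp` cast. A reduced sketch of that rendering rule:

    def sql_value(v, quote_as_timestamp=False):
        # None becomes the SQL literal NULL; timestamps get quoted and cast.
        if v is None:
            return "NULL"
        return f"'{v}'::timestamp" if quote_as_timestamp else str(v)

    payout, payout_at = None, '2016-07-01T01:23:30'
    row = f"({sql_value(payout)}, {sql_value(payout_at, True)})"
    print(row)  # (NULL, '2016-07-01T01:23:30'::timestamp)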
@@ -11,19 +11,19 @@ from hive.utils.normalize import escape_characters

 log = logging.getLogger(__name__)

 DB = Db.instance()


 class Reblog(DbAdapterHolder):
-    """ Class for reblog operations """
+    """Class for reblog operations"""

     reblog_items_to_flush = {}

     @classmethod
     def _validated_op(cls, actor, op, block_date, block_num):
-        if 'account' not in op or \
-           'author' not in op or \
-           'permlink' not in op:
+        if 'account' not in op or 'author' not in op or 'permlink' not in op:
             return None

         if op['account'] != actor:
-            return None # impersonation
+            return None  # impersonation

         if not Accounts.exists(op['account']):
             return None

@@ -32,16 +32,18 @@ class Reblog(DbAdapterHolder):
         _delete = True if ('delete' in op and op['delete'] == 'delete') else False

-        return dict(author = op['author'],
-                    permlink = op['permlink'],
-                    account = op['account'],
-                    block_date = block_date,
-                    block_num = block_num,
-                    delete = _delete )
+        return dict(
+            author=op['author'],
+            permlink=op['permlink'],
+            account=op['account'],
+            block_date=block_date,
+            block_num=block_num,
+            delete=_delete,
+        )

     @classmethod
     def reblog_op(cls, actor, op, block_date, block_num):
-        """ Process reblog operation """
+        """Process reblog operation"""
         op = cls._validated_op(actor, op, block_date, block_num)
         if not op:
             return

@@ -51,23 +53,22 @@ class Reblog(DbAdapterHolder):
         if op['delete']:
             if key in cls.reblog_items_to_flush:
                 del cls.reblog_items_to_flush[key]
-            cls.delete( op['author'], op['permlink'], op['account'] )
+            cls.delete(op['author'], op['permlink'], op['account'])
         else:
-            cls.reblog_items_to_flush[key] = { 'op': op }
+            cls.reblog_items_to_flush[key] = {'op': op}

     @classmethod
-    def delete(cls, author, permlink, account ):
-        """Remove a reblog from hive_reblogs + feed from hive_feed_cache.
-        """
+    def delete(cls, author, permlink, account):
+        """Remove a reblog from hive_reblogs + feed from hive_feed_cache."""
         sql = "SELECT delete_reblog_feed_cache( (:author)::VARCHAR, (:permlink)::VARCHAR, (:account)::VARCHAR );"
         status = DB.query_col(sql, author=author, permlink=permlink, account=account)
         assert status is not None
         if status == 0:
-            log.debug("reblog: post not found: %s/%s", author, permlink)
+            log.debug("reblog: post not found: %s/%s", author, permlink)

     @classmethod
     def flush(cls):
-        """ Flush collected data to database """
+        """Flush collected data to database"""
         sql_prefix = """
             INSERT INTO hive_reblogs (blogger_id, post_id, created_at, block_num)
             SELECT

@@ -97,14 +98,18 @@ class Reblog(DbAdapterHolder):
         for k, v in cls.reblog_items_to_flush.items():
             reblog_item = v['op']
             if count < limit:
-                values.append(f"({escape_characters(reblog_item['account'])}, {escape_characters(reblog_item['author'])}, {escape_characters(reblog_item['permlink'])}, '{reblog_item['block_date']}'::timestamp, {reblog_item['block_num']})")
+                values.append(
+                    f"({escape_characters(reblog_item['account'])}, {escape_characters(reblog_item['author'])}, {escape_characters(reblog_item['permlink'])}, '{reblog_item['block_date']}'::timestamp, {reblog_item['block_num']})"
+                )
                 count = count + 1
             else:
                 values_str = ",".join(values)
                 query = sql_prefix.format(values_str, values_str)
                 cls.db.query_prepared(query)
                 values.clear()
-                values.append(f"({escape_characters(reblog_item['account'])}, {escape_characters(reblog_item['author'])}, {escape_characters(reblog_item['permlink'])}, '{reblog_item['block_date']}'::timestamp, {reblog_item['block_num']})")
+                values.append(
+                    f"({escape_characters(reblog_item['account'])}, {escape_characters(reblog_item['author'])}, {escape_characters(reblog_item['permlink'])}, '{reblog_item['block_date']}'::timestamp, {reblog_item['block_num']})"
+                )
                 count = 1

         if len(values) > 0:

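The flush above accumulates rows into `limit`-sized VALUES chunks and issues one prepared INSERT per full chunk, plus a trailing partial chunk. A minimal sketch of that batching pattern (function and parameter names here are hypothetical, not from the diff):

    # Sketch of the chunked-INSERT pattern used by Reblog.flush (names hypothetical).
    def flush_in_chunks(rows, limit, run_insert):
        values = []
        for row in rows:
            values.append(row)                # row is an already-escaped "(...)" tuple string
            if len(values) >= limit:
                run_insert(",".join(values))  # one INSERT per full chunk
                values.clear()
        if values:                            # trailing partial chunk
            run_insert(",".join(values))
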
@@ -8,6 +8,7 @@ log = logging.getLogger(__name__)

 CACHED_ITEMS_LIMIT = 200

+
 class Reputations(DbAdapterHolder):
     _values = []
     _total_values = 0

@@ -52,25 +52,30 @@ old_sig_int_handler = None
 old_sig_term_handler = None
 trail_blocks = None

+
 def set_handlers():
     global old_sig_int_handler
     global old_sig_term_handler
     old_sig_int_handler = signal(SIGINT, finish_signals_handler)
     old_sig_term_handler = signal(SIGTERM, finish_signals_handler)

+
 def restore_handlers():
     signal(SIGINT, old_sig_int_handler)
     signal(SIGTERM, old_sig_term_handler)

+
 def show_info(_db):
     database_head_block = Blocks.head_num()

     sql = "SELECT level, patch_date, patched_to_revision FROM hive_db_patch_level ORDER BY level DESC LIMIT 1"
     patch_level_data = _db.query_row(sql)

-    from hive.utils.misc import show_app_version;
+    from hive.utils.misc import show_app_version

     show_app_version(log, database_head_block, patch_level_data)

+
 def _blocks_data_provider(blocks_data_provider):
     try:
         futures = blocks_data_provider.start()

@@ -83,13 +88,15 @@ def _blocks_data_provider(blocks_data_provider):
         log.exception("Exception caught during fetching blocks data")
         raise

+
 def _block_consumer(blocks_data_provider, is_initial_sync, lbound, ubound):
     from hive.utils.stats import minmax
+
     is_debug = log.isEnabledFor(10)
     num = 0
     time_start = OPSM.start()
     rate = {}
-    LIMIT_FOR_PROCESSED_BLOCKS = 1000;
+    LIMIT_FOR_PROCESSED_BLOCKS = 1000

     rate = minmax(rate, 0, 1.0, 0)
     sync_type_prefix = "[INITIAL SYNC]" if is_initial_sync else "[FAST SYNC]"

@@ -103,23 +110,27 @@ def _block_consumer(blocks_data_provider, is_initial_sync, lbound, ubound):
         ttm = ftm + otm + wtm
         log.info(f"Elapsed time: {stop :.4f}s. Calculated elapsed time: {ttm :.4f}s. Difference: {stop - ttm :.4f}s")
         if rate:
-            log.info(f"Highest block processing rate: {rate['max'] :.4f} bps. From: {rate['max_from']} To: {rate['max_to']}")
-            log.info(f"Lowest block processing rate: {rate['min'] :.4f} bps. From: {rate['min_from']} To: {rate['min_to']}")
+            log.info(
+                f"Highest block processing rate: {rate['max'] :.4f} bps. From: {rate['max_from']} To: {rate['max_to']}"
+            )
+            log.info(
+                f"Lowest block processing rate: {rate['min'] :.4f} bps. From: {rate['min_from']} To: {rate['min_to']}"
+            )
         log.info("=== TOTAL STATS ===")

     try:
-        Blocks.set_end_of_sync_lib( ubound )
+        Blocks.set_end_of_sync_lib(ubound)
         count = ubound - lbound
         timer = Timer(count, entity='block', laps=['rps', 'wps'])

         while lbound < ubound:
-            number_of_blocks_to_proceed = min( [ LIMIT_FOR_PROCESSED_BLOCKS, ubound - lbound ] )
+            number_of_blocks_to_proceed = min([LIMIT_FOR_PROCESSED_BLOCKS, ubound - lbound])
             time_before_waiting_for_data = perf()

-            blocks = blocks_data_provider.get( number_of_blocks_to_proceed )
+            blocks = blocks_data_provider.get(number_of_blocks_to_proceed)

             if not can_continue_thread():
-                break;
+                break

             assert len(blocks) == number_of_blocks_to_proceed

@@ -134,8 +145,7 @@ def _block_consumer(blocks_data_provider, is_initial_sync, lbound, ubound):
             timer.batch_finish(len(blocks))
             time_current = perf()

-            prefix = ("%s Got block %d @ %s" % (
-                sync_type_prefix, to - 1, blocks[-1].get_date()))
+            prefix = "%s Got block %d @ %s" % (sync_type_prefix, to - 1, blocks[-1].get_date())
             log.info(timer.batch_status(prefix))
             log.info("%s Time elapsed: %fs", sync_type_prefix, time_current - time_start)
             log.info("%s Current system time: %s", sync_type_prefix, datetime.now().strftime("%H:%M:%S"))

@@ -168,10 +178,11 @@ def _block_consumer(blocks_data_provider, is_initial_sync, lbound, ubound):
     print_summary()
     return num

-def _process_blocks_from_provider(self, massive_block_provider, is_initial_sync, lbound, ubound):
-    assert issubclass( type(massive_block_provider), BlocksProviderBase )
-    with ThreadPoolExecutor(max_workers = 2) as pool:
+
+def _process_blocks_from_provider(self, massive_block_provider, is_initial_sync, lbound, ubound):
+    assert issubclass(type(massive_block_provider), BlocksProviderBase)
+
+    with ThreadPoolExecutor(max_workers=2) as pool:
         block_data_provider_future = pool.submit(_blocks_data_provider, massive_block_provider)
         blockConsumerFuture = pool.submit(_block_consumer, massive_block_provider, is_initial_sync, lbound, ubound)

@@ -184,6 +195,7 @@ def _process_blocks_from_provider(self, massive_block_provider, is_initial_sync,
     if block_data_provider_exception:
         raise block_data_provider_exception

+
 class DBSync:
     def __init__(self, conf, db, steem, live_context):
         self._conf = conf

@@ -201,8 +213,8 @@ class DBSync:
         return self

     def __exit__(self, exc_type, value, traceback):
-        #During massive-sync every object has own copy of database, as a result all copies have to be closed
-        #During live-sync an original database is used and can't be closed, because it can be used later.
+        # During massive-sync every object has own copy of database, as a result all copies have to be closed
+        # During live-sync an original database is used and can't be closed, because it can be used later.
         if not DbLiveContextHolder.is_live_context():
             Blocks.close_own_db_access()

@@ -210,14 +222,16 @@ class DBSync:
     def _update_chain_state(self):
         """Update basic state props (head block, feed price) in db."""
         state = self._steem.gdgp_extended()
-        self._db.query("""UPDATE hive_state SET block_num = :block_num,
+        self._db.query(
+            """UPDATE hive_state SET block_num = :block_num,
                        steem_per_mvest = :spm, usd_per_steem = :ups,
                        sbd_per_steem = :sps, dgpo = :dgpo""",
-                       block_num=Blocks.head_num(),
-                       spm=state['steem_per_mvest'],
-                       ups=state['usd_per_steem'],
-                       sps=state['sbd_per_steem'],
-                       dgpo=json.dumps(state['dgpo']))
+            block_num=Blocks.head_num(),
+            spm=state['steem_per_mvest'],
+            ups=state['usd_per_steem'],
+            sps=state['sbd_per_steem'],
+            dgpo=json.dumps(state['dgpo']),
+        )
         return state['dgpo']['head_block_number']

     def from_steemd(self, is_initial_sync=False, chunk_size=1000):

@@ -237,32 +251,28 @@ class DBSync:
         massive_blocks_data_provider = None
         databases = None
         if self._conf.get('hived_database_url'):
-            databases = MassiveBlocksDataProviderHiveDb.Databases( self._conf )
+            databases = MassiveBlocksDataProviderHiveDb.Databases(self._conf)
             massive_blocks_data_provider = MassiveBlocksDataProviderHiveDb(
-                databases
-                , self._conf.get( 'max_batch' )
-                , lbound
-                , ubound
-                , can_continue_thread
-                , set_exception_thrown
+                databases, self._conf.get('max_batch'), lbound, ubound, can_continue_thread, set_exception_thrown
             )
         else:
             massive_blocks_data_provider = MassiveBlocksDataProviderHiveRpc(
-                self._conf
-                , self._steem
-                , self._conf.get( 'max_workers' )
-                , self._conf.get( 'max_workers' )
-                , self._conf.get( 'max_batch' )
-                , lbound
-                , ubound
-                , can_continue_thread
-                , set_exception_thrown
+                self._conf,
+                self._steem,
+                self._conf.get('max_workers'),
+                self._conf.get('max_workers'),
+                self._conf.get('max_batch'),
+                lbound,
+                ubound,
+                can_continue_thread,
+                set_exception_thrown,
             )
-        _process_blocks_from_provider( self, massive_blocks_data_provider, is_initial_sync, lbound, ubound )
+        _process_blocks_from_provider(self, massive_blocks_data_provider, is_initial_sync, lbound, ubound)

         if databases:
             databases.close()

+
 class MassiveSync(DBSync):
     def __init__(self, conf, db, steem):
         super().__init__(conf, db, steem, False)

@@ -276,7 +286,7 @@ class MassiveSync(DBSync):
         if not can_continue_thread():
             return

-    def load_mock_data(self,mock_block_data_path):
+    def load_mock_data(self, mock_block_data_path):
         if mock_block_data_path:
             MockBlockProvider.load_block_data(mock_block_data_path)
             # MockBlockProvider.print_data()

@@ -292,8 +302,10 @@ class MassiveSync(DBSync):
         # ensure db schema up to date, check app status
         DbState.initialize()
         if self._conf.get("log_explain_queries"):
-            is_superuser = self._db.query_one( "SELECT is_superuser()" )
-            assert is_superuser, 'The parameter --log_explain_queries=true can be used only when connect to the database with SUPERUSER privileges'
+            is_superuser = self._db.query_one("SELECT is_superuser()")
+            assert (
+                is_superuser
+            ), 'The parameter --log_explain_queries=true can be used only when connect to the database with SUPERUSER privileges'

         _is_consistency = Blocks.is_consistency()
         if not _is_consistency:

@@ -303,7 +315,7 @@ class MassiveSync(DBSync):
         paths = self._conf.get("mock_block_data_path") or []
         for path in paths:
-            self.load_mock_data(path)
+            self.load_mock_data(path)

         mock_vops_data_path = self._conf.get("mock_vops_data_path")
         if mock_vops_data_path:

@@ -350,6 +362,7 @@ class MassiveSync(DBSync):
         assert trail_blocks >= 0
         assert trail_blocks <= 100

+
 class LiveSync(DBSync):
     def __init__(self, conf, db, steem):
         super().__init__(conf, db, steem, True)

@@ -363,15 +376,26 @@ class LiveSync(DBSync):
             executor.submit(PayoutStats.generate)
             executor.submit(Mentions.refresh)

-    def _stream_blocks(self, start_from, breaker, exception_reporter, trail_blocks=0, max_gap=100, do_stale_block_check=True):
+    def _stream_blocks(
+        self, start_from, breaker, exception_reporter, trail_blocks=0, max_gap=100, do_stale_block_check=True
+    ):
         """Stream blocks. Returns a generator."""
-        return BlockStream.stream(self._conf, self._steem, start_from, breaker, exception_reporter, trail_blocks, max_gap, do_stale_block_check)
+        return BlockStream.stream(
+            self._conf,
+            self._steem,
+            start_from,
+            breaker,
+            exception_reporter,
+            trail_blocks,
+            max_gap,
+            do_stale_block_check,
+        )

     def listen(self, trail_blocks, max_sync_block, do_stale_block_check):
         """Live (block following) mode.
-        trail_blocks - how many blocks need to be collected to start processed the oldest ( delay in blocks processing against blocks collecting )
-        max_sync_block - limit of blocks to sync, the function will return if it is reached
-        do_stale_block_check - check if the last collected block is not older than 60s
+        trail_blocks - how many blocks need to be collected to start processed the oldest ( delay in blocks processing against blocks collecting )
+        max_sync_block - limit of blocks to sync, the function will return if it is reached
+        do_stale_block_check - check if the last collected block is not older than 60s
         """

         # debug: no max gap if disable_sync in effect

@@ -384,12 +408,18 @@ class LiveSync(DBSync):
         if hive_head >= max_sync_block:
             self.refresh_sparse_stats()
-            log.info("[LIVE SYNC] Exiting due to block limit exceeded: synced block number: %d, max_sync_block: %d", hive_head, max_sync_block)
+            log.info(
+                "[LIVE SYNC] Exiting due to block limit exceeded: synced block number: %d, max_sync_block: %d",
+                hive_head,
+                max_sync_block,
+            )
             return

-        for block in self._stream_blocks(hive_head + 1, can_continue_thread, set_exception_thrown, trail_blocks, max_gap, do_stale_block_check):
+        for block in self._stream_blocks(
+            hive_head + 1, can_continue_thread, set_exception_thrown, trail_blocks, max_gap, do_stale_block_check
+        ):
             if not can_continue_thread():
-                break;
+                break
             num = block.get_num()
             log.info("[LIVE SYNC] =====> About to process block %d with timestamp %s", num, block.get_date())

@@ -400,12 +430,17 @@ class LiveSync(DBSync):
             ftm = FSM.log_current("Flushing times")

             ms = (perf() - start_time) * 1000
-            log.info("[LIVE SYNC] <===== Processed block %d at %s --% 4d txs"
-                     " --% 5dms%s", num, block.get_date(), block.get_number_of_transactions(),
-                     ms, ' SLOW' if ms > 1000 else '')
+            log.info(
+                "[LIVE SYNC] <===== Processed block %d at %s --% 4d txs" " --% 5dms%s",
+                num,
+                block.get_date(),
+                block.get_number_of_transactions(),
+                ms,
+                ' SLOW' if ms > 1000 else '',
+            )
             log.info("[LIVE SYNC] Current system time: %s", datetime.now().strftime("%H:%M:%S"))

-            if num % 1200 == 0: #1hour
+            if num % 1200 == 0:  # 1hour
                 log.warning("head block %d @ %s", num, block.get_date())
                 log.info("[LIVE SYNC] hourly stats")

@@ -413,9 +448,9 @@ class LiveSync(DBSync):
                 with ThreadPoolExecutor(max_workers=2) as executor:
                     executor.submit(PayoutStats.generate)
                     executor.submit(Mentions.refresh)
-            if num % 200 == 0: #10min
+            if num % 200 == 0:  # 10min
                 update_communities_posts_and_rank(self._db)
-            if num % 20 == 0: #1min
+            if num % 20 == 0:  # 1min
                 self._update_chain_state()

             PC.broadcast(BroadcastObject('sync_current_block', num, 'blocks'))

@@ -428,6 +463,7 @@ class LiveSync(DBSync):
     def run(self):
         import sys
+
         max_block_limit = sys.maxsize
         do_stale_block_check = True
         if self._conf.get('test_max_block'):

@@ -435,11 +471,13 @@ class LiveSync(DBSync):
             do_stale_block_check = False
             # Correct max_block_limit by trail_blocks
             max_block_limit = max_block_limit - trail_blocks
-            log.info("max_block_limit corrected by specified trail_blocks number: %d is: %d", trail_blocks, max_block_limit)
+            log.info(
+                "max_block_limit corrected by specified trail_blocks number: %d is: %d", trail_blocks, max_block_limit
+            )

         if self._conf.get('test_disable_sync'):
             # debug mode: no sync, just stream
-            result = self.listen(trail_blocks, max_block_limit, do_stale_block_check)
+            result = self.listen(trail_blocks, max_block_limit, do_stale_block_check)
             restore_handlers()
             return result

@@ -452,8 +490,11 @@ class LiveSync(DBSync):
             head = Blocks.head_num()
             if head >= max_block_limit:
                 self.refresh_sparse_stats()
-                log.info("Exiting [LIVE SYNC] because irreversible block sync reached specified block limit: %d", max_block_limit)
-                break;
+                log.info(
+                    "Exiting [LIVE SYNC] because irreversible block sync reached specified block limit: %d",
+                    max_block_limit,
+                )
+                break

             try:
                 # listen for new blocks

@@ -466,12 +507,13 @@ class LiveSync(DBSync):
             if head >= max_block_limit:
                 self.refresh_sparse_stats()
                 log.info("Exiting [LIVE SYNC] because of specified block limit: %d", max_block_limit)
-                break;
+                break

             if not can_continue_thread():
                 break
         restore_handlers()

+
 class Sync:
     """Manages the sync/index process.

@@ -489,15 +531,16 @@ class Sync:
     def __enter__(self):
         return self

-    def __exit__(self, exc_type, value, traceback): pass
+    def __exit__(self, exc_type, value, traceback):
+        pass

     def run(self):
         """Initialize state; setup/recovery checks; sync and runloop."""
         with MassiveSync(conf=self._conf, db=self._db, steem=self._steem) as massive_sync:
-            massive_sync.run()
+            massive_sync.run()

         if not can_continue_thread():
-            return;
+            return

         with LiveSync(conf=self._conf, db=self._db, steem=self._steem) as live_sync:
-            live_sync.run()
+            live_sync.run()

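A condensed view of the two-worker producer/consumer pair wired up in `_process_blocks_from_provider` above: one future fills the provider's internal queue, the other drains it. The sketch keeps only that shape and is not the full implementation:

    from concurrent.futures import ThreadPoolExecutor

    def run_provider_and_consumer(provider, is_initial_sync, lbound, ubound):
        # Fetcher feeds the provider's queue; consumer processes blocks.
        # Consumer errors surface via result(); fetcher errors are re-raised
        # afterwards, mirroring the error handling visible in the diff.
        with ThreadPoolExecutor(max_workers=2) as pool:
            fetcher = pool.submit(_blocks_data_provider, provider)
            consumer = pool.submit(_block_consumer, provider, is_initial_sync, lbound, ubound)
            consumer.result()
            exc = fetcher.exception()
            if exc:
                raise exc
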
@@ -8,8 +8,10 @@ from hive.utils.normalize import escape_characters

 log = logging.getLogger(__name__)

+
 class Votes(DbAdapterHolder):
-    """ Class for managing posts votes """
+    """Class for managing posts votes"""

     _votes_data = collections.OrderedDict()
     _votes_per_post = {}

@@ -17,11 +19,11 @@ class Votes(DbAdapterHolder):
     @classmethod
     def vote_op(cls, vote_operation, date):
-        """ Process vote_operation """
-        voter = vote_operation['voter']
-        author = vote_operation['author']
-        permlink = vote_operation['permlink']
-        weight = vote_operation['weight']
+        """Process vote_operation"""
+        voter = vote_operation['voter']
+        author = vote_operation['author']
+        permlink = vote_operation['permlink']
+        weight = vote_operation['weight']
         block_num = vote_operation['block_num']

         if cls.inside_flush:

@@ -40,20 +42,22 @@ class Votes(DbAdapterHolder):
         if not post_key in cls._votes_per_post:
             cls._votes_per_post[post_key] = []
         cls._votes_per_post[post_key].append(voter)
-        cls._votes_data[key] = dict(voter=voter,
-                                    author=author,
-                                    permlink=escape_characters(permlink),
-                                    vote_percent=weight,
-                                    weight=0,
-                                    rshares=0,
-                                    last_update=date,
-                                    is_effective=False,
-                                    num_changes=0,
-                                    block_num=block_num)
+        cls._votes_data[key] = dict(
+            voter=voter,
+            author=author,
+            permlink=escape_characters(permlink),
+            vote_percent=weight,
+            weight=0,
+            rshares=0,
+            last_update=date,
+            is_effective=False,
+            num_changes=0,
+            block_num=block_num,
+        )

     @classmethod
     def drop_votes_of_deleted_comment(cls, comment_delete_operation):
-        """ Remove cached votes for comment that was deleted """
+        """Remove cached votes for comment that was deleted"""
         # ABW: note that it only makes difference when comment was deleted and its author/permlink
         # reused in the same pack of blocks - in case of no reuse, votes on deleted comment won't
         # make it to the DB due to "counter_deleted = 0" condition and "INNER JOIN hive_posts"

@@ -68,35 +72,38 @@ class Votes(DbAdapterHolder):
     @classmethod
     def effective_comment_vote_op(cls, vop):
-        """ Process effective_comment_vote_operation """
+        """Process effective_comment_vote_operation"""

         post_key = f"{vop['author']}/{vop['permlink']}"
         key = f"{vop['voter']}/{post_key}"

         if key in cls._votes_data:
             vote_data = cls._votes_data[key]
-            vote_data["weight"] = vop["weight"]
-            vote_data["rshares"] = vop["rshares"]
+            vote_data["weight"] = vop["weight"]
+            vote_data["rshares"] = vop["rshares"]
             vote_data["is_effective"] = True
             vote_data["num_changes"] += 1
-            vote_data["block_num"] = vop["block_num"]
+            vote_data["block_num"] = vop["block_num"]
         else:
             if not post_key in cls._votes_per_post:
                 cls._votes_per_post[post_key] = []
             cls._votes_per_post[post_key].append(vop['voter'])
-            cls._votes_data[key] = dict(voter=vop["voter"],
-                                        author=vop["author"],
-                                        permlink=escape_characters(vop["permlink"]),
-                                        vote_percent=0,
-                                        weight=vop["weight"],
-                                        rshares=vop["rshares"],
-                                        last_update="1970-01-01 00:00:00",
-                                        is_effective=True,
-                                        num_changes=0,
-                                        block_num=vop["block_num"])
+            cls._votes_data[key] = dict(
+                voter=vop["voter"],
+                author=vop["author"],
+                permlink=escape_characters(vop["permlink"]),
+                vote_percent=0,
+                weight=vop["weight"],
+                rshares=vop["rshares"],
+                last_update="1970-01-01 00:00:00",
+                is_effective=True,
+                num_changes=0,
+                block_num=vop["block_num"],
+            )

     @classmethod
     def flush(cls):
-        """ Flush vote data from cache to database """
+        """Flush vote data from cache to database"""

         cls.inside_flush = True
         n = 0

@@ -138,10 +145,21 @@ class Votes(DbAdapterHolder):
         values_limit = 1000

         for _, vd in cls._votes_data.items():
-            values.append("({}, '{}', '{}', {}, {}, {}, {}, '{}'::timestamp, {}, {}, {})".format(
-                len(values), # for ordering
-                vd['voter'], vd['author'], vd['permlink'], vd['weight'], vd['rshares'],
-                vd['vote_percent'], vd['last_update'], vd['num_changes'], vd['block_num'], vd['is_effective']))
+            values.append(
+                "({}, '{}', '{}', {}, {}, {}, {}, '{}'::timestamp, {}, {}, {})".format(
+                    len(values),  # for ordering
+                    vd['voter'],
+                    vd['author'],
+                    vd['permlink'],
+                    vd['weight'],
+                    vd['rshares'],
+                    vd['vote_percent'],
+                    vd['last_update'],
+                    vd['num_changes'],
+                    vd['block_num'],
+                    vd['is_effective'],
+                )
+            )

             if len(values) >= values_limit:
                 values_str = ','.join(values)

@@ -9,7 +9,8 @@ from hive.server.common.helpers import (
     valid_tag,
     valid_limit,
     check_community,
-    json_date)
+    json_date,
+)

 from hive.utils.account import safe_db_profile_metadata

@@ -17,7 +18,8 @@ from hive.server.hive_api.common import get_account_id
 from hive.server.hive_api.community import list_top_communities
 from hive.server.common.mutes import Mutes

-#pylint: disable=too-many-arguments, no-else-return
+# pylint: disable=too-many-arguments, no-else-return

+
 @return_error_info
 async def get_profile(context, account, observer=None):

@@ -27,36 +29,37 @@ async def get_profile(context, account, observer=None):
     observer = valid_account(observer, allow_empty=True)

     ret = await load_profiles(db, [valid_account(account)])
-    assert ret, f'Account \'{account}\' does not exist' # should not be needed
+    assert ret, f'Account \'{account}\' does not exist'  # should not be needed

     observer_id = await get_account_id(db, observer) if observer else None
     if observer_id:
         await _follow_contexts(db, {ret[0]['id']: ret[0]}, observer_id, True)
     return ret[0]


 @return_error_info
-async def get_trending_topics(context, limit:int=10, observer:str=None):
+async def get_trending_topics(context, limit: int = 10, observer: str = None):
     """Return top trending topics across pending posts."""
     # pylint: disable=unused-argument
-    #db = context['db']
-    #observer_id = await get_account_id(db, observer) if observer else None
-    #assert not observer, 'observer not supported'
+    # db = context['db']
+    # observer_id = await get_account_id(db, observer) if observer else None
+    # assert not observer, 'observer not supported'
     limit = valid_limit(limit, 25, 10)
     out = []
     cells = await list_top_communities(context, limit)
     for name, title in cells:
         out.append((name, title or name))
-    for tag in ('photography', 'travel', 'gaming',
-                'crypto', 'newsteem', 'music', 'food'):
+    for tag in ('photography', 'travel', 'gaming', 'crypto', 'newsteem', 'music', 'food'):
         if len(out) < limit:
             out.append((tag, '#' + tag))
     return out


 @return_error_info
 async def get_post(context, author, permlink, observer=None):
     """Fetch a single post"""
     # pylint: disable=unused-variable
-    #TODO: `observer` logic for user-post state
+    # TODO: `observer` logic for user-post state
     db = context['db']
     valid_account(author)
     valid_account(observer, allow_empty=True)

@@ -70,10 +73,13 @@ async def get_post(context, author, permlink, observer=None):
     post = append_statistics_to_post(post, result[0], False)
     return post


 @return_error_info
-async def _get_ranked_posts_for_observer_communities( db, sort:str, start_author:str, start_permlink:str, limit, observer:str):
+async def _get_ranked_posts_for_observer_communities(
+    db, sort: str, start_author: str, start_permlink: str, limit, observer: str
+):
     async def execute_observer_community_query(db, sql, limit):
-        return await db.query_all(sql, observer=observer, author=start_author, permlink=start_permlink, limit=limit )
+        return await db.query_all(sql, observer=observer, author=start_author, permlink=start_permlink, limit=limit)

     if sort == 'trending':
         sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_observer_communities( (:observer)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"

@@ -105,10 +111,15 @@ async def _get_ranked_posts_for_observer_communities( db, sort:str, start_author
     assert False, "Unknown sort order"


 @return_error_info
-async def _get_ranked_posts_for_communities( db, sort:str, community, start_author:str, start_permlink:str, limit, observer:str ):
+async def _get_ranked_posts_for_communities(
+    db, sort: str, community, start_author: str, start_permlink: str, limit, observer: str
+):
     async def execute_community_query(db, sql, limit):
-        return await db.query_all(sql, community=community, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+        return await db.query_all(
+            sql, community=community, author=start_author, permlink=start_permlink, limit=limit, observer=observer
+        )

     pinned_sql = "SELECT * FROM bridge_get_ranked_post_pinned_for_community( (:community)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"

@@ -153,9 +164,11 @@ async def _get_ranked_posts_for_communities( db, sort:str, community, start_auth

 @return_error_info
-async def _get_ranked_posts_for_tag( db, sort:str, tag, start_author:str, start_permlink:str, limit, observer:str ):
+async def _get_ranked_posts_for_tag(db, sort: str, tag, start_author: str, start_permlink: str, limit, observer: str):
     async def execute_tags_query(db, sql):
-        return await db.query_all(sql, tag=tag, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+        return await db.query_all(
+            sql, tag=tag, author=start_author, permlink=start_permlink, limit=limit, observer=observer
+        )

     if sort == 'hot':
         sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"

@@ -187,10 +200,11 @@ async def _get_ranked_posts_for_tag( db, sort:str, tag, start_author:str, start_
     assert False, "Unknown sort order"


 @return_error_info
-async def _get_ranked_posts_for_all( db, sort:str, start_author:str, start_permlink:str, limit, observer:str ):
+async def _get_ranked_posts_for_all(db, sort: str, start_author: str, start_permlink: str, limit, observer: str):
     async def execute_query(db, sql):
-        return await db.query_all(sql, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
+        return await db.query_all(sql, author=start_author, permlink=start_permlink, limit=limit, observer=observer)

     if sort == 'trending':
         sql = "SELECT * FROM bridge_get_ranked_post_by_trends( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"

@@ -222,20 +236,30 @@ async def _get_ranked_posts_for_all( db, sort:str, start_author:str, start_perml
     assert False, "Unknown sort order"


 @return_error_info
-async def get_ranked_posts(context, sort:str, start_author:str='', start_permlink:str='',
-                           limit:int=20, tag:str='', observer:str=''):
+async def get_ranked_posts(
+    context,
+    sort: str,
+    start_author: str = '',
+    start_permlink: str = '',
+    limit: int = 20,
+    tag: str = '',
+    observer: str = '',
+):
     """Query posts, sorted by given method."""
     supported_sort_list = ['trending', 'hot', 'created', 'promoted', 'payout', 'payout_comments', 'muted']
     assert sort in supported_sort_list, f"Unsupported sort, valid sorts: {', '.join(supported_sort_list)}"

     db = context['db']

-    async def process_query_results( sql_result ):
+    async def process_query_results(sql_result):
         posts = []
         for row in sql_result:
             post = _bridge_post_object(row)
-            post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.BridgeApi)
+            post['active_votes'] = await find_votes_impl(
+                db, row['author'], row['permlink'], VotesPresentation.BridgeApi
+            )
             post = append_statistics_to_post(post, row, row['is_pinned'])
             posts.append(post)
         return posts

@@ -247,37 +271,47 @@ async def get_ranked_posts(context, sort:str, start_author:str='', start_permlin
     observer = valid_account(observer, allow_empty=(tag != "my"))

     if tag == "my":
-        result = await _get_ranked_posts_for_observer_communities(db, sort, start_author, start_permlink, limit, observer)
+        result = await _get_ranked_posts_for_observer_communities(
+            db, sort, start_author, start_permlink, limit, observer
+        )
         return await process_query_results(result)

     if tag and check_community(tag):
         result = await _get_ranked_posts_for_communities(db, sort, tag, start_author, start_permlink, limit, observer)
         return await process_query_results(result)

-    if ( tag and tag != "all" ):
+    if tag and tag != "all":
         result = await _get_ranked_posts_for_tag(db, sort, tag, start_author, start_permlink, limit, observer)
         return await process_query_results(result)

     result = await _get_ranked_posts_for_all(db, sort, start_author, start_permlink, limit, observer)
     return await process_query_results(result)


 @return_error_info
-async def get_account_posts(context, sort:str, account:str, start_author:str='', start_permlink:str='',
-                            limit:int=20, observer:str=None):
+async def get_account_posts(
+    context,
+    sort: str,
+    account: str,
+    start_author: str = '',
+    start_permlink: str = '',
+    limit: int = 20,
+    observer: str = None,
+):
     """Get posts for an account -- blog, feed, comments, or replies."""
     supported_sort_list = ['blog', 'feed', 'posts', 'comments', 'replies', 'payout']
     assert sort in supported_sort_list, f"Unsupported sort, valid sorts: {', '.join(supported_sort_list)}"

     db = context['db']

-    account = valid_account(account)
-    start_author = valid_account(start_author, allow_empty=True)
-    start_permlink = valid_permlink(start_permlink, allow_empty=True)
-    observer = valid_account(observer, allow_empty=True)
-    limit = valid_limit(limit, 100, 20)
+    account = valid_account(account)
+    start_author = valid_account(start_author, allow_empty=True)
+    start_permlink = valid_permlink(start_permlink, allow_empty=True)
+    observer = valid_account(observer, allow_empty=True)
+    limit = valid_limit(limit, 100, 20)

     sql = None
-    account_posts = True # set when only posts (or reblogs) of given account are supposed to be in results
+    account_posts = True  # set when only posts (or reblogs) of given account are supposed to be in results
     if sort == 'blog':
         sql = "SELECT * FROM bridge_get_account_posts_by_blog( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::INTEGER, True )"
     elif sort == 'feed':

@@ -292,7 +326,7 @@ async def get_account_posts(context, sort:str, account:str, start_author:str='',
     elif sort == 'payout':
         sql = "SELECT * FROM bridge_get_account_posts_by_payout( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"

-    sql_result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit )
+    sql_result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit)
     posts = []

     for row in sql_result:

@@ -303,7 +337,7 @@ async def get_account_posts(context, sort:str, account:str, start_author:str='',
             post['reblogged_by'] = [account]
         elif sort == 'feed':
             reblogged_by = set(row['reblogged_by'])
-            reblogged_by.discard(row['author']) # Eliminate original author of reblogged post
+            reblogged_by.discard(row['author'])  # Eliminate original author of reblogged post
             if reblogged_by:
                 reblogged_by_list = list(reblogged_by)
                 reblogged_by_list.sort()

@@ -329,7 +363,7 @@ async def get_relationship_between_accounts(context, account1, account2, observe
         'ignores': False,
         'blacklists': False,
         'follows_blacklists': False,
-        'follows_muted': False
+        'follows_muted': False,
     }

     row = dict(sql_result)

@@ -355,9 +389,10 @@ async def get_relationship_between_accounts(context, account1, account2, observe

     return result


 @return_error_info
 async def does_user_follow_any_lists(context, observer):
-    """ Tells if given observer follows any blacklist or mute list """
+    """Tells if given observer follows any blacklist or mute list"""
     blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context)

     if len(blacklists_for_user) == 0:

@@ -365,10 +400,11 @@ async def does_user_follow_any_lists(context, observer):
     else:
         return True


 @return_error_info
 async def get_follow_list(context, observer, follow_type='blacklisted'):
-    """ For given observer gives directly blacklisted/muted accounts or
-        list of blacklists/mute lists followed by observer
+    """For given observer gives directly blacklisted/muted accounts or
+    list of blacklists/mute lists followed by observer
     """
     observer = valid_account(observer)
     valid_types = dict(blacklisted=1, follow_blacklist=2, muted=4, follow_muted=8)

@@ -378,28 +414,35 @@ async def get_follow_list(context, observer, follow_type='blacklisted'):

     results = []
     if follow_type == 'follow_blacklist' or follow_type == 'follow_muted':
-        blacklists_for_user = await Mutes.get_blacklists_for_observer(observer, context, follow_type == 'follow_blacklist', follow_type == 'follow_muted')
+        blacklists_for_user = await Mutes.get_blacklists_for_observer(
+            observer, context, follow_type == 'follow_blacklist', follow_type == 'follow_muted'
+        )
         for row in blacklists_for_user:
             metadata = safe_db_profile_metadata(row['posting_json_metadata'], row['json_metadata'])

-            #list_data = await get_profile(context, row['list'])
-            #metadata = list_data["metadata"]["profile"]
+            # list_data = await get_profile(context, row['list'])
+            # metadata = list_data["metadata"]["profile"]
             blacklist_description = metadata["blacklist_description"] if "blacklist_description" in metadata else ''
             muted_list_description = metadata["muted_list_description"] if "muted_list_description" in metadata else ''
-            results.append({'name': row['list'], 'blacklist_description': blacklist_description, 'muted_list_description': muted_list_description})
-    else: # blacklisted or muted
+            results.append(
+                {
+                    'name': row['list'],
+                    'blacklist_description': blacklist_description,
+                    'muted_list_description': muted_list_description,
+                }
+            )
+    else:  # blacklisted or muted
         blacklisted_for_user = await Mutes.get_blacklisted_for_observer(observer, context, valid_types[follow_type])
         for account in blacklisted_for_user.keys():
             results.append({'name': account, 'blacklist_description': '', 'muted_list_description': ''})

     return results


 async def _follow_contexts(db, accounts, observer_id, include_mute=False):
     sql = """SELECT following, state FROM hive_follows
              WHERE follower = :account_id AND following IN :ids"""
-    rows = await db.query_all(sql,
-                              account_id=observer_id,
-                              ids=tuple(accounts.keys()))
+    rows = await db.query_all(sql, account_id=observer_id, ids=tuple(accounts.keys()))
     for row in rows:
         following_id = row[0]
         state = row[1]

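For orientation, `get_ranked_posts` above dispatches on `tag` in a fixed order; an illustrative call (account and community values invented, `context` is whatever the server wires in):

    # tag == "my"        -> observer's subscribed communities
    # tag is a community -> that community's ranking
    # any other tag      -> tag ranking
    # '' or "all"        -> global ranking
    posts = await get_ranked_posts(context, sort='trending', tag='hive-123456', observer='alice')
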
@@ -15,9 +15,10 @@ log = logging.getLogger(__name__)

 # pylint: disable=too-many-lines

+
 def append_statistics_to_post(post, row, is_pinned):
-    """ apply information such as blacklists and community names/roles to a given post """
-
+    """apply information such as blacklists and community names/roles to a given post"""
+
     post['blacklists'] = []
     if 'blacklists' in row and row['blacklists']:
         split_lists = row['blacklists'].split(',')

@@ -26,7 +27,7 @@ def append_statistics_to_post(post, row, is_pinned):
     reputation = post['author_reputation']
     if reputation < 1:
         post['blacklists'].append('reputation-0')
-    elif reputation == 1:
+    elif reputation == 1:
         post['blacklists'].append('reputation-1')

     if 'community_title' in row and row['community_title']:

@@ -44,16 +45,18 @@ def append_statistics_to_post(post, row, is_pinned):
         post['stats']['is_pinned'] = True
     return post

+
 async def load_profiles(db, names):
     """`get_accounts`-style lookup for `get_state` compat layer."""
-    sql = get_hive_accounts_info_view_query_string( names )
+    sql = get_hive_accounts_info_view_query_string(names)
     rows = await db.query_all(sql, names=tuple(names))
     return [_bridge_profile_object(row) for row in rows]

+
 def _bridge_profile_object(row):
     """Convert an internal account record into legacy-steemd style."""

-    #Important. The member `sp` in `stats` is removed, because currently the hivemind doesn't hold any balances.
+    # Important. The member `sp` in `stats` is removed, because currently the hivemind doesn't hold any balances.
     #            The member `vote_weight` from `hive_accounts` is removed as well.
     profile = safe_db_profile_metadata(row['posting_json_metadata'], row['json_metadata'])

@@ -71,15 +74,21 @@ def _bridge_profile_object(row):
             'followers': row['followers'],
         },
         'metadata': {
-            'profile': {'name': profile['name'],
-                        'about': profile['about'],
-                        'website': profile['website'],
-                        'location': profile['location'],
-                        'cover_image': profile['cover_image'],
-                        'profile_image': profile['profile_image'],
-                        'blacklist_description': profile['blacklist_description'] if 'blacklist_description' in profile else '',
-                        'muted_list_description': profile['muted_list_description'] if 'muted_list_description' in profile else ''
-                        }}}
+            'profile': {
+                'name': profile['name'],
+                'about': profile['about'],
+                'website': profile['website'],
+                'location': profile['location'],
+                'cover_image': profile['cover_image'],
+                'profile_image': profile['profile_image'],
+                'blacklist_description': profile['blacklist_description'] if 'blacklist_description' in profile else '',
+                'muted_list_description': profile['muted_list_description']
+                if 'muted_list_description' in profile
+                else '',
+            }
+        },
+    }

+
 def _bridge_post_object(row, truncate_body=0):
     """Given a hive_posts row, create a legacy-style post object."""

@@ -108,14 +117,14 @@ def _bridge_post_object(row, truncate_body=0):
     post['payout_at'] = json_date(row['payout_at'])
     post['payout'] = float(row['payout'] + row['pending_payout'])
     post['pending_payout_value'] = _amount(0 if paid else post['payout'])
-    post['author_payout_value'] = _amount(0) # supplemented below
-    post['curator_payout_value'] = _amount(0) # supplemented below
+    post['author_payout_value'] = _amount(0)  # supplemented below
+    post['curator_payout_value'] = _amount(0)  # supplemented below
     post['promoted'] = _amount(row['promoted'])

     post['replies'] = []
     post['author_reputation'] = rep_log10(row['author_rep'])

-    neg_rshares = ( row['rshares'] - row['abs_rshares'] ) // 2 # effectively sum of all negative rshares
+    neg_rshares = (row['rshares'] - row['abs_rshares']) // 2  # effectively sum of all negative rshares
     # take negative rshares, divide by 2, truncate 10 digits (plus neg sign),
     # and count digits. creates a cheap log10, stake-based flag weight.
     # result: 1 = approx $400 of downvoting stake; 2 = $4,000; etc

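The digit-counting trick described in the comments above can be read as a cheap log10; a sketch under those comments' description (the exact off-by-one handling in hivemind may differ):

    def cheap_flag_weight(neg_rshares):
        # halve, then count digits past the first ten (and past the minus sign);
        # each extra digit is roughly another power of ten of downvoting stake
        return max(len(str(neg_rshares // 2)) - 11, 0)
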
@@ -125,10 +134,10 @@ def _bridge_post_object(row, truncate_body=0):
         'hide': row['is_hidden'],
         'gray': row['is_grayed'],
         'total_votes': row['total_votes'],
-                     'flag_weight': float(flag_weight)} # TODO: down_weight
+        'flag_weight': float(flag_weight),
+    }  # TODO: down_weight

-    #post['author_reputation'] = rep_to_raw(row['author_rep'])
+    # post['author_reputation'] = rep_to_raw(row['author_rep'])

     post['url'] = row['url']
     post['beneficiaries'] = row['beneficiaries']

@@ -144,10 +153,11 @@ def _bridge_post_object(row, truncate_body=0):
     if row['depth'] > 0:
         post['parent_author'] = row['parent_author']
         post['parent_permlink'] = row['parent_permlink_or_category']
-        post['title'] = 'RE: ' + row['root_title'] # PostSummary & comment context
+        post['title'] = 'RE: ' + row['root_title']  # PostSummary & comment context

     return post

+
 def _amount(amount, asset='HBD'):
     """Return a steem-style amount string given a (numeric, asset-str)."""
     assert asset == 'HBD', f'unhandled asset {asset}'

@@ -1,18 +1,19 @@
 """Handles building condenser-compatible response objects."""

 import logging
-from hive.server.common.helpers import (
-    valid_account,
-    valid_permlink)
-#import ujson as json
+from hive.server.common.helpers import valid_account, valid_permlink
+
+# import ujson as json

 from hive.server.bridge_api.methods import get_post
 from hive.server.common.helpers import (
-    #ApiError,
-    return_error_info)
+    # ApiError,
+    return_error_info,
+)

 log = logging.getLogger(__name__)

+
 @return_error_info
 async def get_post_header(context, author, permlink):
     """Fetch basic post data"""

@@ -36,11 +37,7 @@ async def get_post_header(context, author, permlink):

     assert row, f'Post {author}/{permlink} does not exist'

-    return dict(
-        author=row['author'],
-        permlink=row['permlink'],
-        category=row['category'],
-        depth=row['depth'])
+    return dict(author=row['author'], permlink=row['permlink'], category=row['category'], depth=row['depth'])


 @return_error_info

@@ -51,7 +48,7 @@ async def normalize_post(context, post):
     return await get_post(context, post['author'], post['permlink'])

     # decorate
-    #if core['community_id']:
+    # if core['community_id']:
     #     sql = """SELECT title FROM hive_communities WHERE id = :id"""
     #     title = await db.query_one(sql, id=core['community_id'])

@@ -65,4 +62,4 @@ async def normalize_post(context, post):
     #    ret['author_role'] = ROLES[role[0] if role else 0]
     #    ret['author_title'] = role[1] if role else ''

-    #return ret
+    # return ret

@@ -4,16 +4,14 @@ import logging

 from hive.server.bridge_api.objects import _bridge_post_object, append_statistics_to_post
 from hive.server.database_api.methods import find_votes_impl, VotesPresentation
-from hive.server.common.helpers import (
-    return_error_info,
-    valid_account,
-    valid_permlink)
+from hive.server.common.helpers import return_error_info, valid_account, valid_permlink
 from hive.server.common.mutes import Mutes

 log = logging.getLogger(__name__)

+
 @return_error_info
-async def get_discussion(context, author:str, permlink:str, observer:str=''):
+async def get_discussion(context, author: str, permlink: str, observer: str = ''):
     """Modified `get_state` thread implementation."""
     db = context['db']

@@ -28,7 +26,9 @@ async def get_discussion(context, author:str, permlink:str, observer:str=''):
     root_id = rows[0]['id']
     all_posts = {}
     root_post = _bridge_post_object(rows[0])
-    root_post['active_votes'] = await find_votes_impl(db, rows[0]['author'], rows[0]['permlink'], VotesPresentation.BridgeApi)
+    root_post['active_votes'] = await find_votes_impl(
+        db, rows[0]['author'], rows[0]['permlink'], VotesPresentation.BridgeApi
+    )
     root_post = append_statistics_to_post(root_post, rows[0], False)
     root_post['replies'] = []
     all_posts[root_id] = root_post

@@ -41,7 +41,9 @@ async def get_discussion(context, author:str, permlink:str, observer:str=''):
             parent_to_children_id_map[parent_id] = []
         parent_to_children_id_map[parent_id].append(rows[index]['id'])
         post = _bridge_post_object(rows[index])
-        post['active_votes'] = await find_votes_impl(db, rows[index]['author'], rows[index]['permlink'], VotesPresentation.BridgeApi)
+        post['active_votes'] = await find_votes_impl(
+            db, rows[index]['author'], rows[index]['permlink'], VotesPresentation.BridgeApi
+        )
         post = append_statistics_to_post(post, rows[index], False)
         post['replies'] = []
         all_posts[post['post_id']] = post

@@ -52,12 +54,13 @@ async def get_discussion(context, author:str, permlink:str, observer:str=''):
         for child_id in children:
             post['replies'].append(_ref(all_posts[child_id]))

-    #result has to be in form of dictionary of dictionaries {post_ref: post}
+    # result has to be in form of dictionary of dictionaries {post_ref: post}
     results = {}
     for key in all_posts:
         post_ref = _ref(all_posts[key])
         results[post_ref] = all_posts[key]
     return results

+
 def _ref(post):
     return post['author'] + '/' + post['permlink']

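The threading pass above resolves the tree into a flat map keyed by `author/permlink` refs; roughly the shape `get_discussion` returns (values invented):

    # {
    #     "alice/my-post":    {..., "replies": ["bob/re-my-post", "carol/re-my-post"]},
    #     "bob/re-my-post":   {..., "replies": []},
    #     "carol/re-my-post": {..., "replies": []},
    # }
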
@@ -12,13 +12,17 @@ from jsonrpcserver.exceptions import ApiError as RPCApiError

 log = logging.getLogger(__name__)

+
 class ApiError(Exception):
     """API-specific errors: unimplemented/bad params. Pass back to client."""
+
     # pylint: disable=unnecessary-pass
     pass

+
 # values -32768..-32000 are reserved
-ACCESS_TO_DELETED_POST_ERROR_CODE = -31999 # SQLSTATE = 'CEHM3'
+ACCESS_TO_DELETED_POST_ERROR_CODE = -31999  # SQLSTATE = 'CEHM3'

+
 def valid_custom_sql_error(exc):
     """Tests given DatabaseError, rethrows if it is not custom Hivemind error"""

@@ -29,8 +33,10 @@ def valid_custom_sql_error(exc):
         raise exc
     return e

+
 def return_error_info(function):
     """Async API method decorator which catches and formats exceptions."""

+
     @wraps(function)
     async def wrapper(*args, **kwargs):
         """Catch ApiError and AssertionError (always due to user error)."""

@@ -45,7 +51,7 @@ def return_error_info(function):
             raise AssertionError(msg)
         except (ApiError, AssertionError, TypeError, Exception) as e:
             if isinstance(e, KeyError):
-                #TODO: KeyError overloaded for method not found. Any KeyErrors
+                # TODO: KeyError overloaded for method not found. Any KeyErrors
                 #      captured in this decorater are likely irrelevant to
                 #      json_rpc_server. Verify. (e.g. `KeyError: 'flag_weight'`)
                 log.error("ERR3: %s\n%s", repr(e), traceback.format_exc())

@@ -65,22 +71,26 @@ def return_error_info(function):
                 raise e
             log.error("ERR0: %s\n%s", repr(e), traceback.format_exc())
             raise e
-            #return {
+            # return {
             #     "error": {
             #         "code": -32000,
             #         "message": repr(e) + " (hivemind-beta)",
             #         "trace": traceback.format_exc()}}

     return wrapper

+
 def json_date(date=None):
     """Given a db datetime, return a steemd/json-friendly version."""
-    if not date or date == datetime.datetime.max: return '1969-12-31T23:59:59'
+    if not date or date == datetime.datetime.max:
+        return '1969-12-31T23:59:59'
     return 'T'.join(str(date).split(' '))

-def get_hive_accounts_info_view_query_string(names, lite = False):

+
+def get_hive_accounts_info_view_query_string(names, lite=False):
     values = []
     for name in names:
-        values.append(f"('{name}')")
+        values.append(f"('{name}')")
     values_str = ','.join(values)
     sql = f"""
         SELECT *

@@ -92,13 +102,21 @@ def get_hive_accounts_info_view_query_string(names, lite = False):
     """
     return sql

+
 def check_community(name) -> bool:
     """Perform basic validation on community name"""
-    if (name and isinstance(name, str) and len(name) > 5 and name[:5] == 'hive-'
-        and name[5] in ['1', '2', '3'] and re.match(r'^hive-[123]\d{4,6}$', name)):
+    if (
+        name
+        and isinstance(name, str)
+        and len(name) > 5
+        and name[:5] == 'hive-'
+        and name[5] in ['1', '2', '3']
+        and re.match(r'^hive-[123]\d{4,6}$', name)
+    ):
         return True
     return False

+
 def valid_community(name, allow_empty=False):
     """Checks is given name of community matches community regex, if not asserts"""
     if not name:

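Concretely, the reformatted condition above accepts only names matching `hive-`, a leading 1, 2 or 3, and four to six more digits; a few illustrative calls (names invented):

    check_community('hive-123456')  # True: 'hive-' + '1' + 5 more digits
    check_community('hive-987654')  # False: digit after 'hive-' must be 1, 2 or 3
    check_community('steemit')      # False: must start with 'hive-'
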
@ -107,6 +125,7 @@ def valid_community(name, allow_empty=False):
|
|||
assert check_community(name), "given community name is not valid"
|
||||
return name
|
||||
|
||||
|
||||
def valid_account(name, allow_empty=False):
|
||||
"""Returns validated account name or throws Assert."""
|
||||
if not name:
|
||||
|
@ -118,6 +137,7 @@ def valid_account(name, allow_empty=False):
|
|||
assert re.match(r'^[a-z0-9-\.]+$', name), 'invalid account char'
|
||||
return name
|
||||
|
||||
|
||||
def valid_permlink(permlink, allow_empty=False):
|
||||
"""Returns validated permlink or throws Assert."""
|
||||
if not permlink:
|
||||
|
@ -127,6 +147,7 @@ def valid_permlink(permlink, allow_empty=False):
|
|||
assert len(permlink) <= 256, "invalid permlink length"
|
||||
return permlink
|
||||
|
||||
|
||||
def valid_sort(sort, allow_empty=False):
|
||||
"""Returns validated sort name or throws Assert."""
|
||||
if not sort:
|
||||
|
@ -134,11 +155,11 @@ def valid_sort(sort, allow_empty=False):
|
|||
return ""
|
||||
assert isinstance(sort, str), 'sort must be a string'
|
||||
# TODO: differentiate valid sorts on comm vs tag
|
||||
valid_sorts = ['trending', 'promoted', 'hot', 'created',
|
||||
'payout', 'payout_comments', 'muted']
|
||||
valid_sorts = ['trending', 'promoted', 'hot', 'created', 'payout', 'payout_comments', 'muted']
|
||||
assert sort in valid_sorts, f'invalid sort `{sort}`'
|
||||
return sort
|
||||
|
||||
|
||||
def valid_tag(tag, allow_empty=False):
|
||||
"""Returns validated tag or throws Assert."""
|
||||
if not tag:
|
||||
|
@ -148,28 +169,33 @@ def valid_tag(tag, allow_empty=False):
|
|||
assert re.match('^[a-z0-9-_]+$', tag), f'invalid tag `{tag}`'
|
||||
return tag
|
||||
|
||||
|
||||
def valid_number(num, default=None, name='integer value', lbound=None, ubound=None):
|
||||
"""Given a user-provided number, return a valid int, or raise."""
|
||||
if not num and num != 0:
|
||||
assert default is not None, f"{name} must be provided"
|
||||
num = default
|
||||
assert default is not None, f"{name} must be provided"
|
||||
num = default
|
||||
try:
|
||||
num = int(num)
|
||||
num = int(num)
|
||||
except (TypeError, ValueError) as e:
|
||||
raise AssertionError(str(e))
|
||||
raise AssertionError(str(e))
|
||||
if lbound is not None and ubound is not None:
|
||||
assert lbound <= num and num <= ubound, "%s = %d outside valid range [%d:%d]" % (name, num, lbound, ubound)
|
||||
assert lbound <= num and num <= ubound, "%s = %d outside valid range [%d:%d]" % (name, num, lbound, ubound)
|
||||
return num
|
||||
|
||||
|
||||
def valid_limit(limit, ubound, default):
|
||||
return valid_number(limit, default, "limit", 1, ubound)
|
||||
|
||||
|
||||
def valid_score(score, ubound, default):
|
||||
return valid_number(score, default, "score", 0, ubound)
|
||||
|
||||
|
||||
def valid_truncate(truncate_body):
|
||||
return valid_number(truncate_body, 0, "truncate_body")
|
||||
|
||||
|
||||
def valid_offset(offset, ubound=None):
|
||||
"""Given a user-provided offset, return a valid int, or raise."""
|
||||
offset = int(offset)
|
||||
|
@ -178,28 +204,32 @@ def valid_offset(offset, ubound=None):
|
|||
assert offset <= ubound, "offset too large"
|
||||
return offset
|
||||
|
||||
|
||||
def valid_follow_type(follow_type: str):
|
||||
"""Ensure follow type is valid steemd type."""
|
||||
# ABW: should be extended with blacklists etc. (and those should be implemented as next 'state' values)
|
||||
supported_follow_types = dict(blog=1, ignore=2)
|
||||
assert follow_type in supported_follow_types, f"Unsupported follow type, valid types: {', '.join(supported_follow_types.keys())}"
|
||||
assert (
|
||||
follow_type in supported_follow_types
|
||||
), f"Unsupported follow type, valid types: {', '.join(supported_follow_types.keys())}"
|
||||
return supported_follow_types[follow_type]
|
||||
|
||||
|
||||
def valid_date(date, allow_empty=False):
    """ Ensure that date is in correct format """
    """Ensure that date is in correct format"""
    if not date:
        assert allow_empty, 'Date is blank'
    check_date = False
    # check format "%Y-%m-%d %H:%M:%S"
    try:
        check_date = (date == datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").strftime('%Y-%m-%d %H:%M:%S'))
        check_date = date == datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").strftime('%Y-%m-%d %H:%M:%S')
    except ValueError:
        check_date = False
    # if check failed for format above try another format
    # check format "%Y-%m-%dT%H:%M:%S"
    if not check_date:
        try:
            check_date = (date == datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S").strftime('%Y-%m-%dT%H:%M:%S'))
            check_date = date == datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S").strftime('%Y-%m-%dT%H:%M:%S')
        except ValueError:
            pass

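# Illustrative sketch, not part of the diff: the two timestamp layouts
# accepted by valid_date above.
#
#     valid_date('2020-01-01 12:00:00')  # matches the first strptime format
#     valid_date('2020-01-01T12:00:00')  # matches the fallback 'T' format
#     valid_date('01/01/2020')           # fails both checks
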
@@ -6,12 +6,12 @@ from hive.indexer.db_adapter_holder import DbAdapterHolder

log = logging.getLogger(__name__)

class Mentions(DbAdapterHolder):


class Mentions(DbAdapterHolder):
    @classmethod
    def refresh(cls):
        """Deleting too old mentions"""

        log.warning("Deleting too old mentions")

        cls.db.query_no_return("SELECT delete_hive_posts_mentions();" )
        cls.db.query_no_return("SELECT delete_hive_posts_mentions();")

@@ -1,17 +1,18 @@
"""List of muted accounts for server process."""


class Mutes:
    """Singleton tracking muted accounts."""

    @classmethod
    async def get_blacklisted_for_observer(cls, observer, context, flags=1+2+4+8):
        """ fetch the list of users that the observer has blacklisted
            flags allow filtering the query:
            1 - accounts blacklisted by observer
            2 - accounts blacklisted by observer's follow_blacklist lists
            4 - accounts muted by observer
            8 - accounts muted by observer's follow_mutes lists
            by default all flags are set
    async def get_blacklisted_for_observer(cls, observer, context, flags=1 + 2 + 4 + 8):
        """fetch the list of users that the observer has blacklisted
        flags allow filtering the query:
        1 - accounts blacklisted by observer
        2 - accounts blacklisted by observer's follow_blacklist lists
        4 - accounts muted by observer
        8 - accounts muted by observer's follow_mutes lists
        by default all flags are set
        """
        if not observer or not context:
            return {}

@@ -27,13 +28,13 @@ class Mutes:
                blacklisted_users[account_name] = ([], [])
            if row['is_blacklisted']:
                blacklisted_users[account_name][0].append(row['source'])
            else: # muted
            else:  # muted
                blacklisted_users[account_name][1].append(row['source'])
        return blacklisted_users

    @classmethod
    async def get_blacklists_for_observer(cls, observer, context, follow_blacklist = True, follow_muted = True):
        """ fetch the list of accounts that are followed by observer through follow_blacklist/follow_muted """
    async def get_blacklists_for_observer(cls, observer, context, follow_blacklist=True, follow_muted=True):
        """fetch the list of accounts that are followed by observer through follow_blacklist/follow_muted"""
        if not observer or not context:
            return {}

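# Illustrative sketch, not part of the diff: combining the documented
# bitmask flags. Each bit enables one source of blacklist/mute entries.
#
#     flags = 1 | 4          # only the observer's own blacklist and mute list
#     flags = 1 + 2 + 4 + 8  # the default: all four sources
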
@@ -6,12 +6,12 @@ from hive.indexer.db_adapter_holder import DbAdapterHolder

log = logging.getLogger(__name__)

class PayoutStats(DbAdapterHolder):


class PayoutStats(DbAdapterHolder):
    @classmethod
    def generate(cls):
        """Re-generate payout_stats_view."""

        log.warning("Rebuilding payout_stats_view")

        cls.db.query_no_return("REFRESH MATERIALIZED VIEW CONCURRENTLY payout_stats_view;" )
        cls.db.query_no_return("REFRESH MATERIALIZED VIEW CONCURRENTLY payout_stats_view;")

@@ -10,42 +10,37 @@ from hive.server.condenser_api.methods import (
    get_followers,
    get_following,
    get_follow_count,

    get_reblogged_by,
    get_account_reputations,

    get_content,
    get_content_replies,

    get_discussions_by_trending,
    get_discussions_by_hot,
    get_discussions_by_promoted,
    get_discussions_by_created,
    get_post_discussions_by_payout,
    get_comment_discussions_by_payout,

    get_discussions_by_blog,
    get_discussions_by_feed,
    get_discussions_by_comments,
    get_replies_by_last_update,

    get_discussions_by_author_before_date,
    get_blog,
    get_blog_entries,

    get_account_votes,
    get_active_votes
    get_active_votes,
)


def _strict_list(params, expected_len, min_len=None):
    assert isinstance(params, list), "params not a list"
    if min_len is None:
        assert len(params) == expected_len, "expected %d params" % expected_len
    else:
        assert (len(params) <= expected_len and
                len(params) >= min_len), "expected %d params" % expected_len
        assert len(params) <= expected_len and len(params) >= min_len, "expected %d params" % expected_len
    return params


def _strict_query(params):
    query = _strict_list(params, 1)[0]
    assert isinstance(query, dict), "query must be dict"

@@ -53,9 +48,20 @@ def _strict_query(params):
    # remove optional-yet-blank param keys -- some clients include every key
    # possible, and steemd seems to ignore them silently. need to strip
    # them here, if blank, to avoid argument mismatch errors.
    all_keys = ['filter_tags', 'select_tags', 'select_authors', 'author',
                'start_author', 'start_permlink', 'start_tag', 'parent_author',
                'parent_permlink', 'start_parent_author', 'before_date', 'tag']
    all_keys = [
        'filter_tags',
        'select_tags',
        'select_authors',
        'author',
        'start_author',
        'start_permlink',
        'start_tag',
        'parent_author',
        'parent_permlink',
        'start_parent_author',
        'before_date',
        'tag',
    ]
    for key in all_keys:
        if key in query and not query[key]:
            del query[key]

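# Illustrative sketch, not part of the diff: effect of the blank-key
# stripping loop above on a steemd-style query dict.
#
#     query = {'tag': 'hive', 'start_author': '', 'before_date': None}
#     # after the loop: {'tag': 'hive'} -- blank optional keys are removed
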
@@ -79,6 +85,7 @@ def _strict_query(params):

    return query


@return_error_info
async def call(context, api, method, params):
    """Routes legacy-style `call` method requests.

@@ -5,11 +5,13 @@ from hive.server.database_api.methods import find_votes_impl, VotesPresentation

# pylint: disable=too-many-lines


async def get_followers(db, account: str, start: str, state: int, limit: int):
    """Get a list of accounts following given account."""
    sql = "SELECT * FROM condenser_get_followers( (:account)::VARCHAR, (:start)::VARCHAR, :type, :limit )"
    return await db.query_col(sql, account=account, start=start, type=state, limit=limit)


async def get_following(db, account: str, start: str, state: int, limit: int):
    """Get a list of accounts followed by a given account."""
    sql = "SELECT * FROM condenser_get_following( (:account)::VARCHAR, (:start)::VARCHAR, :type, :limit )"

@@ -26,6 +28,7 @@ async def get_reblogged_by(db, author: str, permlink: str):
        names.remove(author)
    return names


async def process_posts(db, sql_result, truncate_body: int = 0):
    posts = []
    for row in sql_result:

@@ -37,26 +40,36 @@ async def process_posts(db, sql_result, truncate_body: int = 0):

    return posts

async def get_by_blog_without_reblog(db, account: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0):
    """Get a list of posts for an author's blog without reblogs."""
    sql = "SELECT * FROM condenser_get_by_blog_without_reblog( (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
    result = await db.query_all(sql, author=account, permlink=start_permlink, limit=limit);
    return await process_posts(db, result, truncate_body)

async def get_by_blog_without_reblog(
    db, account: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0
):
    """Get a list of posts for an author's blog without reblogs."""
    sql = "SELECT * FROM condenser_get_by_blog_without_reblog( (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
    result = await db.query_all(sql, author=account, permlink=start_permlink, limit=limit)
    return await process_posts(db, result, truncate_body)


async def get_by_account_comments(db, account: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0):
    """Get a list of posts representing comments by an author."""
    sql = "SELECT * FROM condenser_get_by_account_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
    result = await db.query_all(sql, author=account, permlink=start_permlink, limit=limit);
    return await process_posts(db, result, truncate_body)
    """Get a list of posts representing comments by an author."""
    sql = "SELECT * FROM condenser_get_by_account_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
    result = await db.query_all(sql, author=account, permlink=start_permlink, limit=limit)
    return await process_posts(db, result, truncate_body)


async def get_by_replies_to_account(
    db, start_author: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0
):
    """Get a list of posts representing replies to an author."""
    sql = "SELECT * FROM bridge_get_account_posts_by_replies( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False )"
    result = await db.query_all(
        sql, account=start_author, author=start_author if start_permlink else '', permlink=start_permlink, limit=limit
    )
    return await process_posts(db, result, truncate_body)

async def get_by_replies_to_account(db, start_author: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0):
    """Get a list of posts representing replies to an author."""
    sql = "SELECT * FROM bridge_get_account_posts_by_replies( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False )"
    result = await db.query_all(sql, account=start_author, author=start_author if start_permlink else '', permlink=start_permlink, limit=limit);
    return await process_posts(db, result, truncate_body)

async def get_by_blog(db, account: str = '', start_author: str = '', start_permlink: str = '', limit: int = 20):
    """Get a list of posts for an author's blog."""
    sql = "SELECT * FROM condenser_get_by_blog( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
    result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit);
    return await process_posts(db, result)
    """Get a list of posts for an author's blog."""
    sql = "SELECT * FROM condenser_get_by_blog( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, :limit )"
    result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit)
    return await process_posts(db, result)

@@ -1,6 +1,6 @@
"""Routes then builds a get_state response object"""

#pylint: disable=line-too-long,too-many-lines
# pylint: disable=line-too-long,too-many-lines
import logging
from collections import OrderedDict
import ujson as json

@@ -9,19 +9,9 @@ from aiocache import cached
from hive.utils.normalize import legacy_amount
from hive.server.common.mutes import Mutes

from hive.server.condenser_api.objects import (
    load_accounts,
    _condenser_post_object)
from hive.server.common.helpers import (
    ApiError,
    return_error_info,
    valid_account,
    valid_permlink,
    valid_sort,
    valid_tag)
from hive.server.condenser_api.tags import (
    get_trending_tags,
    get_top_trending_tags_summary)
from hive.server.condenser_api.objects import load_accounts, _condenser_post_object
from hive.server.common.helpers import ApiError, return_error_info, valid_account, valid_permlink, valid_sort, valid_tag
from hive.server.condenser_api.tags import get_trending_tags, get_top_trending_tags_summary

import hive.server.condenser_api.cursor as cursor

@@ -31,19 +21,10 @@ from hive.server.database_api.methods import find_votes_impl, VotesPresentation
log = logging.getLogger(__name__)

# steemd account 'tabs' - specific post list queries
ACCOUNT_TAB_KEYS = {
    'blog': 'blog',
    'feed': 'feed',
    'comments': 'comments',
    'recent-replies': 'recent_replies'}
ACCOUNT_TAB_KEYS = {'blog': 'blog', 'feed': 'feed', 'comments': 'comments', 'recent-replies': 'recent_replies'}

# dummy account paths used by condenser - just need account object
ACCOUNT_TAB_IGNORE = [
    'followed',
    'followers',
    'permissions',
    'password',
    'settings']
ACCOUNT_TAB_IGNORE = ['followed', 'followers', 'permissions', 'password', 'settings']

# misc dummy paths used by condenser - send minimal get_state structure
CONDENSER_NOOP_URLS = [

@@ -82,6 +63,7 @@ POST_LIST_SORTS = [
    'cashout',
]


@return_error_info
async def get_state(context, path: str):
    """`get_state` reimplementation.

@@ -99,7 +81,8 @@ async def get_state(context, path: str):
        'accounts': {},
        'content': {},
        'tag_idx': {'trending': []},
        'discussion_idx': {"": {}}}
        'discussion_idx': {"": {}},
    }

    # account - `/@account/tab` (feed, blog, comments, replies)
    if part[0] and part[0][0] == '@':

@@ -118,7 +101,7 @@ async def get_state(context, path: str):
            state['content'] = _keyed_posts(posts)
            state['accounts'][account][key] = list(state['content'].keys())
        elif part[1] in ACCOUNT_TAB_IGNORE:
            pass # condenser no-op URLs
            pass  # condenser no-op URLs
        else:
            # invalid/undefined case; probably requesting `@user/permlink`,
            # but condenser still relies on a valid response for redirect.

@@ -156,6 +139,7 @@ async def get_state(context, path: str):

    return state


async def _get_account_discussion_by_key(db, account, key):
    assert account, 'account must be specified'
    assert key, 'discussion key must be specified'

@@ -173,6 +157,7 @@ async def _get_account_discussion_by_key(db, account, key):

    return posts


def _normalize_path(path):
    if path and path[0] == '/':
        path = path[1:]

@@ -194,19 +179,23 @@ def _normalize_path(path):
        parts.append('')
    return (path, parts)


def _keyed_posts(posts):
    out = OrderedDict()
    for post in posts:
        out[_ref(post)] = post
    return out


def _ref(post):
    return post['author'] + '/' + post['permlink']


def _ref_parent(post):
    return post['parent_author'] + '/' + post['parent_permlink']

async def _load_content_accounts(db, content, lite = False):


async def _load_content_accounts(db, content, lite=False):
    if not content:
        return {}
    posts = content.values()

@@ -214,6 +203,7 @@ async def _load_content_accounts(db, content, lite = False):
    accounts = await load_accounts(db, names, lite)
    return {a['name']: a for a in accounts}


async def _load_account(db, name):
    ret = await load_accounts(db, [name])
    assert ret, f'account not found: `{name}`'

@@ -222,6 +212,7 @@ async def _load_account(db, name):
        account[key] = []
    return account


async def _child_ids(db, parent_ids):
"""Load child ids for multuple parent ids."""
|
||||
sql = """
|
||||
|
@ -234,6 +225,7 @@ async def _child_ids(db, parent_ids):
|
|||
rows = await db.query_all(sql, ids=tuple(parent_ids))
|
||||
return [[row[0], row[1]] for row in rows]
|
||||
|
||||
|
||||
async def _load_discussion(db, author, permlink, observer=None):
|
||||
"""Load a full discussion thread."""
|
||||
|
||||
|
@ -245,53 +237,61 @@ async def _load_discussion(db, author, permlink, observer=None):
|
|||
replies = {}
|
||||
|
||||
for row in sql_result:
|
||||
post = _condenser_post_object(row)
|
||||
post = _condenser_post_object(row)
|
||||
|
||||
post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
|
||||
posts.append(post)
|
||||
post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
|
||||
posts.append(post)
|
||||
|
||||
parent_key = _ref_parent(post)
|
||||
_key = _ref(post)
|
||||
if parent_key not in replies:
|
||||
replies[parent_key] = []
|
||||
replies[parent_key].append(_key)
|
||||
parent_key = _ref_parent(post)
|
||||
_key = _ref(post)
|
||||
if parent_key not in replies:
|
||||
replies[parent_key] = []
|
||||
replies[parent_key].append(_key)
|
||||
|
||||
for post in posts:
|
||||
_key = _ref(post)
|
||||
if _key in replies:
|
||||
replies[_key].sort()
|
||||
post['replies'] = replies[_key]
|
||||
_key = _ref(post)
|
||||
if _key in replies:
|
||||
replies[_key].sort()
|
||||
post['replies'] = replies[_key]
|
||||
|
||||
for post in posts:
|
||||
posts_by_id[_ref(post)] = post
|
||||
posts_by_id[_ref(post)] = post
|
||||
|
||||
return posts_by_id
|
||||
|
||||
|
||||
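# Illustrative sketch, not part of the diff: the discussion index built
# above keys posts by "author/permlink" refs.
#
#     _ref(post)                    # -> 'alice/my-post'
#     replies['alice/my-post']      # -> ['bob/re-my-post', 'carol/re-my-post']
#     posts_by_id['bob/re-my-post'] # -> the reply's post object
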
@cached(ttl=1800, timeout=1200)
async def _get_feed_price(db):
    """Get a steemd-style ratio object representing feed price."""
    price = await db.query_one("SELECT usd_per_steem FROM hive_state")
    return {"base": f"{price:.3f} HBD", "quote": "1.000 HIVE"}


@cached(ttl=1800, timeout=1200)
async def _get_props_lite(db):
    """Return a minimal version of get_dynamic_global_properties data."""
    raw = json.loads(await db.query_one("SELECT dgpo FROM hive_state"))

    # convert NAI amounts to legacy
    nais = ['virtual_supply', 'current_supply', 'current_sbd_supply',
            'pending_rewarded_vesting_hive', 'pending_rewarded_vesting_shares',
            'total_vesting_fund_hive', 'total_vesting_shares']
    nais = [
        'virtual_supply',
        'current_supply',
        'current_sbd_supply',
        'pending_rewarded_vesting_hive',
        'pending_rewarded_vesting_shares',
        'total_vesting_fund_hive',
        'total_vesting_shares',
    ]
    for k in nais:
        if k in raw:
            raw[k] = legacy_amount(raw[k])

    return dict(
        time=raw['time'], #*
        time=raw['time'],  # *
        hbd_print_rate=raw['hbd_print_rate'],
        hbd_interest_rate=raw['hbd_interest_rate'],
        head_block_number=raw['head_block_number'], #*
        head_block_number=raw['head_block_number'],  # *
        total_vesting_shares=raw['total_vesting_shares'],
        total_vesting_fund_hive=raw['total_vesting_fund_hive'],
        last_irreversible_block_num=raw['last_irreversible_block_num'], #*
        last_irreversible_block_num=raw['last_irreversible_block_num'],  # *
    )

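# Illustrative sketch, not part of the diff: the steemd-style ratio object
# returned by _get_feed_price (the numbers here are made up).
#
#     {"base": "0.250 HBD", "quote": "1.000 HIVE"}  # i.e. 1 HIVE priced at 0.250 HBD
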
@@ -13,26 +13,31 @@ from hive.server.common.helpers import (
    valid_offset,
    valid_limit,
    valid_truncate,
    valid_follow_type)
    valid_follow_type,
)
from hive.server.database_api.methods import find_votes_impl, VotesPresentation

# pylint: disable=too-many-arguments,line-too-long,too-many-lines


@return_error_info
async def get_account_votes(context, account):
"""Return an info message about get_acccount_votes being unsupported."""
|
||||
    # pylint: disable=unused-argument
    assert False, "get_account_votes is no longer supported, for details see https://hive.blog/steemit/@steemitdev/additional-public-api-change"
    assert (
        False
    ), "get_account_votes is no longer supported, for details see https://hive.blog/steemit/@steemitdev/additional-public-api-change"


# Follows Queries


def _legacy_follower(follower, following, follow_type):
    return dict(follower=follower, following=following, what=[follow_type])


@return_error_info
async def get_followers(context, account: str, start: str = '', follow_type: str = None,
                        limit: int = 1000, **kwargs):
async def get_followers(context, account: str, start: str = '', follow_type: str = None, limit: int = 1000, **kwargs):
    """Get all accounts following `account`. (EOL)"""
    # `type` reserved word workaround
    if not follow_type and 'type' in kwargs:

@@ -44,12 +49,13 @@ async def get_followers(context, account: str, start: str = '', follow_type: str
        valid_account(account),
        valid_account(start, allow_empty=True),
        valid_follow_type(follow_type),
        valid_limit(limit, 1000, 1000))
        valid_limit(limit, 1000, 1000),
    )
    return [_legacy_follower(name, account, follow_type) for name in followers]


@return_error_info
async def get_following(context, account: str, start: str = '', follow_type: str = None,
                        limit: int = 1000, **kwargs):
async def get_following(context, account: str, start: str = '', follow_type: str = None, limit: int = 1000, **kwargs):
    """Get all accounts `account` follows. (EOL)"""
    # `type` reserved word workaround
    if not follow_type and 'type' in kwargs:

@@ -61,9 +67,11 @@ async def get_following(context, account: str, start: str = '', follow_type: str
        valid_account(account),
        valid_account(start, allow_empty=True),
        valid_follow_type(follow_type),
        valid_limit(limit, 1000, 1000))
        valid_limit(limit, 1000, 1000),
    )
    return [_legacy_follower(account, name, follow_type) for name in following]


@return_error_info
async def get_follow_count(context, account: str):
    """Get follow count stats. (EOL)"""

@@ -71,27 +79,25 @@ async def get_follow_count(context, account: str):
    account = valid_account(account)
    sql = "SELECT * FROM condenser_get_follow_count( (:account)::VARCHAR )"
    counters = await db.query_row(sql, account=account)
    return dict(account=account,
                following_count=counters[0],
                follower_count=counters[1])
    return dict(account=account, following_count=counters[0], follower_count=counters[1])


@return_error_info
async def get_reblogged_by(context, author: str, permlink: str):
    """Get all rebloggers of a post."""
    return await cursor.get_reblogged_by(
        context['db'],
        valid_account(author),
        valid_permlink(permlink))
    return await cursor.get_reblogged_by(context['db'], valid_account(author), valid_permlink(permlink))


@return_error_info
async def get_account_reputations(context, account_lower_bound: str = '', limit: int = 1000):
    db = context['db']
    return await _get_account_reputations_impl(db, True, account_lower_bound, limit)


async def _get_account_reputations_impl(db, fat_node_style, account_lower_bound, limit):
    """Enumerate account reputations."""
    if not account_lower_bound:
        account_lower_bound = ''
        account_lower_bound = ''
    assert isinstance(account_lower_bound, str), "invalid account_lower_bound type"
    limit = valid_limit(limit, 1000, 1000)

@@ -102,13 +108,16 @@ async def _get_account_reputations_impl(db, fat_node_style, account_lower_bound,
    else:
        return {'reputations': [dict(name=r[0], reputation=r[1]) for r in rows]}


# Content Primitives


@return_error_info
async def get_content(context, author: str, permlink: str, observer=None):
    db = context['db']
    return await _get_content_impl(db, True, author, permlink, observer)


@return_error_info
async def _get_content_impl(db, fat_node_style, author: str, permlink: str, observer=None):
    """Get a single post object."""

@@ -122,15 +131,19 @@ async def _get_content_impl(db, fat_node_style, author: str, permlink: str, obse
    if result:
        result = dict(result[0])
        post = _condenser_post_object(result, 0, fat_node_style)
        post['active_votes'] = await find_votes_impl(db, author, permlink, VotesPresentation.ActiveVotes if fat_node_style else VotesPresentation.CondenserApi)
        post['active_votes'] = await find_votes_impl(
            db, author, permlink, VotesPresentation.ActiveVotes if fat_node_style else VotesPresentation.CondenserApi
        )

    return post


@return_error_info
async def get_content_replies(context, author: str, permlink: str):
    db = context['db']
    return await _get_content_replies_impl(db, True, author, permlink)


@return_error_info
async def _get_content_replies_impl(db, fat_node_style, author: str, permlink: str):
    """Get a list of post objects based on parent."""

@@ -144,13 +157,20 @@ async def _get_content_replies_impl(db, fat_node_style, author: str, permlink: s
    for row in result:
        row = dict(row)
        post = _condenser_post_object(row, get_content_additions=fat_node_style)
        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.ActiveVotes if fat_node_style else VotesPresentation.CondenserApi)
        post['active_votes'] = await find_votes_impl(
            db,
            row['author'],
            row['permlink'],
            VotesPresentation.ActiveVotes if fat_node_style else VotesPresentation.CondenserApi,
        )
        posts.append(post)

    return posts


# Discussion Queries


def nested_query_compat(function):
    """Unpack strange format used by some clients, accepted by steemd.

@@ -163,74 +183,87 @@ def nested_query_compat(function):
    arg. This decorator checks for this specific condition and unpacks
    the query to be passed as kwargs.
    """

    @wraps(function)
    def wrapper(*args, **kwargs):
        """Checks for specific condition signature and unpacks query"""
        if args and not kwargs and len(args) == 2 and isinstance(args[1], dict):
            return function(args[0], **args[1])
        return function(*args, **kwargs)

    return wrapper

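# Illustrative sketch, not part of the diff: both call shapes accepted by
# nested_query_compat above; the first is unpacked into the second.
#
#     get_discussions_by_created(context, {'tag': 'hive', 'limit': 10})  # nested-dict form
#     get_discussions_by_created(context, tag='hive', limit=10)          # plain kwargs form
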
async def get_posts_by_given_sort(context, sort: str, start_author: str = '', start_permlink: str = '',
                                  limit: int = 20, tag: str = None,
                                  truncate_body: int = 0, filter_tags: list = None, observer:str=None):

async def get_posts_by_given_sort(
    context,
    sort: str,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    filter_tags: list = None,
    observer: str = None,
):
    """Query posts, sorted by creation date."""
    assert not filter_tags, 'filter_tags not supported'

    db = context['db']

    start_author = valid_account(start_author, allow_empty=True),
    start_permlink = valid_permlink(start_permlink, allow_empty=True),
    limit = valid_limit(limit, 100, 20),
    tag = valid_tag(tag, allow_empty=True)
    observer = valid_account(observer, allow_empty=True)
    truncate_body = valid_truncate(truncate_body)
    start_author = (valid_account(start_author, allow_empty=True),)
    start_permlink = (valid_permlink(start_permlink, allow_empty=True),)
    limit = (valid_limit(limit, 100, 20),)
    tag = valid_tag(tag, allow_empty=True)
    observer = valid_account(observer, allow_empty=True)
    truncate_body = valid_truncate(truncate_body)
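    # Note, not part of the diff: the trailing commas on the first three
    # assignments above wrap the validated values in 1-tuples; Black only
    # made the pre-existing tuples explicit as (value,), since it preserves
    # semantics rather than fixing the apparent bug.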
    posts = []
    is_community = tag[:5] == 'hive-'

    if sort == 'created':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_created( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'trending':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_trends( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'hot':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_hot( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'promoted':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_promoted( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'post_by_payout':
        if tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
    elif sort == 'comment_by_payout':
        if tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    else:
        return posts

    sql_result = await db.query_all(sql, tag=tag, author=start_author, permlink=start_permlink, limit=limit, observer=observer )
    if sort == 'created':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_created( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_created_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'trending':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_trends( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_trends_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'hot':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_hot( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_hot_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'promoted':
        if is_community:
            sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_community( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        elif tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_promoted( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_promoted_for_tag( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    elif sort == 'post_by_payout':
        if tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, False, (:observer)::VARCHAR )"
    elif sort == 'comment_by_payout':
        if tag == '':
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments( (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
        else:
            sql = "SELECT * FROM bridge_get_ranked_post_by_payout_comments_for_category( (:tag)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT, (:observer)::VARCHAR )"
    else:
        return posts

    sql_result = await db.query_all(
        sql, tag=tag, author=start_author, permlink=start_permlink, limit=limit, observer=observer
    )

    for row in sql_result:
        post = _condenser_post_object(row, truncate_body)

@@ -238,53 +271,118 @@ async def get_posts_by_given_sort(context, sort: str, start_author: str = '', st
        posts.append(post)
    return posts

@return_error_info
@nested_query_compat
async def get_discussions_by_created(context, start_author: str = '', start_permlink: str = '',
                                     limit: int = 20, tag: str = None,
                                     truncate_body: int = 0, filter_tags: list = None, observer:str=None):
    return await get_posts_by_given_sort(context, 'created', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)

@return_error_info
@nested_query_compat
async def get_discussions_by_trending(context, start_author: str = '', start_permlink: str = '',
                                      limit: int = 20, tag: str = None,
                                      truncate_body: int = 0, filter_tags: list = None, observer:str=None):
    return await get_posts_by_given_sort(context, 'trending', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
async def get_discussions_by_created(
    context,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    filter_tags: list = None,
    observer: str = None,
):
    return await get_posts_by_given_sort(
        context, 'created', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer
    )


@return_error_info
@nested_query_compat
async def get_discussions_by_hot(context, start_author: str = '', start_permlink: str = '',
                                 limit: int = 20, tag: str = None,
                                 truncate_body: int = 0, filter_tags: list = None, observer:str=None):
    return await get_posts_by_given_sort(context, 'hot', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
async def get_discussions_by_trending(
    context,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    filter_tags: list = None,
    observer: str = None,
):
    return await get_posts_by_given_sort(
        context, 'trending', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer
    )


@return_error_info
@nested_query_compat
async def get_discussions_by_promoted(context, start_author: str = '', start_permlink: str = '',
                                      limit: int = 20, tag: str = None,
                                      truncate_body: int = 0, filter_tags: list = None, observer:str=None):
    return await get_posts_by_given_sort(context, 'promoted', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer)
async def get_discussions_by_hot(
    context,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    filter_tags: list = None,
    observer: str = None,
):
    return await get_posts_by_given_sort(
        context, 'hot', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer
    )


@return_error_info
@nested_query_compat
async def get_post_discussions_by_payout(context, start_author: str = '', start_permlink: str = '',
                                         limit: int = 20, tag: str = None,
                                         truncate_body: int = 0, observer:str=None):
    return await get_posts_by_given_sort(context, 'post_by_payout', start_author, start_permlink, limit, tag, truncate_body, [], observer)
async def get_discussions_by_promoted(
    context,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    filter_tags: list = None,
    observer: str = None,
):
    return await get_posts_by_given_sort(
        context, 'promoted', start_author, start_permlink, limit, tag, truncate_body, filter_tags, observer
    )


@return_error_info
@nested_query_compat
async def get_comment_discussions_by_payout(context, start_author: str = '', start_permlink: str = '',
                                            limit: int = 20, tag: str = None,
                                            truncate_body: int = 0, observer:str=None):
    return await get_posts_by_given_sort(context, 'comment_by_payout', start_author, start_permlink, limit, tag, truncate_body, [], observer)
async def get_post_discussions_by_payout(
    context,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    observer: str = None,
):
    return await get_posts_by_given_sort(
        context, 'post_by_payout', start_author, start_permlink, limit, tag, truncate_body, [], observer
    )


@return_error_info
@nested_query_compat
async def get_discussions_by_blog(context, tag: str, start_author: str = '',
                                  start_permlink: str = '', limit: int = 20,
                                  truncate_body: int = 0, filter_tags: list = None):
async def get_comment_discussions_by_payout(
    context,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    tag: str = None,
    truncate_body: int = 0,
    observer: str = None,
):
    return await get_posts_by_given_sort(
        context, 'comment_by_payout', start_author, start_permlink, limit, tag, truncate_body, [], observer
    )


@return_error_info
@nested_query_compat
async def get_discussions_by_blog(
    context,
    tag: str,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    truncate_body: int = 0,
    filter_tags: list = None,
):
    """Retrieve account's blog posts, including reblogs."""
    assert not filter_tags, 'filter_tags not supported'
    tag = valid_account(tag)

@@ -302,23 +400,35 @@ async def get_discussions_by_blog(context, tag: str, start_author: str = '',
    for row in result:
        row = dict(row)
        post = _condenser_post_object(row, truncate_body=truncate_body)
        post['active_votes'] = await find_votes_impl(db, post['author'], post['permlink'], VotesPresentation.CondenserApi)
        post['active_votes'] = await find_votes_impl(
            db, post['author'], post['permlink'], VotesPresentation.CondenserApi
        )
        posts_by_id.append(post)

    return posts_by_id

async def get_discussions_by_feed_impl(db, account: str, start_author: str = '',
                                       start_permlink: str = '', limit: int = 20, truncate_body: int = 0, observer:str=None):

async def get_discussions_by_feed_impl(
    db,
    account: str,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    truncate_body: int = 0,
    observer: str = None,
):
    """Get a list of posts for an account's feed."""
    sql = "SELECT * FROM bridge_get_by_feed_with_reblog((:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::INTEGER)"
    result = await db.query_all(sql, account=account, author=start_author, permlink=start_permlink, limit=limit, observer=observer)
    result = await db.query_all(
        sql, account=account, author=start_author, permlink=start_permlink, limit=limit, observer=observer
    )

    posts = []
    for row in result:
        row = dict(row)
        post = _condenser_post_object(row, truncate_body=truncate_body)
        reblogged_by = set(row['reblogged_by'])
        reblogged_by.discard(row['author']) # Eliminate original author of reblogged post
        reblogged_by.discard(row['author'])  # Eliminate original author of reblogged post
        if reblogged_by:
            reblogged_by_list = list(reblogged_by)
            reblogged_by_list.sort()

@@ -329,11 +439,19 @@ async def get_discussions_by_feed_impl(db, account: str, start_author: str = '',

    return posts


@return_error_info
@nested_query_compat
async def get_discussions_by_feed(context, tag: str, start_author: str = '',
                                  start_permlink: str = '', limit: int = 20,
                                  truncate_body: int = 0, filter_tags: list = None, observer:str=None):
async def get_discussions_by_feed(
    context,
    tag: str,
    start_author: str = '',
    start_permlink: str = '',
    limit: int = 20,
    truncate_body: int = 0,
    filter_tags: list = None,
    observer: str = None,
):
    """Retrieve account's personalized feed."""
    assert not filter_tags, 'filter_tags not supported'
    return await get_discussions_by_feed_impl(

@@ -342,13 +460,21 @@ async def get_discussions_by_feed(context, tag: str, start_author: str = '',
        valid_account(start_author, allow_empty=True),
        valid_permlink(start_permlink, allow_empty=True),
        valid_limit(limit, 100, 20),
        valid_truncate(truncate_body), observer)
        valid_truncate(truncate_body),
        observer,
    )


@return_error_info
@nested_query_compat
async def get_discussions_by_comments(context, start_author: str, start_permlink: str = '',
                                      limit: int = 20, truncate_body: int = 0,
                                      filter_tags: list = None):
async def get_discussions_by_comments(
    context,
    start_author: str,
    start_permlink: str = '',
    limit: int = 20,
    truncate_body: int = 0,
    filter_tags: list = None,
):
"""Get comments by made by author."""
|
||||
    assert not filter_tags, 'filter_tags not supported'
    start_author = valid_account(start_author)

@@ -360,20 +486,26 @@ async def get_discussions_by_comments(context, start_author: str, start_permlink
    db = context['db']

    sql = "SELECT * FROM bridge_get_account_posts_by_comments( (:account)::VARCHAR, (:author)::VARCHAR, (:permlink)::VARCHAR, (:limit)::SMALLINT )"
    result = await db.query_all(sql, account=start_author, author=start_author if start_permlink else '', permlink=start_permlink, limit=limit)
    result = await db.query_all(
        sql, account=start_author, author=start_author if start_permlink else '', permlink=start_permlink, limit=limit
    )

    for row in result:
        row = dict(row)
        post = _condenser_post_object(row, truncate_body=truncate_body)
        post['active_votes'] = await find_votes_impl(db, post['author'], post['permlink'], VotesPresentation.CondenserApi)
        post['active_votes'] = await find_votes_impl(
            db, post['author'], post['permlink'], VotesPresentation.CondenserApi
        )
        posts.append(post)

    return posts


@return_error_info
@nested_query_compat
async def get_replies_by_last_update(context, start_author: str, start_permlink: str = '',
                                     limit: int = 20, truncate_body: int = 0):
async def get_replies_by_last_update(
    context, start_author: str, start_permlink: str = '', limit: int = 20, truncate_body: int = 0
):
    """Get all replies made to any of author's posts."""
    # despite the name time of last edit is not used, posts ranked by creation time (that is, their id)
    # note that in this call start_author has dual meaning:

@@ -386,12 +518,15 @@ async def get_replies_by_last_update(context, start_author: str, start_permlink:
        valid_account(start_author),
        valid_permlink(start_permlink, allow_empty=True),
        valid_limit(limit, 100, 20),
        valid_truncate(truncate_body))
        valid_truncate(truncate_body),
    )


@return_error_info
@nested_query_compat
async def get_discussions_by_author_before_date(context, author: str, start_permlink: str = '',
                                                before_date: str = '', limit: int = 10, truncate_body: int = 0):
async def get_discussions_by_author_before_date(
    context, author: str, start_permlink: str = '', before_date: str = '', limit: int = 10, truncate_body: int = 0
):
    """Retrieve account's blog posts, without reblogs.

    NOTE: before_date is completely ignored, and it appears to be broken and/or

@@ -404,7 +539,9 @@ async def get_discussions_by_author_before_date(context, author: str, start_perm
        valid_account(author),
        valid_permlink(start_permlink, allow_empty=True),
        valid_limit(limit, 100, 10),
        valid_truncate(truncate_body))
        valid_truncate(truncate_body),
    )


@return_error_info
@nested_query_compat

@@ -440,13 +577,18 @@ async def get_blog(context, account: str, start_entry_id: int = 0, limit: int =
        post = _condenser_post_object(row)

        post['active_votes'] = await find_votes_impl(db, row['author'], row['permlink'], VotesPresentation.CondenserApi)
        out.append({"blog": account,
                    "entry_id": row['entry_id'],
                    "comment": post,
                    "reblogged_on": json_date(row['reblogged_at'])})
        out.append(
            {
                "blog": account,
                "entry_id": row['entry_id'],
                "comment": post,
                "reblogged_on": json_date(row['reblogged_at']),
            }
        )

    return list(reversed(out))


@return_error_info
@nested_query_compat
async def get_blog_entries(context, account: str, start_entry_id: int = 0, limit: int = None):

@@ -471,19 +613,24 @@ async def get_blog_entries(context, account: str, start_entry_id: int = 0, limit
    out = []
    for row in result:
        row = dict(row)
        out.append({"blog": account,
                    "entry_id": row['entry_id'],
                    "author": row['author'],
                    "permlink": row['permlink'],
                    "reblogged_on": json_date(row['reblogged_at'])})
        out.append(
            {
                "blog": account,
                "entry_id": row['entry_id'],
                "author": row['author'],
                "permlink": row['permlink'],
                "reblogged_on": json_date(row['reblogged_at']),
            }
        )

    return list(reversed(out))


@return_error_info
async def get_active_votes(context, author: str, permlink: str):
    """ Returns all votes for the given post. """
    """Returns all votes for the given post."""
    valid_account(author)
    valid_permlink(permlink)
    db = context['db']

    return await find_votes_impl( db, author, permlink, VotesPresentation.ActiveVotes )
    return await find_votes_impl(db, author, permlink, VotesPresentation.ActiveVotes)

@@ -12,15 +12,17 @@ log = logging.getLogger(__name__)

# Building of legacy account objects

async def load_accounts(db, names, lite = False):


async def load_accounts(db, names, lite=False):
    """`get_accounts`-style lookup for `get_state` compat layer."""
    sql = get_hive_accounts_info_view_query_string( names, lite )
    sql = get_hive_accounts_info_view_query_string(names, lite)
    rows = await db.query_all(sql, names=tuple(names))
    return [_condenser_account_object(row) for row in rows]


def _condenser_account_object(row):
    """Convert an internal account record into legacy-steemd style."""
    #The member `vote_weight` from `hive_accounts` is removed, so currently the member `net_vesting_shares` equals zero.
    # The member `vote_weight` from `hive_accounts` is removed, so currently the member `net_vesting_shares` equals zero.

    profile = safe_db_profile_metadata(row['posting_json_metadata'], row['json_metadata'])

@@ -31,28 +33,34 @@ def _condenser_account_object(row):
        'reputation': row['reputation'],
        'net_vesting_shares': 0,
        'transfer_history': [],
        'json_metadata': json.dumps({
            'profile': {'name': profile['name'],
                        'about': profile['about'],
                        'website': profile['website'],
                        'location': profile['location'],
                        'cover_image': profile['cover_image'],
                        'profile_image': profile['profile_image'],
                        }})}
        'json_metadata': json.dumps(
            {
                'profile': {
                    'name': profile['name'],
                    'about': profile['about'],
                    'website': profile['website'],
                    'location': profile['location'],
                    'cover_image': profile['cover_image'],
                    'profile_image': profile['profile_image'],
                }
            }
        ),
    }


def _condenser_post_object(row, truncate_body=0, get_content_additions=False):
    """Given a hive_posts row, create a legacy-style post object."""
    paid = row['is_paidout']

    full_payout = row['pending_payout'] + row['payout'];
    full_payout = row['pending_payout'] + row['payout']
    post = {}
    post['author'] = row['author']
    post['permlink'] = row['permlink']

    if not row['category']:
        post['category'] = 'undefined' # condenser#3424 mitigation
        post['category'] = 'undefined'  # condenser#3424 mitigation
    else:
        post['category'] = row['category']
        post['category'] = row['category']

    post['title'] = row['title']
    post['body'] = row['body'][0:truncate_body] if truncate_body else row['body']

@@ -86,9 +94,11 @@ def _condenser_post_object(row, truncate_body=0, get_content_additions=False):
    post['percent_hbd'] = row['percent_hbd']

    if get_content_additions:
        post['id'] = row['id'] # let's be compatible with old code until this API is supported.
        post['id'] = row['id']  # let's be compatible with old code until this API is supported.
        post['author_rewards'] = row['author_rewards']
        post['max_cashout_time'] = json_date(None) # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)
        post['max_cashout_time'] = json_date(
            None
        )  # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)
        curator_payout = sbd_amount(row['curator_payout_value'])
        post['curator_payout_value'] = _amount(curator_payout)
        post['total_payout_value'] = _amount(row['payout'] - curator_payout)

@@ -104,8 +114,8 @@ def _condenser_post_object(row, truncate_body=0, get_content_additions=False):
        post['reblogged_by'] = []
        post['net_votes'] = row['net_votes']

        post['children_abs_rshares'] = 0 # see: hive/server/database_api/objects.py:68
        post['total_pending_payout_value'] = '0.000 HBD' # no data
        post['children_abs_rshares'] = 0  # see: hive/server/database_api/objects.py:68
        post['total_pending_payout_value'] = '0.000 HBD'  # no data

        if paid:
            post['total_vote_weight'] = 0

@@ -114,7 +124,7 @@ def _condenser_post_object(row, truncate_body=0, get_content_additions=False):
            post['abs_rshares'] = 0
        else:
            post['total_vote_weight'] = row['total_vote_weight']
            post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2
            post['vote_rshares'] = (row['rshares'] + row['abs_rshares']) // 2
            post['net_rshares'] = row['rshares']
            post['abs_rshares'] = row['abs_rshares']
    else:

@@ -127,6 +137,7 @@ def _condenser_post_object(row, truncate_body=0, get_content_additions=False):

    return post


def _amount(amount, asset='HBD'):
    """Return a steem-style amount string given a (numeric, asset-str)."""
    assert asset == 'HBD', f'unhandled asset {asset}'

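# Illustrative sketch, not part of the diff: the legacy amount strings the
# helpers above emit -- three decimals plus the asset symbol, matching
# literals such as '0.000 HBD' in _condenser_post_object.
#
#     _amount(1.5)  # -> '1.500 HBD' (assuming the usual %.3f formatting)
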
@@ -1,7 +1,8 @@
"""condenser_api trending tag fetching methods"""

from aiocache import cached
from hive.server.common.helpers import (return_error_info, valid_tag, valid_limit)
from hive.server.common.helpers import return_error_info, valid_tag, valid_limit


@return_error_info
@cached(ttl=7200, timeout=1200)

@@ -10,6 +11,7 @@ async def get_top_trending_tags_summary(context):
    sql = "SELECT condenser_get_top_trending_tags_summary(50)"
    return await context['db'].query_col(sql)


@return_error_info
@cached(ttl=3600, timeout=1200)
async def get_trending_tags(context, start_tag: str = '', limit: int = 250):

@@ -22,10 +24,13 @@ async def get_trending_tags(context, start_tag: str = '', limit: int = 250):

    out = []
    for row in await context['db'].query_all(sql, limit=limit, tag=start_tag):
        out.append({
            'name': row['category'],
            'comments': row['total_posts'] - row['top_posts'],
            'top_posts': row['top_posts'],
            'total_payouts': f"{row['total_payouts']:.3f} HBD"})
        out.append(
            {
                'name': row['category'],
                'comments': row['total_posts'] - row['top_posts'],
                'top_posts': row['top_posts'],
                'total_payouts': f"{row['total_payouts']:.3f} HBD",
            }
        )

    return out

@@ -6,11 +6,19 @@ from hive.server.database_api.objects import database_post_object
from hive.server.common.helpers import json_date
from hive.utils.normalize import escape_characters


@return_error_info
async def list_comments(context, start: list, limit: int = 1000, order: str = None):
    """Returns all comments, starting with the specified options."""

    supported_order_list = ['by_cashout_time', 'by_permlink', 'by_root', 'by_parent', 'by_last_update', 'by_author_last_update']
    supported_order_list = [
        'by_cashout_time',
        'by_permlink',
        'by_root',
        'by_parent',
        'by_last_update',
        'by_author_last_update',
    ]
    assert not order is None, "missing a required argument: 'order'"
    assert order in supported_order_list, f"Unsupported order, valid orders: {', '.join(supported_order_list)}"
    limit = valid_limit(limit, 1000, 1000)

@@ -18,7 +26,9 @@ async def list_comments(context, start: list, limit: int = 1000, order: str = None):

    result = []
    if order == 'by_cashout_time':
        assert len(start) == 3, "Expecting three arguments in 'start' array: cashout time, optional page start author and permlink"
        assert (
            len(start) == 3
        ), "Expecting three arguments in 'start' array: cashout time, optional page start author and permlink"
        cashout_time = start[0]
        valid_date(cashout_time)
        if cashout_time[0:4] == '1969':

@@ -38,7 +48,9 @@ async def list_comments(context, start: list, limit: int = 1000, order: str = None):
        sql = "SELECT * FROM list_comments_by_permlink(:author, :permlink, :limit)"
        result = await db.query_all(sql, author=author, permlink=permlink, limit=limit)
    elif order == 'by_root':
        assert len(start) == 4, "Expecting 4 arguments in 'start' array: discussion root author and permlink, optional page start author and permlink"
        assert (
            len(start) == 4
        ), "Expecting 4 arguments in 'start' array: discussion root author and permlink, optional page start author and permlink"
        root_author = start[0]
        valid_account(root_author)
        root_permlink = start[1]

@@ -48,9 +60,18 @@ async def list_comments(context, start: list, limit: int = 1000, order: str = None):
        start_post_permlink = start[3]
        valid_permlink(start_post_permlink, allow_empty=True)
        sql = "SELECT * FROM list_comments_by_root(:root_author, :root_permlink, :start_post_author, :start_post_permlink, :limit)"
        result = await db.query_all(sql, root_author=root_author, root_permlink=root_permlink, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
        result = await db.query_all(
            sql,
            root_author=root_author,
            root_permlink=root_permlink,
            start_post_author=start_post_author,
            start_post_permlink=start_post_permlink,
            limit=limit,
        )
    elif order == 'by_parent':
        assert len(start) == 4, "Expecting 4 arguments in 'start' array: parent post author and permlink, optional page start author and permlink"
        assert (
            len(start) == 4
        ), "Expecting 4 arguments in 'start' array: parent post author and permlink, optional page start author and permlink"
        parent_author = start[0]
        valid_account(parent_author)
        parent_permlink = start[1]

@@ -60,9 +81,18 @@ async def list_comments(context, start: list, limit: int = 1000, order: str = None):
        start_post_permlink = start[3]
        valid_permlink(start_post_permlink, allow_empty=True)
        sql = "SELECT * FROM list_comments_by_parent(:parent_author, :parent_permlink, :start_post_author, :start_post_permlink, :limit)"
        result = await db.query_all(sql, parent_author=parent_author, parent_permlink=parent_permlink, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
        result = await db.query_all(
            sql,
            parent_author=parent_author,
            parent_permlink=parent_permlink,
            start_post_author=start_post_author,
            start_post_permlink=start_post_permlink,
            limit=limit,
        )
    elif order == 'by_last_update':
        assert len(start) == 4, "Expecting 4 arguments in 'start' array: parent author, update time, optional page start author and permlink"
        assert (
            len(start) == 4
        ), "Expecting 4 arguments in 'start' array: parent author, update time, optional page start author and permlink"
        parent_author = start[0]
        valid_account(parent_author)
        updated_at = start[1]

@@ -72,9 +102,18 @@ async def list_comments(context, start: list, limit: int = 1000, order: str = None):
        start_post_permlink = start[3]
        valid_permlink(start_post_permlink, allow_empty=True)
        sql = "SELECT * FROM list_comments_by_last_update(:parent_author, :updated_at, :start_post_author, :start_post_permlink, :limit)"
        result = await db.query_all(sql, parent_author=parent_author, updated_at=updated_at, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
        result = await db.query_all(
            sql,
            parent_author=parent_author,
            updated_at=updated_at,
            start_post_author=start_post_author,
            start_post_permlink=start_post_permlink,
            limit=limit,
        )
    elif order == 'by_author_last_update':
        assert len(start) == 4, "Expecting 4 arguments in 'start' array: author, update time, optional page start author and permlink"
        assert (
            len(start) == 4
        ), "Expecting 4 arguments in 'start' array: author, update time, optional page start author and permlink"
        author = start[0]
        valid_account(author)
        updated_at = start[1]

@@ -84,13 +123,21 @@ async def list_comments(context, start: list, limit: int = 1000, order: str = None):
        start_post_permlink = start[3]
        valid_permlink(start_post_permlink, allow_empty=True)
        sql = "SELECT * FROM list_comments_by_author_last_update(:author, :updated_at, :start_post_author, :start_post_permlink, :limit)"
        result = await db.query_all(sql, author=author, updated_at=updated_at, start_post_author=start_post_author, start_post_permlink=start_post_permlink, limit=limit)
        result = await db.query_all(
            sql,
            author=author,
            updated_at=updated_at,
            start_post_author=start_post_author,
            start_post_permlink=start_post_permlink,
            limit=limit,
        )

    return { "comments": [database_post_object(dict(row)) for row in result] }
    return {"comments": [database_post_object(dict(row)) for row in result]}


@return_error_info
async def find_comments(context, comments: list):
    """ Search for comments: limit and order is ignored in hive code """
    """Search for comments: limit and order is ignored in hive code"""
    result = []

    assert isinstance(comments, list), "Expected array of author+permlink pairs"

@@ -175,7 +222,8 @@ async def find_comments(context, comments: list):
            cpo = database_post_object(dict(row))
            result.append(cpo)

    return { "comments": result }
    return {"comments": result}


class VotesPresentation(Enum):
    ActiveVotes = 1

@@ -183,24 +231,41 @@ class VotesPresentation(Enum):
    CondenserApi = 3
    BridgeApi = 4


def api_vote_info(rows, votes_presentation):
    ret = []
    for row in rows:
        if votes_presentation == VotesPresentation.DatabaseApi:
            ret.append(dict(id = row.id, voter = row.voter, author = row.author, permlink = row.permlink,
                            weight = row.weight, rshares = row.rshares, vote_percent = row.percent,
                            last_update = json_date(row.last_update), num_changes = row.num_changes))
        elif votes_presentation == VotesPresentation.CondenserApi:
            ret.append(dict(percent = str(row.percent), reputation = row.reputation,
                            rshares = row.rshares, voter = row.voter))
        elif votes_presentation == VotesPresentation.BridgeApi:
            ret.append(dict(rshares = row.rshares, voter = row.voter))
        else:
            ret.append(dict(percent = row.percent, reputation = row.reputation,
                            rshares = row.rshares, time = json_date(row.last_update),
                            voter = row.voter, weight = row.weight
                            ))
    return ret
    ret = []
    for row in rows:
        if votes_presentation == VotesPresentation.DatabaseApi:
            ret.append(
                dict(
                    id=row.id,
                    voter=row.voter,
                    author=row.author,
                    permlink=row.permlink,
                    weight=row.weight,
                    rshares=row.rshares,
                    vote_percent=row.percent,
                    last_update=json_date(row.last_update),
                    num_changes=row.num_changes,
                )
            )
        elif votes_presentation == VotesPresentation.CondenserApi:
            ret.append(dict(percent=str(row.percent), reputation=row.reputation, rshares=row.rshares, voter=row.voter))
        elif votes_presentation == VotesPresentation.BridgeApi:
            ret.append(dict(rshares=row.rshares, voter=row.voter))
        else:
            ret.append(
                dict(
                    percent=row.percent,
                    reputation=row.reputation,
                    rshares=row.rshares,
                    time=json_date(row.last_update),
                    voter=row.voter,
                    weight=row.weight,
                )
            )
    return ret


@return_error_info
async def find_votes_impl(db, author: str, permlink: str, votes_presentation, limit: int = 1000):

@@ -208,16 +273,18 @@ async def find_votes_impl(db, author: str, permlink: str, votes_presentation, limit: int = 1000):
    rows = await db.query_all(sql, author=author, permlink=permlink, limit=limit)
    return api_vote_info(rows, votes_presentation)


@return_error_info
async def find_votes(context, author: str, permlink: str):
    """ Returns all votes for the given post """
    """Returns all votes for the given post"""
    valid_account(author)
    valid_permlink(permlink)
    return { 'votes': await find_votes_impl(context['db'], author, permlink, VotesPresentation.DatabaseApi) }
    return {'votes': await find_votes_impl(context['db'], author, permlink, VotesPresentation.DatabaseApi)}


@return_error_info
async def list_votes(context, start: list, limit: int = 1000, order: str = None):
    """ Returns all votes, starting with the specified voter and/or author and permlink. """
    """Returns all votes, starting with the specified voter and/or author and permlink."""
    supported_order_list = ["by_comment_voter", "by_voter_comment"]
    assert not order is None, "missing a required argument: 'order'"
    assert order in supported_order_list, f"Unsupported order, valid orders: {', '.join(supported_order_list)}"

@@ -235,7 +302,9 @@ async def list_votes(context, start: list, limit: int = 1000, order: str = None):
        sql = "SELECT * FROM list_votes_by_voter_comment(:voter,:author,:permlink,:limit)"
        rows = await db.query_all(sql, voter=voter, author=start_post_author, permlink=start_post_permlink, limit=limit)
    else:
        assert len(start) == 3, "Expecting 3 arguments in 'start' array: post author and permlink, optional page start voter"
        assert (
            len(start) == 3
        ), "Expecting 3 arguments in 'start' array: post author and permlink, optional page start voter"
        author = start[0]
        valid_account(author)
        permlink = start[1]

@@ -244,4 +313,4 @@ async def list_votes(context, start: list, limit: int = 1000, order: str = None):
        valid_account(start_voter, allow_empty=True)
        sql = "SELECT * FROM list_votes_by_comment_voter(:voter,:author,:permlink,:limit)"
        rows = await db.query_all(sql, voter=start_voter, author=author, permlink=permlink, limit=limit)
    return { 'votes': api_vote_info(rows, VotesPresentation.DatabaseApi) }
    return {'votes': api_vote_info(rows, VotesPresentation.DatabaseApi)}

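Aside: each order value in list_comments above pins the shape of the start array; by_root, for instance, demands exactly four entries (root author/permlink plus an optional paging cursor). A hedged call sketch, to be run inside an async handler; the context and account values are invented:

    # inside an async request handler; 'context' carries the shared db
    page = await list_comments(
        context,
        start=['alice', 'my-root-post', '', ''],  # root author/permlink + empty paging cursor
        limit=20,
        order='by_root',
    )
    for comment in page['comments']:
        print(comment['author'], comment['permlink'])
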
@@ -1,11 +1,13 @@
from hive.server.common.helpers import json_date
from hive.utils.normalize import sbd_amount, to_nai


def _amount(amount, asset='HBD'):
    """Return a steem-style amount string given a (numeric, asset-str)."""
    assert asset == 'HBD', f'unhandled asset {asset}'
    return f"{amount:.3f} HBD"


def database_post_object(row, truncate_body=0):
    """Given a hive_posts row, create a legacy-style post object."""

@@ -29,13 +31,15 @@ def database_post_object(row, truncate_body=0):

    post['last_payout'] = json_date(row['last_payout_at'])
    post['cashout_time'] = json_date(row['cashout_time'])
    post['max_cashout_time'] = json_date(None) # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)
    post['max_cashout_time'] = json_date(
        None
    )  # ABW: only relevant up to HF17, timestamp::max for all posts later (and also all paid)

    curator_payout = sbd_amount(row['curator_payout_value'])
    post['curator_payout_value'] = to_nai(_amount(curator_payout))
    post['total_payout_value'] = to_nai(_amount(row['payout'] - curator_payout))

    post['reward_weight'] = 10000 # ABW: only relevant between HF12 and HF17 and we don't have access to correct value
    post['reward_weight'] = 10000  # ABW: only relevant between HF12 and HF17 and we don't have access to correct value

    post['root_author'] = row['root_author']
    post['root_permlink'] = row['root_permlink']

@@ -55,14 +59,18 @@ def database_post_object(row, truncate_body=0):
    if paid:
        post['total_vote_weight'] = 0
        post['vote_rshares'] = 0
        post['net_rshares'] = 0 # if row['rshares'] > 0 else row['rshares'] ABW: used to be like this but after HF19 cashouts disappear and all give 0
        post[
            'net_rshares'
        ] = 0  # if row['rshares'] > 0 else row['rshares'] ABW: used to be like this but after HF19 cashouts disappear and all give 0
        post['abs_rshares'] = 0
        post['children_abs_rshares'] = 0
    else:
        post['total_vote_weight'] = row['total_vote_weight']
        post['vote_rshares'] = ( row['rshares'] + row['abs_rshares'] ) // 2 # effectively sum of all positive rshares
        post['vote_rshares'] = (row['rshares'] + row['abs_rshares']) // 2  # effectively sum of all positive rshares
        post['net_rshares'] = row['rshares']
        post['abs_rshares'] = row['abs_rshares']
        post['children_abs_rshares'] = 0 # TODO - ABW: I'm not sure about that, it is costly and useless (used to be part of mechanism to determine cashout time)
        post[
            'children_abs_rshares'
        ] = 0  # TODO - ABW: I'm not sure about that, it is costly and useless (used to be part of mechanism to determine cashout time)

    return post

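Aside: the (rshares + abs_rshares) // 2 expression marked "effectively sum of all positive rshares" is exact arithmetic, not an approximation. With P the positive total and N the magnitude of the negative total, the row stores rshares = P - N and abs_rshares = P + N, so half their sum is P. A quick check with invented numbers:

    P, N = 900, 300          # positive total, negative magnitude (made up)
    rshares = P - N          # net rshares as stored on the row
    abs_rshares = P + N      # absolute rshares as stored on the row
    assert (rshares + abs_rshares) // 2 == P
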
@@ -12,15 +12,19 @@ from hive.utils.stats import Stats
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
log = logging.getLogger(__name__)


def sqltimer(function):
    """Decorator for DB query methods which tracks timing."""

    async def _wrapper(*args, **kwargs):
        start = perf()
        result = await function(*args, **kwargs)
        Stats.log_db(args[1], perf() - start)
        return result

    return _wrapper


class Db:
    """Wrapper for aiopg.sa db driver."""

@@ -104,8 +108,7 @@ class Db:
        try:
            return await conn.execute(self._sql_text(sql), **kwargs)
        except Exception as e:
            log.warning("[SQL-ERR] %s in query %s (%s)",
                        e.__class__.__name__, sql, kwargs)
            log.warning("[SQL-ERR] %s in query %s (%s)", e.__class__.__name__, sql, kwargs)
            raise e

    def _sql_text(self, sql):

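Aside: sqltimer relies on the SQL string being the first argument after self (args[1]) of every wrapped query method. A hedged stand-alone sketch of that decorator shape, with print standing in for Stats.log_db:

    import asyncio
    import time

    def sqltimer(function):
        """Decorator for async DB query methods which tracks timing."""
        async def _wrapper(*args, **kwargs):
            start = time.perf_counter()
            result = await function(*args, **kwargs)
            # args[0] is self, args[1] is the SQL text being timed
            print(f"{args[1]!r} took {time.perf_counter() - start:.4f}s")
            return result
        return _wrapper

    class ToyDb:
        @sqltimer
        async def query_one(self, sql, **kwargs):
            await asyncio.sleep(0.01)  # stand-in for a real round trip
            return 42

    asyncio.run(ToyDb().query_one("SELECT 1"))
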
@@ -1,6 +1,7 @@
from hive.server.condenser_api.methods import _get_account_reputations_impl
from hive.server.common.helpers import return_error_info


@return_error_info
async def get_account_reputations(context, account_lower_bound: str = '', limit: int = 1000):
    db = context['db']

@@ -3,11 +3,13 @@ import logging

log = logging.getLogger(__name__)


async def get_community_id(db, name):
    """Get community id from db."""
    assert name, 'community name cannot be blank'
    return await db.query_one("SELECT find_community_id( (:name)::VARCHAR, True )", name=name)


async def get_account_id(db, name):
    """Get account id from account name."""
    return await db.query_one("SELECT find_account_id( (:name)::VARCHAR, True )", name=name)

@@ -1,13 +1,14 @@
"""Hive API: Community methods"""
import logging

from hive.server.hive_api.common import (get_community_id)
from hive.server.common.helpers import (return_error_info, valid_community, valid_account, valid_limit, json_date)
from hive.server.hive_api.common import get_community_id
from hive.server.common.helpers import return_error_info, valid_community, valid_account, valid_limit, json_date

# pylint: disable=too-many-lines

log = logging.getLogger(__name__)


@return_error_info
async def get_community(context, name, observer=None):
    """Retrieve full community object. Includes metadata, leadership team

@@ -24,6 +25,7 @@ async def get_community(context, name, observer=None):

    return result


@return_error_info
async def get_community_context(context, name, account):
    """For a community/account: returns role, title, subscribed state"""

@@ -36,14 +38,15 @@ async def get_community_context(context, name, account):

    return dict(row['bridge_get_community_context'])


@return_error_info
async def list_top_communities(context, limit=25):
    """List top communities. Returns lite community list."""
    limit = valid_limit(limit, 100, 25)
    sql = """SELECT hc.name, hc.title FROM hive_communities hc
              WHERE hc.rank > 0 ORDER BY hc.rank LIMIT :limit"""
    #ABW: restored older version since hardcoded id is out of the question
    #sql = """SELECT name, title FROM hive_communities
    # ABW: restored older version since hardcoded id is out of the question
    # sql = """SELECT name, title FROM hive_communities
    #           WHERE id = 1344247 OR rank > 0
    #          ORDER BY (CASE WHEN id = 1344247 THEN 0 ELSE rank END)
    #           LIMIT :limit"""

@@ -54,7 +57,7 @@ async def list_top_communities(context, limit=25):


@return_error_info
async def list_pop_communities(context, limit:int=25):
async def list_pop_communities(context, limit: int = 25):
    """List communities by new subscriber count. Returns lite community list."""
    limit = valid_limit(limit, 25, 25)
    sql = "SELECT * FROM bridge_list_pop_communities( (:limit)::INT )"

@@ -73,6 +76,7 @@ async def list_all_subscriptions(context, account):
    rows = await db.query_all(sql, account=account)
    return [(r[0], r[1], r[2], r[3]) for r in rows]


@return_error_info
async def list_subscribers(context, community, last='', limit=100):
    """Lists subscribers of `community`."""

@@ -84,6 +88,7 @@ async def list_subscribers(context, community, last='', limit=100):
    rows = await db.query_all(sql, community=community, last=last, limit=limit)
    return [(r[0], r[1], r[2], json_date(r[3])) for r in rows]


@return_error_info
async def list_communities(context, last='', limit=100, query=None, sort='rank', observer=None):
    """List all communities, paginated. Returns lite community list."""

@@ -96,8 +101,11 @@ async def list_communities(context, last='', limit=100, query=None, sort='rank', observer=None):
    search = query
    db = context['db']

    sql = "SELECT * FROM bridge_list_communities_by_" + \
        sort + "( (:observer)::VARCHAR, (:last)::VARCHAR, (:search)::VARCHAR, (:limit)::INT )"
    sql = (
        "SELECT * FROM bridge_list_communities_by_"
        + sort
        + "( (:observer)::VARCHAR, (:last)::VARCHAR, (:search)::VARCHAR, (:limit)::INT )"
    )

    rows = await db.query_all(sql, observer=observer, last=last, search=search, limit=limit)

@@ -117,6 +125,7 @@ async def list_community_roles(context, community, last='', limit=50):

    return [(r['name'], r['role'], r['title']) for r in rows]


# Communities - internal
# ----------------------
def remove_empty_admins_field(rows):

@@ -128,9 +137,11 @@ def remove_empty_admins_field(rows):
        result.append(new)
    return result


# Stats
# -----


async def top_community_voters(context, community):
    """Get a list of top 5 (pending) community voters."""
    # TODO: which are voting on muted posts?

@@ -145,6 +156,7 @@ async def top_community_voters(context, community):
        total[voter] += abs(int(rshares))
    return sorted(total, key=total.get, reverse=True)[:5]


async def top_community_authors(context, community):
    """Get a list of top 5 (pending) community authors."""
    db = context['db']

@@ -157,6 +169,7 @@ async def top_community_authors(context, community):
        total[author] += payout
    return sorted(total, key=total.get, reverse=True)[:5]


async def top_community_muted(context, community):
    """Get top authors (by SP) who are muted in a community."""
    db = context['db']

@@ -167,6 +180,7 @@ async def top_community_muted(context, community):
             ORDER BY voting_weight DESC LIMIT 5"""
    return await db.query(sql, community_id=cid)


async def _top_community_posts(db, community, limit=50):
    # TODO: muted equivalent
    sql = """

@@ -181,4 +195,4 @@ async def _top_community_posts(db, community, limit=50):
             AND post_id IN (SELECT id FROM hive_posts WHERE is_muted = '0')
        ORDER BY ( hp.payout + hp.pending_payout ) DESC LIMIT :limit"""

    return await db.query_all(sql, community=community, limit=limit)
    return await db.query_all(sql, community=community, limit=limit)

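Aside: top_community_voters and top_community_authors share one reduction: accumulate per-key totals in a dict, then take the five largest keys by value. A self-contained illustration with made-up rows:

    total = {}
    for voter, rshares in [('alice', 500), ('bob', -800), ('alice', 250), ('carol', 100)]:
        if voter not in total:
            total[voter] = 0
        total[voter] += abs(int(rshares))  # magnitude counts; sign does not

    print(sorted(total, key=total.get, reverse=True)[:5])  # ['bob', 'alice', 'carol']
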
@@ -1,7 +1,15 @@
"""Hive API: Notifications"""
import logging

from hive.server.common.helpers import return_error_info, valid_account, valid_permlink, valid_number, valid_limit, valid_score, json_date
from hive.server.common.helpers import (
    return_error_info,
    valid_account,
    valid_permlink,
    valid_number,
    valid_limit,
    valid_score,
    json_date,
)
from hive.indexer.notify import NotifyType
from hive.server.common.mutes import Mutes

@@ -9,36 +17,34 @@ log = logging.getLogger(__name__)

STRINGS = {
    # community
    NotifyType.new_community: '<dst> was created', # no <src> available
    NotifyType.set_role: '<src> set <dst> <payload>',
    NotifyType.set_props: '<src> set properties <payload>',
    NotifyType.set_label: '<src> label <dst> <payload>',
    NotifyType.mute_post: '<src> mute <post> - <payload>',
    NotifyType.unmute_post: '<src> unmute <post> - <payload>',
    NotifyType.pin_post: '<src> pin <post>',
    NotifyType.unpin_post: '<src> unpin <post>',
    NotifyType.flag_post: '<src> flag <post> - <payload>',
    NotifyType.subscribe: '<src> subscribed to <comm>',
    NotifyType.new_community: '<dst> was created',  # no <src> available
    NotifyType.set_role: '<src> set <dst> <payload>',
    NotifyType.set_props: '<src> set properties <payload>',
    NotifyType.set_label: '<src> label <dst> <payload>',
    NotifyType.mute_post: '<src> mute <post> - <payload>',
    NotifyType.unmute_post: '<src> unmute <post> - <payload>',
    NotifyType.pin_post: '<src> pin <post>',
    NotifyType.unpin_post: '<src> unpin <post>',
    NotifyType.flag_post: '<src> flag <post> - <payload>',
    NotifyType.subscribe: '<src> subscribed to <comm>',
    # personal
    NotifyType.error: 'error: <payload>',
    NotifyType.reblog: '<src> reblogged your post',
    NotifyType.follow: '<src> followed you',
    NotifyType.reply: '<src> replied to your post',
    NotifyType.reply_comment: '<src> replied to your comment',
    NotifyType.mention: '<src> mentioned you and <other_mentions> others',
    NotifyType.vote: '<src> voted on your post',

    #NotifyType.update_account: '<dst> updated account',
    #NotifyType.receive: '<src> sent <dst> <payload>',
    #NotifyType.send: '<dst> sent <src> <payload>',

    #NotifyType.reward: '<post> rewarded <payload>',
    #NotifyType.power_up: '<dst> power up <payload>',
    #NotifyType.power_down: '<dst> power down <payload>',
    #NotifyType.message: '<src>: <payload>',
    NotifyType.error: 'error: <payload>',
    NotifyType.reblog: '<src> reblogged your post',
    NotifyType.follow: '<src> followed you',
    NotifyType.reply: '<src> replied to your post',
    NotifyType.reply_comment: '<src> replied to your comment',
    NotifyType.mention: '<src> mentioned you and <other_mentions> others',
    NotifyType.vote: '<src> voted on your post',
    # NotifyType.update_account: '<dst> updated account',
    # NotifyType.receive: '<src> sent <dst> <payload>',
    # NotifyType.send: '<dst> sent <src> <payload>',
    # NotifyType.reward: '<post> rewarded <payload>',
    # NotifyType.power_up: '<dst> power up <payload>',
    # NotifyType.power_down: '<dst> power down <payload>',
    # NotifyType.message: '<src>: <payload>',
}


@return_error_info
async def unread_notifications(context, account, min_score=25):
    """Load notification status for a named account."""

@@ -50,6 +56,7 @@ async def unread_notifications(context, account, min_score=25):
    row = await db.query_row(sql, account=account, min_score=min_score)
    return dict(lastread=str(row['lastread_at']), unread=row['unread'])


@return_error_info
async def account_notifications(context, account, min_score=25, last_id=None, limit=100):
    """Load notifications for named account."""

@@ -64,8 +71,11 @@ async def account_notifications(context, account, min_score=25, last_id=None, limit=100):
    rows = await db.query_all(sql_query, account=account, min_score=min_score, last_id=last_id, limit=limit)
    return [_render(row) for row in rows]


@return_error_info
async def post_notifications(context, author:str, permlink:str, min_score:int=25, last_id:int=None, limit:int=100):
async def post_notifications(
    context, author: str, permlink: str, min_score: int = 25, last_id: int = None, limit: int = 100
):
    """Load notifications for a specific post."""
    # pylint: disable=too-many-arguments
    db = context['db']

@@ -77,9 +87,12 @@ async def post_notifications(context, author:str, permlink:str, min_score:int=25, last_id:int=None, limit:int=100):

    sql_query = "SELECT * FROM post_notifications( (:author)::VARCHAR, (:permlink)::VARCHAR, (:min_score)::SMALLINT, (:last_id)::BIGINT, (:limit)::SMALLINT )"

    rows = await db.query_all(sql_query, author=author, permlink=permlink, min_score=min_score, last_id=last_id, limit=limit)
    rows = await db.query_all(
        sql_query, author=author, permlink=permlink, min_score=min_score, last_id=last_id, limit=limit
    )
    return [_render(row) for row in rows]


def _notifs_sql(where):
    sql = """SELECT hn.id, hn.type_id, hn.score, hn.created_at,
                    src.name src, dst.name dst,

@@ -99,43 +112,58 @@ def _notifs_sql(where):
             LIMIT :limit"""
    return sql % where


def _render(row):
    """Convert object to string rep."""
    # src dst payload community post
    out = {'id': row['id'],
           'type': NotifyType(row['type_id']).name,
           'score': row['score'],
           'date': json_date(row['created_at']),
           'msg': _render_msg(row),
           'url': _render_url(row),
          }
    out = {
        'id': row['id'],
        'type': NotifyType(row['type_id']).name,
        'score': row['score'],
        'date': json_date(row['created_at']),
        'msg': _render_msg(row),
        'url': _render_url(row),
    }

    #if row['community']:
    # if row['community']:
    #    out['community'] = (row['community'], row['community_title'])

    return out


def _render_msg(row):
    msg = STRINGS[row['type_id']]
    payload = row['payload']
    if row['type_id'] == NotifyType.vote and payload:
        msg += ' <payload>'

    if '<dst>' in msg: msg = msg.replace('<dst>', '@' + row['dst'])
    if '<src>' in msg: msg = msg.replace('<src>', '@' + row['src'])
    if '<post>' in msg: msg = msg.replace('<post>', _post_url(row))
    if '<payload>' in msg: msg = msg.replace('<payload>', payload or 'null')
    if '<comm>' in msg: msg = msg.replace('<comm>', row['community_title'])
    if '<other_mentions>' in msg: msg = msg.replace('<other_mentions>', str( row['number_of_mentions'] - 1 ) )
    if '<dst>' in msg:
        msg = msg.replace('<dst>', '@' + row['dst'])
    if '<src>' in msg:
        msg = msg.replace('<src>', '@' + row['src'])
    if '<post>' in msg:
        msg = msg.replace('<post>', _post_url(row))
    if '<payload>' in msg:
        msg = msg.replace('<payload>', payload or 'null')
    if '<comm>' in msg:
        msg = msg.replace('<comm>', row['community_title'])
    if '<other_mentions>' in msg:
        msg = msg.replace('<other_mentions>', str(row['number_of_mentions'] - 1))
    return msg


def _post_url(row):
    return '@' + row['author'] + '/' + row['permlink']


def _render_url(row):
    if row['permlink']: return '@' + row['author'] + '/' + row['permlink']
    if row['community']: return 'trending/' + row['community']
    if row['src']: return '@' + row['src']
    if row['dst']: return '@' + row['dst']
    if row['permlink']:
        return '@' + row['author'] + '/' + row['permlink']
    if row['community']:
        return 'trending/' + row['community']
    if row['src']:
        return '@' + row['src']
    if row['dst']:
        return '@' + row['dst']
    assert False, f'no url for {row}'
    return None

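Aside: _render_msg is plain placeholder substitution over the STRINGS template picked by type_id. Traced by hand for a vote notification carrying a payload (values invented):

    msg = '<src> voted on your post'    # STRINGS[NotifyType.vote]
    payload = '$0.05'                   # payload attached by the indexer
    msg += ' <payload>'                 # only votes with a payload get this suffix
    msg = msg.replace('<src>', '@' + 'alice')
    msg = msg.replace('<payload>', payload or 'null')
    print(msg)  # @alice voted on your post $0.05
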
@@ -4,6 +4,7 @@ import logging

log = logging.getLogger(__name__)


async def get_info(context):
    db = context['db']

@@ -16,13 +17,13 @@ async def get_info(context):
    from hive.version import VERSION, GIT_REVISION, GIT_DATE

    ret = {
        "hivemind_version" : VERSION,
        "hivemind_git_rev" : GIT_REVISION,
        "hivemind_git_date" : GIT_DATE,
        "database_schema_version" : patch_level_data['level'],
        "database_patch_date" : str(patch_level_data['patch_date']),
        "database_patched_to_revision" : patch_level_data['patched_to_revision'],
        "database_head_block" : database_head_block
        "hivemind_version": VERSION,
        "hivemind_git_rev": GIT_REVISION,
        "hivemind_git_date": GIT_DATE,
        "database_schema_version": patch_level_data['level'],
        "database_patch_date": str(patch_level_data['patch_date']),
        "database_patched_to_revision": patch_level_data['patched_to_revision'],
        "database_head_block": database_head_block,
    }

    return ret

@@ -5,6 +5,7 @@ from hive.server.common.helpers import return_error_info, valid_limit

log = logging.getLogger(__name__)


def _row(row):
    if row['name']:
        url = row['name']

@@ -15,6 +16,7 @@ def _row(row):

    return (url, label, float(row['payout']), row['posts'], row['authors'])


@return_error_info
async def get_payout_stats(context, limit=250):
    """Get payout stats for building treemap."""

@@ -41,4 +43,8 @@ async def get_payout_stats(context, limit=250):
             WHERE community_id IS NULL AND author IS NULL"""
    blog_ttl = await db.query_one(sql)

    return dict(items=items, total=float(total if total is not None else 0.), blogs=float(blog_ttl if blog_ttl is not None else 0.))
    return dict(
        items=items,
        total=float(total if total is not None else 0.0),
        blogs=float(blog_ttl if blog_ttl is not None else 0.0),
    )

@@ -38,133 +38,148 @@ from hive.server.db import Db

# pylint: disable=too-many-lines


def decimal_serialize(obj):
    return simplejson.dumps(obj=obj, use_decimal=True)


def decimal_deserialize(s):
    return simplejson.loads(s=s, use_decimal=True)


async def db_head_state(context):
    """Status/health check."""
    db = context['db']
    sql = ("SELECT num, created_at, extract(epoch from created_at) ts "
           "FROM hive_blocks ORDER BY num DESC LIMIT 1")
    sql = "SELECT num, created_at, extract(epoch from created_at) ts " "FROM hive_blocks ORDER BY num DESC LIMIT 1"
    row = await db.query_row(sql)
    return dict(db_head_block=row['num'],
                db_head_time=str(row['created_at']),
                db_head_age=int(time.time() - row['ts']))
    return dict(db_head_block=row['num'], db_head_time=str(row['created_at']), db_head_age=int(time.time() - row['ts']))


def build_methods():
    """Register all supported hive_api/condenser_api.calls."""
    # pylint: disable=expression-not-assigned, line-too-long
    methods = Methods()

    methods.add(**{'hive.' + method.__name__: method for method in (
        db_head_state,
    )})
    methods.add(**{'hive.' + method.__name__: method for method in (db_head_state,)})

    methods.add(**{'hive.get_info' : hive_api_get_info})
    methods.add(**{'hive.get_info': hive_api_get_info})

    methods.add(**{'condenser_api.' + method.__name__: method for method in (
        condenser_api.get_followers,
        condenser_api.get_following,
        condenser_api.get_follow_count,
        condenser_api.get_content,
        condenser_api.get_content_replies,
        condenser_api_get_state,
        condenser_api_get_trending_tags,
        condenser_api.get_discussions_by_trending,
        condenser_api.get_discussions_by_hot,
        condenser_api.get_discussions_by_promoted,
        condenser_api.get_discussions_by_created,
        condenser_api.get_discussions_by_blog,
        condenser_api.get_discussions_by_feed,
        condenser_api.get_discussions_by_comments,
        condenser_api.get_replies_by_last_update,

        condenser_api.get_discussions_by_author_before_date,
        condenser_api.get_post_discussions_by_payout,
        condenser_api.get_comment_discussions_by_payout,
        condenser_api.get_blog,
        condenser_api.get_blog_entries,
        condenser_api.get_account_reputations,
        condenser_api.get_reblogged_by,
        condenser_api.get_active_votes
    )})
    methods.add(
        **{
            'condenser_api.' + method.__name__: method
            for method in (
                condenser_api.get_followers,
                condenser_api.get_following,
                condenser_api.get_follow_count,
                condenser_api.get_content,
                condenser_api.get_content_replies,
                condenser_api_get_state,
                condenser_api_get_trending_tags,
                condenser_api.get_discussions_by_trending,
                condenser_api.get_discussions_by_hot,
                condenser_api.get_discussions_by_promoted,
                condenser_api.get_discussions_by_created,
                condenser_api.get_discussions_by_blog,
                condenser_api.get_discussions_by_feed,
                condenser_api.get_discussions_by_comments,
                condenser_api.get_replies_by_last_update,
                condenser_api.get_discussions_by_author_before_date,
                condenser_api.get_post_discussions_by_payout,
                condenser_api.get_comment_discussions_by_payout,
                condenser_api.get_blog,
                condenser_api.get_blog_entries,
                condenser_api.get_account_reputations,
                condenser_api.get_reblogged_by,
                condenser_api.get_active_votes,
            )
        }
    )

    # dummy methods -- serve informational error
    methods.add(**{
        'condenser_api.get_account_votes': condenser_api.get_account_votes,
        'tags_api.get_account_votes': condenser_api.get_account_votes,
    })
    methods.add(
        **{
            'condenser_api.get_account_votes': condenser_api.get_account_votes,
            'tags_api.get_account_votes': condenser_api.get_account_votes,
        }
    )

    # follow_api aliases
    methods.add(**{
        'follow_api.get_followers': condenser_api.get_followers,
        'follow_api.get_following': condenser_api.get_following,
        'follow_api.get_follow_count': condenser_api.get_follow_count,
        'follow_api.get_account_reputations': follow_api.get_account_reputations,
        'follow_api.get_blog': condenser_api.get_blog,
        'follow_api.get_blog_entries': condenser_api.get_blog_entries,
        'follow_api.get_reblogged_by': condenser_api.get_reblogged_by
    })
    methods.add(
        **{
            'follow_api.get_followers': condenser_api.get_followers,
            'follow_api.get_following': condenser_api.get_following,
            'follow_api.get_follow_count': condenser_api.get_follow_count,
            'follow_api.get_account_reputations': follow_api.get_account_reputations,
            'follow_api.get_blog': condenser_api.get_blog,
            'follow_api.get_blog_entries': condenser_api.get_blog_entries,
            'follow_api.get_reblogged_by': condenser_api.get_reblogged_by,
        }
    )

    # tags_api aliases
    methods.add(**{
        'tags_api.get_discussion': tags_api.get_discussion,
        'tags_api.get_content_replies': tags_api.get_content_replies,
        'tags_api.get_discussions_by_trending': condenser_api.get_discussions_by_trending,
        'tags_api.get_discussions_by_hot': condenser_api.get_discussions_by_hot,
        'tags_api.get_discussions_by_promoted': condenser_api.get_discussions_by_promoted,
        'tags_api.get_discussions_by_created': condenser_api.get_discussions_by_created,
        'tags_api.get_discussions_by_blog': condenser_api.get_discussions_by_blog,
        'tags_api.get_discussions_by_comments': condenser_api.get_discussions_by_comments,
        'tags_api.get_discussions_by_author_before_date': condenser_api.get_discussions_by_author_before_date,
        'tags_api.get_post_discussions_by_payout': condenser_api.get_post_discussions_by_payout,
        'tags_api.get_comment_discussions_by_payout': condenser_api.get_comment_discussions_by_payout
    })
    methods.add(
        **{
            'tags_api.get_discussion': tags_api.get_discussion,
            'tags_api.get_content_replies': tags_api.get_content_replies,
            'tags_api.get_discussions_by_trending': condenser_api.get_discussions_by_trending,
            'tags_api.get_discussions_by_hot': condenser_api.get_discussions_by_hot,
            'tags_api.get_discussions_by_promoted': condenser_api.get_discussions_by_promoted,
            'tags_api.get_discussions_by_created': condenser_api.get_discussions_by_created,
            'tags_api.get_discussions_by_blog': condenser_api.get_discussions_by_blog,
            'tags_api.get_discussions_by_comments': condenser_api.get_discussions_by_comments,
            'tags_api.get_discussions_by_author_before_date': condenser_api.get_discussions_by_author_before_date,
            'tags_api.get_post_discussions_by_payout': condenser_api.get_post_discussions_by_payout,
            'tags_api.get_comment_discussions_by_payout': condenser_api.get_comment_discussions_by_payout,
        }
    )

    # legacy `call` style adapter
    methods.add(**{
        'call': condenser_api_call
    })
    methods.add(**{'call': condenser_api_call})

    # bridge_api methods
    methods.add(**{'bridge.' + method.__name__: method for method in (
        bridge_api_normalize_post,
        bridge_api_get_post_header,
        bridge_api_get_discussion,
        bridge_api.get_post,
        bridge_api.get_account_posts,
        bridge_api.get_ranked_posts,
        bridge_api.get_profile,
        bridge_api.get_trending_topics,
        bridge_api.get_relationship_between_accounts,
        bridge_api.get_follow_list,
        bridge_api.does_user_follow_any_lists,
        hive_api_notify.post_notifications,
        hive_api_notify.account_notifications,
        hive_api_notify.unread_notifications,
        hive_api_stats.get_payout_stats,
        hive_api_community.get_community,
        hive_api_community.get_community_context,
        hive_api_community.list_communities,
        hive_api_community.list_pop_communities,
        hive_api_community.list_community_roles,
        hive_api_community.list_subscribers,
        hive_api_community.list_all_subscriptions,
    )})
    methods.add(
        **{
            'bridge.' + method.__name__: method
            for method in (
                bridge_api_normalize_post,
                bridge_api_get_post_header,
                bridge_api_get_discussion,
                bridge_api.get_post,
                bridge_api.get_account_posts,
                bridge_api.get_ranked_posts,
                bridge_api.get_profile,
                bridge_api.get_trending_topics,
                bridge_api.get_relationship_between_accounts,
                bridge_api.get_follow_list,
                bridge_api.does_user_follow_any_lists,
                hive_api_notify.post_notifications,
                hive_api_notify.account_notifications,
                hive_api_notify.unread_notifications,
                hive_api_stats.get_payout_stats,
                hive_api_community.get_community,
                hive_api_community.get_community_context,
                hive_api_community.list_communities,
                hive_api_community.list_pop_communities,
                hive_api_community.list_community_roles,
                hive_api_community.list_subscribers,
                hive_api_community.list_all_subscriptions,
            )
        }
    )

    # database_api methods
    methods.add(**{
        'database_api.list_comments' : database_api.list_comments,
        'database_api.find_comments' : database_api.find_comments,
        'database_api.list_votes' : database_api.list_votes,
        'database_api.find_votes' : database_api.find_votes
    })
    methods.add(
        **{
            'database_api.list_comments': database_api.list_comments,
            'database_api.find_comments': database_api.find_comments,
            'database_api.list_votes': database_api.list_votes,
            'database_api.find_votes': database_api.find_votes,
        }
    )

    return methods


def truncate_response_log(logger):
    """Overwrite jsonrpcserver resp logger to truncate output.

@@ -180,6 +195,7 @@ def truncate_response_log(logger):
    logger.propagate = False
    logger.addHandler(handler)


def conf_stdout_custom_file_logger(logger, file_name):
    stdout_handler = logging.StreamHandler(sys.stdout)
    file_handler = logging.FileHandler(file_name, 'a', 'utf-8')

@@ -187,9 +203,10 @@ def conf_stdout_custom_file_logger(logger, file_name):
    logger.addHandler(stdout_handler)
    logger.addHandler(file_handler)


def run_server(conf):
    """Configure and launch the API server."""
    #pylint: disable=too-many-statements
    # pylint: disable=too-many-statements

    # configure jsonrpcserver logging
    log_level = conf.log_level()

@@ -201,13 +218,13 @@ def run_server(conf):
    # init
    log = logging.getLogger(__name__)

    # logger for storing Request processing times

    req_res_log = None

    if conf.get('log_request_times'):
        req_res_log = logging.getLogger("Request-Process-Time-Logger")
        conf_stdout_custom_file_logger(req_res_log, "./request_process_times.log")

    methods = build_methods()

@@ -215,7 +232,7 @@ def run_server(conf):
    app['config'] = dict()
    app['config']['args'] = conf.args()
    app['config']['hive.MAX_DB_ROW_RESULTS'] = 100000
    #app['config']['hive.logger'] = logger
    # app['config']['hive.logger'] = logger

    async def init_db(app):
        """Initialize db adapter."""

@@ -234,7 +251,8 @@ def run_server(conf):
        sql = "SELECT level, patch_date, patched_to_revision FROM hive_db_patch_level ORDER BY level DESC LIMIT 1"
        patch_level_data = await app['db'].query_row(sql)

        from hive.utils.misc import show_app_version;
        from hive.utils.misc import show_app_version

        show_app_version(log, database_head_block, patch_level_data)

    app.on_startup.append(init_db)

@@ -243,8 +261,8 @@ def run_server(conf):

    async def head_age(request):
        """Get hive head block age in seconds. 500 status if age > 15s."""
        #pylint: disable=unused-argument
        healthy_age = 15 # hive is synced if head block within 15s
        # pylint: disable=unused-argument
        healthy_age = 15  # hive is synced if head block within 15s
        try:
            state = await db_head_state(app)
            curr_age = state['db_head_age']

@@ -256,12 +274,12 @@ def run_server(conf):

    async def health(request):
        """Get hive health state. 500 if db unavailable or too far behind."""
        #pylint: disable=unused-argument
        # pylint: disable=unused-argument
        is_syncer = conf.get('sync_to_s3')

        # while 1 hr is a bit stale, such a condition is a symptom of a
        # writer issue, *not* a reader node issue. Discussion in #174.
        max_head_age = 3600 # 1hr
        max_head_age = 3600  # 1hr

        try:
            state = await db_head_state(app)

@@ -277,21 +295,25 @@ def run_server(conf):
            result = f"head block age ({state['db_head_age']}) > max ({max_head_age}); head block num: {state['db_head_block']}"
        else:
            status = 200
            result = 'head block age is %d, head block num is %d' % (
                state['db_head_age'], state['db_head_block'])
            result = 'head block age is %d, head block num is %d' % (state['db_head_age'], state['db_head_block'])

        return web.json_response(status=status, data=dict(
            state=state,
            result=result,
            status='OK' if status == 200 else 'WARN',
            sync_service=is_syncer,
            source_commit=os.environ.get('SOURCE_COMMIT'),
            schema_hash=os.environ.get('SCHEMA_HASH'),
            docker_tag=os.environ.get('DOCKER_TAG'),
            timestamp=datetime.utcnow().isoformat()))
        return web.json_response(
            status=status,
            data=dict(
                state=state,
                result=result,
                status='OK' if status == 200 else 'WARN',
                sync_service=is_syncer,
                source_commit=os.environ.get('SOURCE_COMMIT'),
                schema_hash=os.environ.get('SCHEMA_HASH'),
                docker_tag=os.environ.get('DOCKER_TAG'),
                timestamp=datetime.utcnow().isoformat(),
            ),
        )

    async def jsonrpc_handler(request):
        """Handles all hive jsonrpc API requests."""

        def current_millis():
            return round(time.time() * 1000)

@@ -300,7 +322,14 @@ def run_server(conf):
        # debug=True refs https://github.com/bcb/jsonrpcserver/issues/71
        response = None
        try:
            response = await dispatch(request, methods=methods, debug=True, context=app, serialize=decimal_serialize, deserialize=decimal_deserialize)
            response = await dispatch(
                request,
                methods=methods,
                debug=True,
                context=app,
                serialize=decimal_serialize,
                deserialize=decimal_deserialize,
            )
        except simplejson.errors.JSONDecodeError as ex:
            # first log exception
            # TODO: consider removing this log - potential log spam

@@ -308,17 +337,15 @@ def run_server(conf):

            # create and send error response
            error_response = {
                "jsonrpc":"2.0",
                "error" : {
                "jsonrpc": "2.0",
                "error": {
                    "code": -32602,
                    "data": "Invalid JSON in request: " + str(ex),
                    "message": "Invalid parameters"
                    "message": "Invalid parameters",
                },
                "id" : -1
            }
            headers = {
                'Access-Control-Allow-Origin': '*'
                "id": -1,
            }
            headers = {'Access-Control-Allow-Origin': '*'}
            ret = web.json_response(error_response, status=200, headers=headers, dumps=decimal_serialize)
            if req_res_log is not None:
                req_res_log.info(f"{current_millis()} Request: {request} processed in {perf_counter() - t_start:.4f}s")

@@ -326,9 +353,7 @@ def run_server(conf):
            return ret

        if response is not None and response.wanted:
            headers = {
                'Access-Control-Allow-Origin': '*'
            }
            headers = {'Access-Control-Allow-Origin': '*'}
            ret = web.json_response(response.deserialized(), status=200, headers=headers, dumps=decimal_serialize)
            if req_res_log is not None:
                req_res_log.info(f"{current_millis()} Request: {request} processed in {perf_counter() - t_start:.4f}s")

@@ -346,7 +371,9 @@ def run_server(conf):
    app.router.add_get('/health', health)
    app.router.add_post('/', jsonrpc_handler)
    if 'auto_http_server_port' in app['config']['args'] and app['config']['args']['auto_http_server_port'] is not None:
        log.debug("auto-http-server-port detected in program arguments, http_server_port will be overriden with port from given range")
        log.debug(
            "auto-http-server-port detected in program arguments, http_server_port will be overriden with port from given range"
        )
        port_range = app['config']['args']['auto_http_server_port']
        port_range_len = len(port_range)
        port_from = port_range[0]

@@ -357,6 +384,7 @@ def run_server(conf):
            port_from = 1024

        import socket

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        while port_from <= port_to:
            try:

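Aside: dispatch is handed decimal_serialize/decimal_deserialize so JSON numbers round-trip as Decimal instead of float. The underlying simplejson behaviour, checked in isolation:

    from decimal import Decimal
    import simplejson

    payload = {'total_payouts': Decimal('1.500')}
    text = simplejson.dumps(payload, use_decimal=True)   # '{"total_payouts": 1.500}'
    back = simplejson.loads(text, use_decimal=True)
    assert back['total_payouts'] == Decimal('1.500')     # no float rounding drift
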
@@ -1 +1 @@
""" Tags api """
""" Tags api """

@@ -1,16 +1,14 @@
from hive.server.condenser_api.methods import _get_content_impl, _get_content_replies_impl
from hive.server.common.helpers import (
    return_error_info,
    valid_account,
    valid_permlink)
from hive.server.common.helpers import return_error_info, valid_account, valid_permlink


@return_error_info
async def get_discussion(context, author: str, permlink: str, observer=None):
    db = context['db']
    return await _get_content_impl(db, False, author, permlink, observer)


@return_error_info
async def get_content_replies(context, author: str, permlink: str):
    db = context['db']
    return await _get_content_replies_impl(db, False, author, permlink)

@ -10,9 +10,9 @@ import logging
|
|||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
class OneBlockProviderBase(ABC):
|
||||
|
||||
def __init__(self, conf, node, breaker, exception_reporter, thread_pool ):
|
||||
class OneBlockProviderBase(ABC):
|
||||
def __init__(self, conf, node, breaker, exception_reporter, thread_pool):
|
||||
self._conf = conf
|
||||
self._node = node
|
||||
self._breaker = breaker
|
||||
|
@ -26,39 +26,39 @@ class OneBlockProviderBase(ABC):
|
|||
if exception:
|
||||
raise exception
|
||||
|
||||
blocks = blocks_provider.get( 1 )
|
||||
blocks = blocks_provider.get(1)
|
||||
if len(blocks):
|
||||
return blocks[ 0 ]
|
||||
return blocks[0]
|
||||
return None
|
||||
|
||||
@abstractmethod
|
||||
def get_block( self, block_num ):
|
||||
def get_block(self, block_num):
|
||||
pass
|
||||
|
||||
class OneBlockProviderFromHivedDb(OneBlockProviderBase):
|
||||
|
||||
def __init__(self, conf, node, breaker, exception_reporter, databases, thread_pool ):
|
||||
class OneBlockProviderFromHivedDb(OneBlockProviderBase):
|
||||
def __init__(self, conf, node, breaker, exception_reporter, databases, thread_pool):
|
||||
assert databases
|
||||
|
||||
OneBlockProviderBase.__init__(self, conf, node, breaker, exception_reporter, thread_pool )
|
||||
OneBlockProviderBase.__init__(self, conf, node, breaker, exception_reporter, thread_pool)
|
||||
self._databases_for_massive_sync = databases
|
||||
|
||||
|
||||
def get_block( self, block_num ):
|
||||
def get_block(self, block_num):
|
||||
blocks_provider = MassiveBlocksDataProviderHiveDb(
|
||||
self._databases_for_massive_sync
|
||||
, 1
|
||||
, block_num
|
||||
, block_num + 1
|
||||
, self._breaker
|
||||
, self._exception_reporter
|
||||
, self._thread_pool
|
||||
self._databases_for_massive_sync,
|
||||
1,
|
||||
block_num,
|
||||
block_num + 1,
|
||||
self._breaker,
|
||||
self._exception_reporter,
|
||||
self._thread_pool,
|
||||
)
|
||||
|
||||
return self._get_block_from_provider(blocks_provider, block_num)
|
||||
|
||||
|
||||
class LiveSyncBlockFromRpc(BlockWrapper):
|
||||
def __init__(self, wrapped_block, conf, client ):
|
||||
def __init__(self, wrapped_block, conf, client):
|
||||
BlockWrapper.__init__(self, wrapped_block)
|
||||
assert conf
|
||||
self._conf = conf
|
||||
|
@ -66,7 +66,9 @@ class LiveSyncBlockFromRpc(BlockWrapper):
|
|||
|
||||
def get_next_vop(self):
|
||||
block_num = self.wrapped_block.get_num()
|
||||
result = VopsProvider.get_virtual_operation_for_blocks( self._client, self._conf, self.wrapped_block.get_num(), 1 )
result = VopsProvider.get_virtual_operation_for_blocks(
self._client, self._conf, self.wrapped_block.get_num(), 1
)

virtual_operations = []


@@ -74,28 +76,28 @@ class LiveSyncBlockFromRpc(BlockWrapper):
virtual_operations = result[block_num]['ops']

for vop in virtual_operations:
vop_object = VirtualOperationFromRpc( vop[ 'type' ], vop[ 'value' ] )
vop_object = VirtualOperationFromRpc(vop['type'], vop['value'])
if not vop_object.get_type():
continue
yield vop_object


class OneBlockProviderFromNode(OneBlockProviderBase):
def __init__(self, conf, node, breaker, exception_reporter, thread_pool):
OneBlockProviderBase.__init__(self, conf, node, breaker, exception_reporter, thread_pool)

def __init__(self, conf, node, breaker, exception_reporter, thread_pool ):
OneBlockProviderBase.__init__(self, conf, node, breaker, exception_reporter, thread_pool )

def get_block( self, block_num ):
def get_block(self, block_num):
blocks_provider = MassiveBlocksDataProviderHiveRpc(
self._conf
, self._node # node client
, blocks_get_threads = 1
, vops_get_threads = 1
, number_of_blocks_data_in_one_batch = 1
, lbound = block_num
, ubound = block_num + 1
, breaker = self._breaker
, exception_reporter = self._exception_reporter
, external_thread_pool = self._thread_pool
self._conf,
self._node,  # node client
blocks_get_threads=1,
vops_get_threads=1,
number_of_blocks_data_in_one_batch=1,
lbound=block_num,
ubound=block_num + 1,
breaker=self._breaker,
exception_reporter=self._exception_reporter,
external_thread_pool=self._thread_pool,
)
block = self._get_block_from_provider(blocks_provider, block_num)


@@ -104,8 +106,9 @@ class OneBlockProviderFromNode(OneBlockProviderBase):

return LiveSyncBlockFromRpc(block, self._conf, self._node)


class OneBlockProviderFactory:
def __init__( self, conf, node, breaker, exception_reporter ):
def __init__(self, conf, node, breaker, exception_reporter):
self._conf = conf
self._node = node
self._breaker = breaker

@@ -114,13 +117,22 @@ class OneBlockProviderFactory:
self._thread_pool = None

def __enter__(self):
if ( self._conf.get('hived_database_url') ):
self._databases_for_massive_sync = MassiveBlocksDataProviderHiveDb.Databases( self._conf )
if self._conf.get('hived_database_url'):
self._databases_for_massive_sync = MassiveBlocksDataProviderHiveDb.Databases(self._conf)
self._thread_pool = MassiveBlocksDataProviderHiveDb.create_thread_pool()
return OneBlockProviderFromHivedDb( self._conf, self._node, self._breaker, self._exception_reporter, self._databases_for_massive_sync, self._thread_pool )
return OneBlockProviderFromHivedDb(
self._conf,
self._node,
self._breaker,
self._exception_reporter,
self._databases_for_massive_sync,
self._thread_pool,
)

self._thread_pool = MassiveBlocksDataProviderHiveRpc.create_thread_pool( 1, 1 )
return OneBlockProviderFromNode( self._conf, self._node, self._breaker, self._exception_reporter, self._thread_pool )
self._thread_pool = MassiveBlocksDataProviderHiveRpc.create_thread_pool(1, 1)
return OneBlockProviderFromNode(
self._conf, self._node, self._breaker, self._exception_reporter, self._thread_pool
)

def __exit__(self, exc_type, exc_value, traceback):
if self._databases_for_massive_sync:
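
For context on the factory above: it is a context manager that picks a hived-DB-backed provider when `hived_database_url` is configured and falls back to the RPC-backed one otherwise. A minimal usage sketch, assuming the hivemind package layout; the `breaker` and `reporter` callables here are hypothetical stand-ins, not the project's real signatures:

# Sketch only: assumes OneBlockProviderFactory is importable and conf/node_client exist.
def fetch_one_block(conf, node_client, block_num):
    breaker = lambda: True    # hypothetical: "keep running" predicate
    reporter = lambda: None   # hypothetical: no-op exception reporter
    with OneBlockProviderFactory(conf, node_client, breaker, reporter) as provider:
        return provider.get_block(block_num)
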
@@ -6,10 +6,13 @@ from hive.utils.stats import Stats

log = logging.getLogger(__name__)


class StaleHeadException(Exception):
"""Raised when the head block appears to be too old."""

pass


class BlockSchedule:
"""Maintains a self-adjusting schedule which anticipates new blocks."""


@@ -34,8 +37,7 @@ class BlockSchedule:
while head_time >= self._next_expected:
self._advance()
if head_time < self._next_expected:
log.warning("%d blocks behind",
self._head_num - num)
log.warning("%d blocks behind", self._head_num - num)

# if head is behind, sleep until ready
while self._head_num < num:

@@ -62,8 +64,7 @@ class BlockSchedule:
self._last_date = date
else:
self._drift_backward()
log.info("block %d not available. head:%s drift:%fs",
num, self._head_num, self._drift)
log.info("block %d not available. head:%s drift:%fs", num, self._head_num, self._drift)

def _check_head_date(self, num, date):
"""Sanity-checking of head block date.
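
The schedule logic above leans on Hive's fixed 3-second block cadence: the next expected block time advances by one interval per block, and the drift helpers nudge the estimate when a block is late. A toy illustration of that arithmetic (a simplified stand-in, not the real BlockSchedule):

from datetime import datetime, timedelta

BLOCK_INTERVAL = timedelta(seconds=3)  # Hive produces one block every 3 seconds

class ToySchedule:
    def __init__(self, head_num, head_time):
        self.head_num = head_num
        self.next_expected = head_time + BLOCK_INTERVAL

    def advance(self):
        # one block produced -> expect the next one a full interval later
        self.head_num += 1
        self.next_expected += BLOCK_INTERVAL

s = ToySchedule(1000, datetime(2021, 1, 1, 12, 0, 0))
s.advance()
assert s.head_num == 1001 and s.next_expected == datetime(2021, 1, 1, 12, 0, 6)
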
@@ -8,14 +8,19 @@ from hive.indexer.block import Block

log = logging.getLogger(__name__)


class ForkException(Exception):
"""Raised when a non-trivial fork is encountered."""

pass


class MicroForkException(Exception):
"""Raised when a potentially trivial fork is encountered."""

pass


class BlockQueue:
"""A block queue with fork detection and adjustable length buffer.


@@ -24,6 +29,7 @@ class BlockQueue:

Throws ForkException; or MicroForkException if the fork seems to be
confined to the buffer (ie easily recoverable by restarting stream)."""

def __init__(self, max_size, prev_block):
self._max_size = max_size
self._prev = prev_block

@@ -36,7 +42,7 @@ class BlockQueue:
MicroForkException is thrown; otherwise, ForkException."""
if self._prev.get_hash() != block.get_previous_block_hash():
fork = f"{self._prev}--> {block.get_previous_block_hash()}->{block.get_hash()}"
if self._queue: # if using max_size>0, fork might be in buffer only
if self._queue:  # if using max_size>0, fork might be in buffer only
buff = self.size()
alert = "NOTIFYALERT " if buff < self._max_size else ""
raise MicroForkException("%squeue:%d %s" % (alert, buff, fork))

@@ -51,11 +57,14 @@ class BlockQueue:
"""Count blocks in our queue."""
return len(self._queue)


class BlockStream:
"""ETA-based block streamer."""

@classmethod
def stream(cls, conf, client, start_block, breaker, exception_reporter, min_gap=0, max_gap=100, do_stale_block_check=True):
def stream(
cls, conf, client, start_block, breaker, exception_reporter, min_gap=0, max_gap=100, do_stale_block_check=True
):
"""Instantiates a BlockStream and returns a generator."""
streamer = BlockStream(conf, client, min_gap, max_gap)
return streamer.start(start_block, do_stale_block_check, breaker, exception_reporter)

@@ -77,10 +86,10 @@ class BlockStream:
Will run forever unless `max_gap` is specified and exceeded.
"""

with OneBlockProviderFactory( self._conf, self._client, breaker, exception_reporter ) as one_block_provider:
with OneBlockProviderFactory(self._conf, self._client, breaker, exception_reporter) as one_block_provider:
curr = start_block
head = self._client.head_block()
prev = one_block_provider.get_block( curr - 1 )
prev = one_block_provider.get_block(curr - 1)

assert prev
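
To make the fork check above concrete: a micro-fork is one whose divergence point still sits inside the queue's buffer, so restarting the stream from the buffered blocks recovers it. A toy sketch of the hash-linking test, with simplified objects rather than the real Block interface:

class ToyBlock:
    def __init__(self, num, hash_, prev_hash):
        self.num, self.hash_, self.prev_hash = num, hash_, prev_hash

def links(prev, block):
    # mirrors BlockQueue's check: a child must reference its parent's hash
    return prev.hash_ == block.prev_hash

a = ToyBlock(1, 'aa', '00')
b = ToyBlock(2, 'bb', 'aa')   # links to a: same chain
c = ToyBlock(2, 'cc', 'xx')   # does not link to a: fork detected
assert links(a, b) and not links(a, c)
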
@@ -14,10 +14,12 @@ from hive.indexer.mock_vops_provider import MockVopsProvider

logger = logging.getLogger(__name__)


class SteemClient:
"""Handles upstream calls to jussi/steemd, with batching and retrying."""

# dangerous default value of url but it should be fine since we are not writing to it
def __init__(self, url={"default" : 'https://api.hive.blog'}, max_batch=50, max_workers=1, max_retries=-1):
def __init__(self, url={"default": 'https://api.hive.blog'}, max_batch=50, max_workers=1, max_retries=-1):
assert url, 'steem-API endpoints undefined'
assert "default" in url, "Url should have default endpoint defined"
assert max_batch > 0 and max_batch <= 5000

@@ -37,7 +39,7 @@ class SteemClient:
assert accounts, "no accounts passed to get_accounts"
assert len(accounts) <= 1000, "max 1000 accounts"
ret = self.__exec('get_accounts', [accounts])
assert len(accounts) == len(ret), (f"requested {len(accounts)} accounts got {len(ret)}")
assert len(accounts) == len(ret), f"requested {len(accounts)} accounts got {len(ret)}"
return ret

def get_all_account_names(self):

@@ -63,7 +65,7 @@ class SteemClient:
if 'block' in result:
ret = result['block']

#logger.info("Found real block %d with timestamp: %s", num, ret['timestamp'])
# logger.info("Found real block %d with timestamp: %s", num, ret['timestamp'])

MockBlockProvider.set_last_real_block_num_date(num, ret['timestamp'], ret['block_id'])
data = MockBlockProvider.get_block_data(num)

@@ -74,13 +76,17 @@ class SteemClient:
# if block does not exist in hived but exist in Mock Provider
# return block from block provider
mocked_block = MockBlockProvider.get_block_data(num, True)
if mocked_block is not None: # during regular live sync blocks can be missing and there are no mocks either
if mocked_block is not None:  # during regular live sync blocks can be missing and there are no mocks either
logger.warning(f"Pure mock block: id {mocked_block['block_id']}, previous {mocked_block['previous']}")
return mocked_block

def stream_blocks(self, conf, start_from, breaker, exception_reporter, trail_blocks=0, max_gap=100, do_stale_block_check=True):
def stream_blocks(
self, conf, start_from, breaker, exception_reporter, trail_blocks=0, max_gap=100, do_stale_block_check=True
):
"""Stream blocks. Returns a generator."""
return BlockStream.stream(conf, self, start_from, breaker, exception_reporter, trail_blocks, max_gap, do_stale_block_check)
return BlockStream.stream(
conf, self, start_from, breaker, exception_reporter, trail_blocks, max_gap, do_stale_block_check
)

def _gdgp(self):
ret = self.__exec('get_dynamic_global_properties')

@@ -89,7 +95,7 @@ class SteemClient:
if mock_max_block_number > ret['head_block_number']:
ret['time'] = MockBlockProvider.get_block_data(mock_max_block_number)['timestamp']
ret['head_block_number'] = max([int(ret['head_block_number']), mock_max_block_number])
#ret['last_irreversible_block_num'] = max([int(ret['last_irreversible_block_num']), mock_max_block_number])
# ret['last_irreversible_block_num'] = max([int(ret['last_irreversible_block_num']), mock_max_block_number])
return ret

def head_time(self):

@@ -109,9 +115,14 @@ class SteemClient:
dgpo = self._gdgp()

# remove unused/deprecated keys
unused = ['total_pow', 'num_pow_witnesses', 'confidential_supply',
'confidential_sbd_supply', 'total_reward_fund_steem',
'total_reward_shares2']
unused = [
'total_pow',
'num_pow_witnesses',
'confidential_supply',
'confidential_sbd_supply',
'total_reward_fund_steem',
'total_reward_shares2',
]
for key in unused:
if key in dgpo:
del dgpo[key]

@@ -120,7 +131,8 @@ class SteemClient:
'dgpo': dgpo,
'usd_per_steem': self._get_feed_price(),
'sbd_per_steem': self._get_steem_price(),
'steem_per_mvest': SteemClient._get_steem_per_mvest(dgpo)}
'steem_per_mvest': SteemClient._get_steem_per_mvest(dgpo),
}

@staticmethod
def _get_steem_per_mvest(dgpo):

@@ -176,9 +188,15 @@ class SteemClient:
return [blocks[x] for x in block_nums]

def get_virtual_operations(self, block):
""" Get virtual ops from block """
result = self.__exec('get_ops_in_block', {"block_num":block, "only_virtual":True})
tracked_ops = ['author_reward_operation', 'comment_reward_operation', 'effective_comment_vote_operation', 'comment_payout_update_operation', 'ineffective_delete_comment_operation']
"""Get virtual ops from block"""
result = self.__exec('get_ops_in_block', {"block_num": block, "only_virtual": True})
tracked_ops = [
'author_reward_operation',
'comment_reward_operation',
'effective_comment_vote_operation',
'comment_payout_update_operation',
'ineffective_delete_comment_operation',
]
ret = []
result = result['ops'] if 'ops' in result else []
for vop in result:

@@ -187,46 +205,62 @@ class SteemClient:
return ret

def enum_virtual_ops(self, conf, begin_block, end_block):
""" Get virtual ops for range of blocks """
"""Get virtual ops for range of blocks"""

ret = {}

from_block = begin_block

#According to definition of hive::plugins::account_history::enum_vops_filter:
# According to definition of hive::plugins::account_history::enum_vops_filter:

author_reward_operation = 0x000002
comment_reward_operation = 0x000008
effective_comment_vote_operation = 0x400000
comment_payout_update_operation = 0x000800
ineffective_delete_comment_operation = 0x800000
author_reward_operation = 0x000002
comment_reward_operation = 0x000008
effective_comment_vote_operation = 0x400000
comment_payout_update_operation = 0x000800
ineffective_delete_comment_operation = 0x800000

tracked_ops_filter = author_reward_operation | comment_reward_operation | effective_comment_vote_operation | comment_payout_update_operation | ineffective_delete_comment_operation
tracked_ops_filter = (
author_reward_operation
| comment_reward_operation
| effective_comment_vote_operation
| comment_payout_update_operation
| ineffective_delete_comment_operation
)

resume_on_operation = 0

while from_block < end_block:
call_result = self.__exec('enum_virtual_ops', {"block_range_begin":from_block, "block_range_end":end_block
, "group_by_block": True, "include_reversible": True, "operation_begin": resume_on_operation, "limit": 1000, "filter": tracked_ops_filter
})
call_result = self.__exec(
'enum_virtual_ops',
{
"block_range_begin": from_block,
"block_range_end": end_block,
"group_by_block": True,
"include_reversible": True,
"operation_begin": resume_on_operation,
"limit": 1000,
"filter": tracked_ops_filter,
},
)

if conf.get('log_virtual_op_calls'):
call = f"""
Call enum_virtual_ops:
Query: {{"block_range_begin":{from_block}, "block_range_end":{end_block}, "group_by_block": True, "operation_begin": {resume_on_operation}, "limit": 1000, "filter": {tracked_ops_filter} }}
Response: {call_result}"""
logger.info( call )
logger.info(call)


one_block_ops = {opb["block"] : {"ops":[op["op"] for op in opb["ops"]]} for opb in call_result["ops_by_block"]}
one_block_ops = {
opb["block"]: {"ops": [op["op"] for op in opb["ops"]]} for opb in call_result["ops_by_block"]
}

if one_block_ops:
first_block = list(one_block_ops.keys())[0]
# if we continue collecting ops from previous iteration
if first_block in ret:
ret.update( { first_block : { "ops":ret[ first_block ]["ops"] + one_block_ops[ first_block ]["ops"]} } )
one_block_ops.pop( first_block, None )
ret.update( one_block_ops )
ret.update({first_block: {"ops": ret[first_block]["ops"] + one_block_ops[first_block]["ops"]}})
one_block_ops.pop(first_block, None)
ret.update(one_block_ops)

resume_on_operation = call_result['next_operation_begin'] if 'next_operation_begin' in call_result else 0


@@ -236,7 +270,9 @@ class SteemClient:
break

if next_block < begin_block:
logger.error( f"Next block nr {next_block} returned by enum_virtual_ops is smaller than begin block {begin_block}." )
logger.error(
f"Next block nr {next_block} returned by enum_virtual_ops is smaller than begin block {begin_block}."
)
break

# Move to next block only if operations from current one have been processed completely.

@@ -247,8 +283,8 @@ class SteemClient:
return ret

def get_comment_pending_payouts(self, comments):
""" Get comment pending payout data """
ret = self.__exec('get_comment_pending_payouts', {'comments':comments})
"""Get comment pending payout data"""
ret = self.__exec('get_comment_pending_payouts', {'comments': comments})
return ret['cashout_infos']

def __exec(self, method, params=None):

@@ -270,17 +306,13 @@ class SteemClient:
result = []
if method in self._client:
for part in self._client[method].exec_multi(
method,
params,
max_workers=self._max_workers,
batch_size=self._max_batch):
method, params, max_workers=self._max_workers, batch_size=self._max_batch
):
result.extend(part)
else:
for part in self._client["default"].exec_multi(
method,
params,
max_workers=self._max_workers,
batch_size=self._max_batch):
method, params, max_workers=self._max_workers, batch_size=self._max_batch
):
result.extend(part)

Stats.log_steem(method, perf() - start, len(params))
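
The `tracked_ops_filter` above is a plain bitwise OR of the per-operation flags defined by hived's account_history plugin, so the five tracked virtual operations collapse into a single integer filter. The arithmetic can be checked directly, with the flag values copied from the diff above:

author_reward_operation = 0x000002
comment_reward_operation = 0x000008
comment_payout_update_operation = 0x000800
effective_comment_vote_operation = 0x400000
ineffective_delete_comment_operation = 0x800000

tracked_ops_filter = (
    author_reward_operation
    | comment_reward_operation
    | effective_comment_vote_operation
    | comment_payout_update_operation
    | ineffective_delete_comment_operation
)
assert tracked_ops_filter == 0xC0080A  # 0x2 | 0x8 | 0x800 | 0x400000 | 0x800000
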
@@ -1,11 +1,13 @@
"""Defines exceptions which can be thrown by HttpClient."""


def _str_trunc(value, max_length):
value = str(value)
if len(value) > max_length:
value = value[0:max_length] + '...'
return value


class RPCError(Exception):
"""Raised when an error is returned from upstream (jussi/steem)."""


@@ -38,7 +40,7 @@ class RPCError(Exception):
would be more appropriate but since hive uses only 'prepared
queries', fatal errors can only be due to dev error.
"""
#pylint: disable=unused-argument
# pylint: disable=unused-argument
return True

@staticmethod

@@ -48,11 +50,11 @@ class RPCError(Exception):
code = error['code'] if 'code' in error else -1

info = ''
if 'data' not in error: # eg db_lock_error
if 'data' not in error:  # eg db_lock_error
name = 'error'
elif 'name' in error['data']: # steemd errs
elif 'name' in error['data']:  # steemd errs
name = error['data']['name']
elif 'error_id' in error['data']: # jussi errs
elif 'error_id' in error['data']:  # jussi errs
if 'exception' in error['data']:
name = error['data']['exception']
else:

@@ -64,6 +66,8 @@ class RPCError(Exception):

return f"{name}[{code}]: `{message}` {info}"


class RPCErrorFatal(RPCError):
"""Represents a steemd error which is not recoverable."""

pass
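
As a quick illustration of the helper above: `_str_trunc` keeps the first `max_length` characters and marks the cut with an ellipsis, while shorter values pass through unchanged (behavior read straight from the function body shown):

def _str_trunc(value, max_length):
    value = str(value)
    if len(value) > max_length:
        value = value[0:max_length] + '...'
    return value

assert _str_trunc('db_lock_error: retry later', 12) == 'db_lock_erro...'
assert _str_trunc(42, 12) == '42'   # non-strings are stringified first
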
@@ -22,6 +22,7 @@ from hive.steem.signal import can_continue_thread
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
log = logging.getLogger(__name__)


def validated_json_payload(response):
"""Asserts that the HTTP response was successful and valid JSON."""
if response.status != 200:

@@ -35,6 +36,7 @@ def validated_json_payload(response):

return payload


def validated_result(payload, body):
"""Asserts that the JSON-RPC payload is valid/sane."""
assert payload, "response entirely blank"

@@ -48,6 +50,7 @@ def validated_result(payload, body):
assert 'result' in payload, "response with no result key"
return payload['result']


def _validated_batch_result(payload, body):
"""Asserts that the batch payload, and each item, is valid/sane."""
assert isinstance(payload, list), "batch result must be list"

@@ -60,6 +63,7 @@ def _validated_batch_result(payload, body):
assert 'result' in item, "batch[%d] resp empty" % idx
return [item['result'] for item in payload]


def chunkify(iterable, chunksize=3000):
"""Yields chunks of an iterator."""
i = 0

@@ -74,11 +78,13 @@ def chunkify(iterable, chunksize=3000):
if chunk:
yield chunk


def _rpc_body(method, args, _id=0):
if args is None:
args = [] if 'condenser_api' in method else {}
return dict(jsonrpc="2.0", id=_id, method=method, params=args)


class HttpClient(object):
"""Simple Steem JSON-HTTP-RPC API"""


@@ -90,13 +96,14 @@ class HttpClient(object):
get_dynamic_global_properties='database_api',
get_comment_pending_payouts='database_api',
get_ops_in_block='account_history_api',
enum_virtual_ops='account_history_api'
enum_virtual_ops='account_history_api',
)

def __init__(self, nodes, max_retries, **kwargs):
if kwargs.get('tcp_keepalive', True):
socket_options = HTTPConnection.default_socket_options + \
[(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ]
socket_options = HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
else:
socket_options = HTTPConnection.default_socket_options


@@ -107,11 +114,10 @@ class HttpClient(object):
socket_options=socket_options,
block=False,
retries=Retry(total=False),
headers={
'Content-Type': 'application/json',
'accept-encoding': 'gzip'},
headers={'Content-Type': 'application/json', 'accept-encoding': 'gzip'},
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
ca_certs=certifi.where(),
)

self.nodes = cycle(nodes)
self.url = ''

@@ -137,7 +143,7 @@ class HttpClient(object):
if not is_batch:
body = _rpc_body(fqm, args, -1)
else:
body = [_rpc_body(fqm, arg, i+1) for i, arg in enumerate(args)]
body = [_rpc_body(fqm, arg, i + 1) for i, arg in enumerate(args)]

return body


@@ -158,9 +164,7 @@ class HttpClient(object):
response = self.request(body=body_data)
secs = perf() - start

info = {'jussi-id': response.headers.get('x-jussi-request-id'),
'secs': round(secs, 3),
'try': tries}
info = {'jussi-id': response.headers.get('x-jussi-request-id'), 'secs': round(secs, 3), 'try': tries}

# strict validation/asserts, error check
payload = validated_json_payload(response)

@@ -175,11 +179,10 @@ class HttpClient(object):
raise e

except (Exception, socket.timeout) as e:
if secs < 0: # request failed
if secs < 0:  # request failed
secs = perf() - start
info = {'secs': round(secs, 3), 'try': tries}
log.warning('%s failed in %.1fs. try %d. %s - %s',
what, secs, tries, info, repr(e))
log.warning('%s failed in %.1fs. try %d. %s - %s', what, secs, tries, info, repr(e))

if not can_continue_thread():
break

@@ -191,7 +194,7 @@ class HttpClient(object):
allowed_tries -= 1
if allowed_tries == 0:
break
if allowed_tries < 0: # case of infinite retries
if allowed_tries < 0:  # case of infinite retries
allowed_tries = 0

raise Exception("abort %s after %d tries" % (method, tries))

@@ -201,7 +204,7 @@ class HttpClient(object):
chunks = [[name, args, True] for args in chunkify(params, batch_size)]
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for items in executor.map(lambda tup: self.exec(*tup), chunks):
yield list(items) # (use of `map` preserves request order)
yield list(items)  # (use of `map` preserves request order)

def exec_multi_as_completed(self, name, params, max_workers, batch_size):
"""Process a batch as parallel requests; yields unordered."""
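
Two helpers above carry most of the batching behavior: `_rpc_body` builds a JSON-RPC 2.0 envelope (empty-list params for condenser_api methods, empty-dict otherwise) and the batch path assigns 1-based request ids. A small check of that contract, re-implemented inline from the code shown:

def _rpc_body(method, args, _id=0):
    if args is None:
        args = [] if 'condenser_api' in method else {}
    return dict(jsonrpc="2.0", id=_id, method=method, params=args)

body = _rpc_body('condenser_api.get_block', None, _id=7)
assert body == {'jsonrpc': '2.0', 'id': 7, 'method': 'condenser_api.get_block', 'params': []}

# a batch request is just a list of bodies with 1-based ids, as in exec():
batch = [_rpc_body('condenser_api.get_block', [n], i + 1) for i, n in enumerate([1, 2, 3])]
assert [b['id'] for b in batch] == [1, 2, 3]
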
@@ -15,15 +15,19 @@ FINISH_SIGNAL_DURING_SYNC = AtomicLong(0)
def finish_signals_handler(signal, frame):
global FINISH_SIGNAL_DURING_SYNC
FINISH_SIGNAL_DURING_SYNC += 1
log.info(f"""
log.info(
f"""
**********************************************************
CAUGHT {'SIGINT' if signal == SIGINT else 'SIGTERM'}. PLEASE WAIT... PROCESSING DATA IN QUEUES...
**********************************************************
""" )
"""
)


def set_exception_thrown():
global EXCEPTION_THROWN
EXCEPTION_THROWN += 1


def can_continue_thread():
return EXCEPTION_THROWN.value == 0 and FINISH_SIGNAL_DURING_SYNC.value == 0
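
The two counters above give worker threads a single cheap predicate to poll: the first SIGINT/SIGTERM or recorded exception flips `can_continue_thread()` to False so queued work can drain instead of dying mid-batch. A stand-alone re-creation of the predicate, with `AtomicLong` swapped for plain module-level ints for the sake of the sketch:

EXCEPTION_THROWN = 0
FINISH_SIGNAL_DURING_SYNC = 0

def can_continue_thread():
    # threads keep working only while no signal or exception was recorded
    return EXCEPTION_THROWN == 0 and FINISH_SIGNAL_DURING_SYNC == 0

assert can_continue_thread()
FINISH_SIGNAL_DURING_SYNC += 1   # what finish_signals_handler does on SIGINT/SIGTERM
assert not can_continue_thread()
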
@@ -3,26 +3,28 @@
import ujson as json
from hive.utils.normalize import trunc


def get_profile_str(account):
_posting_json_metadata = ""
_json_metadata = ""

if account is not None:
if 'posting_json_metadata' in account:
_posting_json_metadata = account['posting_json_metadata']
if 'json_metadata' in account:
_json_metadata = account['json_metadata']
if 'posting_json_metadata' in account:
_posting_json_metadata = account['posting_json_metadata']
if 'json_metadata' in account:
_json_metadata = account['json_metadata']

return (_posting_json_metadata, _json_metadata)

return ( _posting_json_metadata, _json_metadata )

def get_db_profile(posting_json_metadata, json_metadata):
prof = {}
json_metadata_is_read = False

#`posting_json_metadata` should dominate, so at the start it is necessary to load `posting_json_metadata`
# `posting_json_metadata` should dominate, so at the start it is necessary to load `posting_json_metadata`
# We can skip `posting_json_metadata` loading when it doesn't exist or the content doesn't make any sense (e.g. '' or '{}')
try:
if posting_json_metadata is None or len( posting_json_metadata ) <= 2:
if posting_json_metadata is None or len(posting_json_metadata) <= 2:
json_metadata_is_read = True
prof = json.loads(json_metadata)['profile']
else:

@@ -36,6 +38,7 @@ def get_db_profile(posting_json_metadata, json_metadata):

return prof


def get_profile(account):
prof = {}


@@ -54,6 +57,7 @@ def get_profile(account):

return prof


def process_profile(prof):
"""Returns profile data."""


@@ -105,18 +109,22 @@ def process_profile(prof):
muted_list_description=muted_list_description or '',
)


def safe_db_profile_metadata(posting_json_metadata, json_metadata):
prof = get_db_profile(posting_json_metadata, json_metadata)
return process_profile(prof)
prof = get_db_profile(posting_json_metadata, json_metadata)
return process_profile(prof)


def safe_profile_metadata(account):
prof = get_profile(account)
return process_profile(prof)
prof = get_profile(account)
return process_profile(prof)


def _valid_url_proto(url):
assert url
return url[0:7] == 'http://' or url[0:8] == 'https://'


def _char_police(string):
"""If a string has bad chars, ignore it.
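
The precedence rule encoded above is worth spelling out: `posting_json_metadata` wins when it holds a non-trivial JSON document (more than the two characters of '' or '{}'), otherwise the reader falls back to the legacy `json_metadata` field. A toy illustration of the fallback condition, without the full error handling of `get_db_profile`:

import json

def pick_profile(posting_json_metadata, json_metadata):
    # fall back when posting metadata is missing or trivially empty ('' or '{}')
    if posting_json_metadata is None or len(posting_json_metadata) <= 2:
        return json.loads(json_metadata).get('profile', {})
    return json.loads(posting_json_metadata).get('profile', {})

assert pick_profile('{}', '{"profile": {"name": "alice"}}') == {'name': 'alice'}
assert pick_profile('{"profile": {"name": "bob"}}', '{}') == {'name': 'bob'}
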
@@ -1,4 +1,3 @@

def update_communities_posts_and_rank( db ):
def update_communities_posts_and_rank(db):
sql = "SELECT update_communities_posts_data_and_rank()"
db.query_no_return(sql)
@@ -9,15 +9,17 @@ log = logging.getLogger(__name__)
# Value validation
# ----------------


def valid_command(val, valid=[]):
"""Validate given command among accepted set."""
#pylint: disable=dangerous-default-value
# pylint: disable=dangerous-default-value
assert val in valid, f'invalid command: {val}'
return val


def valid_keys(obj, required=[], optional=[]):
"""Compare a set of input keys to expected and optional keys."""
#pylint: disable=dangerous-default-value
# pylint: disable=dangerous-default-value
keys = obj.keys()
missing = required - keys
assert not missing, f'missing required keys: {missing}'

@@ -25,29 +27,39 @@ def valid_keys(obj, required=[], optional=[]):
assert not extra, f'extraneous keys: {extra}'
return keys


VALID_DATE = re.compile(r'^\d\d\d\d\-\d\d-\d\dT\d\d:\d\d:\d\d$')


def valid_date(val):
"""Valid datetime (YYYY-MM-DDTHH:MM:SS)"""
assert VALID_DATE.match(val), f'invalid date: {val}'
return val

VALID_LANG = ("ab,aa,af,ak,sq,am,ar,an,hy,as,av,ae,ay,az,bm,ba,eu,be,bn,bh,bi,"
"bs,br,bg,my,ca,ch,ce,ny,zh,cv,kw,co,cr,hr,cs,da,dv,nl,dz,en,eo,"
"et,ee,fo,fj,fi,fr,ff,gl,ka,de,el,gn,gu,ht,ha,he,hz,hi,ho,hu,ia,"
"id,ie,ga,ig,ik,io,is,it,iu,ja,jv,kl,kn,kr,ks,kk,km,ki,rw,ky,kv,"
"kg,ko,ku,kj,la,lb,lg,li,ln,lo,lt,lu,lv,gv,mk,mg,ms,ml,mt,mi,mr,"
"mh,mn,na,nv,nd,ne,ng,nb,nn,no,ii,nr,oc,oj,cu,om,or,os,pa,pi,fa,"
"pl,ps,pt,qu,rm,rn,ro,ru,sa,sc,sd,se,sm,sg,sr,gd,sn,si,sk,sl,so,"
"st,es,su,sw,ss,sv,ta,te,tg,th,ti,bo,tk,tl,tn,to,tr,ts,tt,tw,ty,"
"ug,uk,ur,uz,ve,vi,vo,wa,cy,wo,fy,xh,yi,yo,za").split(',')

VALID_LANG = (
"ab,aa,af,ak,sq,am,ar,an,hy,as,av,ae,ay,az,bm,ba,eu,be,bn,bh,bi,"
"bs,br,bg,my,ca,ch,ce,ny,zh,cv,kw,co,cr,hr,cs,da,dv,nl,dz,en,eo,"
"et,ee,fo,fj,fi,fr,ff,gl,ka,de,el,gn,gu,ht,ha,he,hz,hi,ho,hu,ia,"
"id,ie,ga,ig,ik,io,is,it,iu,ja,jv,kl,kn,kr,ks,kk,km,ki,rw,ky,kv,"
"kg,ko,ku,kj,la,lb,lg,li,ln,lo,lt,lu,lv,gv,mk,mg,ms,ml,mt,mi,mr,"
"mh,mn,na,nv,nd,ne,ng,nb,nn,no,ii,nr,oc,oj,cu,om,or,os,pa,pi,fa,"
"pl,ps,pt,qu,rm,rn,ro,ru,sa,sc,sd,se,sm,sg,sr,gd,sn,si,sk,sl,so,"
"st,es,su,sw,ss,sv,ta,te,tg,th,ti,bo,tk,tl,tn,to,tr,ts,tt,tw,ty,"
"ug,uk,ur,uz,ve,vi,vo,wa,cy,wo,fy,xh,yi,yo,za"
).split(',')


def valid_lang(val):
"""Valid ISO-639-1 language (https://en.wikipedia.org/wiki/ISO_639-1)"""
assert val in VALID_LANG, f'invalid ISO639-1 lang: {val}'
return val


# Custom op validation
# --------------------


def parse_op_json(op, block_num):
"""Parse a custom_json op, validating its structure."""
# read custom json

@@ -66,6 +78,7 @@ def parse_op_json(op, block_num):

return op_json


def valid_op_json(op_json):
"""Asserts object is in the form of `[command, {payload}]`."""
assert isinstance(op_json, list), 'json must be a list'
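
The validators above are assertion-style: each returns the value on success and raises `AssertionError` otherwise, which is how the community-op parser rejects malformed payloads. A quick check against the date pattern shown (regex copied from the code above):

import re

VALID_DATE = re.compile(r'^\d\d\d\d\-\d\d-\d\dT\d\d:\d\d:\d\d$')

assert VALID_DATE.match('2021-03-04T12:00:00')
assert not VALID_DATE.match('2021-03-04 12:00:00')   # space instead of 'T'
assert not VALID_DATE.match('21-03-04T12:00:00')     # two-digit year rejected
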
@@ -1,30 +1,44 @@
import os, psutil
from hive.utils.stats import PrometheusClient, BroadcastObject

def log_memory_usage(memtypes=["rss", "vms", "shared"], broadcast = True) -> str:
"""
Logs current memory types; additionally broadcasts them if `broadcast` is set to True (default)

Available memtypes: rss, vms, shared, text, lib, data, dirty
"""

def format_bytes(val : int):
assert isinstance(val, int) or isinstance(val, float), 'invalid data type, required int or float'
return f'{ val / 1024.0 / 1024.0 :.2f} MB'
def log_memory_usage(memtypes=["rss", "vms", "shared"], broadcast=True) -> str:
"""
Logs current memory types; additionally broadcasts them if `broadcast` is set to True (default)

Available memtypes: rss, vms, shared, text, lib, data, dirty
"""

def format_bytes(val: int):
assert isinstance(val, int) or isinstance(val, float), 'invalid data type, required int or float'
return f'{ val / 1024.0 / 1024.0 :.2f} MB'

human_readable = {
"rss": "physical_memory",
"vms": "virtual_memory",
"shared": "shared_memory",
"text": "used_by_executable",
"lib": "used_by_shared_libraries",
}
stats = psutil.Process(
os.getpid()
).memory_info()  # docs: https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
if broadcast:
PrometheusClient.broadcast(
[BroadcastObject(f'hivemind_memory_{key}', getattr(stats, key), 'b') for key in stats._fields]
)  # broadcast to prometheus
return f"memory usage report: { ', '.join( [ f'{ human_readable.get(k, k) } = { format_bytes(getattr(stats, k)) }' for k in memtypes ] ) }"

human_readable = { "rss": "physical_memory", "vms": "virtual_memory", "shared": "shared_memory", "text": "used_by_executable", "lib": "used_by_shared_libraries" }
stats = psutil.Process(os.getpid()).memory_info() # docs: https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
if broadcast:
PrometheusClient.broadcast([ BroadcastObject(f'hivemind_memory_{key}', getattr(stats, key), 'b') for key in stats._fields ]) # broadcast to prometheus
return f"memory usage report: { ', '.join( [ f'{ human_readable.get(k, k) } = { format_bytes(getattr(stats, k)) }' for k in memtypes ] ) }"

def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
yield lst[i : i + n]


def show_app_version(log, database_head_block, patch_level_data):
from hive.version import VERSION, GIT_REVISION, GIT_DATE

log.info("hivemind_version : %s", VERSION)
log.info("hivemind_git_rev : %s", GIT_REVISION)
log.info("hivemind_git_date : %s", GIT_DATE)

@@ -32,5 +46,5 @@ def show_app_version(log, database_head_block, patch_level_data):
log.info("database_schema_version : %s", patch_level_data['level'])
log.info("database_patch_date : %s", patch_level_data['patch_date'])
log.info("database_patched_to_revision : %s", patch_level_data['patched_to_revision'])


log.info("database_head_block : %s", database_head_block)
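
`chunks` above is the standard stride-slicing generator; the last chunk simply carries whatever remainder is left. For example:

def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
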
@@ -20,28 +20,25 @@ NAI_PRECISION = {
'@@000000037': 6,
}

UNIT_NAI = {
'HBD' : '@@000000013',
'HIVE' : '@@000000021',
'VESTS' : '@@000000037'
}
UNIT_NAI = {'HBD': '@@000000013', 'HIVE': '@@000000021', 'VESTS': '@@000000037'}

# convert special chars into their octal formats recognized by sql
SPECIAL_CHARS = {
"\x00" : " ", # nul char cannot be stored in string column (ABW: if we ever find the need to store nul chars we'll need bytea, not text)
"\r" : "\\015",
"\n" : "\\012",
"\v" : "\\013",
"\f" : "\\014",
"\\" : "\\134",
"'" : "\\047",
"%" : "\\045",
"_" : "\\137",
":" : "\\072"
"\x00": " ",  # nul char cannot be stored in string column (ABW: if we ever find the need to store nul chars we'll need bytea, not text)
"\r": "\\015",
"\n": "\\012",
"\v": "\\013",
"\f": "\\014",
"\\": "\\134",
"'": "\\047",
"%": "\\045",
"_": "\\137",
":": "\\072",
}


def to_nai(value):
""" Convert various amount notations to nai notation """
"""Convert various amount notations to nai notation"""
ret = None
if isinstance(value, dict):
assert 'amount' in value, "amount not found in dict"

@@ -55,7 +52,7 @@ def to_nai(value):
nai = UNIT_NAI[unit]
precision = NAI_PRECISION[nai]
satoshis = int(decimal.Decimal(raw_amount) * (10**precision))
ret = {'amount' : str(satoshis), 'nai' : nai, 'precision' : precision}
ret = {'amount': str(satoshis), 'nai': nai, 'precision': precision}

elif isinstance(value, list):
satoshis, precision, nai = value

@@ -67,7 +64,7 @@ def to_nai(value):


def escape_characters(text):
""" Escape special characters """
"""Escape special characters"""
assert isinstance(text, str), f"Expected string got: {type(text)}"
if len(text.strip()) == 0:
return "'" + text + "'"

@@ -99,18 +96,22 @@ def escape_characters(text):
ret = ret + "'"
return ret


def vests_amount(value):
"""Returns a decimal amount, asserting units are VESTS"""
return parse_amount(value, 'VESTS')


def steem_amount(value):
"""Returns a decimal amount, asserting units are HIVE"""
return parse_amount(value, 'HIVE')


def sbd_amount(value):
"""Returns a decimal amount, asserting units are HBD"""
return parse_amount(value, 'HBD')


def parse_amount(value, expected_unit=None):
"""Parse steemd-style amount/asset value, return (decimal, name)."""
if isinstance(value, dict):

@@ -134,41 +135,48 @@ def parse_amount(value, expected_unit=None):
raise Exception(f"invalid input amount {repr(value)}")

if expected_unit:
# FIXME to be uncommented when payout collection is corrected
# assert unit == expected_unit, "Unexpected unit: %s" % unit
# FIXME to be uncommented when payout collection is corrected
# assert unit == expected_unit, "Unexpected unit: %s" % unit
return dec_amount

return (dec_amount, unit)


def amount(string):
"""Parse a steemd asset-amount as a Decimal(). Discard asset type."""
return parse_amount(string)[0]


def legacy_amount(value):
"""Get a pre-appbase-style amount string given a (numeric, asset-str)."""
if isinstance(value, str):
return value # already legacy
return value  # already legacy
amt, asset = parse_amount(value)
prec = {'HBD': 3, 'HIVE': 3, 'VESTS': 6}[asset]
tmpl = ("%%.%df %%s" % prec)
tmpl = "%%.%df %%s" % prec
return tmpl % (amt, asset)


def block_num(block):
"""Given a block object, returns the block number."""
return int(block['block_id'][:8], base=16)


def block_date(block):
"""Parse block timestamp into datetime object."""
return parse_time(block['timestamp'])


def parse_time(block_time):
"""Convert chain date into datetime object."""
return datetime.strptime(block_time, '%Y-%m-%dT%H:%M:%S')


def utc_timestamp(date):
"""Convert datetime to UTC unix timestamp."""
return date.replace(tzinfo=utc).timestamp()


def load_json_key(obj, key):
"""Given a dict, parse JSON in `key`. Blank dict on failure."""
if not obj[key]:

@@ -180,14 +188,16 @@ def load_json_key(obj, key):
return {}
return ret


def trunc(string, maxlen):
"""Truncate a string, with a 3-char penalty if maxlen exceeded."""
if string:
string = string.strip()
if len(string) > maxlen:
string = string[0:(maxlen-3)] + '...'
string = string[0 : (maxlen - 3)] + '...'
return string


def secs_to_str(secs):
"""Given number of seconds returns, e.g., `02h 29m 39s`"""
units = (('s', 60), ('m', 60), ('h', 24), ('d', 7))

@@ -198,12 +208,14 @@ def secs_to_str(secs):
rem = int(rem / cycle)
if not rem:
break
if rem: # leftover = weeks
if rem:  # leftover = weeks
out.append((rem, 'w'))
return ' '.join(["%02d%s" % tup for tup in out[::-1]])


def rep_log10(rep):
"""Convert raw steemd rep into a UI-ready value centered at 25."""

def _log10(string):
leading_digits = int(string[0:4])
log = math.log10(leading_digits) + 0.00000001

@@ -220,9 +232,10 @@ def rep_log10(rep):

out = _log10(rep)
out = max(out - 9, 0) * sign # @ -9, $1 earned is approx magnitude 1
out = (out * 9) + 25 # 9 points per magnitude. center at 25
out = (out * 9) + 25  # 9 points per magnitude. center at 25
return float(round(out, 2))


def rep_to_raw(rep):
"""Convert a UI-ready rep score back into its approx raw value."""
if not isinstance(rep, (str, float, int)):

@@ -235,14 +248,14 @@ def rep_to_raw(rep):
rep = abs(rep) + 9
return int(sign * pow(10, rep))


def safe_img_url(url, max_size=1024):
"""Given an image URL, strictly enforce size and validity."""
if (url and isinstance(url, str)
and len(url) < max_size
and url[0:4] == 'http'):
if url and isinstance(url, str) and len(url) < max_size and url[0:4] == 'http':
return url.strip()
return None


def strtobool(val):
"""Convert a booleany str to a bool.


@@ -258,6 +271,7 @@ def strtobool(val):
else:
raise ValueError(f"not booleany: {val!r}")


def int_log_level(str_log_level):
"""Get `logger`s internal int level from config string."""
if not str_log_level:
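
The NAI conversion above is worth a worked example: a legacy "1.500 HIVE"-style amount becomes {amount, nai, precision} by scaling to integer "satoshis" at 10^precision. The check below uses the NAI ids from the table above and the standard precisions (3 for HBD/HIVE, 6 for VESTS):

import decimal

UNIT_NAI = {'HBD': '@@000000013', 'HIVE': '@@000000021', 'VESTS': '@@000000037'}
NAI_PRECISION = {'@@000000013': 3, '@@000000021': 3, '@@000000037': 6}

raw_amount, unit = '1.500', 'HIVE'
nai = UNIT_NAI[unit]
precision = NAI_PRECISION[nai]
satoshis = int(decimal.Decimal(raw_amount) * (10**precision))  # 1.500 -> 1500
assert {'amount': str(satoshis), 'nai': nai, 'precision': precision} == \
    {'amount': '1500', 'nai': '@@000000021', 'precision': 3}
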
@@ -1,8 +1,9 @@
"""Methods for normalizing steemd post metadata."""
#pylint: disable=line-too-long,too-many-lines
# pylint: disable=line-too-long,too-many-lines

import re


def mentions(body):
"""Given a post body, return proper @-mentioned account names."""
# condenser:

@@ -11,8 +12,6 @@ def mentions(body):
# validMentionPrecedingChars = /(?:^|[^a-zA-Z0-9_!#$%&*@@]|(?:^|[^a-zA-Z0-9_+~.-])(?:rt|RT|rT|Rt):?)/
# endMentionMatch = regexSupplant(/^(?:#{atSigns}|[#{latinAccentChars}]|:\/\/)/);
matches = re.findall(
'(?:^|[^a-zA-Z0-9_!#$%&*@\\/])'
'(?:@)'
'([a-zA-Z0-9][a-zA-Z0-9\\-.]{1,14}[a-zA-Z0-9])'
'(?![a-z])', body)
'(?:^|[^a-zA-Z0-9_!#$%&*@\\/])' '(?:@)' '([a-zA-Z0-9][a-zA-Z0-9\\-.]{1,14}[a-zA-Z0-9])' '(?![a-z])', body
)
return {grp.lower() for grp in matches}
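
Note that Python joins the adjacent string literals above into one regex: an `@` must be preceded by a non-name character, the capture allows 3 to 16 character account names, and the lookahead rejects matches followed by another lowercase letter. A quick demonstration with the pattern copied verbatim:

import re

MENTION = (
    '(?:^|[^a-zA-Z0-9_!#$%&*@\\/])'
    '(?:@)'
    '([a-zA-Z0-9][a-zA-Z0-9\\-.]{1,14}[a-zA-Z0-9])'
    '(?![a-z])'
)

body = 'thanks @alice and @bob-2.dev! email not@this'
assert {m.lower() for m in re.findall(MENTION, body)} == {'alice', 'bob-2.dev'}
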
@@ -4,6 +4,7 @@
import cProfile
import pstats


class Profiler:
"""Context-based profiler."""

@@ -11,8 +11,9 @@ from os import getpid

log = logging.getLogger(__name__)


class BroadcastObject:
def __init__(self, category : str, value, unit):
def __init__(self, category: str, value, unit):
self.category = category
self.value = value
self.unit = unit

@@ -31,25 +32,27 @@ class BroadcastObject:

def __repr__(self):
return self.__str__()

def __str__(self):
return str(self.__dict__)


class PrometheusClient:

deamon = None
logs_to_broadcast = Queue()

@staticmethod
def work( port, pid ):
def work(port, pid):
try:
import prometheus_client as prom

prom.start_http_server(port)

gauges = {}

while pid_exists(pid):
value : BroadcastObject = PrometheusClient.logs_to_broadcast.get(True)
value: BroadcastObject = PrometheusClient.logs_to_broadcast.get(True)
value.debug()
value_name = value.name()


@@ -75,7 +78,8 @@ class PrometheusClient:
log.warn("Failed to import prometheus client. Online stats disabled")
return
from threading import Thread
PrometheusClient.deamon = Thread(target=PrometheusClient.work, args=[ port, getpid() ], daemon=True)

PrometheusClient.deamon = Thread(target=PrometheusClient.work, args=[port, getpid()], daemon=True)
PrometheusClient.deamon.start()

@staticmethod

@@ -90,6 +94,7 @@ class PrometheusClient:
else:
raise Exception(f"Not expected type. Should be list or BroadcastObject, but: {type(obj)} given")


class Stat:
def __init__(self, time):
self.time = time

@@ -111,6 +116,7 @@ class Stat:
def broadcast(self, name):
return BroadcastObject(name, self.time, 's')


class StatusManager:

# Fully abstract class

@@ -122,28 +128,28 @@ class StatusManager:
return perf()

@staticmethod
def stop( start : float ):
def stop(start: float):
return perf() - start

@staticmethod
def merge_dicts(od1, od2, broadcast : bool = False, total_broadcast : bool = True):
def merge_dicts(od1, od2, broadcast: bool = False, total_broadcast: bool = True):
if od2 is not None:
for k, v in od2.items():
if k in od1:
od1[k].update(v)
else:
od1[k] = v

if broadcast:
PrometheusClient.broadcast(v.broadcast(k))

if total_broadcast:
PrometheusClient.broadcast( od1[k].broadcast( f"{k}_total" ) )
PrometheusClient.broadcast(od1[k].broadcast(f"{k}_total"))

return od1

@staticmethod
def log_dict(col : dict) -> float:
def log_dict(col: dict) -> float:
sorted_stats = sorted(col.items(), key=lambda kv: kv[1], reverse=True)
measured_time = 0.0
for (k, v) in sorted_stats:

@@ -155,6 +161,7 @@ class StatusManager:
def print_row():
log.info("#" * 20)


class OPStat(Stat):
def __init__(self, time, count):
super().__init__(time)

@@ -163,11 +170,12 @@ class OPStat(Stat):
def __str__(self):
return f"Processed {self.count :.0f} times in {self.time :.5f} seconds"

def broadcast(self, name : str):
def broadcast(self, name: str):
n = name.lower()
if not n.endswith('operation'):
n = f"{n}_operation"
return list([ super().broadcast(n), BroadcastObject(n + "_count", self.count, 'b') ])
return list([super().broadcast(n), BroadcastObject(n + "_count", self.count, 'b')])


class OPStatusManager(StatusManager):
# Summary for whole sync

@@ -177,7 +185,7 @@ class OPStatusManager(StatusManager):
cpbs = {}

@staticmethod
def op_stats( name, time, processed = 1 ):
def op_stats(name, time, processed=1):
if name in OPStatusManager.cpbs.keys():
OPStatusManager.cpbs[name].time += time
OPStatusManager.cpbs[name].count += processed

@@ -187,29 +195,27 @@ class OPStatusManager(StatusManager):
@staticmethod
def next_blocks():
OPStatusManager.global_stats = StatusManager.merge_dicts(
OPStatusManager.global_stats,
OPStatusManager.cpbs,
True
OPStatusManager.global_stats, OPStatusManager.cpbs, True
)
OPStatusManager.cpbs.clear()

@staticmethod
def log_global(label : str):
def log_global(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(OPStatusManager.global_stats)
log.info(f"Total time for processing operations time: {tm :.4f}s.")
return tm

@staticmethod
def log_current(label : str):
def log_current(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(OPStatusManager.cpbs)
log.info(f"Current time for processing operations time: {tm :.4f}s.")
return tm


class FlushStat(Stat):
def __init__(self, time, pushed):
super().__init__(time)

@@ -218,9 +224,10 @@ class FlushStat(Stat):
def __str__(self):
return f"Pushed {self.pushed :.0f} records in {self.time :.4f} seconds"

def broadcast(self, name : str):
def broadcast(self, name: str):
n = f"flushing_{name.lower()}"
return list([ super().broadcast(n), BroadcastObject(n + "_items", self.pushed, 'b') ])
return list([super().broadcast(n), BroadcastObject(n + "_items", self.pushed, 'b')])


class FlushStatusManager(StatusManager):
# Summary for whole sync

@@ -240,14 +247,12 @@ class FlushStatusManager(StatusManager):
@staticmethod
def next_blocks():
FlushStatusManager.global_stats = StatusManager.merge_dicts(
FlushStatusManager.global_stats,
FlushStatusManager.current_flushes,
True
FlushStatusManager.global_stats, FlushStatusManager.current_flushes, True
)
FlushStatusManager.current_flushes.clear()

@staticmethod
def log_global(label : str):
def log_global(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(FlushStatusManager.global_stats)

@@ -255,13 +260,14 @@ class FlushStatusManager(StatusManager):
return tm

@staticmethod
def log_current(label : str):
def log_current(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(FlushStatusManager.current_flushes)
log.info(f"Current flushing time: {tm :.4f}s.")
return tm


class FinalStat(Stat):
def __init__(self, time):
super().__init__(time)

@@ -269,9 +275,10 @@ class FinalStat(Stat):
def __str__(self):
return f"Processed final operations in {self.time :.4f} seconds"

def broadcast(self, name : str):
def broadcast(self, name: str):
n = f"flushing_{name.lower()}"
return list([ super().broadcast(n), BroadcastObject(n + "_items", '', 'b') ])
return list([super().broadcast(n), BroadcastObject(n + "_items", '', 'b')])


class FinalOperationStatusManager(StatusManager):
# Summary for whole sync

@@ -288,7 +295,7 @@ class FinalOperationStatusManager(StatusManager):
FinalOperationStatusManager.current_finals[name] = FinalStat(time)

@staticmethod
def log_current(label : str):
def log_current(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(FinalOperationStatusManager.current_finals)

@@ -299,6 +306,7 @@ class FinalOperationStatusManager(StatusManager):
def clear():
FinalOperationStatusManager.current_finals.clear()


class WaitStat(Stat):
def __init__(self, time):
super().__init__(time)

@@ -306,6 +314,7 @@ class WaitStat(Stat):
def __str__(self):
return f"Waited {self.time :.4f} seconds"


class WaitingStatusManager(StatusManager):
# Summary for whole sync
global_stats = {}

@@ -323,14 +332,12 @@ class WaitingStatusManager(StatusManager):
@staticmethod
def next_blocks():
WaitingStatusManager.global_stats = StatusManager.merge_dicts(
WaitingStatusManager.global_stats,
WaitingStatusManager.current_waits,
True
WaitingStatusManager.global_stats, WaitingStatusManager.current_waits, True
)
WaitingStatusManager.current_waits.clear()

@staticmethod
def log_global(label : str):
def log_global(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(WaitingStatusManager.global_stats)

@@ -338,18 +345,19 @@ class WaitingStatusManager(StatusManager):
return tm

@staticmethod
def log_current(label : str):
def log_current(label: str):
StatusManager.print_row()
log.info(label)
tm = StatusManager.log_dict(WaitingStatusManager.current_waits)
log.info(f"Current waiting time: {tm :.4f}s.")
return tm

def minmax(collection : dict, blocks : int, time : float, _from : int):
value = blocks/time

def minmax(collection: dict, blocks: int, time: float, _from: int):
value = blocks / time
_to = _from + blocks
PrometheusClient.broadcast(BroadcastObject('block_processing_rate', value, 'bps'))
if len(collection.keys()) == 0:
if len(collection.keys()) == 0:

collection['min'] = value
collection['min_from'] = _from

@@ -371,21 +379,22 @@ def minmax(collection : dict, blocks : int, time : float, _from : int):
collection['max'] = value
collection['max_from'] = _from
collection['max_to'] = _to

return collection


def _normalize_sql(sql, maxlen=180):
"""Collapse whitespace and middle-truncate if needed."""
out = ' '.join(sql.split())
if len(out) > maxlen:
i = int(maxlen / 2 - 4)
out = (out[0:i] +
' ... ' +
out[-i:None])
out = out[0:i] + ' ... ' + out[-i:None]
return out


class StatsAbstract:
"""Tracks service call timings"""

def __init__(self, service):
self._service = service
self.clear()

@@ -424,23 +433,21 @@ class StatsAbstract:
return

total_ms = parent_secs * 1000
log.info("Service: %s -- %ds total (%.1f%%)",
self._service,
round(self._ms / 1000),
100 * (self._ms / total_ms))
log.info(
"Service: %s -- %ds total (%.1f%%)", self._service, round(self._ms / 1000), 100 * (self._ms / total_ms)
)

log.info('%7s %9s %9s %9s', '-pct-', '-ttl-', '-avg-', '-cnt-')
for call, ms, reqs in self.table(40):
try:
avg = ms/reqs
millisec = ms/self._ms
avg = ms / reqs
millisec = ms / self._ms
except ZeroDivisionError as ex:
avg = 0.0
millisec = 0.0
avg = 0.0
millisec = 0.0
if reqs == 0:
reqs = 1
log.info("% 6.1f%% % 7dms % 9.2f % 8dx -- %s",
100 * millisec, ms, avg, reqs, call)
log.info("% 6.1f%% % 7dms % 9.2f % 8dx -- %s", 100 * millisec, ms, avg, reqs, call)
self.clear()


@@ -462,9 +469,9 @@ class SteemStats(StatsAbstract):
'get_order_book': 20,
'get_feed_history': 20,
'lookup_accounts': 1000,
'get_comment_pending_payouts':1000,
'get_ops_in_block':500,
'enum_virtual_ops':1000
'get_comment_pending_payouts': 1000,
'get_ops_in_block': 500,
'enum_virtual_ops': 1000,
}

def __init__(self):

@@ -478,13 +485,13 @@ class SteemStats(StatsAbstract):
par = self.PAR_STEEMD[call]
over = per / par
if over >= self.PAR_THRESHOLD:
out = ("[STEEM][%dms] %s[%d] -- %.1fx par (%d/%d)"
% (ms, call, batch_size, over, per, par))
out = "[STEEM][%dms] %s[%d] -- %.1fx par (%d/%d)" % (ms, call, batch_size, over, per, par)
log.warning(colorize(out))


class DbStats(StatsAbstract):
"""Tracks database query timings."""

SLOW_QUERY_MS = 250
LOGGING_TRESHOLD = 50


@@ -500,8 +507,10 @@ class DbStats(StatsAbstract):
out = "[SQL][%dms] %s" % (ms, call[:250])
log.warning(colorize(out))


class Stats:
"""Container for steemd and db timing data."""

PRINT_THRESH_MINS = 1

COLLECT_DB_STATS = 0

@@ -546,14 +555,20 @@ class Stats:
def report(cls):
"""Emit a timing report for tracked services."""
if not cls._secs:
return # nothing to report
return  # nothing to report
total = perf() - cls._start
non_idle = total - cls._idle
log.info("cumtime %ds (%.1f%% of %ds). %.1f%% idle. peak %dmb.",
cls._secs, 100 * cls._secs / non_idle, non_idle,
100 * cls._idle / total, peak_usage_mb())
log.info(
"cumtime %ds (%.1f%% of %ds). %.1f%% idle. peak %dmb.",
cls._secs,
100 * cls._secs / non_idle,
non_idle,
100 * cls._idle / total,
peak_usage_mb(),
)
if cls._secs > 1:
cls._db.report(cls._secs)
cls._steemd.report(cls._secs)


atexit.register(Stats.report)
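
`minmax` above tracks the best and worst observed processing rate, where the rate is simply blocks divided by elapsed seconds, tagged with the block range it came from. The bookkeeping reduces to this simplified stand-in for the dict-based original:

def update_minmax(collection, blocks, time, _from):
    value = blocks / time          # blocks per second for this batch
    _to = _from + blocks
    if not collection or value < collection['min']:
        collection.update(min=value, min_from=_from, min_to=_to)
    if 'max' not in collection or value > collection['max']:
        collection.update(max=value, max_from=_from, max_to=_to)
    return collection

c = {}
update_minmax(c, 1000, 10.0, 0)     # 100 blocks/s
update_minmax(c, 1000, 4.0, 1000)   # 250 blocks/s: new maximum
assert (c['min'], c['max']) == (100.0, 250.0)
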
@@ -5,12 +5,14 @@ import resource

USE_COLOR = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()


def colorize(string, color='93', force=False):
"""Colorizes a string for stdout, if attached to terminal"""
if not USE_COLOR and not force:
return string
return f"\033[{color}m{string}\033[0m"


def peak_usage_mb():
"""Get peak memory usage of hive process."""
mem_denom = (1024 * 1024) if sys.platform == 'darwin' else 1024
@@ -5,9 +5,10 @@ from hive.utils.normalize import secs_to_str
from functools import wraps

import logging

log = logging.getLogger(__name__)

#timeit decorator for measuring method execution time
# timeit decorator for measuring method execution time
def time_it(method):
@wraps(method)
def time_method(*args, **kwargs):

@@ -15,6 +16,7 @@ def time_it(method):
result = method(*args, **kwargs)
log.info("%s executed in %.4f s", method.__name__, perf() - start_time)
return result

return time_method


@@ -29,7 +31,8 @@ class Timer:
`full_total` - total items to process, outside of
(and including) this invocation. [optional]
"""
#pylint: disable=too-many-instance-attributes

# pylint: disable=too-many-instance-attributes

# Name of entity, lap units (e.g. rps, wps), total items in job
_entity = []

@@ -71,9 +74,7 @@ class Timer:
out = prefix
else:
# " -- post 1 of 10"
out = " -- %s %d of %d" % (self._entity,
self._processed,
self._full_total)
out = " -- %s %d of %d" % (self._entity, self._processed, self._full_total)

# " (3/s, 4rps, 5wps) -- "
rates = []

@@ -97,10 +98,10 @@ class Timer:
def _eta(self):
"""Time to finish, based on most recent batch."""
left = self._full_total - self._processed
secs = (left / self._rate())
secs = left / self._rate()
return secs_to_str(secs)

def _elapsed(self, lap_idx=None):
if not lap_idx:
return self._laps[-1] - self._laps[0]
return self._laps[lap_idx] - self._laps[lap_idx-1]
return self._laps[lap_idx] - self._laps[lap_idx - 1]
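
The `time_it` decorator above wraps a callable, times it with a performance counter, and logs the duration while passing the return value through. Usage is the ordinary decorator form; a standalone re-creation for illustration:

import logging
from functools import wraps
from time import perf_counter as perf

log = logging.getLogger(__name__)

def time_it(method):
    @wraps(method)
    def time_method(*args, **kwargs):
        start_time = perf()
        result = method(*args, **kwargs)
        log.info("%s executed in %.4f s", method.__name__, perf() - start_time)
        return result
    return time_method

@time_it
def count(n):
    return sum(range(n))

assert count(10) == 45   # the wrapped function still returns its result
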
@ -3,15 +3,18 @@ from hive.utils.timer import time_it
|
|||
|
||||
DB = Db.instance()
|
||||
|
||||
|
||||
def update_all_hot_and_trending():
|
||||
"""Calculate and set hot and trending values of all posts"""
|
||||
# NOT USED!!!
|
||||
update_hot_and_trending_for_block_range()
|
||||
|
||||
|
||||
NO_CONSTRAINT = -1
|
||||
|
||||
|
||||
@time_it
|
||||
def update_hot_and_trending_for_block_range( first_block = NO_CONSTRAINT, last_block = NO_CONSTRAINT):
|
||||
def update_hot_and_trending_for_block_range(first_block=NO_CONSTRAINT, last_block=NO_CONSTRAINT):
|
||||
"""Calculate and set hot and trending values of all posts"""
|
||||
# NOT USED!!!
|
||||
hot_and_trend_sql = """
|
||||
|
@@ -30,13 +33,13 @@ def update_hot_and_trending_for_block_range( first_block = NO_CONSTRAINT, last_b

    sql = ""
    if first_block == NO_CONSTRAINT and last_block == NO_CONSTRAINT:
        sql = hot_and_trend_sql.format( "" )
        sql = hot_and_trend_sql.format("")
    elif last_block == NO_CONSTRAINT:
        sql = hot_and_trend_sql.format( f"WHERE block_num >= {first_block}" )
        sql = hot_and_trend_sql.format(f"WHERE block_num >= {first_block}")
    elif first_block == NO_CONSTRAINT:
        sql = hot_and_trend_sql.format( f"WHERE block_num <= {last_block}" )
        sql = hot_and_trend_sql.format(f"WHERE block_num <= {last_block}")
    elif first_block == last_block:
        sql = hot_and_trend_sql.format( f"WHERE block_num = {last_block}" )
        sql = hot_and_trend_sql.format(f"WHERE block_num = {last_block}")
    else:
        sql = hot_and_trend_sql.format( f"WHERE block_num >= {first_block} AND block_num <= {last_block}" )
        sql = hot_and_trend_sql.format(f"WHERE block_num >= {first_block} AND block_num <= {last_block}")
    DB.query_no_return(sql)
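The branching above picks one of five WHERE templates before substituting it into the query; interpolating with f-strings is tolerable here only because both bounds are integers, not user-supplied strings. A condensed sketch of the same selection logic as a standalone helper (the helper name is illustrative, not part of the codebase):

NO_CONSTRAINT = -1


def block_range_where(first_block=NO_CONSTRAINT, last_block=NO_CONSTRAINT):
    """Return the WHERE clause matching the given block-range constraints."""
    if first_block == NO_CONSTRAINT and last_block == NO_CONSTRAINT:
        return ""
    if last_block == NO_CONSTRAINT:
        return f"WHERE block_num >= {first_block}"
    if first_block == NO_CONSTRAINT:
        return f"WHERE block_num <= {last_block}"
    if first_block == last_block:
        return f"WHERE block_num = {last_block}"
    return f"WHERE block_num >= {first_block} AND block_num <= {last_block}"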
@@ -1,6 +1,7 @@
"""Performant FIFO queue which ignores duplicates."""
from math import ceil


class UniqueFIFO:
    """FIFO queue which ignores duplicates and shifts efficiently."""
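Only the module docstring and class header of `UniqueFIFO` are touched here. For orientation, a minimal sketch of what a duplicate-ignoring FIFO looks like (an illustration, not the project's implementation — the real class imports `ceil` and shifts its backing list in chunks rather than using `pop(0)`):

class UniqueFIFO:
    """FIFO queue that silently ignores items already enqueued."""

    def __init__(self):
        self._items = []
        self._seen = set()

    def add(self, item):
        if item not in self._seen:
            self._seen.add(item)
            self._items.append(item)

    def shift(self):
        # O(n) in this naive version; the real class shifts more efficiently
        item = self._items.pop(0)
        self._seen.remove(item)
        return item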
@@ -2,6 +2,7 @@

from json import dumps


def make_benchmark_header():
    return """from requests import post
from json import dumps
@@ -11,6 +12,7 @@ def send_rpc_query(address, data):
    return response_json
"""


def make_benchmark(test_name, address, test_payload):
    return f"""
def test_{test_name}(benchmark):
@@ -21,8 +23,10 @@ def test_{test_name}(benchmark):
    assert error is not None or result is not None, "No error or result in response"
"""


def get_request_from_yaml(path_to_yaml):
    import yaml

    yaml_document = None
    with open(path_to_yaml, "r") as yaml_file:
        yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
@@ -33,10 +37,12 @@ def get_request_from_yaml(path_to_yaml):
            return dumps(json_parameters)
    return None


def make_test_name_from_path(test_path):
    splited = test_path.split("/")
    return ("_".join(splited[-3:])).replace(".", "_").replace("-", "_")


def make_benchmark_test_file(file_name, address, tests_root_dir):
    import os
    from fnmatch import fnmatch
@@ -56,16 +62,14 @@ def make_benchmark_test_file(file_name, address, tests_root_dir):
            benchmarks_file.write(make_benchmark(test_name, address, test_payload))
            benchmarks_file.write("\n")


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("path_to_test_dir", type=str, help = "Path to test directory for given xml file")
    parser.add_argument("path_to_test_dir", type=str, help="Path to test directory for given xml file")
    parser.add_argument("benchmark_test_file_name", type=str, help="Name of the generated test file")
    parser.add_argument("target_ip_address", type=str, help="Address of the hivemind")
    args = parser.parse_args()

    make_benchmark_test_file(args.benchmark_test_file_name, args.target_ip_address, args.path_to_test_dir)
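Given the three positional arguments declared above, the generator is invoked along these lines (the paths and address are illustrative, not taken from the repository):

python benchmark_generator.py tavern_tests/ benchmarks.py 127.0.0.1:8080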
@@ -7,14 +7,16 @@ import os
from sys import exit
from json import dumps, load


def get_request_from_yaml(path_to_yaml):
    """ Extract request parameters from given yaml file
    """Extract request parameters from given yaml file
    Parameters:
      - path_to_yaml - path to yaml file
    Returns:
      - string with request parameters
    """
    import yaml

    yaml_document = None
    with open(path_to_yaml, "r") as yaml_file:
        yaml_document = yaml.load(yaml_file, Loader=yaml.BaseLoader)
@@ -25,8 +27,9 @@ def get_request_from_yaml(path_to_yaml):
            return dumps(json_parameters)
    return ""


def make_class_path_dict(root_dir):
    """ Scan root dir for files with given pattern and construct dictionary
    """Scan root dir for files with given pattern and construct dictionary
    with keys as path with replaced ., -, / characters and values as file path
    Parameters:
      - root_dir - dir to scan for files
@@ -46,8 +49,9 @@ def make_class_path_dict(root_dir):
        ret[test_path.replace(".", "_").replace("-", "_").replace("/", "_")] = test_path
    return ret


def class_to_path(class_name, class_to_path_dic):
    """ Return path to test file basing on class name
    """Return path to test file basing on class name
    Parameters:
      - class_name - test to find,
      - class_to_path_dic - dict with class -> path key/values
@@ -55,11 +59,13 @@ def class_to_path(class_name, class_to_path_dic):
      - path to test file
    """
    from fnmatch import fnmatch

    for c, p in class_to_path_dic.items():
        if fnmatch(c, "*" + class_name):
            return p
    return None


def json_report_parser(path_to_test_dir, json_file, time_threshold=1.0):
    above_treshold = []
    html_file, _ = os.path.splitext(json_file)
@@ -80,30 +86,55 @@ def json_report_parser(path_to_test_dir, json_file, time_threshold=1.0):
        ofile.write(" </head>\n")
        ofile.write(" <body>\n")
        ofile.write(" <table>\n")
        ofile.write(" <tr><th>Test name</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th></tr>\n")
        ofile.write(
            " <tr><th>Test name</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th></tr>\n"
        )
        json_data = None
        with open(json_file, "r") as json_file:
            json_data = load(json_file)
        for benchmark in json_data['benchmarks']:
            if float(benchmark['stats']['mean']) > time_threshold:
                ofile.write(" <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(benchmark['name'], get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)), benchmark['stats']['min'] * 1000, benchmark['stats']['max'] * 1000, benchmark['stats']['mean'] * 1000))
                above_treshold.append((benchmark['name'], f"{benchmark['stats']['mean'] * 1000:.4f}", get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic))))
                ofile.write(
                    " <tr><td>{}<br/>Parameters: {}</td><td>{:.4f}</td><td>{:.4f}</td><td bgcolor=\"red\">{:.4f}</td></tr>\n".format(
                        benchmark['name'],
                        get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)),
                        benchmark['stats']['min'] * 1000,
                        benchmark['stats']['max'] * 1000,
                        benchmark['stats']['mean'] * 1000,
                    )
                )
                above_treshold.append(
                    (
                        benchmark['name'],
                        f"{benchmark['stats']['mean'] * 1000:.4f}",
                        get_request_from_yaml(class_to_path(benchmark['name'][5:], class_to_path_dic)),
                    )
                )
            else:
                ofile.write(f" <tr><td>{benchmark['name']}</td><td>{benchmark['stats']['min'] * 1000:.4f}</td><td>{benchmark['stats']['max'] * 1000:.4f}</td><td>{benchmark['stats']['mean'] * 1000:.4f}</td></tr>\n")
                ofile.write(
                    f" <tr><td>{benchmark['name']}</td><td>{benchmark['stats']['min'] * 1000:.4f}</td><td>{benchmark['stats']['max'] * 1000:.4f}</td><td>{benchmark['stats']['mean'] * 1000:.4f}</td></tr>\n"
                )
        ofile.write(" </table>\n")
        ofile.write(" </body>\n")
        ofile.write("</html>\n")
    return above_treshold


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("path_to_test_dir", type = str, help = "Path to test directory for given json benchmark file")
    parser.add_argument("json_file", type = str, help = "Path to benchmark json file")
    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.")
    parser.add_argument("path_to_test_dir", type=str, help="Path to test directory for given json benchmark file")
    parser.add_argument("json_file", type=str, help="Path to benchmark json file")
    parser.add_argument(
        "--time-threshold",
        dest="time_threshold",
        type=float,
        default=1.0,
        help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.",
    )
    args = parser.parse_args()

    if not json_report_parser(args.path_to_test_dir, args.json_file, args.time_threshold):
        exit(1)
    exit(0)
@@ -8,6 +8,7 @@ from json import load, dump
from benchmark_generator import make_benchmark_test_file
from json_report_parser import json_report_parser


def get_test_directories(tests_root_dir):
    ret = []
    for name in os.listdir(tests_root_dir):
@@ -16,14 +17,17 @@ def get_test_directories(tests_root_dir):
            ret.append(dir_path)
    return ret


def find_data_in_benchmarks(name, json_data):
    for benchmark in json_data['benchmarks']:
        if benchmark['name'] == name:
            return (benchmark['stats']['min'], benchmark['stats']['max'], benchmark['stats']['mean'])
    return (None, None, None)


def join_benchmark_data(file_name, json_files):
    from statistics import mean

    jsons = []
    for json_file in json_files:
        with open(json_file, "r") as src:
@@ -47,15 +51,23 @@ def join_benchmark_data(file_name, json_files):
    with open(f"{file_name}.json", "w") as out:
        dump(jsons[0], out)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()

    parser.add_argument("hivemind_address", type=str, help="Address of hivemind instance")
    parser.add_argument("hivemind_port", type=int, help="Port of hivemind instance")
    parser.add_argument("tests_root_dir", type=str, help="Path to tests root dir")
    parser.add_argument("--benchmark-runs", type=int, default=3, help="How many benchmark runs")
    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.")
    parser.add_argument(
        "--time-threshold",
        dest="time_threshold",
        type=float,
        default=1.0,
        help="Time threshold for test execution time, tests with execution time greater than threshold will be marked on red.",
    )
    args = parser.parse_args()

    assert os.path.exists(args.tests_root_dir), "Directory does not exist"
@@ -76,11 +88,11 @@ if __name__ == "__main__":
            name, ext = os.path.splitext(benchmark_file)
            json_file_name = f"{name}-{run:03d}.json"
            cmd = [
                "pytest",
                "--benchmark-max-time=0.000001",
                "--benchmark-min-rounds=10",
                f"--benchmark-json={json_file_name}",
                benchmark_file
                "pytest",
                "--benchmark-max-time=0.000001",
                "--benchmark-min-rounds=10",
                f"--benchmark-json={json_file_name}",
                benchmark_file,
            ]
            if name in benchmark_json_files:
                benchmark_json_files[name].append(json_file_name)
|
|||
json_file_name = "benchmark_" + test_directory.split("/")[-1] + ".json"
|
||||
ret = json_report_parser(test_directory, json_file_name, args.time_threshold)
|
||||
if ret:
|
||||
failed.extend(ret)
|
||||
failed.extend(ret)
|
||||
|
||||
if failed:
|
||||
from prettytable import PrettyTable
|
||||
|
||||
summary = PrettyTable()
|
||||
print(f"########## Test failed with following tests above {args.time_threshold * 1000}ms threshold ##########")
|
||||
summary.field_names = ['Test name', 'Mean time [ms]', 'Call parameters']
|
||||
|
@@ -111,4 +124,3 @@ if __name__ == "__main__":
        print(summary)
        exit(2)
    exit(0)
@@ -88,7 +88,7 @@ def setup_env(current_runner_id, hive_sync_runner_id, infile, outfile, end, **kw
            if key.startswith('postgres'):
                if key == 'postgres_host':
                    runner[key] = hive_sync_runner['host']
                if key == 'postgres_port': # to be eliminated when CI will be only at psql12
                if key == 'postgres_port':  # to be eliminated when CI will be only at psql12
                    runner[key] = 25432
                else:
                    runner[key] = hive_sync_runner[key]
@@ -96,69 +96,50 @@ def setup_env(current_runner_id, hive_sync_runner_id, infile, outfile, end, **kw
            runner[key] = value

    for key in runner:
        if key == 'postgres_host': # to be eliminated when CI will be only at psql12
        if key == 'postgres_host':  # to be eliminated when CI will be only at psql12
            runner[key] = 'localhost'
        if key == 'postgres_port': # to be eliminated when CI will be only at psql12
        if key == 'postgres_port':  # to be eliminated when CI will be only at psql12
            runner[key] = 25432

        output(
            f'export RUNNER_{key.upper()}="{str(runner[key])}"',
            outfile,
            end,
            )
        )

    for key in data['common']:
        output(
            f"export RUNNER_{key.upper()}=\"{str(data['common'][key])}\"",
            outfile,
            end,
            )
        )


def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        'infile',
        type=argparse.FileType('r'),
        nargs='?',
        default=sys.stdin,
        help='Input file or pipe via STDIN'
    )
        'infile', type=argparse.FileType('r'), nargs='?', default=sys.stdin, help='Input file or pipe via STDIN'
    )
    parser.add_argument(
        '-o', '--outfile',
        type=argparse.FileType('w'),
        default=sys.stdout,
        help='Output file, STDOUT if not set'
    )
        '-o', '--outfile', type=argparse.FileType('w'), default=sys.stdout, help='Output file, STDOUT if not set'
    )
    parser.add_argument("-e", "--end", dest='end', default='\n', help='String at the end of line in output')
    parser.add_argument(
        "-e", "--end",
        dest='end',
        default='\n',
        help='String at the end of line in output'
    )
    parser.add_argument(
        "-s", "--hive-sync-runner-id",
        "-s",
        "--hive-sync-runner-id",
        required=True,
        type=int,
        help='Id of runner which did hive sync, 0 when current runner does hive sync actually'
    )
    parser.add_argument(
        "-c", "--current-runner-id",
        required=True,
        type=int,
        help='Id of current runner'
    )
        help='Id of runner which did hive sync, 0 when current runner does hive sync actually',
    )
    parser.add_argument("-c", "--current-runner-id", required=True, type=int, help='Id of current runner')
    parser.add_argument(
        '--log-level',
        default='INFO',
        dest='log_level',
        choices=['debug', 'info', 'warning', 'error'],
        help='Log level (string)',
        )
    )

    result = parser.parse_args()
@@ -5,11 +5,14 @@ import csv
from time import perf_counter
import requests


def process_file_name(file_name, tavern_tests_dir):
    return file_name.replace(tavern_tests_dir, "").lstrip("/")


def abs_rel_diff(a, b):
    return abs((a - b) / float(b)) * 100.
    return abs((a - b) / float(b)) * 100.0


def parse_csv_files(root_dir):
    ret_times = {}
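`abs_rel_diff` computes the absolute relative difference as a percentage, abs((a - b) / b) * 100; the change above only normalizes the float literal (`100.` and `100.0` are the same value). For example:

abs_rel_diff(1.2, 1.0)  # ≈ 20.0 (percent), up to float rounding
abs_rel_diff(0.8, 1.0)  # ≈ 20.0 as well; the sign is discarded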
@@ -25,13 +28,13 @@ def parse_csv_files(root_dir):
                test_time = float(row[1])
                test_response_size = float(row[2])
                ret_benchmark_request_params[test_name] = json.loads(row[4])

                test_benchmark_time_threshold = None
                try:
                    test_benchmark_time_threshold = float(row[3])
                except:
                    pass

                if test_name in ret_times:
                    ret_times[test_name].append(test_time)
                else:
|
|||
ret_benchmark_time_threshold[test_name] = test_benchmark_time_threshold
|
||||
return ret_times, ret_sizes, ret_benchmark_time_threshold, ret_benchmark_request_params
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import argparse
|
||||
from statistics import mean, median
|
||||
|
@@ -55,8 +59,20 @@ if __name__ == "__main__":
    parser.add_argument("port", type=int)
    parser.add_argument("csv_report_dir", type=str, help="Path to benchmark csv reports")
    parser.add_argument("tavern_tests_dir", type=str, help="Path to tavern tests dir")
    parser.add_argument("--median-cutoff-time", dest="cutoff_time", type=float, default=0.3, help="Tests with median time (in seconds) below cutoff will not be shown")
    parser.add_argument("--time-threshold", dest="time_threshold", type=float, default=1.0, help="Time (in seconds) threshold for test execution time, tests with execution time greater than threshold will be marked on red.")
    parser.add_argument(
        "--median-cutoff-time",
        dest="cutoff_time",
        type=float,
        default=0.3,
        help="Tests with median time (in seconds) below cutoff will not be shown",
    )
    parser.add_argument(
        "--time-threshold",
        dest="time_threshold",
        type=float,
        default=1.0,
        help="Time (in seconds) threshold for test execution time, tests with execution time greater than threshold will be marked on red.",
    )
    args = parser.parse_args()

    assert os.path.exists(args.csv_report_dir), "Please provide valid csv report path"
@@ -81,21 +97,33 @@ if __name__ == "__main__":
        ofile.write(" padding: 15px;\n")
        ofile.write(" }\n")
        ofile.write(" </style>\n")
        ofile.write(" <link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.datatables.net/1.10.22/css/jquery.dataTables.css\">\n")
        ofile.write(" <script src=\"https://code.jquery.com/jquery-3.5.1.js\" integrity=\"sha256-QWo7LDvxbWT2tbbQ97B53yJnYU3WhH/C8ycbRAkjPDc=\" crossorigin=\"anonymous\"></script>\n")
        ofile.write(" <script type=\"text/javascript\" charset=\"utf8\" src=\"https://cdn.datatables.net/1.10.22/js/jquery.dataTables.js\"></script>\n")
        ofile.write(
            " <link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.datatables.net/1.10.22/css/jquery.dataTables.css\">\n"
        )
        ofile.write(
            " <script src=\"https://code.jquery.com/jquery-3.5.1.js\" integrity=\"sha256-QWo7LDvxbWT2tbbQ97B53yJnYU3WhH/C8ycbRAkjPDc=\" crossorigin=\"anonymous\"></script>\n"
        )
        ofile.write(
            " <script type=\"text/javascript\" charset=\"utf8\" src=\"https://cdn.datatables.net/1.10.22/js/jquery.dataTables.js\"></script>\n"
        )
        ofile.write(" <script type=\"text/javascript\" charset=\"utf8\">\n")
        ofile.write(" $(document).ready( function () {\n")
        ofile.write(" $('#benchmarks').DataTable({\"aLengthMenu\": [[10, 25, 50, 100, 1000, 10000, -1], [10, 25, 50, 100, 1000, 10000, \"All\"]]});\n")
        ofile.write(
            " $('#benchmarks').DataTable({\"aLengthMenu\": [[10, 25, 50, 100, 1000, 10000, -1], [10, 25, 50, 100, 1000, 10000, \"All\"]]});\n"
        )
        ofile.write(" } );\n")
        ofile.write(" </script>\n")
        ofile.write(" <script src=\"https://polyfill.io/v3/polyfill.min.js?features=es6\"></script>\n")
        ofile.write(" <script id=\"MathJax-script\" async src=\"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js\"></script>\n")
        ofile.write(
            " <script id=\"MathJax-script\" async src=\"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js\"></script>\n"
        )
        ofile.write(" </head>\n")
        ofile.write(" <body>\n")
        ofile.write(" <table id=\"benchmarks\">\n")
        ofile.write(" <thead>\n")
        ofile.write(" <tr><th>Test name</th><th>Response mean size [kB]</th><th>Response ref size [kB]</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th><th>Median time [ms]</th><th>Reference (pure requests call) [ms]</th><th>\[ {\\vert} {T_{mean} - T_{ref} \over T_{ref}} {\lvert} \cdot 100 \] [%]</th><th>\[ {\\vert} {T_{median} - T_{ref} \over T_{ref}} {\lvert} \cdot 100 \] [%]</th></tr>\n")
        ofile.write(
            " <tr><th>Test name</th><th>Response mean size [kB]</th><th>Response ref size [kB]</th><th>Min time [ms]</th><th>Max time [ms]</th><th>Mean time [ms]</th><th>Median time [ms]</th><th>Reference (pure requests call) [ms]</th><th>\[ {\\vert} {T_{mean} - T_{ref} \over T_{ref}} {\lvert} \cdot 100 \] [%]</th><th>\[ {\\vert} {T_{median} - T_{ref} \over T_{ref}} {\lvert} \cdot 100 \] [%]</th></tr>\n"
        )
        ofile.write(" </thead>\n")
        ofile.write(" <tbody>\n")
        for name, data in report_data.items():
@@ -110,24 +138,31 @@ if __name__ == "__main__":
            req_data_benchmark_time_threshold = report_data_time_threshold.get(name, None)
            print(f"Sending {req_data} for reference time measurement")
            ret = requests.post(f"{args.address}:{args.port}", req_data)
            ref_time = 0.
            ref_time = 0.0
            if ret.status_code == 200:
                ref_time = perf_counter() - t_start
                print(f"Got response in {ref_time:.4f}s")
            ref_size = int(ret.headers.get("Content-Length", 0))
            if (req_data_benchmark_time_threshold is None and dmean > args.time_threshold) or (req_data_benchmark_time_threshold is not None and dmean > req_data_benchmark_time_threshold):
                ofile.write(f" <tr><td>{name}<br/>Parameters: {req_data}</td><td>{dmean_size / 1000.0:.1f}</td><td>{ref_size / 1000.0:.1f}</td><td>{dmin * 1000:.4f}</td><td>{dmax * 1000:.4f}</td><td bgcolor=\"red\">{dmean * 1000:.4f}</td><td>{dmedian * 1000:.4f}</td><td>{ref_time * 1000:.4f}</td><td>{abs_rel_diff(dmean, ref_time):.4f}</td><td>{abs_rel_diff(dmedian, ref_time):.4f}</td></tr>\n")
            if (req_data_benchmark_time_threshold is None and dmean > args.time_threshold) or (
                req_data_benchmark_time_threshold is not None and dmean > req_data_benchmark_time_threshold
            ):
                ofile.write(
                    f" <tr><td>{name}<br/>Parameters: {req_data}</td><td>{dmean_size / 1000.0:.1f}</td><td>{ref_size / 1000.0:.1f}</td><td>{dmin * 1000:.4f}</td><td>{dmax * 1000:.4f}</td><td bgcolor=\"red\">{dmean * 1000:.4f}</td><td>{dmedian * 1000:.4f}</td><td>{ref_time * 1000:.4f}</td><td>{abs_rel_diff(dmean, ref_time):.4f}</td><td>{abs_rel_diff(dmedian, ref_time):.4f}</td></tr>\n"
                )
                above_treshold.append((name, f"{dmean:.4f}"))
            else:
                ofile.write(f" <tr><td>{name}</td><td>{dmean_size / 1000.0:.1f}</td><td>{ref_size / 1000.0:.1f}</td><td>{dmin * 1000:.4f}</td><td>{dmax * 1000:.4f}</td><td>{dmean * 1000:.4f}</td><td>{dmedian * 1000:.4f}</td><td>{ref_time * 1000:.4f}</td><td>{abs_rel_diff(dmean, ref_time):.4f}</td><td>{abs_rel_diff(dmedian, ref_time):.4f}</td></tr>\n")
                ofile.write(
                    f" <tr><td>{name}</td><td>{dmean_size / 1000.0:.1f}</td><td>{ref_size / 1000.0:.1f}</td><td>{dmin * 1000:.4f}</td><td>{dmax * 1000:.4f}</td><td>{dmean * 1000:.4f}</td><td>{dmedian * 1000:.4f}</td><td>{ref_time * 1000:.4f}</td><td>{abs_rel_diff(dmean, ref_time):.4f}</td><td>{abs_rel_diff(dmedian, ref_time):.4f}</td></tr>\n"
                )
        ofile.write(" </tbody>\n")
        ofile.write(" </table>\n")
        ofile.write(" </body>\n")
        ofile.write("</html>\n")

    if report_data_time_threshold:
        print("Tests with defined custom benchmark time threshold")
        from prettytable import PrettyTable

        summary = PrettyTable()
        summary.field_names = ['Test name', 'Custom time value [s]']
        for name, threshold in report_data_time_threshold.items():
@@ -136,6 +171,7 @@ if __name__ == "__main__":

    if above_treshold:
        from prettytable import PrettyTable

        summary = PrettyTable()
        print(f"########## Test failed with following tests above {args.time_threshold}s threshold ##########")
        summary.field_names = ['Test name', 'Mean time [s]']
@@ -143,5 +179,5 @@ if __name__ == "__main__":
        summary.add_row(entry)
        print(summary)
        # Temp. disable until time measuring problems will be finally solved.
        #exit(2)
        # exit(2)
    exit(0)
@@ -17,6 +17,7 @@ from hive.steem.client import SteemClient

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()

    parser.add_argument("hived_url", type=str, help="Url address of hived instance")
@@ -28,7 +29,7 @@ if __name__ == "__main__":

    args = parser.parse_args()

    client = SteemClient({"default":args.hived_url})
    client = SteemClient({"default": args.hived_url})
    from_block = args.from_block

    def breaker():
@@ -47,7 +48,9 @@ if __name__ == "__main__":
        block_num = int(block['block_id'][:8], base=16)
        block_data = dict(block)
        for idx in range(len(block_data['transactions'])):
            block_data['transactions'][idx]['operations'] = [op for op in block_data['transactions'][idx]['operations'] if op['type'] in args.operations]
            block_data['transactions'][idx]['operations'] = [
                op for op in block_data['transactions'][idx]['operations'] if op['type'] in args.operations
            ]
            if args.dump_ops_only and block_data['transactions'][idx]['operations']:
                output_file.write(f"{dumps(block_data['transactions'][idx]['operations'])}\n")
            if not args.dump_ops_only:
35 setup.py
@@ -17,13 +17,17 @@ site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
VERSION = 'notag'
GIT_REVISION = 'nogitrev'
GIT_DATE = 'nogitdate'


class GitRevisionProvider(object):
    """ Static class to provide version and git revision information"""
    """Static class to provide version and git revision information"""

    logger = logging.getLogger('GitRevisionProvider')

    @classmethod
    def is_git_sha(cls, s):
        from re import fullmatch

        return fullmatch('^g[0-9a-f]{8}$', s) is not None

    @classmethod
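`is_git_sha` matches the `g`-prefixed abbreviated hash that `git describe --tags --long` appends to its output. A sketch of how such a string decomposes (the helper is illustrative, not the project's code, and it ignores the extra `-dirty` suffix that `--dirty` can add):

def split_describe(version_string):
    """Split e.g. 'v1.24.0-14-g1a2b3c4d' into (tag, commits_since_tag, sha)."""
    tag, count, sha = version_string.rsplit('-', 2)
    return tag, int(count), sha


print(split_describe('v1.24.0-14-g1a2b3c4d'))  # ('v1.24.0', 14, 'g1a2b3c4d')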
@@ -52,11 +56,11 @@ class GitRevisionProvider(object):

    @classmethod
    def provide_git_revision(cls):
        """ Evaluate version and git revision and save it to a version file
        Evaluation is based on VERSION variable and git describe if
        .git directory is present in tree.
        In case when .git is not available version and git_revision is taken
        from get_distribution call
        """Evaluate version and git revision and save it to a version file
        Evaluation is based on VERSION variable and git describe if
        .git directory is present in tree.
        In case when .git is not available version and git_revision is taken
        from get_distribution call

        """
        version = str(VERSION)
@@ -64,6 +68,7 @@ class GitRevisionProvider(object):
        git_date = str(GIT_DATE)
        if os.path.exists(".git"):
            from subprocess import check_output

            command = 'git describe --tags --long --dirty'
            version_string = check_output(command.split()).decode('utf-8').strip()
            if version_string != 'fatal: No names found, cannot describe anything.':
@@ -100,6 +105,7 @@ class GitRevisionProvider(object):
            git_date = cls.get_git_date(git_revision)
        else:
            from pkg_resources import get_distribution

            try:
                version, git_revision = get_distribution("hivemind").version.split("+")
            except:
@@ -109,7 +115,7 @@ class GitRevisionProvider(object):

    @classmethod
    def _save_version_file(cls, hivemind_version, git_revision, git_date):
        """ Helper method to save version.py with current version and git_revision """
        """Helper method to save version.py with current version and git_revision"""
        with open("hive/version.py", 'w') as version_file:
            version_file.write("# generated by setup.py\n")
            version_file.write("# contents will be overwritten\n")
@@ -117,23 +123,27 @@ class GitRevisionProvider(object):
            version_file.write(f"GIT_REVISION = '{git_revision}'\n")
            version_file.write(f"GIT_DATE = '{git_date}'\n")


VERSION, GIT_REVISION = GitRevisionProvider.provide_git_revision()
SQL_SCRIPTS_PATH = 'hive/db/sql_scripts/'
SQL_UPGRADE_PATH = 'hive/db/sql_scripts/upgrade/'


def get_sql_scripts(dir, base_dir):
    from os import listdir
    from os.path import isfile, join, relpath

    if base_dir is None:
        return [join(dir, f) for f in listdir(dir) if isfile(join(dir, f))]
    else:
        return [relpath(join(dir, f), base_dir) for f in listdir(dir) if isfile(join(dir, f))]


if __name__ == "__main__":

    sql_scripts = get_sql_scripts(SQL_SCRIPTS_PATH, "hive/db/")
    sql_upgrade_scripts = get_sql_scripts(SQL_UPGRADE_PATH, "hive/db/")


    print(f'Found {len(sql_scripts)} SQL scripts to be installed.')
    print(f'Found {len(sql_upgrade_scripts)} upgrade SQL scripts to be installed.')
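`get_sql_scripts` lists the files of one directory, either as-is or relative to `base_dir`; the relative form matches what setuptools expects for package data rooted at `hive/db`. Assuming a file `hive/db/sql_scripts/example.sql` exists (the file name is illustrative):

get_sql_scripts('hive/db/sql_scripts/', 'hive/db/')  # ['sql_scripts/example.sql', ...]
get_sql_scripts('hive/db/sql_scripts/', None)        # ['hive/db/sql_scripts/example.sql', ...]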
@@ -149,7 +159,6 @@ if __name__ == "__main__":
    for p in found_packages:
        print(f"Found Python package: {p}")


    setup(
        name='hivemind',
        version=VERSION + "+" + GIT_REVISION,
@@ -157,9 +166,7 @@ if __name__ == "__main__":
        long_description=open('README.md').read(),
        packages=found_packages,
        package_data=package_resources,
        setup_requires=[
            'pytest-runner'
        ],
        setup_requires=['pytest-runner'],
        install_requires=[
            'aiopg==1.2.1',
            'jsonrpcserver==4.2.0',
@@ -181,7 +188,7 @@ if __name__ == "__main__":
            'psutil==5.8.0',
            'atomic==0.7.3',
            'python-dateutil==2.8.1',
            'regex==2021.4.4'
            'regex==2021.4.4',
        ],
        extras_require={
            'dev': [
@@ -194,5 +201,5 @@ if __name__ == "__main__":
            'console_scripts': [
                'hive=hive.cli:run',
            ]
        }
        },
    )
@@ -7,14 +7,10 @@ if __name__ == '__main__':
    test_hive_node_url = 'http://127.0.0.1:8080'

    payload = {
        "jsonrpc":"2.0",
        "method":"database_api.list_comments",
        "params" : {
            "start" : ['steemit', '1970-01-01T00:00:00', '', ''],
            "limit" : 10,
            "order" : 'by_author_last_update'
        },
        "id":1
        "jsonrpc": "2.0",
        "method": "database_api.list_comments",
        "params": {"start": ['steemit', '1970-01-01T00:00:00', '', ''], "limit": 10, "order": 'by_author_last_update'},
        "id": 1,
    }

    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'last_update'])
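This and the following payload hunks all show the same two Black rules: spacing around `:` in dict literals is normalized, and a nested dict that fits within the line-length limit is collapsed onto one line while the outer dict gains a trailing comma and stays exploded. Schematically:

# Before: hand-formatted, one key per line at every level
payload = {
    "params" : {
        "start" : ['steemit', 'firstpost'],
        "limit" : 10
    },
    "id":1
}

# After Black: normalized spacing, inner dict collapsed, trailing comma added
payload = {
    "params": {"start": ['steemit', 'firstpost'], "limit": 10},
    "id": 1,
}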
@@ -6,14 +6,15 @@ if __name__ == '__main__':
    test_hive_node_url = 'http://127.0.0.1:8080'

    payload = {
        "jsonrpc":"2.0",
        "method":"database_api.list_comments",
        "params" : {
            "start" : ['1970-01-01T00:00:00', '', ''],
            "limit" : 10,
            "order" : 'by_cashout_time'
        },
        "id":1
        "jsonrpc": "2.0",
        "method": "database_api.list_comments",
        "params": {"start": ['1970-01-01T00:00:00', '', ''], "limit": 10, "order": 'by_cashout_time'},
        "id": 1,
    }

    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'parent_author', 'parent_permlink', 'created'])
    run_test(
        reference_hive_node_url,
        test_hive_node_url,
        payload,
        ['author', 'permlink', 'parent_author', 'parent_permlink', 'created'],
    )
@@ -6,14 +6,15 @@ if __name__ == '__main__':
    test_hive_node_url = 'http://127.0.0.1:8080'

    payload = {
        "jsonrpc":"2.0",
        "method":"database_api.list_comments",
        "params" : {
            "start" : ['steemit', 'firstpost', '', ''],
            "limit" : 10,
            "order" : 'by_parent'
        },
        "id":1
        "jsonrpc": "2.0",
        "method": "database_api.list_comments",
        "params": {"start": ['steemit', 'firstpost', '', ''], "limit": 10, "order": 'by_parent'},
        "id": 1,
    }

    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'parent_author', 'parent_permlink', 'created'])
    run_test(
        reference_hive_node_url,
        test_hive_node_url,
        payload,
        ['author', 'permlink', 'parent_author', 'parent_permlink', 'created'],
    )
@@ -6,14 +6,10 @@ if __name__ == '__main__':
    test_hive_node_url = 'http://127.0.0.1:8080'

    payload = {
        "jsonrpc" : "2.0",
        "method" : "database_api.list_comments",
        "params" : {
            "start" : ['steemit', 'firstpost'],
            "limit" : 10,
            "order" : 'by_permlink'
        },
        "id" : 1
        "jsonrpc": "2.0",
        "method": "database_api.list_comments",
        "params": {"start": ['steemit', 'firstpost'], "limit": 10, "order": 'by_permlink'},
        "id": 1,
    }

    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink'])
@@ -6,15 +6,15 @@ if __name__ == '__main__':
    test_hive_node_url = 'http://127.0.0.1:8080'

    payload = {
        "jsonrpc" : "2.0",
        "method" : "database_api.list_comments",
        "params" : {
            "start" : ['steemit', 'firstpost', '', ''],
            "limit" : 10,
            "order" : 'by_root'
        },
        "id":1
        "jsonrpc": "2.0",
        "method": "database_api.list_comments",
        "params": {"start": ['steemit', 'firstpost', '', ''], "limit": 10, "order": 'by_root'},
        "id": 1,
    }

    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'root_author', 'root_permlink', 'created'])

    run_test(
        reference_hive_node_url,
        test_hive_node_url,
        payload,
        ['author', 'permlink', 'root_author', 'root_permlink', 'created'],
    )
@@ -6,14 +6,10 @@ if __name__ == '__main__':
    test_hive_node_url = 'http://127.0.0.1:8080'

    payload = {
        "jsonrpc":"2.0",
        "method":"database_api.list_comments",
        "params" : {
            "start" : ['steemit', '1970-01-01T00:00:00', '', ''],
            "limit" : 10,
            "order" : 'by_last_update'
        },
        "id":1
        "jsonrpc": "2.0",
        "method": "database_api.list_comments",
        "params": {"start": ['steemit', '1970-01-01T00:00:00', '', ''], "limit": 10, "order": 'by_last_update'},
        "id": 1,
    }

    run_test(reference_hive_node_url, test_hive_node_url, payload, ['author', 'permlink', 'last_update'])
@@ -7,7 +7,7 @@ def run_test(reference_node_url, test_node_url, payload, table_keys):
    resp = post(reference_node_url, dumps(payload))

    json = resp.json()
    #print(json)
    # print(json)
    table = prettytable.PrettyTable()
    table.field_names = table_keys
    for row in json['result']['comments']:
@@ -18,7 +18,7 @@ def run_test(reference_node_url, test_node_url, payload, table_keys):
    resp = post(test_node_url, dumps(payload))

    json = resp.json()
    #print(json)
    # print(json)
    table = prettytable.PrettyTable()
    table.field_names = table_keys
    for row in json['result']:
@@ -1,9 +1,10 @@
#pylint: disable=missing-docstring
# pylint: disable=missing-docstring
import pytest
from hive.server.condenser_api.get_state import get_state
from hive.server.condenser_api.tags import get_trending_tags
from hive.server.condenser_api.call import call


@pytest.mark.asyncio
async def test_get_state():
    ret = await get_state('/trending')
@@ -35,17 +36,16 @@ async def test_get_state():
    with pytest.raises(Exception):
        await get_state('witnesses')


@pytest.mark.asyncio
async def test_call():
    assert await call('condenser_api',
                      'get_followers',
                      ['xeroc', '', 'blog', 10])
    assert await call('condenser_api',
                      'get_discussions_by_blog',
                      [{"tag": "xeroc",
                        "start_author": "",
                        "start_permlink": "",
                        "limit": 10}])
    assert await call('condenser_api', 'get_followers', ['xeroc', '', 'blog', 10])
    assert await call(
        'condenser_api',
        'get_discussions_by_blog',
        [{"tag": "xeroc", "start_author": "", "start_permlink": "", "limit": 10}],
    )


@pytest.mark.asyncio
async def test_get_trending_tags():
@@ -1,4 +1,4 @@
#pylint: disable=missing-docstring,invalid-name
# pylint: disable=missing-docstring,invalid-name
import pytest

from hive.server.condenser_api.methods import (
@@ -17,24 +17,29 @@ from hive.server.condenser_api.methods import (
    get_replies_by_last_update,
)


@pytest.mark.asyncio
async def test_get_followers():
    assert await get_followers('xeroc', '', 'blog', 10)


@pytest.mark.asyncio
async def test_get_following():
    assert await get_following('xeroc', '', 'blog', 10)


@pytest.mark.asyncio
async def test_get_follow_count():
    assert await get_follow_count('xeroc')


@pytest.mark.asyncio
async def test_get_content():
    post = await get_content('xeroc', 'python-steem-0-1')
    assert post
    assert post['author'] == 'xeroc'


@pytest.mark.asyncio
async def test_get_content_replies():
    replies = await get_content_replies('xeroc', 'python-steem-0-1')
@@ -42,6 +47,7 @@ async def test_get_content_replies():
    assert len(replies) > 0
    assert 'puppies' in [r['author'] for r in replies]


@pytest.mark.asyncio
async def test_nested_query_compat():
    params = dict(start_author='', start_permlink='', limit=10, tag='life', truncate_body=0)
@@ -50,48 +56,42 @@ async def test_nested_query_compat():
    ret2 = await get_discussions_by_trending(*arg1)
    assert ret1 == ret2


@pytest.mark.asyncio
async def test_get_discussions_by_trending():
    assert await get_discussions_by_trending(
        start_author='', start_permlink='', limit=20, tag='', truncate_body=0)
    assert await get_discussions_by_trending(start_author='', start_permlink='', limit=20, tag='', truncate_body=0)


@pytest.mark.asyncio
async def test_get_discussions_by_hot():
    assert await get_discussions_by_hot(
        start_author='', start_permlink='', limit=20, tag='', truncate_body=0)
    assert await get_discussions_by_hot(start_author='', start_permlink='', limit=20, tag='', truncate_body=0)


@pytest.mark.asyncio
async def test_get_discussions_by_promoted():
    assert await get_discussions_by_promoted(
        start_author='', start_permlink='', limit=20, tag='', truncate_body=0)
    assert await get_discussions_by_promoted(start_author='', start_permlink='', limit=20, tag='', truncate_body=0)


@pytest.mark.asyncio
async def test_get_discussions_by_created():
    assert await get_discussions_by_created(
        start_author='', start_permlink='', limit=20, tag='', truncate_body=0)
    assert await get_discussions_by_created(start_author='', start_permlink='', limit=20, tag='', truncate_body=0)


@pytest.mark.asyncio
async def test_get_discussions_by_blog():
    assert await get_discussions_by_blog(
        tag='xeroc', start_author='', start_permlink='', limit=20, truncate_body=0)
    assert await get_discussions_by_blog(tag='xeroc', start_author='', start_permlink='', limit=20, truncate_body=0)


@pytest.mark.asyncio
async def test_get_discussions_by_feed():
    assert await get_discussions_by_feed(
        tag='xeroc', start_author='', start_permlink='', limit=20, truncate_body=0)
    assert await get_discussions_by_feed(tag='xeroc', start_author='', start_permlink='', limit=20, truncate_body=0)


@pytest.mark.asyncio
async def test_get_discussions_by_comments():
    assert await get_discussions_by_comments(
        start_author='xeroc',
        start_permlink='',
        limit=20,
        truncate_body=0)
    assert await get_discussions_by_comments(start_author='xeroc', start_permlink='', limit=20, truncate_body=0)


@pytest.mark.asyncio
async def test_get_replies_by_last_update():
    assert await get_replies_by_last_update(
        start_author='xeroc',
        start_permlink='',
        limit=20,
        truncate_body=0)
    assert await get_replies_by_last_update(start_author='xeroc', start_permlink='', limit=20, truncate_body=0)
@@ -3,59 +3,74 @@ from hive.server.database_api.methods import list_comments
from hive.steem.client import SteemClient
from hive.conf import Conf


@pytest.fixture
def client():
    return SteemClient(url='https://api.hive.blog')


@pytest.mark.asyncio
async def test_list_comments_by_cashout_time(client):
    with Conf() as conf:
        reference_data = await client.list_comments({"start":["1970-01-01T00:00:00","steemit","firstpost"],"limit":10,"order":"by_cashout_time"})
        test_data = await list_comments({'db' : conf.db()}, ["1970-01-01T00:00:00","steemit","firstpost"],10,"by_cashout_time")
        reference_data = await client.list_comments(
            {"start": ["1970-01-01T00:00:00", "steemit", "firstpost"], "limit": 10, "order": "by_cashout_time"}
        )
        test_data = await list_comments(
            {'db': conf.db()}, ["1970-01-01T00:00:00", "steemit", "firstpost"], 10, "by_cashout_time"
        )
        assert reference_data
        assert test_data
        assert len(reference_data) == len(test_data)
        to_compare = ['author','permlink']
        to_compare = ['author', 'permlink']
        for idx in range(len(reference_data)):
            for key in to_compare:
                assert reference_data[idx][key] == test_data[idx][key]
            assert reference_data[idx]['cashout_time'] == test_data[idx]['payout_at']


@pytest.mark.asyncio
async def test_list_comments_by_permlink(client):
    with Conf() as conf:
        reference_data = await client.list_comments({"start":["steemit","firstpost"],"limit":10,"order":"by_permlink"})
        test_data = await list_comments({'db' : conf.db()}, ["steemit","firstpost"],10,"by_permlink")
        reference_data = await client.list_comments(
            {"start": ["steemit", "firstpost"], "limit": 10, "order": "by_permlink"}
        )
        test_data = await list_comments({'db': conf.db()}, ["steemit", "firstpost"], 10, "by_permlink")
        assert reference_data
        assert test_data
        assert len(reference_data) == len(test_data)
        to_compare = ['author','permlink']
        to_compare = ['author', 'permlink']
        for idx in range(len(reference_data)):
            for key in to_compare:
                assert reference_data[idx][key] == test_data[idx][key]
                assert reference_data[idx][key] == test_data[idx][key]


@pytest.mark.asyncio
async def test_list_comments_by_root(client):
    with Conf() as conf:
        reference_data = await client.list_comments({"start":["steemit","firstpost","",""],"limit":10,"order":"by_root"})
        test_data = await list_comments({'db' : conf.db()}, ["steemit","firstpost","",""],10,"by_root")
        reference_data = await client.list_comments(
            {"start": ["steemit", "firstpost", "", ""], "limit": 10, "order": "by_root"}
        )
        test_data = await list_comments({'db': conf.db()}, ["steemit", "firstpost", "", ""], 10, "by_root")
        assert reference_data
        assert test_data
        assert len(reference_data) == len(test_data)
        to_compare = ['author','permlink','root_author','root_permlink']
        to_compare = ['author', 'permlink', 'root_author', 'root_permlink']
        for idx in range(len(reference_data)):
            for key in to_compare:
                assert reference_data[idx][key] == test_data[idx][key]


@pytest.mark.asyncio
async def test_list_comments_by_parent(client):
    with Conf() as conf:
        reference_data = await client.list_comments({"start":["steemit","firstpost","",""],"limit":10,"order":"by_parent"})
        test_data = await list_comments({'db' : conf.db()}, ["steemit","firstpost","",""],10,"by_parent")
        reference_data = await client.list_comments(
            {"start": ["steemit", "firstpost", "", ""], "limit": 10, "order": "by_parent"}
        )
        test_data = await list_comments({'db': conf.db()}, ["steemit", "firstpost", "", ""], 10, "by_parent")
        assert reference_data
        assert test_data
        assert len(reference_data) == len(test_data)
        to_compare = ['author','permlink','parent_author','parent_permlink']
        to_compare = ['author', 'permlink', 'parent_author', 'parent_permlink']
        for idx in range(len(reference_data)):
            for key in to_compare:
                assert reference_data[idx][key] == test_data[idx][key]
@@ -1,23 +1,27 @@
#pylint: disable=missing-docstring
#pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
import datetime
import pytest

from hive.utils.normalize import parse_time
from hive.steem.client import SteemClient


@pytest.fixture
def client():
    return SteemClient(url='https://api.hive.blog')


def test_instance(client):
    assert isinstance(client, SteemClient)


def test_get_accounts(client):
    accounts = client.get_accounts(['steemit', 'test-safari'])
    assert len(accounts) == 2
    assert accounts[0]['name'] == 'steemit'


def test_get_content_batch(client):
    tuples = [('test-safari', 'may-spam'), ('test-safari', 'june-spam')]
    posts = client.get_content_batch(tuples)
@@ -25,10 +29,12 @@ def test_get_content_batch(client):
    assert posts[0]['author'] == 'test-safari'
    assert posts[1]['author'] == 'test-safari'


def test_get_block(client):
    block = client.get_block(23494494)
    assert block['block_id'] == '01667f5e194c421aa00eb02270d3219a5d9bf339'


def test_stream_blocks(client):
    start_at = client.last_irreversible()
    stop_at = client.head_block() + 2
@@ -41,7 +47,9 @@ def test_stream_blocks(client):
        pass

    with pytest.raises(KeyboardInterrupt):
        for block in client.stream_blocks(start_at, trail_blocks=0, max_gap=100, breaker=breaker, exception_reporter = exception_report):
        for block in client.stream_blocks(
            start_at, trail_blocks=0, max_gap=100, breaker=breaker, exception_reporter=exception_report
        ):
            num = block.get_num()
            assert num == start_at + streamed
            streamed += 1
@@ -50,25 +58,31 @@ def test_stream_blocks(client):
    assert streamed >= 20
    assert num >= stop_at


def test_head_time(client):
    head = parse_time(client.head_time())
    assert head > datetime.datetime.now() - datetime.timedelta(minutes=15)


def test_head_block(client):
    assert client.head_block() > 23e6


def test_last_irreversible(client):
    assert client.last_irreversible() > 23e6


def test_gdgp_extended(client):
    ret = client.gdgp_extended()
    assert 'dgpo' in ret
    assert 'head_block_number' in ret['dgpo']
    assert 'usd_per_steem' in ret


def test_get_blocks_range(client):
    def breaker():
        return True

    lbound = 23000000
    blocks = client.get_blocks_range(lbound, lbound + 5, breaker)
    assert len(blocks) == 5
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-

#pylint: disable=unused-import,unused-variable,wildcard-import,missing-docstring
# pylint: disable=unused-import,unused-variable,wildcard-import,missing-docstring

from hive import *
from hive.db import *
@@ -10,5 +10,6 @@ from hive.server import *
from hive.indexer import *
from hive.community import *


def test_import():
    pass
@@ -1,8 +1,9 @@
#pylint: disable=missing-docstring,line-too-long
# pylint: disable=missing-docstring,line-too-long
import json

from hive.utils.account import safe_profile_metadata


def test_valid_account():
    raw_profile = dict(
        name='Leonardo Da Vinci',
@@ -19,6 +20,7 @@ def test_valid_account():
    for key, safe_value in safe_profile.items():
        assert raw_profile[key] == safe_value


def test_invalid_account():
    raw_profile = dict(
        name='NameIsTooBigByOneChar',
@@ -30,13 +32,16 @@ def test_invalid_account():
    ignore_prof = dict(
        name='Ignore me -- missing version:2!',
    )
    account = {'name': 'foo', 'json_metadata': json.dumps(dict(profile=raw_profile)),
               'posting_json_metadata': json.dumps(dict(profile=ignore_prof))}
    account = {
        'name': 'foo',
        'json_metadata': json.dumps(dict(profile=raw_profile)),
        'posting_json_metadata': json.dumps(dict(profile=ignore_prof)),
    }

    safe_profile = safe_profile_metadata(account)
    assert safe_profile['name'] == 'NameIsTooBigByOne...'
    assert safe_profile['about'] == ''
    assert safe_profile['location'] == ''
    assert safe_profile['website'] == 'http://davincilife.com/' # TODO: should normalize to https?
    assert safe_profile['website'] == 'http://davincilife.com/'  # TODO: should normalize to https?
    assert safe_profile['cover_image'] == ''
    assert safe_profile['profile_image'] == ''
@@ -1,4 +1,4 @@
#pylint: disable=missing-docstring
# pylint: disable=missing-docstring
import pytest

from datetime import datetime
@@ -24,43 +24,54 @@ from hive.utils.normalize import (
    int_log_level,
)


def test_secs_to_str():
    assert secs_to_str(0) == '00s'
    assert secs_to_str(8979) == '02h 29m 39s'
    assert secs_to_str(12345678) == '20w 02d 21h 21m 18s'


def test_block_num():
    block = dict(block_id='013c33f88c643c92a7352b52efde7237f4d4ee0b')
    assert block_num(block) == 20722680


def test_block_date():
    block = dict(timestamp='2018-03-16T10:08:42')
    assert block_date(block) == datetime(2018, 3, 16, 10, 8, 42)


def test_vests_amount():
    assert vests_amount('4.549292 VESTS') == Decimal('4.549292')


def test_steem_amount():
    assert steem_amount('1.234567 HIVE') == Decimal('1.234567')


def test_sbd_amount():
    assert sbd_amount('1.001 HBD') == Decimal('1.001')


def test_parse_amount():
    nai = [1231121, 6, '@@000000037']
    assert parse_amount(nai, 'VESTS') == Decimal('1.231121')


def test_amount():
    assert amount('3.432 FOO') == Decimal('3.432')


def test_legacy_amount():
    nai = [1231121, 6, '@@000000037']
    assert legacy_amount(nai) == '1.231121 VESTS'


def test_parse_time():
    block_time = '2018-06-22T20:34:30'
    assert parse_time(block_time) == datetime(2018, 6, 22, 20, 34, 30)


def test_utc_timestamp():
    assert utc_timestamp(parse_time('1970-01-01T00:00:00')) == 0
    assert utc_timestamp(parse_time('1970-01-01T00:00:01')) == 1
@@ -70,26 +81,31 @@ def test_utc_timestamp():
    timestamp = utc_timestamp(date)
    assert timestamp == 1529699670


def test_load_json_key():
    obj = {'profile':'{"foo":"bar"}'}
    obj = {'profile': '{"foo":"bar"}'}
    loaded = load_json_key(obj, 'profile')
    assert loaded
    print(loaded, "===============SSSSSSSSSSS")
    assert loaded['foo'] == 'bar'


def test_trunc():
    assert trunc('string too long', 5) == 'st...'


def test_rep_log10():
    assert rep_log10(0) == 25
    assert rep_log10('2321387987213') == 55.29


def test_safe_img_url():
    url = 'https://example.com/a.jpg'
    max_size = len(url) + 1
    assert safe_img_url(url, max_size) == url
    assert safe_img_url(url + 'x', max_size) is None


def test_strtobool():
    assert strtobool('t') == True
    assert strtobool('T') == True
@@ -107,6 +123,7 @@ def test_strtobool():
    with pytest.raises(ValueError):
        strtobool('foo')


def test_int_log_level():
    assert int_log_level('debug') == 10
    assert int_log_level('DEBUG') == 10
@@ -1,4 +1,4 @@
#pylint: disable=missing-docstring,line-too-long
# pylint: disable=missing-docstring,line-too-long
from decimal import Decimal

from hive.utils.post import (
@@ -15,7 +15,7 @@ POST_1 = {
            "rshares": 1506388632,
            "time": "2017-06-20T15:53:51",
            "voter": "test-safari",
            "weight": 0
            "weight": 0,
        },
        {
            "percent": 200,
@@ -23,7 +23,7 @@ POST_1 = {
            "rshares": 110837437,
            "time": "2017-06-20T16:24:09",
            "voter": "darth-cryptic",
            "weight": 846
            "weight": 846,
        },
        {
            "percent": 10000,
@@ -31,7 +31,7 @@ POST_1 = {
            "rshares": 621340000,
            "time": "2017-06-20T15:55:15",
            "voter": "test25",
            "weight": 273
            "weight": 273,
        },
        {
            "percent": 10000,
@@ -39,8 +39,8 @@ POST_1 = {
            "rshares": 493299375,
            "time": "2017-06-20T15:54:54",
            "voter": "mysqlthrashmetal",
            "weight": 263
        }
            "weight": 263,
        },
    ],
    "allow_curation_rewards": True,
    "allow_replies": True,
@@ -83,7 +83,7 @@ POST_1 = {
    "total_pending_payout_value": "0.000 HIVE",
    "total_vote_weight": 0,
    "url": "/spam/@test-safari/june-spam",
    "vote_rshares": 0
    "vote_rshares": 0,
}

POST_2 = {
@@ -131,9 +131,10 @@ POST_2 = {
    "total_pending_payout_value": "0.000 HIVE",
    "total_vote_weight": 0,
    "url": "/spam/@test-safari/june-spam",
    "vote_rshares": 0
    "vote_rshares": 0,
}


def test_mentions():
    # pylint: disable=invalid-name
    m = mentions
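Across the whole diff, reformatted lines run up to roughly 120 characters and single-quoted strings are left alone, which points to Black running with a raised line length and string normalization disabled. A plausible invocation reproducing that behavior (the exact settings are inferred from the diff, not quoted from the repository):

black --line-length 120 --skip-string-normalization .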