steem -> hive rebranding v1.0.0

This commit is contained in:
Bartek Wrona 2020-06-24 01:27:43 +02:00
parent 81d2ee9b2f
commit 2074917f07
924 changed files with 95073 additions and 92555 deletions

14
.gitignore vendored
View File

@ -38,7 +38,7 @@ build_version.cc
libraries/appbase/examples/appbase_example
libraries/chainbase/test/chainbase_test
libraries/protocol/include/steem/protocol/hardfork.hpp
libraries/protocol/include/hive/protocol/hardfork.hpp
libraries/fc/git_revision.cpp
libraries/fc/tests/all_tests
libraries/fc/tests/api
@ -70,8 +70,8 @@ libraries/wallet/Doxyfile
libraries/wallet/api_documentation.cpp
libraries/wallet/doxygen
programs/build_helpers/steem_build_helpers/__pycache__/
programs/util/dump_steem_schema
programs/build_helpers/hive_build_helpers/__pycache__/
programs/util/dump_hive_schema
programs/util/schema_test
programs/util/serialize_set_properties
programs/util/sign_digest
@ -82,8 +82,8 @@ programs/util/test_shared_mem
programs/util/test_sqrt
programs/cli_wallet/cli_wallet
programs/js_operation_serializer/js_operation_serializer
programs/steemd/steemd
programs/steemd/test
programs/hived/hived
programs/hived/test
programs/delayed_node
programs/build_helpers/cat-parts
programs/size_checker/size_checker
@ -105,3 +105,7 @@ build_*/
/.vs
/out/isenseconfig/Linux-Release
# various log and benchmark files
advanced_benchmark.json
r_advanced_benchmark.json
*.log

View File

@ -3,11 +3,13 @@ stages:
before_script:
- docker info
- docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY_IMAGE
- echo $CI_REPOSITORY_URL
- echo $CI_PROJECT_DIR
- mkdir -p $CI_PROJECT_DIR/$CI_COMMIT_REF_NAME
- echo "Working for branch" $CI_COMMIT_REF_NAME " and environment " $CI_ENVIRONMENT_SLUG
- rm -rf $CI_BUILDS_DIR/hive/$CI_COMMIT_REF_NAME
- git clone https://$CI_DEPLOY_USER:$CI_DEPLOY_PASSWORD@gitlab.syncad.com/hive/hive.git $CI_BUILDS_DIR/hive/$CI_COMMIT_REF_NAME
- git clone $CI_REPOSITORY_URL $CI_BUILDS_DIR/hive/$CI_COMMIT_REF_NAME
- cd $CI_BUILDS_DIR/hive/$CI_COMMIT_REF_NAME
- git checkout $CI_COMMIT_REF_NAME
- git status
@ -41,12 +43,13 @@ staging-build:
cache: {}
tags:
- public-runner
- secure-runner
allow_failure: false
only:
- 0.23.0
- 0.24.0
- develop
# environment:
# name: staging
# url: http://192.168.6.144:8888/v1/chain/get_info
@ -57,6 +60,7 @@ development-build:
variables:
LC_ALL: "C"
DOCKER_BUILDKIT: 1
DOCKER_CONFIG: $CI_PROJECT_DIR/$CI_COMMIT_REF_NAME
GIT_STRATEGY: none
GIT_SUBMODULE_STRATEGY: recursive
@ -76,13 +80,14 @@ development-build:
cache: {}
tags:
- public-runner
- secure-runner
allow_failure: false
when: manual
except:
- 0.23.0
- 0.24.0
- develop
# environment:
# name: development
# url: http://192.168.6.144:8888/v1/chain/get_info

View File

@ -27,18 +27,18 @@ RUN \
libbz2-dev \
liblz4-dev \
libzstd-dev && \
rm -rf /usr/local/src/steem
rm -rf /usr/local/src/hive
ADD . /usr/local/src/steem
ADD . /usr/local/src/hive
RUN \
cd /usr/local/src/steem && \
cd /usr/local/src/hive && \
mkdir build && \
cd build && \
cmake \
-DCMAKE_BUILD_TYPE=Release \
-DENABLE_STD_ALLOCATOR_SUPPORT=ON \
-DBUILD_STEEM_TESTNET=ON \
-DBUILD_HIVE_TESTNET=ON \
-DLOW_MEMORY_NODE=OFF \
-DCLEAR_VOTES=ON \
-DSKIP_BY_TX_ID=ON \

View File

@ -1,11 +1,11 @@
# Defines Steem library target.
project( Steem )
# Defines Hive library target.
project( Hive )
cmake_minimum_required( VERSION 3.2 )
set( BLOCKCHAIN_NAME "Steem" )
set( BLOCKCHAIN_NAME "Hive" )
set( CMAKE_CXX_STANDARD 14 )
set( GUI_CLIENT_EXECUTABLE_NAME Steem )
set( GUI_CLIENT_EXECUTABLE_NAME Hive )
set( CUSTOM_URL_SCHEME "gcs" )
set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" )
@ -50,9 +50,9 @@ LIST(APPEND BOOST_COMPONENTS thread
coroutine)
SET( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" )
OPTION( BUILD_STEEM_TESTNET "Build source for test network (ON OR OFF)" OFF )
MESSAGE( STATUS "BUILD_STEEM_TESTNET: ${BUILD_STEEM_TESTNET}" )
if( BUILD_STEEM_TESTNET )
OPTION( BUILD_HIVE_TESTNET "Build source for test network (ON OR OFF)" OFF )
MESSAGE( STATUS "BUILD_HIVE_TESTNET: ${BUILD_HIVE_TESTNET}" )
if( BUILD_HIVE_TESTNET )
MESSAGE( STATUS " " )
MESSAGE( STATUS " CONFIGURING FOR TEST NET " )
MESSAGE( STATUS " " )
@ -66,8 +66,8 @@ if( ENABLE_SMT_SUPPORT )
MESSAGE( STATUS " " )
MESSAGE( STATUS " CONFIGURING FOR SMT SUPPORT " )
MESSAGE( STATUS " " )
SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTEEM_ENABLE_SMT" )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSTEEM_ENABLE_SMT" )
SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DHIVE_ENABLE_SMT" )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DHIVE_ENABLE_SMT" )
endif()
OPTION( ENABLE_MIRA "Build source with MIRA (ON OR OFF)" OFF )
@ -90,6 +90,22 @@ if( LOW_MEMORY_NODE )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DIS_LOW_MEM" )
endif()
OPTION( SUPPORT_COMMENT_CONTENT "Build source with enabled comment content support (ON OR OFF)" OFF )
MESSAGE( STATUS "SUPPORT_COMMENT_CONTENT: ${SUPPORT_COMMENT_CONTENT}" )
IF (SUPPORT_COMMENT_CONTENT)
if( LOW_MEMORY_NODE )
MESSAGE( WARNING "Ignoring SUPPORT_COMMENT_CONTENT setting due to enabled LOW_MEMORY_NODE")
ELSE()
MESSAGE( STATUS " " )
MESSAGE( STATUS " CONFIGURING FOR COMMENT_CONTENT_SUPPORT " )
MESSAGE( STATUS " " )
SET( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSTORE_COMMENT_CONTENT" )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSTORE_COMMENT_CONTENT" )
endif()
ELSE()
MESSAGE( STATUS " CONFIGURING to skip COMMENT_CONTENT objects " )
ENDIF()
OPTION( CHAINBASE_CHECK_LOCKING "Check locks in chainbase (ON or OFF)" ON )
MESSAGE( STATUS "CHAINBASE_CHECK_LOCKING: ${CHAINBASE_CHECK_LOCKING}" )
if( CHAINBASE_CHECK_LOCKING )
@ -111,14 +127,14 @@ if( SKIP_BY_TX_ID )
SET( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSKIP_BY_TX_ID" )
endif()
OPTION( STEEM_STATIC_BUILD "Build steemd as a static library (ON or OFF)" OFF )
if( STEEM_STATIC_BUILD AND ( ( MSVC AND NOT MINGW ) OR APPLE ) )
OPTION( HIVE_STATIC_BUILD "Build hived as a static library (ON or OFF)" OFF )
if( HIVE_STATIC_BUILD AND ( ( MSVC AND NOT MINGW ) OR APPLE ) )
MESSAGE( STATUS "Static build is not available on Windows or OS X" )
SET( STEEM_STATIC_BUILD OFF )
SET( HIVE_STATIC_BUILD OFF )
endif()
MESSAGE( STATUS "STEEM_STATIC_BUILD: ${STEEM_STATIC_BUILD}" )
MESSAGE( STATUS "HIVE_STATIC_BUILD: ${HIVE_STATIC_BUILD}" )
SET( STEEM_LINT_LEVEL "OFF" CACHE STRING "Lint level during steem build (FULL, HIGH, LOW, OFF)" )
SET( HIVE_LINT_LEVEL "OFF" CACHE STRING "Lint level during Hive build (FULL, HIGH, LOW, OFF)" )
find_program(
CLANG_TIDY_EXE
NAMES "clang-tidy"
@ -131,13 +147,13 @@ elseif( VERSION LESS 3.6 )
message( STATUS "clang-tidy found but only supported with CMake version >= 3.6" )
else()
message( STATUS "clang-tidy found: ${CLANG_TIDY_EXE}" )
if( "${STEEM_LINT_LEVEL}" STREQUAL "FULL" )
if( "${HIVE_LINT_LEVEL}" STREQUAL "FULL" )
message( STATUS "Linting level set to: FULL" )
set( DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks='*'" )
elseif( "${STEEM_LINT_LEVEL}" STREQUAL "HIGH" )
elseif( "${HIVE_LINT_LEVEL}" STREQUAL "HIGH" )
message( STATUS "Linting level set to: HIGH" )
set( DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks='boost-use-to-string,clang-analyzer-*,cppcoreguidelines-*,llvm-*,misc-*,performance-*,readability-*'" )
elseif( "${STEEM_LINT_LEVEL}" STREQUAL "LOW" )
elseif( "${HIVE_LINT_LEVEL}" STREQUAL "LOW" )
message( STATUS "Linting level set to: LOW" )
set( DO_CLANG_TIDY "${CLANG_TIDY_EXE}" "-checks='clang-analyzer-*'" )
else()
@ -161,7 +177,7 @@ endif()
if( WIN32 )
message( STATUS "Configuring Steem on WIN32")
message( STATUS "Configuring Hive on WIN32")
set( DB_VERSION 60 )
set( BDB_STATIC_LIBS 1 )
@ -215,11 +231,11 @@ else( WIN32 ) # Apple AND Linux
if( APPLE )
# Apple Specific Options Here
message( STATUS "Configuring Steem on OS X" )
message( STATUS "Configuring Hive on OS X" )
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -stdlib=libc++ -Wall -Wno-conversion -Wno-deprecated-declarations" )
else( APPLE )
# Linux Specific Options Here
message( STATUS "Configuring Steem on Linux" )
message( STATUS "Configuring Hive on Linux" )
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -Wall" )
set( rt_library rt )
set( pthread_library pthread)
@ -251,7 +267,7 @@ else( WIN32 ) # Apple AND Linux
endif( WIN32 )
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build Steem for code coverage analysis")
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build Hive for code coverage analysis")
if(ENABLE_COVERAGE_TESTING)
SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}")
@ -261,7 +277,7 @@ endif()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing -Werror -DBOOST_THREAD_DONT_PROVIDE_PROMISE_LAZY" )
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing -Werror -DBOOST_THREAD_DONT_PROVIDE_PROMISE_LAZY" )
# external_plugins needs to be compiled first because libraries/app depends on STEEM_EXTERNAL_PLUGINS being fully populated
# external_plugins needs to be compiled first because libraries/app depends on HIVE_EXTERNAL_PLUGINS being fully populated
add_subdirectory( external_plugins )
add_subdirectory( libraries )
add_subdirectory( programs )
@ -281,24 +297,24 @@ set(CPACK_OUTPUT_FILE_PREFIX ${CMAKE_BINARY_DIR}/packages)
set(CMAKE_INSTALL_PREFIX ${CMAKE_BINARY_DIR}/install)
SET(CPACK_PACKAGE_DIRECTORY "${CMAKE_INSTALL_PREFIX}")
set(CPACK_PACKAGE_NAME "steem")
set(CPACK_PACKAGE_NAME "hive")
set(CPACK_PACKAGE_VENDOR "Steemit, Inc.")
set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}")
set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}")
set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}")
set(CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}")
set(CPACK_PACKAGE_DESCRIPTION "A client for the Steem network")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the Steem network")
set(CPACK_PACKAGE_DESCRIPTION "A client for the Hive network")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the Hive network")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.md")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "Steem ${CPACK_PACKAGE_VERSION}")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "Hive ${CPACK_PACKAGE_VERSION}")
if(WIN32)
SET(CPACK_GENERATOR "ZIP;NSIS")
set(CPACK_PACKAGE_NAME "Steem") # override above
set(CPACK_PACKAGE_NAME "Hive") # override above
set(CPACK_NSIS_EXECUTABLES_DIRECTORY .)
set(CPACK_NSIS_PACKAGE_NAME "Steem v${CPACK_PACKAGE_VERSION}")
set(CPACK_NSIS_PACKAGE_NAME "Hive v${CPACK_PACKAGE_VERSION}")
set(CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}")
set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"Steem\\\"")
set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"Hive\\\"")
# it seems like windows zip files usually don't have a single directory inside them, unix tgz frequently do
SET(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0)
@ -317,10 +333,10 @@ endif(LINUX)
include(CPack)
endif(ENABLE_INSTALLER)
if( BUILD_STEEM_TESTNET )
if( BUILD_HIVE_TESTNET )
MESSAGE( STATUS "\n\n CONFIGURED FOR TEST NETWORK \n\n" )
else()
MESSAGE( STATUS "\n\n CONFIGURED FOR STEEM NETWORK \n\n" )
MESSAGE( STATUS "\n\n CONFIGURED FOR HIVE NETWORK \n\n" )
endif()
if( ENABLE_SMT_SUPPORT )
@ -341,4 +357,14 @@ else()
MESSAGE( STATUS "\n\n CONFIGURED FOR FULL NODE \n\n" )
endif()
set(INDENT_WIDTH 2 CACHE STRING "Determine how many spaces should be used for formatting (by default 2)")
find_program(PYTHON_3_6 "python3.6")
# How to launch?
# cmake --build . --target format
if(PYTHON_3_6)
add_custom_target(format COMMAND ${PYTHON_3_6} ${CMAKE_SOURCE_DIR}/format.py ${INDENT_WIDTH} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
MESSAGE( STATUS "\n\n FORMAT IS AVAILABLE \n\n" )
else(PYTHON_3_6)
MESSAGE( STATUS "\n\n FORMAT IS NOT AVAILABLE \n\n" )
endif( PYTHON_3_6 )

View File

@ -2,7 +2,7 @@
ARG LOW_MEMORY_NODE=ON
ARG CLEAR_VOTES=ON
ARG BUILD_STEEM_TESTNET=OFF
ARG BUILD_HIVE_TESTNET=OFF
ARG ENABLE_MIRA=OFF
FROM registry.gitlab.syncad.com/hive/hive/hive-baseenv:latest AS builder
@ -91,17 +91,17 @@ FROM builder AS general_node_builder
ARG LOW_MEMORY_NODE
ARG CLEAR_VOTES
ARG BUILD_STEEM_TESTNET
ARG BUILD_HIVE_TESTNET
ARG ENABLE_MIRA
ENV LOW_MEMORY_NODE=${LOW_MEMORY_NODE}
ENV CLEAR_VOTES=${CLEAR_VOTES}
ENV BUILD_STEEM_TESTNET=${BUILD_STEEM_TESTNET}
ENV BUILD_HIVE_TESTNET=${BUILD_HIVE_TESTNET}
ENV ENABLE_MIRA=${ENABLE_MIRA}
RUN \
cd ${src_dir} && \
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_STEEM_TESTNET} ${ENABLE_MIRA}
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_HIVE_TESTNET} ${ENABLE_MIRA}
###################################################################################################
## GENERAL NODE CONFIGURATION ##
@ -140,13 +140,20 @@ ARG ENABLE_MIRA=OFF
ENV LOW_MEMORY_NODE=${LOW_MEMORY_NODE}
ENV CLEAR_VOTES=${CLEAR_VOTES}
ENV BUILD_STEEM_TESTNET="ON"
ENV BUILD_HIVE_TESTNET="ON"
ENV ENABLE_MIRA=${ENABLE_MIRA}
RUN \
cd ${src_dir} && \
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_STEEM_TESTNET} ${ENABLE_MIRA} && \
cd build/tests && \
./chain_test && \
./plugin_test
${src_dir}/ciscripts/build.sh ${LOW_MEMORY_NODE} ${CLEAR_VOTES} ${BUILD_HIVE_TESTNET} ${ENABLE_MIRA} && \
apt-get update && \
apt-get install -y screen && \
pip3 install -U secp256k1prp && \
git clone https://gitlab.syncad.com/hive/beem.git && \
cd beem && \
git checkout dk-update-proposal-operation && \
python3 setup.py build && \
python3 setup.py install --user && \
cd ${src_dir} && \
${src_dir}/ciscripts/run_regressions.sh

View File

@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "Steem"
PROJECT_NAME = "Hive"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
@ -1980,7 +1980,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = STEEM_ENABLE_SMT
PREDEFINED = HIVE_ENABLE_SMT
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The

View File

@ -78,7 +78,7 @@ ca. 14GB of memory, and growing, is required:
## CLI Wallet
We provide a basic cli wallet for interfacing with `steemd`. The wallet is self-documented via command line help. The node you connect to via the cli wallet needs to be running the `account_by_key_api`, `condenser_api`, and needs to be configured to accept WebSocket connections via `webserver-ws-endpoint`.
We provide a basic cli wallet for interfacing with `hived`. The wallet is self-documented via command line help. The node you connect to via the cli wallet needs to be running the `account_by_key_api`, `condenser_api`, and needs to be configured to accept WebSocket connections via `webserver-ws-endpoint`.
## Testing
@ -89,7 +89,7 @@ on how to use lcov to check code test coverage.
## Config File
Run `steemd` once to generate a data directory and config file. The default location is `witness_node_data_dir`. Kill `steemd`. It won't do anything without seed nodes. If you want to modify the config to your liking, we have two example configs used in the docker images. ( [consensus node](contrib/config-for-docker.ini), [full node](contrib/fullnode.config.ini) ) All options will be present in the default config file and there may be more options needing to be changed from the docker configs (some of the options actually used in images are configured via command line).
Run `hived` once to generate a data directory and config file. The default location is `witness_node_data_dir`. Kill `hived`. It won't do anything without seed nodes. If you want to modify the config to your liking, we have two example configs used in the docker images. ( [consensus node](contrib/config-for-docker.ini), [full node](contrib/fullnode.config.ini) ) All options will be present in the default config file and there may be more options needing to be changed from the docker configs (some of the options actually used in images are configured via command line).
## Seed Nodes
@ -97,27 +97,27 @@ A list of some seed nodes to get you started can be found in
[doc/seednodes.txt](doc/seednodes.txt).
This same file is baked into the docker images and can be overridden by
setting `STEEMD_SEED_NODES` in the container environment at `docker run`
setting `HIVED_SEED_NODES` in the container environment at `docker run`
time to a whitespace delimited list of seed nodes (with port).
## Environment variables
There are quite a few environment variables that can be set to run steemd in different ways:
There are quite a few environment variables that can be set to run hived in different ways:
* `USE_WAY_TOO_MUCH_RAM` - if set to true, steemd starts a 'full node'
* `USE_WAY_TOO_MUCH_RAM` - if set to true, hived starts a 'full node'
* `USE_FULL_WEB_NODE` - if set to true, a default config file will be used that enables a full set of API's and associated plugins.
* `USE_NGINX_FRONTEND` - if set to true, this will enable an NGINX reverse proxy in front of steemd that proxies WebSocket requests to steemd. This will also enable a custom healthcheck at the path '/health' that lists how many seconds away from current blockchain time your node is. It will return a '200' if it's less than 60 seconds away from being synced.
* `USE_MULTICORE_READONLY` - if set to true, this will enable steemd in multiple reader mode to take advantage of multiple cores (if available). Read requests are handled by the read-only nodes and write requests are forwarded back to the single 'writer' node automatically. NGINX load balances all requests to the reader nodes, 4 per available core. This setting is still considered experimental and may have trouble with some API calls until further development is completed.
* `HOME` - set this to the path where you want steemd to store it's data files (block log, shared memory, config file, etc). By default `/var/lib/steemd` is used and exists inside the docker container. If you want to use a different mount point (like a ramdisk, or a different drive) then you may want to set this variable to map the volume to your docker container.
* `USE_NGINX_FRONTEND` - if set to true, this will enable an NGINX reverse proxy in front of hived that proxies WebSocket requests to hived. This will also enable a custom healthcheck at the path '/health' that lists how many seconds away from current blockchain time your node is. It will return a '200' if it's less than 60 seconds away from being synced.
* `USE_MULTICORE_READONLY` - if set to true, this will enable hived in multiple reader mode to take advantage of multiple cores (if available). Read requests are handled by the read-only nodes and write requests are forwarded back to the single 'writer' node automatically. NGINX load balances all requests to the reader nodes, 4 per available core. This setting is still considered experimental and may have trouble with some API calls until further development is completed.
* `HOME` - set this to the path where you want hived to store its data files (block log, shared memory, config file, etc). By default `/var/lib/hived` is used and exists inside the docker container. If you want to use a different mount point (like a ramdisk, or a different drive) then you may want to set this variable to map the volume to your docker container.
## PaaS mode
Hived now supports a PaaS mode (platform as a service) that currently works with Amazon's Elastic Beanstalk service. It can be launched using the following environment variables:
* `USE_PAAS` - if set to true, steemd will launch in a format that works with AWS EB. Containers will exit upon failure so that they can be relaunched automatically by ECS. This mode assumes `USE_WAY_TOO_MUCH_RAM` and `USE_FULL_WEB_NODE`, they do not need to be also set.
* `S3_BUCKET` - set this to the name of the S3 bucket where you will store shared memory files for steemd in Amazon S3. They will be stored compressed in bz2 format with the file name `blockchain-$VERSION-latest.tar.bz2`, where $VERSION is the release number followed by the git short commit hash stored in each docker image at `/etc/steemdversion`.
* `SYNC_TO_S3` - if set to true, the node will function to only generate shared memory files and upload them to the specified S3 bucket. This makes fast deployments and autoscaling for steemd possible.
* `USE_PAAS` - if set to true, hived will launch in a format that works with AWS EB. Containers will exit upon failure so that they can be relaunched automatically by ECS. This mode assumes `USE_WAY_TOO_MUCH_RAM` and `USE_FULL_WEB_NODE`, they do not need to be also set.
* `S3_BUCKET` - set this to the name of the S3 bucket where you will store shared memory files for hived in Amazon S3. They will be stored compressed in bz2 format with the file name `blockchain-$VERSION-latest.tar.bz2`, where $VERSION is the release number followed by the git short commit hash stored in each docker image at `/etc/hivedversion`.
* `SYNC_TO_S3` - if set to true, the node will function to only generate shared memory files and upload them to the specified S3 bucket. This makes fast deployments and autoscaling for hived possible.
## System Requirements

View File

@ -11,4 +11,4 @@ dependencies:
test:
override:
- time docker build --rm=false -t steemitinc/steem-test -f Dockerfile.test .
- time docker build --rm=false -t steemitinc/hive-test -f Dockerfile.test .

View File

@ -3,13 +3,13 @@ set -e
LOW_MEMORY_NODE=$1
CLEAR_VOTES=$2
BUILD_STEEM_TESTNET=$3
BUILD_HIVE_TESTNET=$3
ENABLE_MIRA=$4
echo "PWD=${PWD}"
echo "LOW_MEMORY_NODE=${LOW_MEMORY_NODE}"
echo "CLEAR_VOTES=${CLEAR_VOTES}"
echo "BUILD_STEEM_TESTNET=${BUILD_STEEM_TESTNET}"
echo "BUILD_HIVE_TESTNET=${BUILD_HIVE_TESTNET}"
echo "ENABLE_MIRA=${ENABLE_MIRA}"
BUILD_DIR="${PWD}/build"
@ -25,15 +25,15 @@ cmake \
-DLOW_MEMORY_NODE=${LOW_MEMORY_NODE} \
-DCLEAR_VOTES=${CLEAR_VOTES} \
-DSKIP_BY_TX_ID=OFF \
-DBUILD_STEEM_TESTNET=${BUILD_STEEM_TESTNET} \
-DBUILD_HIVE_TESTNET=${BUILD_HIVE_TESTNET} \
-DENABLE_MIRA=${ENABLE_MIRA} \
-DSTEEM_STATIC_BUILD=ON \
-DHIVE_STATIC_BUILD=ON \
..
make -j$(nproc)
make install
cd ..
( "${BUILD_DIR}/install-root"/bin/steemd --version \
( "${BUILD_DIR}/install-root"/bin/hived --version \
| grep -o '[0-9]*\.[0-9]*\.[0-9]*' \
&& echo '_' \
&& git rev-parse --short HEAD ) \

View File

@ -1,5 +1,5 @@
#!/bin/bash
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/steem/statuses/$(git rev-parse HEAD) -d "{
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/hive/statuses/$(git rev-parse HEAD) -d "{
\"state\": \"failure\",
\"target_url\": \"${BUILD_URL}\",
\"description\": \"JenkinsCI reports the build has failed!\",

View File

@ -1,5 +1,5 @@
#!/bin/bash
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/steem/statuses/$(git rev-parse HEAD) -d "{
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/hive/statuses/$(git rev-parse HEAD) -d "{
\"state\": \"pending\",
\"target_url\": \"${BUILD_URL}\",
\"description\": \"The build is now pending in jenkinsci!\",

View File

@ -1,8 +1,8 @@
#!/bin/bash
set -e
export IMAGE_NAME="steemit/steem:$BRANCH_NAME"
if [[ $IMAGE_NAME == "steemit/steem:stable" ]] ; then
IMAGE_NAME="steemit/steem:latest"
export IMAGE_NAME="steemit/hive:$BRANCH_NAME"
if [[ $IMAGE_NAME == "steemit/hive:stable" ]] ; then
IMAGE_NAME="steemit/hive:latest"
fi
sudo docker build --build-arg CI_BUILD=1 --build-arg BUILD_STEP=2 -t=$IMAGE_NAME .
sudo docker login --username=$DOCKER_USER --password=$DOCKER_PASS

View File

@ -1,5 +1,5 @@
#!/bin/bash
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/steem/statuses/$(git rev-parse HEAD) -d "{
curl --silent -XPOST -H "Authorization: token $GITHUB_SECRET" https://api.github.com/repos/steemit/hive/statuses/$(git rev-parse HEAD) -d "{
\"state\": \"success\",
\"target_url\": \"${BUILD_URL}\",
\"description\": \"Jenkins-CI reports build succeeded!!\",

View File

@ -1,6 +1,6 @@
#!/bin/bash
set -e
sudo docker build --build-arg CI_BUILD=1 --build-arg BUILD_STEP=1 -t=steemit/steem:tests .
sudo docker run -v $WORKSPACE:/var/jenkins steemit/steem:tests cp -r /var/cobertura /var/jenkins
sudo docker build --build-arg CI_BUILD=1 --build-arg BUILD_STEP=1 -t=steemit/hive:tests .
sudo docker run -v $WORKSPACE:/var/jenkins steemit/hive:tests cp -r /var/cobertura /var/jenkins
# make docker cleanup after itself and delete all exited containers
sudo docker rm -v $(docker ps -a -q -f status=exited) || true

59
ciscripts/run_regressions.sh Executable file
View File

@ -0,0 +1,59 @@
#!/bin/bash
# Runs the regression suite (unit test groups + hive functional tests)
# against an existing CMake build tree under ./build/tests.
# Exits non-zero as soon as any group fails.
#
# NOTE: shebang fixed from the malformed "#/bin/sh" (missing '!'), and
# switched to bash because the script uses the bash-ism `local` and every
# sibling CI script in this repository uses bash.
BUILD_DIR="${PWD}/build"
TESTS_DIR=${BUILD_DIR}/tests
# Fail fast if the build tree is missing instead of running ctest in $PWD.
cd "${TESTS_DIR}" || exit 1
# $1 unit test name group
execute_unittest_group()
{
  local unit_test_group=$1
  echo "Start unit tests group '${unit_test_group}'"
  # -R selects all registered ctest cases whose name starts with the group prefix.
  if ! ctest -R ^${unit_test_group}.* --output-on-failure -vv
  then
    exit 1
  fi
}
# Runs the functional tests registered under the hive_functional/ ctest prefix.
execute_hive_functional()
{
  echo "Start hive functional tests"
  if ! ctest -R ^hive_functional/.* --output-on-failure -vv
  then
    exit 1
  fi
}
echo "          _____                    _____                    _____                _____                    _____          ";
echo "         /\    \                  /\    \                  /\    \              /\    \                  /\    \         ";
echo "        /::\    \                /::\    \                /::\    \            /::\    \                /::\    \        ";
echo "        \:::\    \              /::::\    \              /::::\    \           \:::\    \              /::::\    \       ";
echo "         \:::\    \            /::::::\    \            /::::::\    \           \:::\    \            /::::::\    \      ";
echo "          \:::\    \          /:::/\:::\    \          /:::/\:::\    \           \:::\    \          /:::/\:::\    \     ";
echo "           \:::\    \        /:::/__\:::\    \        /:::/__\:::\    \           \:::\    \        /:::/__\:::\    \    ";
echo "           /::::\    \      /::::\   \:::\    \       \:::\   \:::\    \          /::::\    \       \:::\   \:::\    \   ";
echo "          /::::::\    \    /::::::\   \:::\    \    ___\:::\   \:::\    \        /::::::\    \    ___\:::\   \:::\    \  ";
echo "         /:::/\:::\    \  /:::/\:::\   \:::\    \  /\   \:::\   \:::\    \      /:::/\:::\    \  /\   \:::\   \:::\    \ ";
echo "        /:::/  \:::\____\/:::/__\:::\   \:::\____\/::\   \:::\   \:::\____\    /:::/  \:::\____\/::\   \:::\   \:::\____\ ";
echo "       /:::/    \::/    /\:::\   \:::\   \::/    /\:::\   \:::\   \::/    /   /:::/    \::/    /\:::\   \:::\   \::/    /";
echo "      /:::/    / \/____/  \:::\   \:::\   \/____/  \:::\   \:::\   \/____/   /:::/    / \/____/  \:::\   \:::\   \/____/ ";
echo "     /:::/    /            \:::\   \:::\    \       \:::\   \:::\    \      /:::/    /            \:::\   \:::\    \     ";
echo "    /:::/    /              \:::\   \:::\____\       \:::\   \:::\____\    /:::/    /              \:::\   \:::\____\    ";
echo "    \::/    /                \:::\   \::/    /        \:::\  /:::/    /    \::/    /                \:::\  /:::/    /    ";
echo "     \/____/                  \:::\   \/____/          \:::\/:::/    /      \/____/                  \:::\/:::/    /     ";
echo "                               \:::\    \               \::::::/    /                                 \::::::/    /      ";
echo "                                \:::\____\               \::::/    /                                   \::::/    /       ";
echo "                                 \::/    /                \::/    /                                     \::/    /        ";
echo "                                  \/____/                  \/____/                                       \/____/         ";
echo "                                                                                                                        ";
execute_unittest_group plugin_test
execute_unittest_group chain_test
execute_hive_functional
exit 0

View File

@ -2,16 +2,16 @@ timeout: "1800s"
steps:
- name: gcr.io/cloud-builders/git
args: ['clone', '-b', '$BRANCH_NAME', '--recurse-submodules', '-v', 'https://github.com/blocktradesdevs/steem.git']
args: ['clone', '-b', '$BRANCH_NAME', '--recurse-submodules', '-v', 'https://github.com/blocktradesdevs/hive.git']
id: "Git clone"
- name: gcr.io/cloud-builders/git
dir: "steem"
dir: "hive"
args: ['status']
id: "Git status"
- name: 'gcr.io/cloud-builders/docker'
dir: "steem"
dir: "hive"
args: [
'build',
'-f', '../Builder.DockerFile',

View File

@ -5,7 +5,7 @@ SCRIPTPATH=`dirname $SCRIPT`
DATADIR="${SCRIPTPATH}/datadir"
HIVED="${SCRIPTPATH}/bin/steemd"
HIVED="${SCRIPTPATH}/bin/hived"
ARGS=""

View File

@ -0,0 +1,16 @@
#!/bin/bash
# Container entrypoint: enables core dumps, then launches hived in one of
# three modes selected by environment variables (USE_PAAS / IS_TESTNET).

# Route kernel core dumps to /tmp/core* and lift the core size limit so a
# hived crash leaves a usable dump for post-mortem debugging.
echo /tmp/core | tee /proc/sys/kernel/core_pattern
ulimit -c unlimited
# if we're not using PaaS mode then start hived traditionally with sv to control it
if [[ ! "$USE_PAAS" ]]; then
# Register hived as a runit service; runsv blocks here, supervising the
# service (restarting its ./run script if it exits) and thereby keeping
# the container alive.
mkdir -p /etc/service/hived
cp /usr/local/bin/hive-sv-run.sh /etc/service/hived/run
chmod +x /etc/service/hived/run
runsv /etc/service/hived
elif [[ "$IS_TESTNET" ]]; then
# PaaS testnet mode: fetch external deployment scripts and launch via them.
/usr/local/bin/pulltestnetscripts.sh
else
# PaaS mainnet mode: start hived configured for the PaaS (AWS EB) setup.
/usr/local/bin/startpaashived.sh
fi

View File

@ -1,22 +1,22 @@
#!/bin/bash
# if the writer node dies, kill runsv causing the container to exit
STEEMD_PID=`pgrep -f p2p-endpoint`
HIVED_PID=`pgrep -f p2p-endpoint`
if [[ ! $? -eq 0 ]]; then
echo NOTIFYALERT! steemd has quit unexpectedly, checking for core dump and then starting a new instance..
echo NOTIFYALERT! hived has quit unexpectedly, checking for core dump and then starting a new instance..
sleep 30
SAVED_PID=`cat /tmp/steemdpid`
SAVED_PID=`cat /tmp/hivedpid`
if [[ -e /tmp/core.$SAVED_PID ]]; then
gdb --batch --quiet -ex "thread apply all bt full" -ex "quit" /usr/local/steemd-full/bin/steemd /tmp/core.$SAVED_PID >> /tmp/stacktrace
gdb --batch --quiet -ex "thread apply all bt full" -ex "quit" /usr/local/hived-full/bin/hived /tmp/core.$SAVED_PID >> /tmp/stacktrace
STACKTRACE=`cat /tmp/stacktrace`
echo NOTIFYALERT! steemd stacktrace from coredump:
echo NOTIFYALERT! hived stacktrace from coredump:
for ((i=0;i<${#STACKTRACE};i+=120)); do
echo "${STACKTRACE:i:120}"
done
CORE_FILE_NAME=coredump-`date '+%Y%m%d-%H%M%S'`.$SAVED_PID
aws s3 cp /tmp/core.$SAVED_PID s3://$S3_BUCKET/$CORE_FILE_NAME
fi
RUN_SV_PID=`pgrep -f /etc/service/steemd`
RUN_SV_PID=`pgrep -f /etc/service/hived`
kill -9 $RUN_SV_PID
fi

View File

@ -1,12 +1,12 @@
#!/bin/bash
echo steemd-testnet: getting deployment scripts from external source
echo hived-testnet: getting deployment scripts from external source
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/testnetinit.sh > /usr/local/bin/testnetinit.sh
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/testnet.config.ini > /etc/steemd/testnet.config.ini
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/fastgen.config.ini > /etc/steemd/fastgen.config.ini
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/testnet.config.ini > /etc/hived/testnet.config.ini
wget -qO- $SCRIPTURL/master/$LAUNCHENV/$APP/fastgen.config.ini > /etc/hived/fastgen.config.ini
chmod +x /usr/local/bin/testnetinit.sh
echo steemd-testnet: launching testnetinit script
echo hived-testnet: launching testnetinit script
/usr/local/bin/testnetinit.sh

View File

@ -1,16 +1,16 @@
#!/bin/bash
VERSION=`cat /etc/steemdversion`
VERSION=`cat /etc/hivedversion`
if [[ "$IS_BROADCAST_NODE" ]]; then
STEEMD="/usr/local/steemd-default/bin/steemd"
HIVED="/usr/local/hived-default/bin/hived"
elif [[ "$IS_AH_NODE" ]]; then
STEEMD="/usr/local/steemd-default/bin/steemd"
HIVED="/usr/local/hived-default/bin/hived"
else
STEEMD="/usr/local/steemd-full/bin/steemd"
HIVED="/usr/local/hived-full/bin/hived"
fi
chown -R steemd:steemd $HOME
chown -R hived:hived $HOME
# clean out data dir since it may be semi-persistent block storage on the ec2 with stale data
rm -rf $HOME/*
@ -19,19 +19,19 @@ ARGS=""
# if user did pass in desired seed nodes, use
# the ones the user specified:
if [[ ! -z "$STEEMD_SEED_NODES" ]]; then
for NODE in $STEEMD_SEED_NODES ; do
if [[ ! -z "$HIVED_SEED_NODES" ]]; then
for NODE in $HIVED_SEED_NODES ; do
ARGS+=" --p2p-seed-node=$NODE"
done
fi
NOW=`date +%s`
STEEMD_FEED_START_TIME=`expr $NOW - 1209600`
HIVED_FEED_START_TIME=`expr $NOW - 1209600`
ARGS+=" --follow-start-feeds=$STEEMD_FEED_START_TIME"
ARGS+=" --follow-start-feeds=$HIVED_FEED_START_TIME"
STEEMD_PROMOTED_START_TIME=`expr $NOW - 604800`
ARGS+=" --tags-start-promoted=$STEEMD_PROMOTED_START_TIME"
HIVED_PROMOTED_START_TIME=`expr $NOW - 604800`
ARGS+=" --tags-start-promoted=$HIVED_PROMOTED_START_TIME"
if [[ ! "$DISABLE_BLOCK_API" ]]; then
ARGS+=" --plugin=block_api"
@ -39,24 +39,24 @@ fi
# overwrite local config with image one
if [[ "$IS_BROADCAST_NODE" ]]; then
cp /etc/steemd/config-for-broadcaster.ini $HOME/config.ini
cp /etc/hived/config-for-broadcaster.ini $HOME/config.ini
elif [[ "$IS_AH_NODE" ]]; then
cp /etc/steemd/config-for-ahnode.ini $HOME/config.ini
cp /etc/hived/config-for-ahnode.ini $HOME/config.ini
elif [[ "$IS_OPSWHITELIST_NODE" ]]; then
cp /etc/steemd/fullnode.opswhitelist.config.ini $HOME/config.ini
cp /etc/hived/fullnode.opswhitelist.config.ini $HOME/config.ini
else
cp /etc/steemd/fullnode.config.ini $HOME/config.ini
cp /etc/hived/fullnode.config.ini $HOME/config.ini
fi
chown steemd:steemd $HOME/config.ini
chown hived:hived $HOME/config.ini
cd $HOME
mv /etc/nginx/nginx.conf /etc/nginx/nginx.original.conf
cp /etc/nginx/steemd.nginx.conf /etc/nginx/nginx.conf
cp /etc/nginx/hived.nginx.conf /etc/nginx/nginx.conf
# get blockchain state from an S3 bucket
echo steemd: beginning download and decompress of s3://$S3_BUCKET/blockchain-$VERSION-latest.tar.lz4
echo hived: beginning download and decompress of s3://$S3_BUCKET/blockchain-$VERSION-latest.tar.lz4
finished=0
count=1
if [[ "$USE_RAMDISK" ]]; then
@ -77,13 +77,13 @@ if [[ "$USE_RAMDISK" ]]; then
fi
if [[ $? -ne 0 ]]; then
sleep 1
echo notifyalert steemd: unable to pull blockchain state from S3 - attempt $count
echo notifyalert hived: unable to pull blockchain state from S3 - attempt $count
(( count++ ))
else
finished=1
fi
done
chown -R steemd:steemd /mnt/ramdisk/blockchain
chown -R hived:hived /mnt/ramdisk/blockchain
else
while [[ $count -le 5 ]] && [[ $finished == 0 ]]
do
@ -97,7 +97,7 @@ else
fi
if [[ $? -ne 0 ]]; then
sleep 1
echo notifyalert steemd: unable to pull blockchain state from S3 - attempt $count
echo notifyalert hived: unable to pull blockchain state from S3 - attempt $count
(( count++ ))
else
finished=1
@ -106,19 +106,19 @@ else
fi
if [[ $finished == 0 ]]; then
if [[ ! "$SYNC_TO_S3" ]]; then
echo notifyalert steemd: unable to pull blockchain state from S3 - exiting
echo notifyalert hived: unable to pull blockchain state from S3 - exiting
exit 1
else
echo notifysteemdsync steemdsync: shared memory file for $VERSION not found, creating a new one by replaying the blockchain
echo notifyhivedsync hivedsync: shared memory file for $VERSION not found, creating a new one by replaying the blockchain
if [[ "$USE_RAMDISK" ]]; then
mkdir -p /mnt/ramdisk/blockchain
chown -R steemd:steemd /mnt/ramdisk/blockchain
chown -R hived:hived /mnt/ramdisk/blockchain
else
mkdir blockchain
fi
aws s3 cp s3://$S3_BUCKET/block_log-latest blockchain/block_log
if [[ $? -ne 0 ]]; then
echo notifysteemdsync steemdsync: unable to pull latest block_log from S3, will sync from scratch.
echo notifyhivedsync hivedsync: unable to pull latest block_log from S3, will sync from scratch.
else
ARGS+=" --replay-blockchain --force-validate"
fi
@ -136,7 +136,7 @@ if [[ "$SYNC_TO_S3" ]]; then
chown www-data:www-data /tmp/issyncnode
fi
chown -R steemd:steemd $HOME/*
chown -R hived:hived $HOME/*
# let's get going
cp /etc/nginx/healthcheck.conf.template /etc/nginx/healthcheck.conf
@ -146,22 +146,22 @@ rm /etc/nginx/sites-enabled/default
cp /etc/nginx/healthcheck.conf /etc/nginx/sites-enabled/default
/etc/init.d/fcgiwrap restart
service nginx restart
exec chpst -usteemd \
$STEEMD \
exec chpst -uhived \
$HIVED \
--webserver-ws-endpoint=127.0.0.1:8091 \
--webserver-http-endpoint=127.0.0.1:8091 \
--p2p-endpoint=0.0.0.0:2001 \
--data-dir=$HOME \
$ARGS \
$STEEMD_EXTRA_OPTS \
$HIVED_EXTRA_OPTS \
2>&1&
SAVED_PID=`pgrep -f p2p-endpoint`
echo $SAVED_PID >> /tmp/steemdpid
mkdir -p /etc/service/steemd
echo $SAVED_PID >> /tmp/hivedpid
mkdir -p /etc/service/hived
if [[ ! "$SYNC_TO_S3" ]]; then
cp /usr/local/bin/paas-sv-run.sh /etc/service/steemd/run
cp /usr/local/bin/paas-sv-run.sh /etc/service/hived/run
else
cp /usr/local/bin/sync-sv-run.sh /etc/service/steemd/run
cp /usr/local/bin/sync-sv-run.sh /etc/service/hived/run
fi
chmod +x /etc/service/steemd/run
runsv /etc/service/steemd
chmod +x /etc/service/hived/run
runsv /etc/service/hived

View File

@ -1,32 +1,32 @@
#!/bin/bash
STEEMD="/usr/local/steemd-default/bin/steemd"
HIVED="/usr/local/hived-default/bin/hived"
VERSION=`cat /etc/steemdversion`
VERSION=`cat /etc/hivedversion`
if [[ "$USE_WAY_TOO_MUCH_RAM" ]]; then
STEEMD="/usr/local/steemd-full/bin/steemd"
HIVED="/usr/local/hived-full/bin/hived"
fi
chown -R steemd:steemd $HOME
chown -R hived:hived $HOME
ARGS=""
# if user did pass in desired seed nodes, use
# the ones the user specified:
if [[ ! -z "$STEEMD_SEED_NODES" ]]; then
for NODE in $STEEMD_SEED_NODES ; do
if [[ ! -z "$HIVED_SEED_NODES" ]]; then
for NODE in $HIVED_SEED_NODES ; do
ARGS+=" --p2p-seed-node=$NODE"
done
fi
if [[ ! -z "$STEEMD_WITNESS_NAME" ]]; then
ARGS+=" --witness=\"$STEEMD_WITNESS_NAME\""
if [[ ! -z "$HIVED_WITNESS_NAME" ]]; then
ARGS+=" --witness=\"$HIVED_WITNESS_NAME\""
fi
if [[ ! -z "$STEEMD_PRIVATE_KEY" ]]; then
ARGS+=" --private-key=$STEEMD_PRIVATE_KEY"
if [[ ! -z "$HIVED_PRIVATE_KEY" ]]; then
ARGS+=" --private-key=$HIVED_PRIVATE_KEY"
fi
if [[ ! -z "$TRACK_ACCOUNT" ]]; then
@ -41,33 +41,33 @@ if [[ ! "$DISABLE_SCALE_MEM" ]]; then
fi
NOW=`date +%s`
STEEMD_FEED_START_TIME=`expr $NOW - 1209600`
HIVED_FEED_START_TIME=`expr $NOW - 1209600`
ARGS+=" --follow-start-feeds=$STEEMD_FEED_START_TIME"
ARGS+=" --follow-start-feeds=$HIVED_FEED_START_TIME"
# overwrite local config with image one
if [[ "$USE_FULL_WEB_NODE" ]]; then
cp /etc/steemd/fullnode.config.ini $HOME/config.ini
cp /etc/hived/fullnode.config.ini $HOME/config.ini
elif [[ "$IS_BROADCAST_NODE" ]]; then
cp /etc/steemd/config-for-broadcaster.ini $HOME/config.ini
cp /etc/hived/config-for-broadcaster.ini $HOME/config.ini
elif [[ "$IS_AH_NODE" ]]; then
cp /etc/steemd/config-for-ahnode.ini $HOME/config.ini
cp /etc/hived/config-for-ahnode.ini $HOME/config.ini
elif [[ "$IS_OPSWHITELIST_NODE" ]]; then
cp /etc/steemd/fullnode.opswhitelist.config.ini $HOME/config.ini
cp /etc/hived/fullnode.opswhitelist.config.ini $HOME/config.ini
else
cp /etc/steemd/config.ini $HOME/config.ini
cp /etc/hived/config.ini $HOME/config.ini
fi
chown steemd:steemd $HOME/config.ini
chown hived:hived $HOME/config.ini
if [[ ! -d $HOME/blockchain ]]; then
if [[ -e /var/cache/steemd/blocks.tbz2 ]]; then
if [[ -e /var/cache/hived/blocks.tbz2 ]]; then
# init with blockchain cached in image
ARGS+=" --replay-blockchain"
mkdir -p $HOME/blockchain/database
cd $HOME/blockchain/database
tar xvjpf /var/cache/steemd/blocks.tbz2
chown -R steemd:steemd $HOME/blockchain
tar xvjpf /var/cache/hived/blocks.tbz2
chown -R hived:hived $HOME/blockchain
fi
else
ARGS+=" --tags-skip-startup-update"
@ -78,7 +78,7 @@ fi
cd $HOME
if [[ "$USE_PUBLIC_SHARED_MEMORY" ]]; then
echo steemd: Downloading and uncompressing blockchain-$VERSION-latest.tar.lz4 - this may take awhile.
echo hived: Downloading and uncompressing blockchain-$VERSION-latest.tar.lz4 - this may take awhile.
wget -qO- https://s3.amazonaws.com/steemit-dev-blockchainstate/blockchain-$VERSION-latest.tar.lz4 | lz4 -d | tar x
fi
@ -87,7 +87,7 @@ if [[ "$USE_PUBLIC_BLOCKLOG" ]]; then
if [[ ! -d $HOME/blockchain ]]; then
mkdir -p $HOME/blockchain
fi
echo "steemd: Downloading a block_log and replaying the blockchain"
echo "hived: Downloading a block_log and replaying the blockchain"
echo "This may take a little while..."
wget -O $HOME/blockchain/block_log https://s3.amazonaws.com/steemit-dev-blockchainstate/block_log-latest
ARGS+=" --replay-blockchain"
@ -98,7 +98,7 @@ fi
sleep 1
mv /etc/nginx/nginx.conf /etc/nginx/nginx.original.conf
cp /etc/nginx/steemd.nginx.conf /etc/nginx/nginx.conf
cp /etc/nginx/hived.nginx.conf /etc/nginx/nginx.conf
if [[ "$USE_NGINX_FRONTEND" ]]; then
cp /etc/nginx/healthcheck.conf.template /etc/nginx/healthcheck.conf
@ -108,23 +108,23 @@ if [[ "$USE_NGINX_FRONTEND" ]]; then
cp /etc/nginx/healthcheck.conf /etc/nginx/sites-enabled/default
/etc/init.d/fcgiwrap restart
service nginx restart
exec chpst -usteemd \
$STEEMD \
exec chpst -uhived \
$HIVED \
--webserver-ws-endpoint=0.0.0.0:8091 \
--webserver-http-endpoint=0.0.0.0:8091 \
--p2p-endpoint=0.0.0.0:2001 \
--data-dir=$HOME \
$ARGS \
$STEEMD_EXTRA_OPTS \
$HIVED_EXTRA_OPTS \
2>&1
else
exec chpst -usteemd \
$STEEMD \
exec chpst -uhived \
$HIVED \
--webserver-ws-endpoint=0.0.0.0:8090 \
--webserver-http-endpoint=0.0.0.0:8090 \
--p2p-endpoint=0.0.0.0:2001 \
--data-dir=$HOME \
$ARGS \
$STEEMD_EXTRA_OPTS \
$HIVED_EXTRA_OPTS \
2>&1
fi

View File

@ -1,16 +0,0 @@
#!/bin/bash
echo /tmp/core | tee /proc/sys/kernel/core_pattern
ulimit -c unlimited
# if we're not using PaaS mode then start steemd traditionally with sv to control it
if [[ ! "$USE_PAAS" ]]; then
mkdir -p /etc/service/steemd
cp /usr/local/bin/steem-sv-run.sh /etc/service/steemd/run
chmod +x /etc/service/steemd/run
runsv /etc/service/steemd
elif [[ "$IS_TESTNET" ]]; then
/usr/local/bin/pulltestnetscripts.sh
else
/usr/local/bin/startpaassteemd.sh
fi

View File

@ -1,24 +1,24 @@
#!/bin/bash
VERSION=`cat /etc/steemdversion`
VERSION=`cat /etc/hivedversion`
# if the writer node dies by itself, kill runsv causing the container to exit
STEEMD_PID=`pgrep -f p2p-endpoint`
HIVED_PID=`pgrep -f p2p-endpoint`
if [[ ! $? -eq 0 ]]; then
echo NOTIFYALERT! steemdsync has quit unexpectedly, checking for coredump and then starting a new instance..
echo NOTIFYALERT! hivedsync has quit unexpectedly, checking for coredump and then starting a new instance..
sleep 30
SAVED_PID=`cat /tmp/steemdpid`
SAVED_PID=`cat /tmp/hivedpid`
if [[ -e /tmp/core.$SAVED_PID ]]; then
gdb --batch --quiet -ex "thread apply all bt full" -ex "quit" /usr/local/steemd-full/bin/steemd /tmp/core.$SAVED_PID >> /tmp/stacktrace
gdb --batch --quiet -ex "thread apply all bt full" -ex "quit" /usr/local/hived-full/bin/hived /tmp/core.$SAVED_PID >> /tmp/stacktrace
STACKTRACE=`cat /tmp/stacktrace`
echo NOTIFYALERT! steemdsync stacktrace from coredump:
echo NOTIFYALERT! hivedsync stacktrace from coredump:
for ((i=0;i<${#STACKTRACE};i+=120)); do
echo "${STACKTRACE:i:120}"
done
CORE_FILE_NAME=coredump-`date '+%Y%m%d-%H%M%S'`.$SAVED_PID
aws s3 cp /tmp/core.$SAVED_PID s3://$S3_BUCKET/$CORE_FILE_NAME
fi
RUN_SV_PID=`pgrep -f /etc/service/steemd`
RUN_SV_PID=`pgrep -f /etc/service/hived`
kill -9 $RUN_SV_PID
fi
@ -38,13 +38,13 @@ if [[ ! -z "$BLOCKCHAIN_TIME" ]]; then
# if we're within 10 seconds of current time, call it synced and begin the upload
BLOCK_AGE=$((${CURRENT_SECS} - ${BLOCKCHAIN_SECS}))
if [[ ${BLOCK_AGE} -le 10 ]]; then
STEEMD_PID=`pgrep -f p2p-endpoint`
kill -SIGINT $STEEMD_PID
echo steemdsync: waiting for steemd to exit cleanly
HIVED_PID=`pgrep -f p2p-endpoint`
kill -SIGINT $HIVED_PID
echo hivedsync: waiting for hived to exit cleanly
# loop while the process is still running
let WAIT_TIME=0
while kill -0 $STEEMD_PID 2> /dev/null; do
while kill -0 $HIVED_PID 2> /dev/null; do
sleep 1
let WAIT_TIME++
@ -55,16 +55,16 @@ if [[ ! -z "$BLOCKCHAIN_TIME" ]]; then
fi
done
echo steemdsync: starting a new blockchainstate upload operation
echo hivedsync: starting a new blockchainstate upload operation
cd ${COMPRESSPATH:-$HOME}
echo steemdsync: compressing blockchainstate...
echo hivedsync: compressing blockchainstate...
if [[ "$USE_RAMDISK" ]]; then
tar vcf blockchain.tar.lz4 --use-compress-prog=lz4 -C $HOME blockchain -C /mnt/ramdisk blockchain
else
tar cf blockchain.tar.lz4 --use-compress-prog=lz4 -C $HOME blockchain
fi
if [[ ! $? -eq 0 ]]; then
echo NOTIFYALERT! steemdsync was unable to compress shared memory file, check the logs.
echo NOTIFYALERT! hivedsync was unable to compress shared memory file, check the logs.
exit 1
fi
if [[ "$IS_BROADCAST_NODE" ]]; then
@ -74,13 +74,13 @@ if [[ ! -z "$BLOCKCHAIN_TIME" ]]; then
else
FILE_NAME=blockchain-$VERSION-`date '+%Y%m%d-%H%M%S'`.tar.lz4
fi
echo steemdsync: uploading $FILE_NAME to $S3_BUCKET
echo hivedsync: uploading $FILE_NAME to $S3_BUCKET
aws s3 cp blockchain.tar.lz4 s3://$S3_BUCKET/$FILE_NAME
if [[ ! $? -eq 0 ]]; then
echo NOTIFYALERT! steemdsync was unable to upload $FILE_NAME to s3://$S3_BUCKET
echo NOTIFYALERT! hivedsync was unable to upload $FILE_NAME to s3://$S3_BUCKET
exit 1
fi
echo steemdsync: replacing current version of blockchain state with $FILE_NAME
echo hivedsync: replacing current version of blockchain state with $FILE_NAME
if [[ "$IS_BROADCAST_NODE" ]]; then
aws s3 cp s3://$S3_BUCKET/$FILE_NAME s3://$S3_BUCKET/broadcast-$VERSION-latest.tar.lz4
aws s3api put-object-acl --bucket $S3_BUCKET --key broadcast-$VERSION-latest.tar.lz4 --acl public-read
@ -92,7 +92,7 @@ if [[ ! -z "$BLOCKCHAIN_TIME" ]]; then
aws s3api put-object-acl --bucket $S3_BUCKET --key blockchain-$VERSION-latest.tar.lz4 --acl public-read
fi
if [[ ! $? -eq 0 ]]; then
echo NOTIFYALERT! steemdsync was unable to overwrite the current blockchainstate with $FILE_NAME
echo NOTIFYALERT! hivedsync was unable to overwrite the current blockchainstate with $FILE_NAME
exit 1
fi
# upload a current block_log
@ -103,11 +103,11 @@ if [[ ! -z "$BLOCKCHAIN_TIME" ]]; then
aws s3api put-object-acl --bucket $S3_BUCKET --key block_log-latest --acl public-read
fi
# kill the container starting the process over again
echo steemdsync: stopping the container after a sync operation
echo hivedsync: stopping the container after a sync operation
if [[ -e /tmp/isnewsync ]]; then
echo notifysteemdsync: steemdsync: successfully generated and uploaded new blockchain-$VERSION-latest.tar.lz4 to s3://$S3_BUCKET
echo notifyhivedsync: hivedsync: successfully generated and uploaded new blockchain-$VERSION-latest.tar.lz4 to s3://$S3_BUCKET
fi
RUN_SV_PID=`pgrep -f /etc/service/steemd`
RUN_SV_PID=`pgrep -f /etc/service/hived`
kill -9 $RUN_SV_PID
fi
fi

View File

@ -20,7 +20,7 @@ recommended for witnesses and seed-nodes.
Clears old votes from memory that are no longer required for consensus.
### BUILD_STEEM_TESTNET=[OFF/ON]
### BUILD_HIVE_TESTNET=[OFF/ON]
Builds hived for use in a private testnet. Also required for building unit tests.
@ -91,7 +91,7 @@ will build out of the box without further effort:
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j$(nproc) steemd
make -j$(nproc) hived
make -j$(nproc) cli_wallet
# optional
make install # defaults to /usr/local
@ -156,7 +156,7 @@ Then the instructions are the same as for hive:
git submodule update --init --recursive
mkdir build && cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make -j$(nproc) steemd
make -j$(nproc) hived
make -j$(nproc) cli_wallet
## Building on macOS X
@ -228,15 +228,15 @@ In that case, change the directories for `export` accordingly.
Also, some useful build targets for `make` are:
steemd
hived
chain_test
cli_wallet
e.g.:
make -j$(sysctl -n hw.logicalcpu) steemd
make -j$(sysctl -n hw.logicalcpu) hived
This will only build `steemd`.
This will only build `hived`.
## Building on Other Platforms

View File

@ -26,7 +26,7 @@ number of resources in the pool:
Usage and budget are pretty straightforward and easy to think about. Decay is a little less intuitive.
One way to think about decay is "resources expire." Steem had relatively little activity over its
One way to think about decay is "resources expire." Hive (was Steem then) had relatively little activity over its
first X months, does that mean users should now be able to burst to 30X times the current limit for
the next day? That sounds pretty wrong.
@ -54,7 +54,7 @@ respond appropriately." This is a slightly fuzzy concept, because the "detectio
Every user has a "manabar" called *resource credits* (RC).
- The maximum RC an account can have is equal to its Steem Power (technically, VESTS).
- The maximum RC an account can have is equal to its Hive Power (technically, VESTS).
- It takes 5 days for RC to regenerate from 0% to 100%. (So more SP means more RC's per hour.)
- Users automatically spend RC whenever they transact. RCs are charged based on various resources a transaction can consume. Resources are things like execution time, state size, transaction size. The sum of costs of all resources are charges to the user's RCs. The one RC pool pays for _all_ resources.
- A user who doesn't have enough RC is unable to transact.
@ -84,7 +84,7 @@ You can look at this system like a market:
- Q: What is a good decay value to use?
- A: "Good" is subjective. There are daily variations (because more users are concentrated in certain timezones and awake during certain times
of the day), and weekly variations (people use Steem more/less on weekends/holidays). The default decay rate (347321) corresponds to a half-life of
of the day), and weekly variations (people use Hive more/less on weekends/holidays). The default decay rate (347321) corresponds to a half-life of
5 days, which should fully absorb daily variations and partially absorb weekly variations. Since the RC pricing is determined based on the pool level,
the half-life is also roughly approximately the timescale at which RC prices become hard to predict -- there is some value in having a longer timescale
for varying of transaction costs.
@ -94,14 +94,14 @@ for varying of transaction costs.
- Q: Why 2x?
- A: The RC plugin makes usage anywhere close to the actual budget value require a huge RC spending level. How huge is "huge"? Basically, if
100% of the Steem Power stake is constantly using all of its RC to do transactions that only require a single resource, the usage will be
100% of the Hive Power stake is constantly using all of its RC to do transactions that only require a single resource, the usage will be
equal to the budget. The price curve of RC's is designed with a very rough empirical model which implies the "natural" level of RC usage will
result in about `50%` of the budget is actually used. Like all user behavior, this model might not fit the data, in which case "2x" might be
wrong, and it should actually be "1.5x" or "3x". But it will probably not be as small as "1x," and it will probably not be as large as "100x".
- Q: Why not have the budget be the desired usage level of the resource?
- A: If you say "users should use this much," how would you even enforce that? The amount of activity that users do is nothing more, and nothing
less, than the sum of each individual user's decisions. The budget creates market forces that influence these decisions: You need X Steem Power
less, than the sum of each individual user's decisions. The budget creates market forces that influence these decisions: You need X Hive Power
to transact at R rate (today, and tomorrow the numbers will probably be similar, but over time they might become very different). The code sets
the numbers so that the budget is an upper bound on user activity. How far below that upper bound will the actual user activity be? That
depends very much on the sum of users' individual decisions about how many resources to consume and how much SP heavy consumers are willing
@ -116,8 +116,8 @@ How much users actually transact is something that is impossible to predict, but
Currently, the only witness configurable resource is subsidized accounts. Two parameters are set:
- `account_subsidy_budget` is the per-block budget.
- A value of 10,000 (`STEEM_ACCOUNT_SUBSIDY_PRECISION`) represents a budget of one subsidized account per block.
- `account_subsidy_decay` is the per-block decay rate. A value of `2^36` (`STEEM_RD_DECAY_DENOM_SHIFT`) represents 100% decay rate.
- A value of 10,000 (`HIVE_ACCOUNT_SUBSIDY_PRECISION`) represents a budget of one subsidized account per block.
- `account_subsidy_decay` is the per-block decay rate. A value of `2^36` (`HIVE_RD_DECAY_DENOM_SHIFT`) represents 100% decay rate.
Here is a Python script to convert from half-life, measured in days, to an appropriately scaled per-block decay rate:
@ -126,12 +126,12 @@ Here is a Python script to convert from half-life, measured in days, to an appro
import math
STEEM_BLOCKS_PER_DAY = 20*60*24
STEEM_RD_DECAY_DENOM_SHIFT = 36
STEEM_MAX_WITNESSES = 21
STEEM_MAX_VOTED_WITNESSES_HF17 = 20
HIVE_BLOCKS_PER_DAY = 20*60*24
HIVE_RD_DECAY_DENOM_SHIFT = 36
HIVE_MAX_WITNESSES = 21
HIVE_MAX_VOTED_WITNESSES_HF17 = 20
f = lambda d : int(0.5 + (1 << STEEM_RD_DECAY_DENOM_SHIFT) * (-math.expm1(-math.log(2.0) / (STEEM_BLOCKS_PER_DAY * d * STEEM_MAX_VOTED_WITNESSES_HF17 / STEEM_MAX_WITNESSES))))
f = lambda d : int(0.5 + (1 << HIVE_RD_DECAY_DENOM_SHIFT) * (-math.expm1(-math.log(2.0) / (HIVE_BLOCKS_PER_DAY * d * HIVE_MAX_VOTED_WITNESSES_HF17 / HIVE_MAX_WITNESSES))))
print("A 5-day half-life corresponds to a decay constant of", f(5))
```
@ -168,21 +168,21 @@ It may sound like consensus limits are better, because they're enforced more str
checking makes upgrades painful:
- Upgrading non-consensus limits is a witness-only upgrade.
- Upgrading consensus limits is a hardfork, all `steemd` nodes must upgrade.
- Upgrading consensus limits is a hardfork, all `hived` nodes must upgrade.
The subsidized account resource limit is consensus. Other resource limits are non-consensus. Why were things
divided that way? It has to do with the consequences of violating the limits:
- If a witness ignores the subsidized account limit, people will get new accounts for free, that normally cost STEEM to create. This is a medium-sized economic problem.
- If a witness ignores the subsidized account limit, people will get new accounts for free, that normally cost HIVE to create. This is a medium-sized economic problem.
- If a witness ignores the other resource limits, their blocks might take a little longer [1] to process or use more memory. This is a tiny IT problem.
For resource limits, having witness-only upgrades outweighs the problem. Witnesses who have been around a long time know that, in the ancient past, Steem's bandwidth
For resource limits, having witness-only upgrades outweighs the problem. Witnesses who have been around a long time know that, in the ancient past, Hive's bandwidth
algorithm was consensus. It was then changed to non-consensus when the current bandwidth algorithm was implemented. The new resource-based limits are non-consensus
for the most part, just like the current bandwidth algorithm.
[1] Could they take a lot longer? No, the rogue witness would be limited by block size. Transactions have some variation in how much CPU / memory they use relative
to their size. But operations that allow users to take a huge amount of CPU / memory for a tiny number of bytes are attack vectors. As good blockchain architects,
we should never implement such operations in the Steem source code! Even the worst-case CPU cycles / memory bytes consumed by an attacker spamming the most
we should never implement such operations in the Hive source code! Even the worst-case CPU cycles / memory bytes consumed by an attacker spamming the most
"efficient" attack (in terms of CPU cycles / memory bytes consumed per byte of transaction size) should still be limited by the max block size.
# Setting consensus parameters

View File

@ -3,7 +3,7 @@ Python Debug Node Readme
------------------------
The Python Debug Node is a wrapper class that automates the creation and maintenance
of a running Steem Debug Node. The Debug Node is a plugin for Steem that allows realtime
of a running Hive Debug Node. The Debug Node is a plugin for Hive that allows realtime
local modification of the chain state in a way that mimicks real world behaviors
without corrupting a localally saved blockchain or propogating changes to the live chain.
@ -19,14 +19,14 @@ is a higher level language that many amateur and skilled programmers use. There
been community development of Python libraries to make interfacing with a live node easier.
This plugin closes the gap by allowing a node to be launched programmatically in Python
in addition to interfacing with the node. This module utilizes community member Xeroc's
[Python Steem library](https://github.com/xeroc/python-steemlib).
[Python Hive library](https://github.com/xeroc/python-steemlib).
How Do I Use This?
------------------
First of all, you need to install the module. Navigate to `tests/external_testing_scripts`
and run `python3 setup.py install`
To use the script include `from steemdebugnode import DebugNode`
To use the script include `from hivedebugnode import DebugNode`
There are a couple of examples already made that you can try modifying yourself.
@ -38,7 +38,7 @@ generates a historgram of block producers to verify the witness scheduling algor
properly. The purpose of the script is it verify any given hardfork does not have a bug that
could crash the chain entirely.
[debugnode.py](https://github.com/openhive-network/hive/python_scripts/steemdebugnode/debugnode.py#L212)
[debugnode.py](https://github.com/openhive-network/hive/python_scripts/hivedebugnode/debugnode.py#L212)
This script is much simpler. It has the same parsing logic, but has much less test logic.
All it does is replay the blockchain, periodically printing a status update so the user
knows it is still working. The script then hangs so the user can interact with the chain
@ -47,7 +47,7 @@ through RPC calls or the CLI Wallet.
What is the important part of these scripts?
``` Python
debug_node = DebugNode( str( steemd ), str( data_dir ) )
debug_node = DebugNode( str( hived ), str( data_dir ) )
with debug_node:
# Do stuff with blockchain
```
@ -58,9 +58,9 @@ and establishes the internal rpc connection. The script can then do whatever it
When the `with` block ends, the node automatically shutsdown and cleans up. The node uses
a system standard temp directory through the standard Python TemporaryDirectory as the
working data directory for the running node. The only work your script needs to do is
specify the steemd binary location and a populated data directory. For most configurations
this will be `programs/steemd/steemd` and `witness_node_data_dir` respectively, from the
git root directory for Steem.
specify the hived binary location and a populated data directory. For most configurations
this will be `programs/hived/hived` and `witness_node_data_dir` respectively, from the
git root directory for Hive.
TODO/ Long Term Goals
---------------------
@ -75,4 +75,4 @@ the RPC call. Most, if not all, RPC API calls could be programatically generated
the C++ source. It would also be a good step forward to introduce a simple testing framework
that could be used to start a debug node and then run a series of test cases on a common
starting chain state. This would address much of the integration testing that is sorely
needed for Steem.
needed for Hive.

View File

@ -62,7 +62,7 @@ There are three methods to secure the API:
- Limit access to the API socket to a trusted LAN by firewall configuration
- Limit access to particular APIs with username/password authentication
The Steem developers recommend using the first of these methods to secure the API by binding to localhost, as follows:
The Hive developers recommend using the first of these methods to secure the API by binding to localhost, as follows:
rpc-endpoint = 127.0.0.1:8090
@ -70,7 +70,7 @@ Securing specific APIs
-----------------------
The problem with securing APIs at the network level is that there are deployment scenarios where a node may want to have some APIs public, but other APIs private.
The `steemd` process includes username/password based authentication to individual APIs.
The `hived` process includes username/password based authentication to individual APIs.
Since the username/password is sent directly over the wire, you should use a TLS connection when authenticating with username and password. TLS connection can be achieved by one of two methods:

View File

@ -2,12 +2,12 @@
What is an Automated Action
---------------------------
The concept of automatic/automated actions in the Steem Blockchain is not new to SMTs. The framework is a formalization of a design pattern that is already used widely in the Steem source code. When a user initiates a power down, a week later some Steem Power is converted to STEEM and deposited in their account. This is all done behind the scenes in what we have called per-block processing. The only artifacts of the state change are the changed balance in the account object and a virtual operation in the user's account history. The virtual operation is an annotation that the event occurred, but it does not represent the state transition itself. This has lead to inconsistencies in what data is included in each virtual operation. Automated Actions seek to fix this problem for all new such actions added in SMTs. Automated Actions not only represent but also trigger the state transition instead of simply recording such a transition occurred. Because they trigger transitions they must be included in blocks. The advantage of recording automated actions in such a manner is that while reindexing the blockchain, the actions can be trusted without verifying state. Well formed actions read minimal or no state and reduce reindex time.
The concept of automatic/automated actions in the Hive Blockchain is not new to SMTs. The framework is a formalization of a design pattern that is already used widely in the Hive source code. When a user initiates a power down, a week later some Hive Power is converted to HIVE and deposited in their account. This is all done behind the scenes in what we have called per-block processing. The only artifacts of the state change are the changed balance in the account object and a virtual operation in the user's account history. The virtual operation is an annotation that the event occurred, but it does not represent the state transition itself. This has lead to inconsistencies in what data is included in each virtual operation. Automated Actions seek to fix this problem for all new such actions added in SMTs. Automated Actions not only represent but also trigger the state transition instead of simply recording such a transition occurred. Because they trigger transitions they must be included in blocks. The advantage of recording automated actions in such a manner is that while reindexing the blockchain, the actions can be trusted without verifying state. Well formed actions read minimal or no state and reduce reindex time.
Types of Actions
----------------
Automated Actions are broken in to two broad types of actions; required and optional. Required actions, as their name implies, are required to be included in blocks and verified trustlessly by each node running steemd. Optional actions, likewise, are not required to be included. This is a new use case from what currently happens during per-block processing. What happens during per-block processing takes the state as input and transitions to a new state. This must be deterministic in order to avoid forking. Optional actions allows skipping of certain actions through the omission of their inclusion. One such example of how this is used is token emission for SMTs. SMTs must pay for token emissions via Resource Credits the same as users and their operations. Optional actions will be excluded during block generation based on RCs. This would not have previously been possible.
Automated Actions are broken in to two broad types of actions; required and optional. Required actions, as their name implies, are required to be included in blocks and verified trustlessly by each node running hived. Optional actions, likewise, are not required to be included. This is a new use case from what currently happens during per-block processing. What happens during per-block processing takes the state as input and transitions to a new state. This must be deterministic in order to avoid forking. Optional actions allows skipping of certain actions through the omission of their inclusion. One such example of how this is used is token emission for SMTs. SMTs must pay for token emissions via Resource Credits the same as users and their operations. Optional actions will be excluded during block generation based on RCs. This would not have previously been possible.
Required Actions
----------------

View File

@ -1,10 +1,10 @@
This is developer documentation for creating brand-new operations on the STEEM blockchain.
This is developer documentation for creating brand-new operations on the HIVE blockchain.
- (1) Define `smt_elevate_account_operation` structure in `smt_operations.hpp`
- (2) Create `FC_REFLECT` definition for the operation struct.
- (3) Implement `validate()` for the operation struct.
- (4) Add operation to `steem::protocol::operation`
- (4) Add operation to `hive::protocol::operation`
- (5) Define evaluator for the operation.
- (6) Define required authorities for the operation.
- (7) Define unit tests for the operation.
@ -68,7 +68,7 @@ checks must go in the evaluator.
## Step 4
- (4a) The file `operations.hpp` defines the `steem::protocol::operation`
- (4a) The file `operations.hpp` defines the `hive::protocol::operation`
type, which is an `fc::static_variant` with a lengthy parameter list. (The
`fc::static_variant` implements a
[tagged union type](https://en.wikipedia.org/wiki/Tagged_union) which uses
@ -85,7 +85,7 @@ of virtual operations.
## Step 5
- (5a) You must add `STEEM_DEFINE_EVALUATOR` macro in `evaluator.hpp` to
- (5a) You must add `HIVE_DEFINE_EVALUATOR` macro in `evaluator.hpp` to
generate some boilerplate code. The macro is defined `evaluator.hpp`,
most of the generated code is support code required by the framework and
does not affect the operation itself.
@ -152,13 +152,13 @@ to make it available to JSON clients
## Step 9
- (9a) Add `smt_token_object_type` to `enum object_type` in `steem_objects.hpp` and add
- (9a) Add `smt_token_object_type` to `enum object_type` in `hive_objects.hpp` and add
to `FC_REFLECT_ENUM` bubble list at the bottom of that file
- (9b) Declare (but do not define) `class smt_token_object;` in `steem_objects.hpp`
- (9c) Define `typedef oid< smt_token_object > smt_token_id_type` in `steem_objects.hpp`
- (9b) Declare (but do not define) `class smt_token_object;` in `hive_objects.hpp`
- (9c) Define `typedef oid_ref< smt_token_object > smt_token_id_type` in `hive_objects.hpp`
- (9d) Create object header file (one header file per object) in `smt_objects` directory.
Include the new header from `smt_objects.hpp`.
- (9e) All SMT objects are consensus, and therefore should exist in `steem::chain` namespace
- (9e) All SMT objects are consensus, and therefore should exist in `hive::chain` namespace
- (9f) The object class should subclass `object< smt_token_object_type, smt_token_object >`
- (9g) The constructor should be defined the same as other object classes, with
`Constructor` and `Allocator` template parameters.
@ -168,12 +168,17 @@ In practice, this means strings should be `shared_string`, and collections (vect
should be one of the `boost::interprocess` types. See examples in fields
`transaction_object::packed_trx`, `comment_object::permlink`, and
`feed_history_object::price_history`.
- (9i) The first field should be `id_type id;`
- (9i) The first field should be `id_type id;` - if you are using `CHAINBASE_OBJECT` macro it
will be added automatically
- (9j) Most fields should be default initialized, or set to zero, empty or compile-time
default values. Usually, the only field initialization done in the class constructor is
passing the allocator, setting integer fields to zero, and executing the caller-provided
`Constructor` callback. The "real" initialization is performed by that callback, which
will have access to necessary external information (from `database` and the operation).
Such standard constructor can be defined with use of `CHAINBASE_DEFAULT_CONSTRUCTOR` macro, but
you can also define custom constructors - they just need to be extensions of the following:
```template< typename Allocator >
chain_object( allocator< Allocator > a, uint64_t _id ) : id( _id ) {}```
- (9k) All fields must be passed to `FC_REFLECT` in a bubble list
- (9l) `struct` definitions for any index other than the default `by_id` should follow the class;
`by_id` should *never* be defined by an object class.
@ -181,7 +186,7 @@ will have access to necessary external information (from `database` and the oper
and Boost `multi_index_container` docs for more information on the purpose and syntax of this
definition.
- (9n) Macro
`CHAINBASE_SET_INDEX_TYPE( steem::chain::smt_token_object, steem::chain::smt_token_index )`
`CHAINBASE_SET_INDEX_TYPE( hive::chain::smt_token_object, hive::chain::smt_token_index )`
must be invoked. It should be invoked at the global scope (outside any namespace).
- (9o) Call `add_core_index< smt_token_index >(*this);` in `database::initialize_indexes()` to
register the object type with the database.
@ -191,7 +196,7 @@ register the object type with the database.
Step 9 requires some explanation.
- (9a) Each object type has an integer ID signifying that object type. These type ID's are
defined by an `enum` in `steem_object_types.hpp`, any new objects must be added here. In SQL
defined by an `enum` in `hive_object_types.hpp`, any new objects must be added here. In SQL
terms, if we imagine each *database table* has an integer ID, then `smt_token_object_type` is
the ID value that refers to the `smt_token_object` *table*.
@ -200,7 +205,7 @@ variable of ID type, which notes the table the ID refers to. This is implemente
`chainbase::oid` class, which takes the class name as a template parameter. To cut down
on the number of template invocations needed in typical code (and to ease porting of
code first developed with older versions of `chainbase` or its predecessors), a type
alias `typedef oid< smt_token_object > smt_id_type` is added to `steem_object_types.hpp`.
alias `typedef oid_ref< smt_token_object > smt_id_type` is added to `hive_object_types.hpp`.
- (9f) The `smt_token_object` class subclasses
`chainbase::object< smt_token_object_type, smt_token_object >`. This is the
@ -209,7 +214,7 @@ it involves routing of type parameters to allow interaction of templates and pol
complicated, but the `object` class definition in `chainbase.hpp` makes clear that it is used to simply
`typedef oid< smt_token_object > id_type;`
As a consequence of the above, `smt_token_id_type`, `oid< smt_token_object >` and
As a consequence of the above, `smt_token_id_type`, `oid_ref< smt_token_object >` and
`smt_token_object::id_type` all refer to a type representing an integer object ID, with a
compile-time "note" attached that this object ID is a primary key for the `smt_token_object`
database table. The table ID of that database table is an integer value given by
@ -228,9 +233,9 @@ Examples may be seen with fields `transaction_object::packed_trx`, `comment_obje
created will be assigned the next sequentially available object ID.
- (9l) Some functionality in `chainbase` requires the `by_id` field. Since `chainbase`
is designed as a reusable library, not tightly coupled to Steem, it contains no reference
to any `steem` namespaces. So the name `by_id` must refer to `chainbase::by_id`. If you
define a `struct by_id;` in the `steem::chain` namespace, the result will be that every
is designed as a reusable library, not tightly coupled to Hive, it contains no reference
to any `hive` namespaces. So the name `by_id` must refer to `chainbase::by_id`. If you
define a `struct by_id;` in the `hive::chain` namespace, the result will be that every
index defined later in the compilation unit which references `by_id` without qualification
will become an incorrect or ambiguous type reference. The result likely will not
function correctly, and may not compile.
@ -249,7 +254,7 @@ definitions. More information about the syntax is available in the Boost docume
- (9m) The `by_id` index is used by the `chainbase` infrastructure to implement the undo function.
- (9m) All indexes used in Steem must be `ordered_unique`. In theory, hashed or non-unique
- (9m) All indexes used in Hive must be `ordered_unique`. In theory, hashed or non-unique
indexes may be permissible in some situations, and may offer a performance advantage.
However, past experience has shown that the undefined iteration order of these indexes
is a potential source of state corruption bugs (in practice, iteration order of such an

View File

@ -18,18 +18,18 @@ performing (simulated) actions with that account.
Why this isn't unsafe
---------------------
Anyone (even you!) can edit their account to contain 1 million STEEM. It's really
Anyone (even you!) can edit their account to contain 1 million HIVE. It's really
impossible for the developers to physically stop you from editing the memory/disk of your own
computer to make your local node think any account balance is any amount, so you can "give"
yourself 1 million STEEM. Just like it's really impossible for your bank to physically stop
yourself 1 million HIVE. Just like it's really impossible for your bank to physically stop
you from writing any numbers you'd like when balancing your checkbook, so you can "give"
yourself 1 million dollars.
But you have no way to control what other nodes do (or what your bank's clerks and computer systems do).
They do their own bookkeeping and keep track of what your real balance is (without all
the fake STEEM or fake dollars you "gave" yourself). So you can believe whatever you want
the fake HIVE or fake dollars you "gave" yourself). So you can believe whatever you want
about your balance and rewrite the rules of your own bookkeeping system to show you whatever balance you
want to be shown, but as soon as you try to actually spend STEEM (or dollars) that you don't actually have,
want to be shown, but as soon as you try to actually spend HIVE (or dollars) that you don't actually have,
you'll be stopped because every other node on the network is a system you don't control that's keeping the
books properly (without all your edits to give yourself extra funds), and they do their own verification
of every transaction and will suppress any that doesn't have sufficient balance and a proper cryptographic
@ -73,7 +73,7 @@ The API's configured with `public-api` are assigned numbers starting at zero. S
API number 2 (TODO: Explain about resolving names to API's and get it working).
The API provides the following methods
(see `libraries/plugins/debug_node/include/steem/plugins/debug_node/debug_node_api.hpp`
(see `libraries/plugins/debug_node/include/hive/plugins/debug_node/debug_node_api.hpp`
for these definitions):
void debug_push_blocks( std::string src_filename, uint32_t count );
@ -82,7 +82,7 @@ for these definitions):
void debug_stream_json_objects( std::string filename );
void debug_stream_json_objects_flush();
Okay, let's run `steemd`. It should start immediately with no blocks. We can ask it to read blocks from the directory we saved earlier:
Okay, let's run `hived`. It should start immediately with no blocks. We can ask it to read blocks from the directory we saved earlier:
curl --data '{"jsonrpc": "2.0", "method": "call", "params": [2,"debug_push_blocks",["/mydir/myblocks", 1000]], "id": 1}' http://127.0.0.1:8090/rpc
@ -122,7 +122,7 @@ Now that we've reset its key, we can take control of it in the wallet:
unlock abc
import_key 5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3
list_my_accounts
transfer steemit dantheman "1.234 STEEM" "make -j100 money" true
transfer steemit dantheman "1.234 HIVE" "make -j100 money" true
list_my_accounts
get_account_history steemit -1 1000

View File

@ -2,7 +2,7 @@
How plugins work
----------------
All plugins in the `libraries/plugins` directory are iterated over by `CMakeLists.txt`. The manifest directory iterates through all plugins, adding them to the `steem_plugins` build target. Any other build target wanting to access all plugins
All plugins in the `libraries/plugins` directory are iterated over by `CMakeLists.txt`. The manifest directory iterates through all plugins, adding them to the `hive_plugins` build target. Any other build target wanting to access all plugins
available at build time should link to this target.
There is a plugin in `example_plugins` called `example_api_plugin` which is a working example of adding a custom API call.
@ -11,7 +11,7 @@ Registering plugins
-------------------
- Plugins are enabled with the `plugins` config file option.
- By default, steemd runs the `chain`, `p2p`, and `webserver` plugins.
- By default, hived runs the `chain`, `p2p`, and `webserver` plugins.
- Some plugins may keep records in the database (such as `account_history`). If you change whether such a plugin is disabled/enabled, you should also replay the chain. Detecting this situation and automatically replaying when needed will be implemented in a future release.
- To make an API visible, include the associated plugin in the `plugins` config file option. Only APIs explicitly made available through the config will be registered.

View File

@ -1,8 +1,8 @@
# Introduction
In this document we derive the approximate integer square root function used by Steem for the curation curve
[here](https://github.com/steemit/steem/issues/1052).
In this document we derive the approximate integer square root function used by Hive for the curation curve
[here](https://github.com/steemit/hive/issues/1052).
# MSB function

View File

@ -8,8 +8,8 @@ Tests are broken into several categories:
basic_tests // Tests of "basic" functionality
block_tests // Tests of the block chain
live_tests // Tests on live chain data (currently only past hardfork testing)
operation_tests // Unit Tests of Steem operations
operation_time_tests // Tests of Steem operations that include a time based component (ex. vesting withdrawals)
operation_tests // Unit Tests of Hive operations
operation_time_tests // Tests of Hive operations that include a time based component (ex. vesting withdrawals)
serialization_tests // Tests related of serialization
```
@ -18,7 +18,7 @@ serialization_tests // Tests related of serialization
If you have not done so, install lcov `brew install lcov`
```
cmake -D BUILD_STEEM_TESTNET=ON -D ENABLE_COVERAGE_TESTING=true -D CMAKE_BUILD_TYPE=Debug .
cmake -D BUILD_HIVE_TESTNET=ON -D ENABLE_COVERAGE_TESTING=true -D CMAKE_BUILD_TYPE=Debug .
make
lcov --capture --initial --directory . --output-file base.info --no-external
tests/chain_test

View File

@ -7,7 +7,7 @@ With the right equipment and technical configuration a reindex should take **no
Physically attached SSD will ensure an optimal reindex time. SSD over a NAS or some kind of network storage backed by SSD will often have much higher latency. As an example, AWS EBS is not performant enough. A good recommended instance in AWS is the i3.2xlarge, it comes with a physically attached nVME drive (it must be formatted and mounted on instance launch).
You can save a lot of time by replaying from a `block_log`. Using the docker method below, we have made it easy to download a `block_log` at launch and replay from it by passing in the `USE_PUBLIC_BLOCKLOG=1` environment variable. To do this, make sure your data directory is empty and does not contain a block_log. If you are not using docker, you can download a `block_log` from [here](https://gtg.steem.house/get/blockchain), put it in your Hive data directory, and use the `--replay-blockchain` command line option. Be sure to remove the option if you have to stop/restart steemd after already being synced.
You can save a lot of time by replaying from a `block_log`. Using the docker method below, we have made it easy to download a `block_log` at launch and replay from it by passing in the `USE_PUBLIC_BLOCKLOG=1` environment variable. To do this, make sure your data directory is empty and does not contain a block_log. If you are not using docker, you can download a `block_log` from [here](https://gtg.steem.house/get/blockchain), put it in your Hive data directory, and use the `--replay-blockchain` command line option. Be sure to remove the option if you have to stop/restart hived after already being synced.
We recommend using docker to both build and run Hive for exchanges. Docker is the world's leading containerization platform and using it guarantees that your build and run environment is identical to what our developers use. You can still build from source and you can keep both blockchain data and wallet data outside of the docker container. The instructions below will show you how to do this in just a few easy steps.
@ -62,8 +62,8 @@ To extract the binary you need to start a container and then copy the file from
```
docker run -d --name hived-exchange hiveio/hive
docker cp hived-exchange:/usr/local/steemd-default/bin/steemd /local/path/to/steemd
docker cp hived-exchange:/usr/local/steemd-default/bin/cli_wallet /local/path/to/cli_wallet
docker cp hived-exchange:/usr/local/hived-default/bin/hived /local/path/to/hived
docker cp hived-exchange:/usr/local/hived-default/bin/cli_wallet /local/path/to/cli_wallet
docker stop hived-exchange
```
@ -73,7 +73,7 @@ For your convenience, we have provided an [example\_config](example\_config
### Custom configuration files when using a Docker image
If you are using our docker image and have a need for using a custom config file, instead use [config-for-docker.ini](https://github.com/openhive-network/hive/blob/master/contrib/config-for-docker.ini). You can place this outside of your container and map to it by adding this argument to your docker run command: `-v /path/to/config.ini:/etc/steemd/config.ini`. In most cases, a custom configuration file is not necessary.
If you are using our docker image and have a need for using a custom config file, instead use [config-for-docker.ini](https://github.com/openhive-network/hive/blob/master/contrib/config-for-docker.ini). You can place this outside of your container and map to it by adding this argument to your docker run command: `-v /path/to/config.ini:/etc/hived/config.ini`. In most cases, a custom configuration file is not necessary.
### Account history and limitations
@ -93,7 +93,7 @@ mkdir hivewallet
The below command will start a daemonized instance opening ports for p2p and RPC while linking the directories we created for blockchain and wallet data inside the container. Fill in `TRACK_ACCOUNT` with the name of your exchange account that you want to follow. The `-v` flags are how you map directories outside of the container to the inside, you list the path to the directories you created earlier before the `:` for each `-v` flag. The restart policy ensures that the container will automatically restart even if your system is restarted.
```
docker run -d --name hived-exchange --env TRACK_ACCOUNT=nameofaccount --env USE_PUBLIC_BLOCKLOG=1 -p 2001:2001 -p 8090:8090 -v /path/to/hivewallet:/var/hivewallet -v /path/to/blockchain:/var/lib/steemd/blockchain --restart always hiveio/hive
docker run -d --name hived-exchange --env TRACK_ACCOUNT=nameofaccount --env USE_PUBLIC_BLOCKLOG=1 -p 2001:2001 -p 8090:8090 -v /path/to/hivewallet:/var/hivewallet -v /path/to/blockchain:/var/lib/hived/blockchain --restart always hiveio/hive
```
You can see that the container is running with the `docker ps` command.
@ -107,12 +107,12 @@ Initial syncing will take between 6 and 72 hours depending on your equipment, fa
The command below will run the cli_wallet from inside the running container while mapping the `wallet.json` to the directory you created for it on the host.
```
docker exec -it hived-exchange /usr/local/steemd-default/bin/cli_wallet -w /var/hivewallet/wallet.json
docker exec -it hived-exchange /usr/local/hived-default/bin/cli_wallet -w /var/hivewallet/wallet.json
```
### Upgrading for major releases that require a full reindex
For upgrades that require a full replay, we highly recommend *performing the upgrade on a separate server* in order to minimize downtime of your wallet. When the replay is complete, switch to the server running the newer version of Steem. If for some reason it is absolutely not possible to perform the upgrade on a separate server, you would use the following instructions instead:
For upgrades that require a full replay, we highly recommend *performing the upgrade on a separate server* in order to minimize downtime of your wallet. When the replay is complete, switch to the server running the newer version of Hive. If for some reason it is absolutely not possible to perform the upgrade on a separate server, you would use the following instructions instead:
Stop the docker container, remove the existing container, clear out your blockchain data directory completely, pull in the latest docker image (or build the image from scratch), and then start a new container using the same command that you previously launched with.
@ -121,7 +121,7 @@ docker stop hived-exchange
docker rm hived-exchange
rm -rf blockchain/*
docker pull hiveio/hive
docker run -d --name hived-exchange --env TRACK_ACCOUNT=nameofaccount --env USE_PUBLIC_BLOCKLOG=1 -p 2001:2001 -p 8090:8090 -v /path/to/hivewallet:/var/hivewallet -v /path/to/blockchain:/var/lib/steemd/blockchain --restart always hiveio/hive
docker run -d --name hived-exchange --env TRACK_ACCOUNT=nameofaccount --env USE_PUBLIC_BLOCKLOG=1 -p 2001:2001 -p 8090:8090 -v /path/to/hivewallet:/var/hivewallet -v /path/to/blockchain:/var/lib/hived/blockchain --restart always hiveio/hive
```
### Upgrading for releases that do not require a reindex
@ -132,5 +132,5 @@ For upgrades that do not require a full replay, you would use the following inst
docker stop hived-exchange
docker rm hived-exchange
docker pull hiveio/hive
docker run -d --name hived-exchange --env TRACK_ACCOUNT=nameofaccount --env USE_PUBLIC_BLOCKLOG=1 -p 2001:2001 -p 8090:8090 -v /path/to/hivewallet:/var/hivewallet -v /path/to/blockchain:/var/lib/steemd/blockchain --restart always hiveio/hive
docker run -d --name hived-exchange --env TRACK_ACCOUNT=nameofaccount --env USE_PUBLIC_BLOCKLOG=1 -p 2001:2001 -p 8090:8090 -v /path/to/hivewallet:/var/hivewallet -v /path/to/blockchain:/var/lib/hived/blockchain --restart always hiveio/hive
```

View File

@ -1,6 +1,6 @@
# Preface
After MIRAs initial development efforts we released the [Basic MIRA Configuration Guide](https://github.com/openhive-network/hive/blob/master/doc/mira.md) to help bootstrap users attempting to use MIRA enabled `steemd`. There is actually much more fine tuning that can be done to improve MIRA's performance. We will break up this process into three phases:
After MIRA's initial development efforts we released the [Basic MIRA Configuration Guide](https://github.com/openhive-network/hive/blob/master/doc/mira.md) to help bootstrap users attempting to use MIRA-enabled `hived`. There is actually much more fine tuning that can be done to improve MIRA's performance. We will break up this process into three phases:
* Phase 1: Gathering statistics
* Phase 2: Analyzing statistics
@ -11,7 +11,7 @@ After MIRAs initial development efforts we released the [Basic MIRA Configuratio
As you may have noticed, within the `database.cfg` file, there is a global option called `statistics`. By default this is set to `false`. This must be set to `true` before proceeding! Here is an example of a `database.cfg` with statistics enabled:
```
$ cat ~/.steemd/database.cfg
$ cat ~/.hived/database.cfg
{
"global": {
"shared_cache": {
@ -39,11 +39,11 @@ $ cat ~/.steemd/database.cfg
```
Once statistics has been enabled, simply perform the action you'd like to optimize. In my example, I will be syncing up the testnet. Start `steemd` like you otherwise normally would. Please be aware that enabling statistics causes a drastic performance impact - you won't want to run this in production. By default, statistics are dumped every 10 minutes so you will want to run for a while. The more data you gather, the more accurate the performance tuning suggestions will potentially be.
Once statistics has been enabled, simply perform the action you'd like to optimize. In my example, I will be syncing up the testnet. Start `hived` like you otherwise normally would. Please be aware that enabling statistics causes a drastic performance impact - you won't want to run this in production. By default, statistics are dumped every 10 minutes so you will want to run for a while. The more data you gather, the more accurate the performance tuning suggestions will potentially be.
# Phase 2: Analyzing statistics
Luckily, you won't need intimate knowledge of RocksDB in order to analyze the statistics data. The developers working on RocksDB have provided us with a tool that can read the gathered statistics and make performance tuning recommendations. This tool can be found within the `steemd` repository at `programs/util/rocksdb_advisor.sh`. From the `program/util` directory run the tool:
Luckily, you won't need intimate knowledge of RocksDB in order to analyze the statistics data. The developers working on RocksDB have provided us with a tool that can read the gathered statistics and make performance tuning recommendations. This tool can be found within the `hived` repository at `programs/util/rocksdb_advisor.sh`. From the `programs/util` directory run the tool:
```
$ sh rocksdb_advisor.sh
@ -62,7 +62,7 @@ Suggestion: inc-bloom-bits-per-key option : bloom_bits action : increase suggest
scope: entities:
{'ENTITY_PLACEHOLDER'}
scope: col_fam:
{'boost\\:\\:mpl\\:\\:v_item<steem\\:\\:chain\\:\\:by_id, boost\\:\\:mpl\\:\\:vector0<mpl_\\:\\:na>, 0>', 'boost\\:\\:mpl\\:\\:v_item<steem\\:\\:chain\\:\\:by_last_owner_update, boost\\:\\:mpl\\:\\:vector0<mpl_\\:\\:na>, 0>', 'boost\\:\\:mpl\\:\\:v_item<steem\\:\\:chain\\:\\:by_account, boost\\:\\:mpl\\:\\:vector0<mpl_\\:\\:na>, 0>', 'default'}
{'boost\\:\\:mpl\\:\\:v_item<hive\\:\\:chain\\:\\:by_id, boost\\:\\:mpl\\:\\:vector0<mpl_\\:\\:na>, 0>', 'boost\\:\\:mpl\\:\\:v_item<hive\\:\\:chain\\:\\:by_last_owner_update, boost\\:\\:mpl\\:\\:vector0<mpl_\\:\\:na>, 0>', 'boost\\:\\:mpl\\:\\:v_item<hive\\:\\:chain\\:\\:by_account, boost\\:\\:mpl\\:\\:vector0<mpl_\\:\\:na>, 0>', 'default'}
```
In reality you will get significantly more output than above. For the sake of simplicity, we will work with one performance suggestion. We can see here the `rocksdb_advisor.sh` provided a suggestion for the `account_authority_object` database.
@ -73,7 +73,7 @@ Let's move on to applying the advisor's suggestions.
# Phase 3: Applying performance recommendations
If you want to apply the same options to all databases, you would just change the `base` setting as this is applied to every database within a MIRA enabled `steemd` node.
If you want to apply the same options to all databases, you would just change the `base` setting as this is applied to every database within a MIRA enabled `hived` node.
You may notice that you will get different recommendations for different objects. In MIRA's implementation, each object is its own RocksDB database. How do we implement different options for different databases?

View File

@ -100,7 +100,7 @@ The global write buffer is used for performant writes to the database. It lives
## Application memory
When configuring MIRA it is important to consider the normal memory usage of `steemd`. Regardless of the MIRA configuration, `steemd` will tend to use roughly 5.5GiB of memory.
When configuring MIRA it is important to consider the normal memory usage of `hived`. Regardless of the MIRA configuration, `hived` will tend to use roughly 5.5GiB of memory.
---

View File

@ -1,20 +1,20 @@
# Witness Parameters
The role of a witness in the Steem Blockchain is to verify incoming transactions, produce blocks when scheduled, and partake in the Steem governance model by voting on several parameters.
The role of a witness in the Hive Blockchain is to verify incoming transactions, produce blocks when scheduled, and partake in the Hive governance model by voting on several parameters.
These parameters control various aspects of the operation of the blockchain that are not easily defined in code at compile time. One example is the STEEM price feed that defines the conversion rate between STEEM and SBD.
These parameters control various aspects of the operation of the blockchain that are not easily defined in code at compile time. One example is the HIVE price feed that defines the conversion rate between HIVE and HBD.
Witnesses are able to use the `witness_set_properties_operation` to change witness-specific properties and vote on parameters.
Unless otherwise noted, the median of the top 20 elected witnesses is used for all calculations needing the parameter.
This operation was added in Steem v0.20.0 to replace the `witness_update_operation` which was not easily extendable. While it is recommended to use `witness_set_properties_operation`, `witness_update_operation` will continue to work.
This operation was added in Hive v0.20.0 to replace the `witness_update_operation` which was not easily extendable. While it is recommended to use `witness_set_properties_operation`, `witness_update_operation` will continue to work.
## Properties
### account_creation_fee
This is the fee in STEEM that must be paid to create an account. This field must be non-negative.
This is the fee in HIVE that must be paid to create an account. This field must be non-negative.
### account_subsidy_budget
@ -47,13 +47,13 @@ A more detailed explanation of resource dynamics can be found [here](./devs/2018
The maximum size of a single block in bytes. The value must be not less than `65536`. The value must not be more than 2MB (`2097152`).
### sbd_interest_rate
### hbd_interest_rate
The annual interest rate paid to SBD holders. SBD interest is compounded on balance changes, no more than once every 30 days.
The annual interest rate paid to HBD holders. HBD interest is compounded on balance changes, no more than once every 30 days.
### sbd_exchange_rate
### hbd_exchange_rate
The exchange rate for STEEM/SBD to be used for printing SBD as rewards as well as SBD->STEEM conversions.
The exchange rate for HIVE/HBD to be used for printing HBD as rewards as well as HBD->HIVE conversions.
The actual price feed is the median of medians. Every round (21 blocks) the median exchange rate is pushed to a queue and the oldest is removed. The median value of the queue is used for any calculations.
### url

View File

@ -1,10 +1,10 @@
file(GLOB HEADERS "include/steem/plugins/example_api_plugin/*.hpp")
file(GLOB HEADERS "include/hive/plugins/example_api_plugin/*.hpp")
add_library( example_api_plugin
${HEADERS}
example_api_plugin.cpp
)
target_link_libraries( example_api_plugin appbase steem_chain fc )
target_link_libraries( example_api_plugin appbase hive_chain fc )
target_include_directories( example_api_plugin
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )

View File

@ -1,90 +1,90 @@
#pragma once
#include <appbase/application.hpp>
#include <steem/plugins/json_rpc/json_rpc_plugin.hpp>
#include <hive/plugins/json_rpc/json_rpc_plugin.hpp>
#define STEEM_EXAMPLE_API_PLUGIN_NAME "example_api"
#define HIVE_EXAMPLE_API_PLUGIN_NAME "example_api"
namespace steem { namespace example_api_plugin {
namespace hive { namespace example_api_plugin {
using namespace appbase;
using namespace appbase;
// Define API method arg and return types
typedef json_rpc::void_type hello_world_args;
// Define API method arg and return types
typedef json_rpc::void_type hello_world_args;
struct hello_world_return
{
string message;
};
struct hello_world_return
{
string message;
};
struct echo_args
{
string call;
};
struct echo_args
{
string call;
};
// Return type for the `echo` API method; carries the caller's input back.
struct echo_return
{
   string response;
}; // a struct definition must end with a semicolon
// Return type for the `echo` API method; carries the caller's input back.
struct echo_return
{
   string response;
}; // a struct definition must end with a semicolon
// All plugins must inherit from appbase::plugin
class example_api_plugin : public appbase::plugin< example_api_plugin >
{
public:
example_api_plugin();
virtual ~example_api_plugin();
// All plugins must inherit from appbase::plugin
class example_api_plugin : public appbase::plugin< example_api_plugin >
{
public:
example_api_plugin();
virtual ~example_api_plugin();
// This defines what plugins are required to run this plugin.
// These plugins will load before this one and shutdown after.
APPBASE_PLUGIN_REQUIRES( (plugins::json_rpc::json_rpc_plugin) );
// This defines what plugins are required to run this plugin.
// These plugins will load before this one and shutdown after.
APPBASE_PLUGIN_REQUIRES( (plugins::json_rpc::json_rpc_plugin) );
// This static method is a required by the appbase::plugin template
static const std::string& name() { static std::string name = STEEM_EXAMPLE_API_PLUGIN_NAME; return name; }
// This static method is a required by the appbase::plugin template
static const std::string& name() { static std::string name = HIVE_EXAMPLE_API_PLUGIN_NAME; return name; }
// Specify any config options here
virtual void set_program_options( options_description&, options_description& ) override {}
// Specify any config options here
virtual void set_program_options( options_description&, options_description& ) override {}
// These implement startup and shutdown logic for the plugin.
// plugin_initialize and plugin_startup are called such that dependencies go first
// plugin_shutdown goes in reverse order such the dependencies are running when shutting down.
virtual void plugin_initialize( const variables_map& options ) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
// These implement startup and shutdown logic for the plugin.
// plugin_initialize and plugin_startup are called such that dependencies go first
// plugin_shutdown goes in reverse order such the dependencies are running when shutting down.
virtual void plugin_initialize( const variables_map& options ) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
// These are the API methods defined for the plugin
// APIs take struct args and return structs
hello_world_return hello_world( const hello_world_args& args );
echo_return echo( const echo_args& args );
};
// These are the API methods defined for the plugin
// APIs take struct args and return structs
hello_world_return hello_world( const hello_world_args& args );
echo_return echo( const echo_args& args );
};
example_api_plugin::example_api_plugin() {}
example_api_plugin::~example_api_plugin() {}
example_api_plugin::example_api_plugin() {}
example_api_plugin::~example_api_plugin() {}
void example_api_plugin::plugin_initialize( const variables_map& options )
{
// This registers the API with the json rpc plugin
JSON_RPC_REGISTER_API( name(), (hello_world)(echo) );
}
void example_api_plugin::plugin_initialize( const variables_map& options )
{
// This registers the API with the json rpc plugin
JSON_RPC_REGISTER_API( name(), (hello_world)(echo) );
}
void example_api_plugin::plugin_startup() {}
void example_api_plugin::plugin_shutdown() {}
void example_api_plugin::plugin_startup() {}
void example_api_plugin::plugin_shutdown() {}
// Implementation of the `hello_world` API method: ignores its arguments and
// returns a fixed greeting payload.
// NOTE(review): this is a namespace-scope free function, not the
// example_api_plugin member declared above — confirm the member definition
// is not missing its `example_api_plugin::` qualifier.
hello_world_return hello_world( const hello_world_args& args )
{
return hello_world_return{ "Hello World" };
}
hello_world_return hello_world( const hello_world_args& args )
{
return hello_world_return{ "Hello World" };
}
// Implementation of the `echo` API method: copies the request's `call`
// string straight into the response.
// NOTE(review): like hello_world above, this is a free function rather than
// a class-qualified member definition — confirm that is intended.
echo_return echo( const echo_args& args )
{
return echo_return{ args.call };
}
echo_return echo( const echo_args& args )
{
return echo_return{ args.call };
}
} } // steem::example_api_plugin
} } // hive::example_api_plugin
// Args and return types need to be reflected. hello_world_args does not because it is a typedef of a reflected type
FC_REFLECT( steem::example_api_plugin::hello_world_return, (message) )
FC_REFLECT( steem::example_api_plugin::echo_args, (call) )
FC_REFLECT( steem::example_api_plugin::echo_return, (response) )
FC_REFLECT( hive::example_api_plugin::hello_world_return, (message) )
FC_REFLECT( hive::example_api_plugin::echo_args, (call) )
FC_REFLECT( hive::example_api_plugin::echo_return, (response) )

109
format.py Normal file
View File

@ -0,0 +1,109 @@
#!/usr/bin/python3.6
import sys
import os
INDENT = int(sys.argv[1])
PATH = ""
FILENAME = os.path.split( PATH )[1]
TMP_FILENAME = "____TEMPORARY_FILE_____"
INDENTATION_CHARACHTERS = [ '\t', ' ' ]
FILE_INDENTATION : int = None
DO_NOT_TOUCH_FOLDERS = [
"libraries/fc",
"libraries/vendor",
"libraries/net",
"build",
"out",
".git",
".vs"
]
INPUT_LINE : str = None
def has_indent( line ):
    """Return True when `line` is non-empty and starts with an indentation
    character (tab or space, per INDENTATION_CHARACHTERS)."""
    return bool(line) and line[0] in INDENTATION_CHARACHTERS
def detect_indentation():
    """Guess the indentation width used by the file at module-global PATH.

    Heuristic: remember the indent of a line ending in "{", then the first
    deeper-indented line that follows gives the step size (difference of the
    two indents).  Returns -1 when no such pair is found.
    (Indentation of this body was reconstructed; the dump stripped it.)
    """
    first = None
    second = None
    with open( PATH, 'r' ) as INPUT:
        for line in INPUT:
            if has_indent(line) and first is not None:
                # NOTE(review): count_indents() may return None for a line
                # without a trailing newline — the comparison below would
                # raise TypeError; confirm inputs always end with '\n'.
                second = count_indents(line)
                if second > first:
                    return second - first
            if len( line ) > 1:
                first = None
                second = None
                # line[-2] is the last character before the trailing '\n'.
                if line[ len( line ) - 2 ] == "{":
                    first = count_indents(line)
    return -1
def count_indents( line ):
    """Count the leading indentation characters (tabs/spaces) of `line`.

    Returns the number of leading characters drawn from
    INDENTATION_CHARACHTERS.  The original implementation fell off the end
    of the loop and implicitly returned None for an empty string or a line
    consisting entirely of indentation characters; that None would break
    callers doing arithmetic/comparisons on the result.  We now return the
    count in that case as well.
    """
    count = 0
    for char in line:
        if char in INDENTATION_CHARACHTERS:
            count += 1
        else:
            return count
    return count  # empty or all-indentation line: previously returned None
def process( line ):
    """Re-indent one line: strip its current leading whitespace and emit
    INDENT spaces per level, where a level is FILE_INDENTATION characters
    of the original indent.  Partial levels are rounded UP."""
    count = count_indents( line )
    ret = line[ count : ]
    if count % FILE_INDENTATION != 0:
        # Indent not a multiple of the detected step: round up a level.
        count = int( count / FILE_INDENTATION ) + 1
    else:
        count = int( count / FILE_INDENTATION )
    return str( " " * count * INDENT ) + ret
def processable( line : str ):
    """Return False (after logging the match) when `line` points inside one
    of the DO_NOT_TOUCH_FOLDERS; True when the path may be re-indented."""
    for folder in DO_NOT_TOUCH_FOLDERS:
        if folder in line:
            print("{}: {}".format(folder, line))
            return False
    return True
# Driver: find every C/C++ source/header under the current tree, detect each
# file's indentation step, and rewrite it to use INDENT spaces per level.
# (Indentation of this body was reconstructed; the dump stripped it.)
i = 0
# gather input files
os.system( 'find $PWD -type f | grep -E ".+\.[hc]((pp|xx|c|f)?)$" > ____list_of_files' )
with open( "____list_of_files", 'r') as list_of_files:
    for file_to_process in list_of_files:
        fname = file_to_process[:-1] # remove \n
        # fname = fname.replace("./", os.getcwd() + "/")
        if processable( fname ):
            PATH = fname
            FILENAME = os.path.split( PATH )[1]
            # detect
            FILE_INDENTATION = detect_indentation()
            if FILE_INDENTATION == -1:
                print( "cannot determine indentation: {}; skipping".format(PATH) )
                continue
            if FILE_INDENTATION == INDENT:
                print( "nothing to change: {}; skipping".format(PATH) )
                continue
            if FILE_INDENTATION != 3:
                print( "unusual indentation of {}: {}".format(FILE_INDENTATION, PATH) )
            # process: rewrite into a temp file, then move it over the original
            with open( PATH, 'r' ) as INPUT:
                with open( TMP_FILENAME, 'w' ) as OUTPUT:
                    for line in INPUT:
                        OUTPUT.write( process( line ) )
            print("succesfully updated indent: {}".format(PATH))
            # save
            import shutil
            shutil.move( TMP_FILENAME, PATH )
os.remove("____list_of_files")

View File

@ -1,320 +1,446 @@
#include <appbase/application.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem.hpp>
#include <boost/asio/signal_set.hpp>
#include <iostream>
#include <fstream>
namespace appbase {
namespace bpo = boost::program_options;
using bpo::options_description;
using bpo::variables_map;
using std::cout;
class application_impl {
public:
application_impl():_app_options("Application Options"){
}
const variables_map* _options = nullptr;
options_description _app_options;
options_description _cfg_options;
variables_map _args;
bfs::path _data_dir;
};
application::application()
:my(new application_impl()){
io_serv = std::make_shared<boost::asio::io_service>();
}
application::~application() { }
void application::startup() {
for (const auto& plugin : initialized_plugins)
plugin->startup();
}
application& application::instance( bool reset ) {
static application* _app = new application();
if( reset )
{
delete _app;
_app = new application();
}
return *_app;
}
application& app() { return application::instance(); }
application& reset() { return application::instance( true ); }
void application::set_program_options()
{
std::stringstream data_dir_ss;
data_dir_ss << "Directory containing configuration file config.ini. Default location: $HOME/." << app_name << " or CWD/. " << app_name;
std::stringstream plugins_ss;
for( auto& p : default_plugins )
{
plugins_ss << p << ' ';
}
options_description app_cfg_opts( "Application Config Options" );
options_description app_cli_opts( "Application Command Line Options" );
app_cfg_opts.add_options()
("plugin", bpo::value< vector<string> >()->composing()->default_value( default_plugins, plugins_ss.str() ), "Plugin(s) to enable, may be specified multiple times");
app_cli_opts.add_options()
("help,h", "Print this help message and exit.")
("version,v", "Print version information.")
("data-dir,d", bpo::value<bfs::path>(), data_dir_ss.str().c_str() )
("config,c", bpo::value<bfs::path>()->default_value( "config.ini" ), "Configuration file name relative to data-dir");
my->_cfg_options.add(app_cfg_opts);
my->_app_options.add(app_cfg_opts);
my->_app_options.add(app_cli_opts);
for(auto& plug : plugins) {
boost::program_options::options_description plugin_cli_opts("Command Line Options for " + plug.second->get_name());
boost::program_options::options_description plugin_cfg_opts("Config Options for " + plug.second->get_name());
plug.second->set_program_options(plugin_cli_opts, plugin_cfg_opts);
if(plugin_cli_opts.options().size())
my->_app_options.add(plugin_cli_opts);
if(plugin_cfg_opts.options().size())
{
my->_cfg_options.add(plugin_cfg_opts);
for(const boost::shared_ptr<bpo::option_description> od : plugin_cfg_opts.options())
{
// If the config option is not already present as a cli option, add it.
if( plugin_cli_opts.find_nothrow( od->long_name(), false ) == nullptr )
{
my->_app_options.add( od );
}
}
}
}
}
// Parse the command line, resolve the data directory, load (creating if
// necessary) config.ini, and initialize the requested plugins.
// Returns false when the process should exit early (--help / --version or a
// program_options parse error); true on successful initialization.
bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*> autostart_plugins)
{
  try
  {
    set_program_options();
    bpo::store( bpo::parse_command_line( argc, argv, my->_app_options ), my->_args );
    if( my->_args.count( "help" ) ) {
      cout << my->_app_options << "\n";
      return false;
    }
    if( my->_args.count( "version" ) )
    {
      cout << version_info << "\n";
      return false;
    }
    // Resolve the data directory: explicit --data-dir wins; otherwise
    // $HOME/.<app_name> (APPDATA on Windows), falling back to the CWD.
    bfs::path data_dir;
    if( my->_args.count("data-dir") )
    {
      data_dir = my->_args["data-dir"].as<bfs::path>();
      if( data_dir.is_relative() )
        data_dir = bfs::current_path() / data_dir;
    }
    else
    {
#ifdef WIN32
      char* parent = getenv( "APPDATA" );
#else
      char* parent = getenv( "HOME" );
#endif
      if( parent != nullptr )
      {
        data_dir = std::string( parent );
      }
      else
      {
        data_dir = bfs::current_path();
      }
      std::stringstream app_dir;
      app_dir << '.' << app_name;
      data_dir = data_dir / app_dir.str();
#pragma message( "TODO: Remove this check for Steem release 0.20.1+" )
      bfs::path old_dir = bfs::current_path() / "witness_node_data_dir";
      if( bfs::exists( old_dir ) )
      {
        std::cerr << "The default data directory is now '" << data_dir.string() << "' instead of '" << old_dir.string() << "'.\n";
        std::cerr << "Please move your data directory to '" << data_dir.string() << "' or specify '--data-dir=" << old_dir.string() <<
          "' to continue using the current data directory.\n";
        exit(1);
      }
    }
    my->_data_dir = data_dir;
    bfs::path config_file_name = data_dir / "config.ini";
    if( my->_args.count( "config" ) ) {
      // BUG FIX: the original declared `auto config_file_name = ...` here,
      // shadowing the outer variable, so an explicit --config argument was
      // silently ignored and data_dir/config.ini was always used.
      config_file_name = my->_args["config"].as<bfs::path>();
      if( config_file_name.is_relative() )
        config_file_name = data_dir / config_file_name;
    }
    if(!bfs::exists(config_file_name)) {
      write_default_config(config_file_name);
    }
    bpo::store(bpo::parse_config_file< char >( config_file_name.make_preferred().string().c_str(),
      my->_cfg_options, true ), my->_args );
    if(my->_args.count("plugin") > 0)
    {
      // Each --plugin value may contain several names separated by spaces,
      // tabs or commas.
      auto plugins = my->_args.at("plugin").as<std::vector<std::string>>();
      for(auto& arg : plugins)
      {
        vector<string> names;
        boost::split(names, arg, boost::is_any_of(" \t,"));
        for(const std::string& name : names)
          get_plugin(name).initialize(my->_args);
      }
    }
    // Plugins requested programmatically start even without --plugin.
    for (const auto& plugin : autostart_plugins)
      if (plugin != nullptr && plugin->get_state() == abstract_plugin::registered)
        plugin->initialize(my->_args);
    bpo::notify(my->_args);
    return true;
  }
  catch (const boost::program_options::error& e)
  {
    std::cerr << "Error parsing command line: " << e.what() << "\n";
    return false;
  }
}
void application::shutdown() {
for(auto ritr = running_plugins.rbegin();
ritr != running_plugins.rend(); ++ritr) {
(*ritr)->shutdown();
}
for(auto ritr = running_plugins.rbegin();
ritr != running_plugins.rend(); ++ritr) {
plugins.erase((*ritr)->get_name());
}
running_plugins.clear();
initialized_plugins.clear();
plugins.clear();
}
void application::quit() {
io_serv->stop();
}
void application::exec() {
/** To avoid killing process by broken pipe and continue regular app shutdown.
* Useful for usecase: `steemd | tee steemd.log` and pressing Ctrl+C
**/
signal(SIGPIPE, SIG_IGN);
std::shared_ptr<boost::asio::signal_set> sigint_set(new boost::asio::signal_set(*io_serv, SIGINT));
sigint_set->async_wait([sigint_set,this](const boost::system::error_code& err, int num) {
std::cout << "Caught SIGINT\n";
quit();
sigint_set->cancel();
});
std::shared_ptr<boost::asio::signal_set> sigterm_set(new boost::asio::signal_set(*io_serv, SIGTERM));
sigterm_set->async_wait([sigterm_set,this](const boost::system::error_code& err, int num) {
std::cout << "Caught SIGTERM\n";
quit();
sigterm_set->cancel();
});
io_serv->run();
std::cout << "Shutting down...\n";
shutdown(); /// perform synchronous shutdown
}
void application::write_default_config(const bfs::path& cfg_file) {
if(!bfs::exists(cfg_file.parent_path()))
bfs::create_directories(cfg_file.parent_path());
std::ofstream out_cfg( bfs::path(cfg_file).make_preferred().string());
for(const boost::shared_ptr<bpo::option_description> od : my->_cfg_options.options())
{
if(!od->description().empty())
out_cfg << "# " << od->description() << "\n";
boost::any store;
if(!od->semantic()->apply_default(store))
out_cfg << "# " << od->long_name() << " = \n";
else
{
auto example = od->format_parameter();
if( example.empty() )
{
// This is a boolean switch
out_cfg << od->long_name() << " = " << "false\n";
}
else if( example.length() <= 7 )
{
// The string is formatted "arg"
out_cfg << "# " << od->long_name() << " = \n";
}
else
{
// The string is formatted "arg (=<interesting part>)"
example.erase(0, 6);
example.erase(example.length()-1);
out_cfg << od->long_name() << " = " << example << "\n";
}
}
out_cfg << "\n";
}
out_cfg.close();
}
abstract_plugin* application::find_plugin( const string& name )const
{
auto itr = plugins.find( name );
if( itr == plugins.end() )
{
return nullptr;
}
return itr->second.get();
}
abstract_plugin& application::get_plugin(const string& name)const {
auto ptr = find_plugin(name);
if(!ptr)
BOOST_THROW_EXCEPTION(std::runtime_error("unable to find plugin: " + name));
return *ptr;
}
bfs::path application::data_dir()const
{
return my->_data_dir;
}
void application::add_program_options( const options_description& cli, const options_description& cfg )
{
my->_app_options.add( cli );
my->_app_options.add( cfg );
my->_cfg_options.add( cfg );
}
const variables_map& application::get_args() const
{
return my->_args;
}
} /// namespace appbase
#include <appbase/application.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/filesystem.hpp>
#include <boost/asio/signal_set.hpp>
#include <iostream>
#include <fstream>
#include <thread>
namespace appbase {
/// Stream helper that emits a C string wrapped in double quotes, e.g.
/// `os << quoted("abc")` writes `"abc"`.  Embedded quotes are NOT escaped.
class quoted
{
  public:
    explicit quoted( const char* s ) : data( s ) {}

    friend std::ostream& operator<<( std::ostream& stream, const quoted& q )
    {
      stream << '"' << q.data << '"';
      return stream;
    }

  private:
    const char* data; ///< Borrowed pointer; the caller keeps the string alive.
};
namespace bpo = boost::program_options;
using bpo::options_description;
using bpo::variables_map;
using std::cout;
// Construct a signal-aware io_service wrapper.  `_final_action` runs once,
// from close(); `_allow_close_when_signal_is_received` decides whether a
// caught SIGINT/SIGTERM triggers close() immediately.
io_handler::io_handler( bool _allow_close_when_signal_is_received, final_action_type&& _final_action )
: allow_close_when_signal_is_received( _allow_close_when_signal_is_received ), final_action( _final_action )
{
}
// Expose the wrapped io_service so callers can post work onto it.
boost::asio::io_service& io_handler::get_io_service()
{
return io_serv;
}
// Idempotent shutdown: run `final_action`, cancel both signal sets and stop
// the io_service exactly once, even if several threads race into here.
void io_handler::close()
{
// atomic_flag used as a spin lock; contention is rare (a signal handler
// racing the normal shutdown path), so spinning is acceptable here.
while( lock.test_and_set( std::memory_order_acquire ) );
if( !closed )
{
final_action();
close_signal( sigint_set );
close_signal( sigterm_set );
closed = true;
io_serv.stop();
}
lock.clear( std::memory_order_release );
}
// Cancel and release one signal_set.  Pending async_wait handlers complete
// with operation_aborted; a cancel error is only logged, never thrown.
void io_handler::close_signal( p_signal_set& current_signal )
{
if( !current_signal )
return;
boost::system::error_code ec;
current_signal->cancel( ec );
current_signal.reset();
if( ec.value() != 0 )
cout<<"Error during cancelling signal: "<< ec.message() << std::endl;
}
// Common SIGINT/SIGTERM callback: record which signal arrived and, when
// configured to do so, begin the shutdown sequence immediately.
void io_handler::handle_signal( uint32_t _last_signal_code )
{
set_interrupt_request( _last_signal_code );
if( allow_close_when_signal_is_received )
close();
}
// Register asynchronous SIGINT/SIGTERM handlers on this handler's io_service
// and ignore SIGPIPE process-wide.
void io_handler::attach_signals()
{
/** To avoid killing process by broken pipe and continue regular app shutdown.
* Useful for usecase: `hived | tee hived.log` and pressing Ctrl+C
**/
signal(SIGPIPE, SIG_IGN);
// NOTE(review): each async_wait fires once and is not re-armed, so a second
// signal after the first delivery is not handled here — confirm intended.
sigint_set = p_signal_set( new boost::asio::signal_set( io_serv, SIGINT ) );
sigint_set->async_wait([ this ](const boost::system::error_code& err, int num) {
handle_signal( SIGINT );
});
sigterm_set = p_signal_set( new boost::asio::signal_set( io_serv, SIGTERM ) );
sigterm_set->async_wait([ this ](const boost::system::error_code& err, int num) {
handle_signal( SIGTERM );
});
}
// Block the calling thread pumping the io_service until it is stopped.
void io_handler::run()
{
io_serv.run();
}
// Remember the last delivered signal number (nonzero marks an interrupt).
void io_handler::set_interrupt_request( uint32_t _last_signal_code )
{
last_signal_code = _last_signal_code;
}
// True once any signal has been recorded via set_interrupt_request().
bool io_handler::is_interrupt_request() const
{
return last_signal_code != 0;
}
// Pimpl holding the application's option descriptions, parsed arguments and
// resolved data directory; accessed through application::my.
class application_impl {
public:
application_impl():_app_options("Application Options"){
}
const variables_map* _options = nullptr;
options_description _app_options; // all CLI + config options combined
options_description _cfg_options; // options valid in config.ini
variables_map _args;              // parsed result of CLI + config file
bfs::path _data_dir;              // resolved data directory
};
// The main io_handler closes on the first signal and runs shutdown() as its
// final action, tying signal delivery to the plugin shutdown sequence.
application::application()
:my(new application_impl()), main_io_handler( true/*allow_close_when_signal_is_received*/, [ this ](){ shutdown(); } )
{
}
// Out-of-line so the unique_ptr<application_impl> deleter sees the full type.
application::~application() { }
// Start every initialized plugin.  A temporary io_handler + helper thread
// listens for SIGINT/SIGTERM during startup so a long plugin startup can be
// interrupted between plugins rather than killing the process outright.
void application::startup() {
startup_io_handler = io_handler::p_io_handler( new io_handler ( false/*allow_close_when_signal_is_received*/,
[ this ]()
{
// Runs from io_handler::close(): record whether startup was interrupted.
_is_interrupt_request = startup_io_handler->is_interrupt_request();
}
) );
startup_io_handler->attach_signals();
// Dedicated thread pumps the signal-handling io_service during startup.
std::thread startup_thread = std::thread( [&]()
{
startup_io_handler->run();
// NOTE(review): this thread resets the shared_ptr while the main thread
// may still be about to call startup_io_handler->close() below — looks
// like a data race / possible null dereference when a signal stops the
// io_service early.  Confirm; consider resetting only after join().
startup_io_handler.reset();
});
for (const auto& plugin : initialized_plugins)
{
plugin->startup();
// Stop launching further plugins once a signal was observed.
if( is_interrupt_request() )
break;
}
startup_io_handler->close();
startup_thread.join();
}
// Process-wide singleton accessor.  Passing reset=true destroys the current
// instance and builds a fresh one (used by tests via appbase::reset()).
// NOTE(review): not thread-safe with respect to reset; confirm single-threaded use.
application& application::instance( bool reset ) {
static application* _app = new application();
if( reset )
{
delete _app;
_app = new application();
}
return *_app;
}
// Convenience accessor for the singleton application.
application& app() { return application::instance(); }
// Destroy and recreate the singleton application (primarily for tests).
application& reset() { return application::instance( true ); }
// Build the global option descriptions: application-level CLI/config options
// plus every registered plugin's options.  Config-only plugin options are
// also accepted on the command line.
void application::set_program_options()
{
std::stringstream data_dir_ss;
data_dir_ss << "Directory containing configuration file config.ini. Default location: $HOME/." << app_name << " or CWD/. " << app_name;
std::stringstream plugins_ss;
for( auto& p : default_plugins )
{
plugins_ss << p << ' ';
}
options_description app_cfg_opts( "Application Config Options" );
options_description app_cli_opts( "Application Command Line Options" );
app_cfg_opts.add_options()
("plugin", bpo::value< vector<string> >()->composing()->default_value( default_plugins, plugins_ss.str() ), "Plugin(s) to enable, may be specified multiple times");
app_cli_opts.add_options()
("help,h", "Print this help message and exit.")
("version,v", "Print version information.")
("dump-config", "Dump configuration and exit")
("data-dir,d", bpo::value<bfs::path>(), data_dir_ss.str().c_str() )
("config,c", bpo::value<bfs::path>()->default_value( "config.ini" ), "Configuration file name relative to data-dir");
my->_cfg_options.add(app_cfg_opts);
my->_app_options.add(app_cfg_opts);
my->_app_options.add(app_cli_opts);
// Collect each plugin's CLI and config options.
for(auto& plug : plugins) {
boost::program_options::options_description plugin_cli_opts("Command Line Options for " + plug.second->get_name());
boost::program_options::options_description plugin_cfg_opts("Config Options for " + plug.second->get_name());
plug.second->set_program_options(plugin_cli_opts, plugin_cfg_opts);
if(plugin_cli_opts.options().size())
my->_app_options.add(plugin_cli_opts);
if(plugin_cfg_opts.options().size())
{
my->_cfg_options.add(plugin_cfg_opts);
for(const boost::shared_ptr<bpo::option_description> od : plugin_cfg_opts.options())
{
// If the config option is not already present as a cli option, add it.
if( plugin_cli_opts.find_nothrow( od->long_name(), false ) == nullptr )
{
my->_app_options.add( od );
}
}
}
}
}
bool application::initialize_impl(int argc, char** argv, vector<abstract_plugin*> autostart_plugins)
{
try
{
set_program_options();
bpo::store( bpo::parse_command_line( argc, argv, my->_app_options ), my->_args );
if( my->_args.count( "help" ) ) {
cout << my->_app_options << "\n";
return false;
}
if( my->_args.count( "version" ) )
{
cout << version_info << "\n";
return false;
}
bfs::path data_dir;
if( my->_args.count("data-dir") )
{
data_dir = my->_args["data-dir"].as<bfs::path>();
if( data_dir.is_relative() )
data_dir = bfs::current_path() / data_dir;
}
else
{
#ifdef WIN32
char* parent = getenv( "APPDATA" );
#else
char* parent = getenv( "HOME" );
#endif
if( parent != nullptr )
{
data_dir = std::string( parent );
}
else
{
data_dir = bfs::current_path();
}
std::stringstream app_dir;
app_dir << '.' << app_name;
data_dir = data_dir / app_dir.str();
#pragma message( "TODO: Remove this check for Hive release 0.20.1+" )
bfs::path old_dir = bfs::current_path() / "witness_node_data_dir";
if( bfs::exists( old_dir ) )
{
std::cerr << "The default data directory is now '" << data_dir.string() << "' instead of '" << old_dir.string() << "'.\n";
std::cerr << "Please move your data directory to '" << data_dir.string() << "' or specify '--data-dir=" << old_dir.string() <<
"' to continue using the current data directory.\n";
exit(1);
}
}
my->_data_dir = data_dir;
bfs::path config_file_name = data_dir / "config.ini";
if( my->_args.count( "config" ) ) {
config_file_name = my->_args["config"].as<bfs::path>();
if( config_file_name.is_relative() )
config_file_name = data_dir / config_file_name;
}
if(!bfs::exists(config_file_name)) {
write_default_config(config_file_name);
}
bpo::store(bpo::parse_config_file< char >( config_file_name.make_preferred().string().c_str(),
my->_cfg_options, true ), my->_args );
if(my->_args.count("dump-config") > 0)
{
std::cout << "{\n";
std::cout << "\t" << quoted("data-dir") << ": " << quoted(my->_data_dir.string().c_str()) << ",\n";
std::cout << "\t" << quoted("config") << ": " << quoted(config_file_name.string().c_str()) << "\n";
std::cout << "}\n";
return false;
}
if(my->_args.count("plugin") > 0)
{
auto plugins = my->_args.at("plugin").as<std::vector<std::string>>();
for(auto& arg : plugins)
{
vector<string> names;
boost::split(names, arg, boost::is_any_of(" \t,"));
for(const std::string& name : names)
get_plugin(name).initialize(my->_args);
}
}
for (const auto& plugin : autostart_plugins)
if (plugin != nullptr && plugin->get_state() == abstract_plugin::registered)
plugin->initialize(my->_args);
bpo::notify(my->_args);
return true;
}
catch (const boost::program_options::error& e)
{
std::cerr << "Error parsing command line: " << e.what() << "\n";
return false;
}
}
// Shut down running plugins in reverse start order, then drop every plugin
// registration so the application can be torn down cleanly.
void application::shutdown() {
std::cout << "Shutting down...\n";
for(auto ritr = running_plugins.rbegin();
ritr != running_plugins.rend(); ++ritr) {
(*ritr)->shutdown();
}
// Second pass: erase the shared_ptr registrations (destroys the plugins).
for(auto ritr = running_plugins.rbegin();
ritr != running_plugins.rend(); ++ritr) {
plugins.erase((*ritr)->get_name());
}
running_plugins.clear();
initialized_plugins.clear();
plugins.clear();
}
// Main event loop: pump the io_service until SIGINT/SIGTERM stops it; the
// handler's final action then invokes shutdown().  If startup was already
// interrupted, skip the loop and shut down immediately.
void application::exec() {
if( !is_interrupt_request() )
{
main_io_handler.attach_signals();
main_io_handler.run();
}
else
shutdown();
}
// Write a default config.ini at `cfg_file` (creating parent directories),
// emitting each known config option with its description and default value;
// options without a usable default are written commented out.
void application::write_default_config(const bfs::path& cfg_file) {
if(!bfs::exists(cfg_file.parent_path()))
bfs::create_directories(cfg_file.parent_path());
std::ofstream out_cfg( bfs::path(cfg_file).make_preferred().string());
for(const boost::shared_ptr<bpo::option_description> od : my->_cfg_options.options())
{
if(!od->description().empty())
out_cfg << "# " << od->description() << "\n";
boost::any store;
if(!od->semantic()->apply_default(store))
out_cfg << "# " << od->long_name() << " = \n";
else
{
auto example = od->format_parameter();
if( example.empty() )
{
// This is a boolean switch
out_cfg << od->long_name() << " = " << "false\n";
}
else if( example.length() <= 7 )
{
// The string is formatted "arg"
out_cfg << "# " << od->long_name() << " = \n";
}
else
{
// The string is formatted "arg (=<interesting part>)"
example.erase(0, 6);
example.erase(example.length()-1);
out_cfg << od->long_name() << " = " << example << "\n";
}
}
out_cfg << "\n";
}
out_cfg.close();
}
// Look up a registered plugin by name; returns nullptr when not registered.
abstract_plugin* application::find_plugin( const string& name )const
{
auto itr = plugins.find( name );
if( itr == plugins.end() )
{
return nullptr;
}
return itr->second.get();
}
// Like find_plugin(), but throws std::runtime_error when the name is unknown.
abstract_plugin& application::get_plugin(const string& name)const {
auto ptr = find_plugin(name);
if(!ptr)
BOOST_THROW_EXCEPTION(std::runtime_error("unable to find plugin: " + name));
return *ptr;
}
// The data directory resolved during initialize_impl().
bfs::path application::data_dir()const
{
return my->_data_dir;
}
// Register extra options: `cli` is command-line only, `cfg` is accepted on
// both the command line and in config.ini.
void application::add_program_options( const options_description& cli, const options_description& cfg )
{
my->_app_options.add( cli );
my->_app_options.add( cfg );
my->_cfg_options.add( cfg );
}
// Parsed command-line + config-file arguments (valid after initialize).
const variables_map& application::get_args() const
{
return my->_args;
}
/// Names of every plugin that reached the initialized state (sorted by set).
std::set< std::string > application::get_plugins_names() const
{
  std::set< std::string > names;
  for( const auto& initialized : initialized_plugins )
  {
    names.insert( initialized->get_name() );
  }
  return names;
}
} /// namespace appbase

118
libraries/appbase/examples/main.cpp Executable file → Normal file
View File

@ -13,29 +13,29 @@ using std::vector;
class plugin_a : public appbase::plugin<plugin_a>
{
public:
APPBASE_PLUGIN_REQUIRES();
public:
APPBASE_PLUGIN_REQUIRES();
static const std::string& name() { static std::string name = "plugin_a"; return name; }
static const std::string& name() { static std::string name = "plugin_a"; return name; }
virtual void set_program_options( options_description& cli, options_description& cfg ) override
{
cfg.add_options()
("dbsize", bpo::value<uint64_t>()->default_value( 8*1024 ), "Minimum size MB of database shared memory file")
;
cli.add_options()
("replay", "clear plugin_a database and replay all blocks" )
("reset", "clear plugin_a database and block log" )
;
}
virtual void set_program_options( options_description& cli, options_description& cfg ) override
{
cfg.add_options()
("dbsize", bpo::value<uint64_t>()->default_value( 8*1024 ), "Minimum size MB of database shared memory file")
;
cli.add_options()
("replay", "clear plugin_a database and replay all blocks" )
("reset", "clear plugin_a database and block log" )
;
}
virtual void plugin_initialize( const variables_map& options ) override
{
std::cout << "initialize plugin_a plugin\n";
std::cout << "initialize plugin_a plugin\n";
}
virtual void plugin_startup() override
{
std::cout << "starting plugin_a plugin \n";
std::cout << "starting plugin_a plugin \n";
}
virtual void plugin_shutdown() override
@ -43,65 +43,65 @@ class plugin_a : public appbase::plugin<plugin_a>
std::cout << "shutdown plugin_a plugin \n";
}
database& db() { return _db; }
database& db() { return _db; }
private:
database _db;
private:
database _db;
};
class plugin_b : public appbase::plugin<plugin_b>
{
public:
plugin_b(){};
~plugin_b(){};
public:
plugin_b(){};
~plugin_b(){};
APPBASE_PLUGIN_REQUIRES( (plugin_a) );
APPBASE_PLUGIN_REQUIRES( (plugin_a) );
static const std::string& name() { static std::string name = "plugin_b"; return name; }
static const std::string& name() { static std::string name = "plugin_b"; return name; }
virtual void set_program_options( options_description& cli, options_description& cfg ) override
{
cfg.add_options()
("listen-endpoint", bpo::value<string>()->default_value( "127.0.0.1:9876" ), "The local IP address and port to listen for incoming connections.")
("remote-endpoint", bpo::value< vector<string> >()->composing(), "The IP address and port of a remote peer to sync with.")
("public-endpoint", bpo::value<string>()->default_value( "0.0.0.0:9876" ), "The public IP address and port that should be advertized to peers.")
;
}
virtual void set_program_options( options_description& cli, options_description& cfg ) override
{
cfg.add_options()
("listen-endpoint", bpo::value<string>()->default_value( "127.0.0.1:9876" ), "The local IP address and port to listen for incoming connections.")
("remote-endpoint", bpo::value< vector<string> >()->composing(), "The IP address and port of a remote peer to sync with.")
("public-endpoint", bpo::value<string>()->default_value( "0.0.0.0:9876" ), "The public IP address and port that should be advertized to peers.")
;
}
protected:
virtual void plugin_initialize( const variables_map& options ) override
{
protected:
virtual void plugin_initialize( const variables_map& options ) override
{
std::cout << "initialize plugin_b plugin\n";
}
virtual void plugin_startup() override
{
std::cout << "starting plugin_b plugin \n";
}
}
virtual void plugin_startup() override
{
std::cout << "starting plugin_b plugin \n";
}
virtual void plugin_shutdown() override
{
std::cout << "shutdown plugin_b plugin \n";
}
virtual void plugin_shutdown() override
{
std::cout << "shutdown plugin_b plugin \n";
}
};
int main( int argc, char** argv ) {
try {
appbase::app().register_plugin<plugin_b>();
if( !appbase::app().initialize( argc, argv ) )
return -1;
appbase::app().startup();
appbase::app().exec();
} catch ( const boost::exception& e ) {
std::cerr << boost::diagnostic_information(e) << "\n";
} catch ( const std::exception& e ) {
std::cerr << e.what() << "\n";
} catch ( ... ) {
std::cerr << "unknown exception\n";
}
std::cout << "exited cleanly\n";
return 0;
try {
appbase::app().register_plugin<plugin_b>();
if( !appbase::app().initialize( argc, argv ) )
return -1;
appbase::app().startup();
appbase::app().exec();
} catch ( const boost::exception& e ) {
std::cerr << boost::diagnostic_information(e) << "\n";
} catch ( const std::exception& e ) {
std::cerr << e.what() << "\n";
} catch ( ... ) {
std::cerr << "unknown exception\n";
}
std::cout << "exited cleanly\n";
return 0;
}

View File

@ -1,188 +1,250 @@
#pragma once
#include <appbase/plugin.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/core/demangle.hpp>
#include <boost/asio.hpp>
#include <boost/throw_exception.hpp>
#include <iostream>
#define APPBASE_VERSION_STRING ("appbase 1.0")
namespace appbase {
namespace bpo = boost::program_options;
namespace bfs = boost::filesystem;
// Singleton that owns plugin registration, option parsing and the plugin
// lifecycle (initialize -> startup -> shutdown).  Access via appbase::app().
class application
{
public:
~application();
/**
* @brief Looks for the --plugin commandline / config option and calls initialize on those plugins
*
* @tparam Plugin List of plugins to initalize even if not mentioned by configuration. For plugins started by
* configuration settings or dependency resolution, this template has no effect.
* @return true if the application and plugins were initialized, false or exception on error
*/
template< typename... Plugin >
bool initialize( int argc, char** argv )
{
return initialize_impl( argc, argv, { find_plugin( Plugin::name() )... } );
}
void startup();
void shutdown();
/**
* Wait until quit(), SIGINT or SIGTERM and then shutdown
*/
void exec();
void quit();
static application& instance( bool reset = false );
// Register a plugin type (idempotent); also registers its dependencies.
template< typename Plugin >
auto& register_plugin()
{
auto existing = find_plugin( Plugin::name() );
if( existing )
// NOTE(review): if a different type was registered under the same name,
// dynamic_cast yields nullptr and this dereferences it — confirm names
// are unique per type.
return *dynamic_cast< Plugin* >( existing );
auto plug = std::make_shared< Plugin >();
plugins[Plugin::name()] = plug;
plug->register_dependencies();
return *plug;
}
template< typename Plugin >
Plugin* find_plugin()const
{
Plugin* plugin = dynamic_cast< Plugin* >( find_plugin( Plugin::name() ) );
// Do not return plugins that are registered but not at least initialized.
if( plugin != nullptr && plugin->get_state() == abstract_plugin::registered )
{
return nullptr;
}
return plugin;
}
// Throwing variant of find_plugin<Plugin>().
template< typename Plugin >
Plugin& get_plugin()const
{
auto ptr = find_plugin< Plugin >();
if( ptr == nullptr )
BOOST_THROW_EXCEPTION( std::runtime_error( "unable to find plugin: " + Plugin::name() ) );
return *ptr;
}
bfs::path data_dir()const;
void add_program_options( const bpo::options_description& cli, const bpo::options_description& cfg );
const bpo::variables_map& get_args() const;
void set_version_string( const string& version ) { version_info = version; }
void set_app_name( const string& name ) { app_name = name; }
template< typename... Plugin >
void set_default_plugins() { default_plugins = { Plugin::name()... }; }
boost::asio::io_service& get_io_service() { return *io_serv; }
protected:
template< typename Impl >
friend class plugin;
bool initialize_impl( int argc, char** argv, vector< abstract_plugin* > autostart_plugins );
abstract_plugin* find_plugin( const string& name )const;
abstract_plugin& get_plugin( const string& name )const;
/** these notifications get called from the plugin when their state changes so that
* the application can call shutdown in the reverse order.
*/
///@{
void plugin_initialized( abstract_plugin& plug ) { initialized_plugins.push_back( &plug ); }
void plugin_started( abstract_plugin& plug ) { running_plugins.push_back( &plug ); }
///@}
private:
application(); ///< private because application is a singlton that should be accessed via instance()
map< string, std::shared_ptr< abstract_plugin > > plugins; ///< all registered plugins
vector< abstract_plugin* > initialized_plugins; ///< stored in the order they were started running
vector< abstract_plugin* > running_plugins; ///< stored in the order they were started running
std::shared_ptr< boost::asio::io_service > io_serv;
std::string version_info;
std::string app_name = "appbase";
std::vector< std::string > default_plugins;
void set_program_options();
void write_default_config( const bfs::path& cfg_file );
std::unique_ptr< class application_impl > my;
};
application& app();
application& reset();
// CRTP base implementing the plugin lifecycle state machine
// (registered -> initialized -> started -> stopped).  Impl provides
// name(), plugin_initialize/plugin_startup/plugin_shutdown and the
// dependency enumeration via APPBASE_PLUGIN_REQUIRES.
template< typename Impl >
class plugin : public abstract_plugin
{
public:
virtual ~plugin() {}
virtual state get_state() const override { return _state; }
virtual const std::string& get_name()const override final { return Impl::name(); }
virtual void register_dependencies()
{
// Visiting each dependency forces its registration as a side effect.
this->plugin_for_each_dependency( [&]( abstract_plugin& plug ){} );
}
// Initialize dependencies first, then this plugin; no-op when already past
// the registered state, but throws if the state machine ends up wrong.
virtual void initialize(const variables_map& options) override final
{
if( _state == registered )
{
_state = initialized;
this->plugin_for_each_dependency( [&]( abstract_plugin& plug ){ plug.initialize( options ); } );
this->plugin_initialize( options );
// std::cout << "Initializing plugin " << Impl::name() << std::endl;
app().plugin_initialized( *this );
}
if (_state != initialized)
BOOST_THROW_EXCEPTION( std::runtime_error("Initial state was not registered, so final state cannot be initialized.") );
}
// Start dependencies first, then this plugin; same state discipline.
virtual void startup() override final
{
if( _state == initialized )
{
_state = started;
this->plugin_for_each_dependency( [&]( abstract_plugin& plug ){ plug.startup(); } );
this->plugin_startup();
app().plugin_started( *this );
}
if (_state != started )
BOOST_THROW_EXCEPTION( std::runtime_error("Initial state was not initialized, so final state cannot be started.") );
}
// Shutdown only affects started plugins; dependencies are shut down by the
// application, in reverse start order.
virtual void shutdown() override final
{
if( _state == started )
{
_state = stopped;
//ilog( "shutting down plugin ${name}", ("name",name()) );
this->plugin_shutdown();
}
}
protected:
plugin() = default;
private:
state _state = abstract_plugin::registered;
};
}
#pragma once
#include <appbase/plugin.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/core/demangle.hpp>
#include <boost/asio.hpp>
#include <boost/throw_exception.hpp>
#include <iostream>
#include <atomic>
#define APPBASE_VERSION_STRING ("appbase 1.0")
namespace appbase {
namespace bpo = boost::program_options;
namespace bfs = boost::filesystem;
/**
* Bundles a boost::asio::io_service with SIGINT/SIGTERM signal handling and a
* user-supplied final_action callback. Method bodies live in the .cpp — the
* comments below on member usage are inferred from names/declarations and
* should be confirmed against the implementation.
*/
class io_handler
{
public:
using p_io_handler = std::shared_ptr< io_handler >;
using p_signal_set = std::shared_ptr< boost::asio::signal_set >;
using final_action_type = std::function< void() >;
private:
std::atomic_flag lock = ATOMIC_FLAG_INIT; // NOTE(review): presumably guards close()/signal races — confirm in .cpp
bool closed = false; // presumably set once close() has run — confirm in .cpp
bool allow_close_when_signal_is_received = false; // fixed at construction
uint32_t last_signal_code = 0; // last signal number observed (see handle_signal/set_interrupt_request)
final_action_type final_action; // callback supplied at construction
p_signal_set sigint_set;
p_signal_set sigterm_set;
boost::asio::io_service io_serv;
void close_signal( p_signal_set& current_signal );
void handle_signal( uint32_t _last_signal_code );
public:
io_handler( bool _allow_close_when_signal_is_received, final_action_type&& _final_action );
/// Underlying io_service; exposed so callers can post work and run the loop.
boost::asio::io_service& get_io_service();
void close();
/// Installs the SIGINT/SIGTERM handlers (sigint_set/sigterm_set).
void attach_signals();
void run();
void set_interrupt_request( uint32_t _last_signal_code );
bool is_interrupt_request() const;
};
/**
* Singleton application object: owns all registered plugins, parses program
* options, and drives the plugin lifecycle (initialize/startup/shutdown).
* Obtain it via instance() or the free function app().
*/
class application
{
public:
~application();
/**
* @brief Looks for the --plugin commandline / config option and calls initialize on those plugins
*
* @tparam Plugin List of plugins to initialize even if not mentioned by configuration. For plugins started by
* configuration settings or dependency resolution, this template has no effect.
* @return true if the application and plugins were initialized, false or exception on error
*/
template< typename... Plugin >
bool initialize( int argc, char** argv )
{
return initialize_impl( argc, argv, { find_plugin( Plugin::name() )... } );
}
void startup();
void shutdown();
/**
* Wait until quit(), SIGINT or SIGTERM and then shutdown
*/
void exec();
static application& instance( bool reset = false );
/// Registers (and constructs) a plugin of type Plugin, or returns the existing
/// instance registered under the same name.
template< typename Plugin >
auto& register_plugin()
{
auto existing = find_plugin( Plugin::name() );
if( existing )
// NOTE(review): if a different type was registered under this name,
// dynamic_cast yields nullptr and this dereference is UB — confirm callers.
return *dynamic_cast< Plugin* >( existing );
auto plug = std::make_shared< Plugin >();
plugins[Plugin::name()] = plug;
plug->register_dependencies();
return *plug;
}
/// Typed lookup; returns nullptr when the plugin is unknown or merely registered.
template< typename Plugin >
Plugin* find_plugin()const
{
Plugin* plugin = dynamic_cast< Plugin* >( find_plugin( Plugin::name() ) );
// Do not return plugins that are registered but not at least initialized.
if( plugin != nullptr && plugin->get_state() == abstract_plugin::registered )
{
return nullptr;
}
return plugin;
}
/// Typed lookup that throws std::runtime_error when the plugin is unavailable.
template< typename Plugin >
Plugin& get_plugin()const
{
auto ptr = find_plugin< Plugin >();
if( ptr == nullptr )
BOOST_THROW_EXCEPTION( std::runtime_error( "unable to find plugin: " + Plugin::name() ) );
return *ptr;
}
/// Directory where configuration and plugin data live.
bfs::path data_dir()const;
/// Registers additional command-line (cli) and config-file (cfg) options.
void add_program_options( const bpo::options_description& cli, const bpo::options_description& cfg );
/// Access to the parsed program options.
const bpo::variables_map& get_args() const;
void set_version_string( const string& version ) { version_info = version; }
const std::string& get_version_string() const { return version_info; }
void set_app_name( const string& name ) { app_name = name; }
/// Records the plugins enabled by default when none are requested via configuration.
template< typename... Plugin >
void set_default_plugins() { default_plugins = { Plugin::name()... }; }
boost::asio::io_service& get_io_service() { return main_io_handler.get_io_service(); }
/// Simulates a SIGINT during startup (only effective while startup_io_handler exists).
void generate_interrupt_request()
{
if( startup_io_handler )
startup_io_handler->set_interrupt_request( SIGINT );
}
/// True when an interrupt was requested; consults the startup handler while it
/// is alive, otherwise falls back to the cached flag.
bool is_interrupt_request() const
{
return startup_io_handler ? startup_io_handler->is_interrupt_request() : _is_interrupt_request;
}
std::set< std::string > get_plugins_names() const;
protected:
template< typename Impl >
friend class plugin;
bool initialize_impl( int argc, char** argv, vector< abstract_plugin* > autostart_plugins );
/// Looks up a registered plugin by name; nullptr when unknown.
abstract_plugin* find_plugin( const string& name )const;
abstract_plugin& get_plugin( const string& name )const;
/** these notifications get called from the plugin when their state changes so that
* the application can call shutdown in the reverse order.
*/
///@{
void plugin_initialized( abstract_plugin& plug ) { initialized_plugins.push_back( &plug ); }
void plugin_started( abstract_plugin& plug ) { running_plugins.push_back( &plug ); }
///@}
private:
application(); ///< private because application is a singleton that should be accessed via instance()
map< string, std::shared_ptr< abstract_plugin > > plugins; ///< all registered plugins
vector< abstract_plugin* > initialized_plugins; ///< stored in the order they were initialized (see plugin_initialized)
vector< abstract_plugin* > running_plugins; ///< stored in the order they were started running
std::string version_info; ///< set via set_version_string()
std::string app_name = "appbase";
std::vector< std::string > default_plugins; ///< see set_default_plugins()
void set_program_options();
void write_default_config( const bfs::path& cfg_file );
std::unique_ptr< class application_impl > my; ///< pimpl holding implementation details
io_handler main_io_handler; ///< event loop used after startup completes
//This handler is designed only for startup purposes
io_handler::p_io_handler startup_io_handler;
bool _is_interrupt_request = false; ///< cached interrupt state once startup_io_handler is gone
};
application& app();
application& reset();
template< typename Impl >
class plugin : public abstract_plugin
{
public:
virtual ~plugin() {}
virtual state get_state() const override { return _state; }
virtual const std::string& get_name()const override final { return Impl::name(); }
virtual void register_dependencies()
{
this->plugin_for_each_dependency( [&]( abstract_plugin& plug ){} );
}
virtual void initialize(const variables_map& options) override final
{
if( _state == registered )
{
_state = initialized;
this->plugin_for_each_dependency( [&]( abstract_plugin& plug ){ plug.initialize( options ); } );
this->plugin_initialize( options );
// std::cout << "Initializing plugin " << Impl::name() << std::endl;
app().plugin_initialized( *this );
}
if (_state != initialized)
BOOST_THROW_EXCEPTION( std::runtime_error("Initial state was not registered, so final state cannot be initialized.") );
}
virtual void startup() override final
{
if( _state == initialized )
{
_state = started;
this->plugin_for_each_dependency( [&]( abstract_plugin& plug ){ plug.startup(); } );
this->plugin_startup();
app().plugin_started( *this );
}
if (_state != started )
BOOST_THROW_EXCEPTION( std::runtime_error("Initial state was not initialized, so final state cannot be started.") );
}
virtual void shutdown() override final
{
if( _state == started )
{
_state = stopped;
//ilog( "shutting down plugin ${name}", ("name",name()) );
this->plugin_shutdown();
}
}
protected:
plugin() = default;
private:
state _state = abstract_plugin::registered;
};
}

View File

@ -11,63 +11,63 @@
visitor( appbase::app().register_plugin<elem>() );
#define APPBASE_PLUGIN_REQUIRES( PLUGINS ) \
virtual void plugin_for_each_dependency( plugin_processor&& l ) override { \
BOOST_PP_SEQ_FOR_EACH( APPBASE_PLUGIN_REQUIRES_VISIT, l, PLUGINS ) \
}
virtual void plugin_for_each_dependency( plugin_processor&& l ) override { \
BOOST_PP_SEQ_FOR_EACH( APPBASE_PLUGIN_REQUIRES_VISIT, l, PLUGINS ) \
}
namespace appbase {
using boost::program_options::options_description;
using boost::program_options::variables_map;
using std::string;
using std::vector;
using std::map;
using boost::program_options::options_description;
using boost::program_options::variables_map;
using std::string;
using std::vector;
using std::map;
class application;
application& app();
class application;
application& app();
class abstract_plugin {
public:
enum state {
registered, ///< the plugin is constructed but doesn't do anything
initialized, ///< the plugin has initlaized any state required but is idle
started, ///< the plugin is actively running
stopped ///< the plugin is no longer running
};
class abstract_plugin {
public:
enum state {
registered, ///< the plugin is constructed but doesn't do anything
initialized, ///< the plugin has initlaized any state required but is idle
started, ///< the plugin is actively running
stopped ///< the plugin is no longer running
};
virtual ~abstract_plugin(){}
virtual ~abstract_plugin(){}
virtual state get_state()const = 0;
virtual const std::string& get_name()const = 0;
virtual void set_program_options( options_description& cli, options_description& cfg ) = 0;
virtual void initialize(const variables_map& options) = 0;
virtual void startup() = 0;
virtual void shutdown() = 0;
virtual state get_state()const = 0;
virtual const std::string& get_name()const = 0;
virtual void set_program_options( options_description& cli, options_description& cfg ) = 0;
virtual void initialize(const variables_map& options) = 0;
virtual void startup() = 0;
virtual void shutdown() = 0;
protected:
typedef std::function<void(abstract_plugin&)> plugin_processor;
protected:
typedef std::function<void(abstract_plugin&)> plugin_processor;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of initialization/startup process triggerred by main application.
Allows to process all plugins, this one depends on.
*/
virtual void plugin_for_each_dependency(plugin_processor&& processor) = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of initialization/startup process triggerred by main application.
Allows to process all plugins, this one depends on.
*/
virtual void plugin_for_each_dependency(plugin_processor&& processor) = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of initialization process triggerred by main application.
*/
virtual void plugin_initialize( const variables_map& options ) = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of startup process triggerred by main application.
*/
virtual void plugin_startup() = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of shutdown process triggerred by main application.
*/
virtual void plugin_shutdown() = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of initialization process triggerred by main application.
*/
virtual void plugin_initialize( const variables_map& options ) = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of startup process triggerred by main application.
*/
virtual void plugin_startup() = 0;
/** Abstract method to be reimplemented in final plugin implementation.
It is a part of shutdown process triggerred by main application.
*/
virtual void plugin_shutdown() = 0;
};
};
template<typename Impl>
class plugin;
template<typename Impl>
class plugin;
}

View File

@ -1,7 +1,7 @@
file(GLOB HEADERS "include/steem/chain/*.hpp" "include/steem/chain/util/*.hpp" "include/steem/chain/smt_objects/*.hpp" "include/steem/chain/sps_objects/*.hpp")
file(GLOB HEADERS "include/hive/chain/*.hpp" "include/hive/chain/util/*.hpp" "include/hive/chain/smt_objects/*.hpp" "include/hive/chain/sps_objects/*.hpp")
## SORT .cpp by most likely to change / break compile
add_library( steem_chain
add_library( hive_chain
# As database takes the longest to compile, start it first
database.cpp
@ -12,8 +12,8 @@ add_library( steem_chain
smt_objects/smt_market_maker.cpp
smt_objects/nai_pool.cpp
steem_evaluator.cpp
steem_objects.cpp
hive_evaluator.cpp
hive_objects.cpp
required_action_evaluator.cpp
optional_action_evaluator.cpp
@ -38,15 +38,15 @@ add_library( steem_chain
${HEADERS}
)
target_link_libraries( steem_chain steem_jsonball steem_protocol fc chainbase steem_schema appbase mira
target_link_libraries( hive_chain hive_jsonball hive_protocol fc chainbase hive_schema appbase mira
${PATCH_MERGE_LIB} )
target_include_directories( steem_chain
target_include_directories( hive_chain
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}"
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" )
if( CLANG_TIDY_EXE )
set_target_properties(
steem_chain PROPERTIES
hive_chain PROPERTIES
CXX_CLANG_TIDY "${DO_CLANG_TIDY}"
)
endif( CLANG_TIDY_EXE )
@ -56,10 +56,10 @@ if(MSVC)
endif(MSVC)
INSTALL( TARGETS
steem_chain
hive_chain
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)
INSTALL( FILES ${HEADERS} DESTINATION "include/steem/chain" )
INSTALL( FILES ${HEADERS} DESTINATION "include/hive/chain" )

View File

@ -1,7 +1,9 @@
#include <steem/chain/block_log.hpp>
#include <hive/chain/block_log.hpp>
#include <fstream>
#include <fc/io/raw.hpp>
#include <appbase/application.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <boost/interprocess/sync/lock_options.hpp>
@ -9,437 +11,456 @@
#define LOG_READ (std::ios::in | std::ios::binary)
#define LOG_WRITE (std::ios::out | std::ios::binary | std::ios::app)
namespace steem { namespace chain {
namespace hive { namespace chain {
typedef boost::interprocess::scoped_lock< boost::mutex > scoped_lock;
typedef boost::interprocess::scoped_lock< boost::mutex > scoped_lock;
boost::interprocess::defer_lock_type defer_lock;
boost::interprocess::defer_lock_type defer_lock;
namespace detail {
class block_log_impl {
public:
optional< signed_block > head;
block_id_type head_id;
std::fstream block_stream;
std::fstream index_stream;
fc::path block_file;
fc::path index_file;
bool block_write = false;
bool index_write = false;
namespace detail {
class block_log_impl {
public:
optional< signed_block > head;
block_id_type head_id;
std::fstream block_stream;
std::fstream index_stream;
fc::path block_file;
fc::path index_file;
bool block_write = false;
bool index_write = false;
bool use_locking = true;
bool use_locking = true;
boost::mutex mtx;
boost::mutex mtx;
inline void check_block_read()
inline void check_block_read()
{
try
{
if( block_write )
{
try
{
if( block_write )
{
block_stream.close();
block_stream.open( block_file.generic_string().c_str(), LOG_READ );
block_write = false;
}
}
FC_LOG_AND_RETHROW()
block_stream.close();
block_stream.open( block_file.generic_string().c_str(), LOG_READ );
block_write = false;
}
}
FC_LOG_AND_RETHROW()
}
inline void check_block_write()
inline void check_block_write()
{
try
{
if( !block_write )
{
try
{
if( !block_write )
{
block_stream.close();
block_stream.open( block_file.generic_string().c_str(), LOG_WRITE );
block_write = true;
}
}
FC_LOG_AND_RETHROW()
block_stream.close();
block_stream.open( block_file.generic_string().c_str(), LOG_WRITE );
block_write = true;
}
}
FC_LOG_AND_RETHROW()
}
inline void check_index_read()
inline void check_index_read()
{
try
{
if( index_write )
{
try
{
if( index_write )
{
index_stream.close();
index_stream.open( index_file.generic_string().c_str(), LOG_READ );
index_write = false;
}
}
FC_LOG_AND_RETHROW()
index_stream.close();
index_stream.open( index_file.generic_string().c_str(), LOG_READ );
index_write = false;
}
}
FC_LOG_AND_RETHROW()
}
inline void check_index_write()
inline void check_index_write()
{
try
{
if( !index_write )
{
try
{
if( !index_write )
{
index_stream.close();
index_stream.open( index_file.generic_string().c_str(), LOG_WRITE );
index_write = true;
}
}
FC_LOG_AND_RETHROW()
index_stream.close();
index_stream.open( index_file.generic_string().c_str(), LOG_WRITE );
index_write = true;
}
};
}
}
FC_LOG_AND_RETHROW()
}
};
}
block_log::block_log()
:my( new detail::block_log_impl() )
{
my->block_stream.exceptions( std::fstream::failbit | std::fstream::badbit );
my->index_stream.exceptions( std::fstream::failbit | std::fstream::badbit );
}
block_log::block_log()
:my( new detail::block_log_impl() )
{
my->block_stream.exceptions( std::fstream::failbit | std::fstream::badbit );
my->index_stream.exceptions( std::fstream::failbit | std::fstream::badbit );
}
block_log::~block_log()
{
flush();
}
block_log::~block_log()
{
flush();
}
void block_log::open( const fc::path& file )
{
if( my->block_stream.is_open() )
my->block_stream.close();
if( my->index_stream.is_open() )
my->index_stream.close();
void block_log::open( const fc::path& file )
{
if( my->block_stream.is_open() )
my->block_stream.close();
if( my->index_stream.is_open() )
my->index_stream.close();
my->block_file = file;
my->index_file = fc::path( file.generic_string() + ".index" );
my->block_file = file;
my->index_file = fc::path( file.generic_string() + ".index" );
my->block_stream.open( my->block_file.generic_string().c_str(), LOG_WRITE );
my->index_stream.open( my->index_file.generic_string().c_str(), LOG_WRITE );
my->block_write = true;
my->index_write = true;
/* On startup of the block log, there are several states the log file and the index file can be
* in relation to eachother.
*
* Block Log
* Exists Is New
* +------------+------------+
* Exists | Check | Delete |
* Index | Head | Index |
* File +------------+------------+
* Is New | Replay | Do |
* | Log | Nothing |
* +------------+------------+
*
* Checking the heads of the files has several conditions as well.
* - If they are the same, do nothing.
* - If the index file head is not in the log file, delete the index and replay.
* - If the index file head is in the log, but not up to date, replay from index head.
*/
auto log_size = fc::file_size( my->block_file );
auto index_size = fc::file_size( my->index_file );
if( log_size )
{
ilog( "Log is nonempty" );
my->head = read_head();
my->head_id = my->head->id();
if( index_size )
{
my->check_block_read();
my->check_index_read();
ilog( "Index is nonempty" );
uint64_t block_pos;
my->block_stream.seekg( -sizeof( uint64_t), std::ios::end );
my->block_stream.read( (char*)&block_pos, sizeof( block_pos ) );
uint64_t index_pos;
my->index_stream.seekg( -sizeof( uint64_t), std::ios::end );
my->index_stream.read( (char*)&index_pos, sizeof( index_pos ) );
if( block_pos < index_pos )
{
ilog( "block_pos < index_pos, close and reopen index_stream" );
construct_index();
}
else if( block_pos > index_pos )
{
ilog( "Index is incomplete" );
construct_index( true/*resume*/, index_pos );
}
}
else
{
ilog( "Index is empty" );
construct_index();
}
}
else if( index_size )
{
ilog( "Index is nonempty, remove and recreate it" );
my->index_stream.close();
fc::remove_all( my->index_file );
my->index_stream.open( my->index_file.generic_string().c_str(), LOG_WRITE );
my->index_write = true;
}
}
void block_log::rewrite(const fc::path& inputFile, const fc::path& outputFile, uint32_t maxBlockNo)
{
if(my->block_stream.is_open())
my->block_stream.close();
if(my->index_stream.is_open())
my->index_stream.close();
my->index_write = false;
my->block_file = inputFile;
my->block_stream.open(my->block_file.generic_string().c_str(), LOG_READ);
my->block_write = false;
std::fstream outFile;
outFile.exceptions(std::fstream::failbit | std::fstream::badbit);
outFile.open(outputFile.generic_string().c_str(), LOG_WRITE);
uint64_t pos = 0;
uint64_t end_pos = 0;
my->block_stream.seekg(-sizeof(uint64_t), std::ios::end);
my->block_stream.read((char*)&end_pos, sizeof(end_pos));
signed_block tmp;
my->block_stream.seekg(pos);
uint32_t blockNo = 0;
while(pos < end_pos)
{
fc::raw::unpack(my->block_stream, tmp);
my->block_stream.read((char*)&pos, sizeof(pos));
uint64_t outPos = outFile.tellp();
if(outPos != pos)
{
ilog("Block position mismatch");
}
auto data = fc::raw::pack_to_vector(tmp);
outFile.write(data.data(), data.size());
outFile.write((char*)&outPos, sizeof(outPos));
if(++blockNo >= maxBlockNo)
break;
if(blockNo % 1000 == 0)
printf("Rewritten block: %u\r", blockNo);
}
outFile.close();
}
void block_log::close()
{
my.reset( new detail::block_log_impl() );
}
bool block_log::is_open()const
{
return my->block_stream.is_open();
}
/// Appends block `b` at the tail of the block log and records its starting
/// byte offset in the index file. Returns that offset. Thread-safe when
/// use_locking is enabled.
uint64_t block_log::append( const signed_block& b )
{
try
{
// Acquire the mutex only when locking was requested.
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
// Streams toggle between read/write modes; force both into write mode.
my->check_block_write();
my->check_index_write();
uint64_t pos = my->block_stream.tellp();
// The index stores one uint64_t offset per block, so the write position must
// be exactly (block_num - 1) * sizeof(uint64_t); anything else means corruption.
FC_ASSERT( static_cast<uint64_t>(my->index_stream.tellp()) == sizeof( uint64_t ) * ( b.block_num() - 1 ),
"Append to index file occuring at wrong position.",
( "position", (uint64_t) my->index_stream.tellp() )( "expected",( b.block_num() - 1 ) * sizeof( uint64_t ) ) );
auto data = fc::raw::pack_to_vector( b );
// On-disk layout: serialized block followed by its own starting offset
// (the trailing offset is what read_head() seeks back to).
my->block_stream.write( data.data(), data.size() );
my->block_stream.write( (char*)&pos, sizeof( pos ) );
my->index_stream.write( (char*)&pos, sizeof( pos ) );
// Cache the new head block and its id.
my->head = b;
my->head_id = b.id();
return pos;
}
FC_LOG_AND_RETHROW()
}
/// Flushes buffered block and index data to the underlying files.
/// Thread-safe when use_locking is enabled.
void block_log::flush()
{
   scoped_lock lock( my->mtx, defer_lock );
   if( my->use_locking )
   {
      lock.lock(); // fixed: removed stray second semicolon (empty statement)
   }
   my->block_stream.flush();
   my->index_stream.flush();
}
std::pair< signed_block, uint64_t > block_log::read_block( uint64_t pos )const
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
return read_block_helper( pos );
}
std::pair< signed_block, uint64_t > block_log::read_block_helper( uint64_t pos )const
{
try
{
my->check_block_read();
my->block_stream.seekg( pos );
std::pair<signed_block,uint64_t> result;
fc::raw::unpack( my->block_stream, result.first );
result.second = uint64_t(my->block_stream.tellg()) + 8;
return result;
}
FC_LOG_AND_RETHROW()
}
/// Reads the block with the given number, returning it paired with the byte
/// position following its data in the log, or an empty optional when the
/// block number is out of range. Thread-safe when use_locking is enabled.
optional< std::pair< signed_block, uint64_t > > block_log::read_block_by_num( uint32_t block_num )const
{
try
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();
}
optional< std::pair< signed_block, uint64_t > > res;
// npos signals the block number is outside the indexed range.
uint64_t pos = get_block_pos_helper( block_num );
if( pos != npos )
{
res = read_block_helper( pos );
const signed_block& b = res->first;
// Guard against a corrupt or stale index pointing at the wrong block.
FC_ASSERT( b.block_num() == block_num , "Wrong block was read from block log.", ( "returned", b.block_num() )( "expected", block_num ));
}
return res;
}
FC_LOG_AND_RETHROW()
}
uint64_t block_log::get_block_pos( uint32_t block_num ) const
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
return get_block_pos_helper( block_num );
}
uint64_t block_log::get_block_pos_helper( uint32_t block_num ) const
{
try
{
my->check_index_read();
if( !( my->head.valid() && block_num <= protocol::block_header::num_from_id( my->head_id ) && block_num > 0 ) )
return npos;
my->index_stream.seekg( sizeof( uint64_t ) * ( block_num - 1 ) );
uint64_t pos;
my->index_stream.read( (char*)&pos, sizeof( pos ) );
return pos;
}
FC_LOG_AND_RETHROW()
}
signed_block block_log::read_head()const
{
try
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
my->check_block_read();
uint64_t pos;
my->block_stream.seekg( -sizeof(pos), std::ios::end );
my->block_stream.read( (char*)&pos, sizeof(pos) );
return read_block_helper( pos ).first;
}
FC_LOG_AND_RETHROW()
}
const optional< signed_block >& block_log::head()const
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
return my->head;
}
void block_log::construct_index( bool resume, uint64_t index_pos )
{
try
{
ilog( "Reconstructing Block Log Index..." );
my->index_stream.close();
if( !resume )
fc::remove_all( my->index_file );
my->block_stream.open( my->block_file.generic_string().c_str(), LOG_WRITE );
my->index_stream.open( my->index_file.generic_string().c_str(), LOG_WRITE );
my->block_write = true;
my->index_write = true;
/* On startup of the block log, there are several states the log file and the index file can be
* in relation to eachother.
*
* Block Log
* Exists Is New
* +------------+------------+
* Exists | Check | Delete |
* Index | Head | Index |
* File +------------+------------+
* Is New | Replay | Do |
* | Log | Nothing |
* +------------+------------+
*
* Checking the heads of the files has several conditions as well.
* - If they are the same, do nothing.
* - If the index file head is not in the log file, delete the index and replay.
* - If the index file head is in the log, but not up to date, replay from index head.
*/
auto log_size = fc::file_size( my->block_file );
auto index_size = fc::file_size( my->index_file );
uint64_t pos = resume ? index_pos : 0;
uint64_t end_pos;
my->check_block_read();
if( log_size )
my->block_stream.seekg( -sizeof( uint64_t), std::ios::end );
my->block_stream.read( (char*)&end_pos, sizeof( end_pos ) );
signed_block tmp;
my->block_stream.seekg( pos );
if( resume )
{
ilog( "Log is nonempty" );
my->head = read_head();
my->head_id = my->head->id();
my->index_stream.seekg( 0, std::ios::end );
if( index_size )
{
my->check_block_read();
my->check_index_read();
fc::raw::unpack( my->block_stream, tmp );
my->block_stream.read( (char*)&pos, sizeof( pos ) );
ilog( "Index is nonempty" );
uint64_t block_pos;
my->block_stream.seekg( -sizeof( uint64_t), std::ios::end );
my->block_stream.read( (char*)&block_pos, sizeof( block_pos ) );
uint64_t index_pos;
my->index_stream.seekg( -sizeof( uint64_t), std::ios::end );
my->index_stream.read( (char*)&index_pos, sizeof( index_pos ) );
if( block_pos < index_pos )
{
ilog( "block_pos < index_pos, close and reopen index_stream" );
construct_index();
}
else if( block_pos > index_pos )
{
ilog( "Index is incomplete" );
construct_index();
}
}
else
{
ilog( "Index is empty" );
construct_index();
}
ilog("Resuming Block Log Index. Last applied: ( block number: ${n} )( trx: ${trx} )( bytes position: ${pos} )",
( "n", tmp.block_num() )( "trx", tmp.id() )( "pos", pos ) );
}
else if( index_size )
while( !appbase::app().is_interrupt_request() && pos < end_pos )
{
ilog( "Index is nonempty, remove and recreate it" );
my->index_stream.close();
fc::remove_all( my->index_file );
my->index_stream.open( my->index_file.generic_string().c_str(), LOG_WRITE );
my->index_write = true;
fc::raw::unpack( my->block_stream, tmp );
my->block_stream.read( (char*)&pos, sizeof( pos ) );
my->index_stream.write( (char*)&pos, sizeof( pos ) );
}
}
void block_log::rewrite(const fc::path& inputFile, const fc::path& outputFile, uint32_t maxBlockNo)
{
if(my->block_stream.is_open())
my->block_stream.close();
if(my->index_stream.is_open())
my->index_stream.close();
my->index_write = false;
my->block_file = inputFile;
if( appbase::app().is_interrupt_request() )
ilog("Creating Block Log Index is interrupted on user request. Last applied: ( block number: ${n} )( trx: ${trx} )( bytes position: ${pos} )",
( "n", tmp.block_num() )( "trx", tmp.id() )( "pos", pos ) );
my->block_stream.open(my->block_file.generic_string().c_str(), LOG_READ);
my->block_write = false;
std::fstream outFile;
outFile.exceptions(std::fstream::failbit | std::fstream::badbit);
outFile.open(outputFile.generic_string().c_str(), LOG_WRITE);
uint64_t pos = 0;
uint64_t end_pos = 0;
my->block_stream.seekg(-sizeof(uint64_t), std::ios::end);
my->block_stream.read((char*)&end_pos, sizeof(end_pos));
signed_block tmp;
my->block_stream.seekg(pos);
uint32_t blockNo = 0;
while(pos < end_pos)
{
fc::raw::unpack(my->block_stream, tmp);
my->block_stream.read((char*)&pos, sizeof(pos));
uint64_t outPos = outFile.tellp();
if(outPos != pos)
{
ilog("Block position mismatch");
}
auto data = fc::raw::pack_to_vector(tmp);
outFile.write(data.data(), data.size());
outFile.write((char*)&outPos, sizeof(outPos));
if(++blockNo >= maxBlockNo)
break;
if(blockNo % 1000 == 0)
printf("Rewritten block: %u\r", blockNo);
}
outFile.close();
}
void block_log::close()
{
my.reset( new detail::block_log_impl() );
}
bool block_log::is_open()const
{
return my->block_stream.is_open();
}
uint64_t block_log::append( const signed_block& b )
{
try
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
my->check_block_write();
my->check_index_write();
uint64_t pos = my->block_stream.tellp();
FC_ASSERT( static_cast<uint64_t>(my->index_stream.tellp()) == sizeof( uint64_t ) * ( b.block_num() - 1 ),
"Append to index file occuring at wrong position.",
( "position", (uint64_t) my->index_stream.tellp() )( "expected",( b.block_num() - 1 ) * sizeof( uint64_t ) ) );
auto data = fc::raw::pack_to_vector( b );
my->block_stream.write( data.data(), data.size() );
my->block_stream.write( (char*)&pos, sizeof( pos ) );
my->index_stream.write( (char*)&pos, sizeof( pos ) );
my->head = b;
my->head_id = b.id();
return pos;
}
FC_LOG_AND_RETHROW()
}
void block_log::flush()
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
my->block_stream.flush();
/// Flush and reopen to be sure that given index file has been saved.
/// Otherwise just executed replay, next stopped by ctrl+C can again corrupt this file.
my->index_stream.flush();
}
my->index_stream.close();
my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE);
}
FC_LOG_AND_RETHROW()
}
std::pair< signed_block, uint64_t > block_log::read_block( uint64_t pos )const
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
return read_block_helper( pos );
}
std::pair< signed_block, uint64_t > block_log::read_block_helper( uint64_t pos )const
{
try
{
my->check_block_read();
my->block_stream.seekg( pos );
std::pair<signed_block,uint64_t> result;
fc::raw::unpack( my->block_stream, result.first );
result.second = uint64_t(my->block_stream.tellg()) + 8;
return result;
}
FC_LOG_AND_RETHROW()
}
optional< signed_block > block_log::read_block_by_num( uint32_t block_num )const
{
try
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
optional< signed_block > b;
uint64_t pos = get_block_pos_helper( block_num );
if( pos != npos )
{
b = read_block_helper( pos ).first;
FC_ASSERT( b->block_num() == block_num , "Wrong block was read from block log.", ( "returned", b->block_num() )( "expected", block_num ));
}
return b;
}
FC_LOG_AND_RETHROW()
}
uint64_t block_log::get_block_pos( uint32_t block_num ) const
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
return get_block_pos_helper( block_num );
}
uint64_t block_log::get_block_pos_helper( uint32_t block_num ) const
{
try
{
my->check_index_read();
if( !( my->head.valid() && block_num <= protocol::block_header::num_from_id( my->head_id ) && block_num > 0 ) )
return npos;
my->index_stream.seekg( sizeof( uint64_t ) * ( block_num - 1 ) );
uint64_t pos;
my->index_stream.read( (char*)&pos, sizeof( pos ) );
return pos;
}
FC_LOG_AND_RETHROW()
}
signed_block block_log::read_head()const
{
try
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
my->check_block_read();
uint64_t pos;
my->block_stream.seekg( -sizeof(pos), std::ios::end );
my->block_stream.read( (char*)&pos, sizeof(pos) );
return read_block_helper( pos ).first;
}
FC_LOG_AND_RETHROW()
}
const optional< signed_block >& block_log::head()const
{
scoped_lock lock( my->mtx, defer_lock );
if( my->use_locking )
{
lock.lock();;
}
return my->head;
}
// Rebuilds the index file from scratch by walking the whole block log.
// Each log entry is <serialized block><8-byte start offset>; reading the
// trailer of entry N yields both the next read position and the value to
// append to the index, so one read drives both loops below.
void block_log::construct_index()
{
try
{
ilog( "Reconstructing Block Log Index..." );
// Drop any existing (possibly corrupt) index file and start fresh.
my->index_stream.close();
fc::remove_all( my->index_file );
my->index_stream.open( my->index_file.generic_string().c_str(), LOG_WRITE );
my->index_write = true;
uint64_t pos = 0;
uint64_t end_pos;
my->check_block_read();
// The final 8 bytes of the log hold the head block's offset, which bounds the scan.
my->block_stream.seekg( -sizeof( uint64_t), std::ios::end );
my->block_stream.read( (char*)&end_pos, sizeof( end_pos ) );
signed_block tmp;
my->block_stream.seekg( pos );
while( pos < end_pos )
{
// Skip over the serialized block, then copy its position trailer into the index.
fc::raw::unpack( my->block_stream, tmp );
my->block_stream.read( (char*)&pos, sizeof( pos ) );
my->index_stream.write( (char*)&pos, sizeof( pos ) );
}
/// Flush and reopen to be sure that given index file has been saved.
/// Otherwise just executed replay, next stopped by ctrl+C can again corrupt this file.
my->index_stream.flush();
my->index_stream.close();
my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE);
}
FC_LOG_AND_RETHROW()
}
/// Enables or disables mutex protection of the block log accessors.
/// BUG FIX: the previous body assigned `true` unconditionally, ignoring the
/// parameter, which made it impossible to ever turn locking off.
void block_log::set_locking( bool use_locking )
{
  my->use_locking = use_locking;
}
} } // steem::chain
/// Enables or disables mutex protection of the block log accessors.
/// BUG FIX: the previous body assigned `true` unconditionally, ignoring the
/// parameter, which made it impossible to ever turn locking off.
void block_log::set_locking( bool use_locking )
{
  my->use_locking = use_locking;
}
} } // hive::chain

File diff suppressed because it is too large Load Diff

View File

@ -1,81 +1,81 @@
#include <steem/chain/fork_database.hpp>
#include <hive/chain/fork_database.hpp>
#include <steem/chain/database_exceptions.hpp>
#include <hive/chain/database_exceptions.hpp>
namespace steem { namespace chain {
namespace hive { namespace chain {
fork_database::fork_database()
{
}
void fork_database::reset()
{
_head.reset();
_index.clear();
_head.reset();
_index.clear();
}
void fork_database::pop_block()
{
FC_ASSERT( _head, "cannot pop an empty fork database" );
auto prev = _head->prev.lock();
FC_ASSERT( prev, "popping head block would leave fork DB empty" );
_head = prev;
FC_ASSERT( _head, "cannot pop an empty fork database" );
auto prev = _head->prev.lock();
FC_ASSERT( prev, "popping head block would leave fork DB empty" );
_head = prev;
}
void fork_database::start_block(signed_block b)
{
auto item = std::make_shared<fork_item>(std::move(b));
_index.insert(item);
_head = item;
auto item = std::make_shared<fork_item>(std::move(b));
_index.insert(item);
_head = item;
}
/**
* Pushes the block into the fork database and caches it if it doesn't link
*
*/
* Pushes the block into the fork database and caches it if it doesn't link
*
*/
shared_ptr<fork_item> fork_database::push_block(const signed_block& b)
{
auto item = std::make_shared<fork_item>(b);
try {
_push_block(item);
}
catch ( const unlinkable_block_exception& e )
{
wlog( "Pushing block to fork database that failed to link: ${id}, ${num}", ("id",b.id())("num",b.block_num()) );
wlog( "Head: ${num}, ${id}", ("num",_head->data.block_num())("id",_head->data.id()) );
throw;
_unlinked_index.insert( item );
}
return _head;
auto item = std::make_shared<fork_item>(b);
try {
_push_block(item);
}
catch ( const unlinkable_block_exception& e )
{
wlog( "Pushing block to fork database that failed to link: ${id}, ${num}", ("id",b.id())("num",b.block_num()) );
wlog( "Head: ${num}, ${id}", ("num",_head->data.block_num())("id",_head->data.id()) );
throw;
_unlinked_index.insert( item );
}
return _head;
}
void fork_database::_push_block(const item_ptr& item)
{
if( _head ) // make sure the block is within the range that we are caching
{
FC_ASSERT( item->num > std::max<int64_t>( 0, int64_t(_head->num) - (_max_size) ),
"attempting to push a block that is too old",
("item->num",item->num)("head",_head->num)("max_size",_max_size));
}
if( _head ) // make sure the block is within the range that we are caching
{
FC_ASSERT( item->num > std::max<int64_t>( 0, int64_t(_head->num) - (_max_size) ),
"attempting to push a block that is too old",
("item->num",item->num)("head",_head->num)("max_size",_max_size));
}
if( _head && item->previous_id() != block_id_type() )
{
auto& index = _index.get<block_id>();
auto itr = index.find(item->previous_id());
STEEM_ASSERT(itr != index.end(), unlinkable_block_exception, "block does not link to known chain");
FC_ASSERT(!(*itr)->invalid);
item->prev = *itr;
}
if( _head && item->previous_id() != block_id_type() )
{
auto& index = _index.get<block_id>();
auto itr = index.find(item->previous_id());
HIVE_ASSERT(itr != index.end(), unlinkable_block_exception, "block does not link to known chain");
FC_ASSERT(!(*itr)->invalid);
item->prev = *itr;
}
_index.insert(item);
if( !_head || item->num > _head->num ) _head = item;
_index.insert(item);
if( !_head || item->num > _head->num ) _head = item;
}
/**
* Iterate through the unlinked cache and insert anything that
* links to the newly inserted item. This will start a recursive
* set of calls performing a depth-first insertion of pending blocks as
* _push_next(..) calls _push_block(...) which will in turn call _push_next
*/
* Iterate through the unlinked cache and insert anything that
* links to the newly inserted item. This will start a recursive
* set of calls performing a depth-first insertion of pending blocks as
* _push_next(..) calls _push_block(...) which will in turn call _push_next
*/
void fork_database::_push_next( const item_ptr& new_item )
{
auto& prev_idx = _unlinked_index.get<by_previous>();
@ -83,162 +83,162 @@ void fork_database::_push_next( const item_ptr& new_item )
auto itr = prev_idx.find( new_item->id );
while( itr != prev_idx.end() )
{
auto tmp = *itr;
prev_idx.erase( itr );
_push_block( tmp );
auto tmp = *itr;
prev_idx.erase( itr );
_push_block( tmp );
itr = prev_idx.find( new_item->id );
itr = prev_idx.find( new_item->id );
}
}
void fork_database::set_max_size( uint32_t s )
{
_max_size = s;
if( !_head ) return;
_max_size = s;
if( !_head ) return;
{ /// index
auto& by_num_idx = _index.get<block_num>();
auto itr = by_num_idx.begin();
while( itr != by_num_idx.end() )
{
if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) )
by_num_idx.erase(itr);
else
break;
itr = by_num_idx.begin();
}
}
{ /// unlinked_index
auto& by_num_idx = _unlinked_index.get<block_num>();
auto itr = by_num_idx.begin();
while( itr != by_num_idx.end() )
{
if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) )
by_num_idx.erase(itr);
else
break;
itr = by_num_idx.begin();
}
}
{ /// index
auto& by_num_idx = _index.get<block_num>();
auto itr = by_num_idx.begin();
while( itr != by_num_idx.end() )
{
if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) )
by_num_idx.erase(itr);
else
break;
itr = by_num_idx.begin();
}
}
{ /// unlinked_index
auto& by_num_idx = _unlinked_index.get<block_num>();
auto itr = by_num_idx.begin();
while( itr != by_num_idx.end() )
{
if( (*itr)->num < std::max(int64_t(0),int64_t(_head->num) - _max_size) )
by_num_idx.erase(itr);
else
break;
itr = by_num_idx.begin();
}
}
}
bool fork_database::is_known_block(const block_id_type& id)const
{
auto& index = _index.get<block_id>();
auto itr = index.find(id);
if( itr != index.end() )
return true;
auto& unlinked_index = _unlinked_index.get<block_id>();
auto unlinked_itr = unlinked_index.find(id);
return unlinked_itr != unlinked_index.end();
auto& index = _index.get<block_id>();
auto itr = index.find(id);
if( itr != index.end() )
return true;
auto& unlinked_index = _unlinked_index.get<block_id>();
auto unlinked_itr = unlinked_index.find(id);
return unlinked_itr != unlinked_index.end();
}
item_ptr fork_database::fetch_block(const block_id_type& id)const
{
auto& index = _index.get<block_id>();
auto itr = index.find(id);
if( itr != index.end() )
return *itr;
auto& unlinked_index = _unlinked_index.get<block_id>();
auto unlinked_itr = unlinked_index.find(id);
if( unlinked_itr != unlinked_index.end() )
return *unlinked_itr;
return item_ptr();
auto& index = _index.get<block_id>();
auto itr = index.find(id);
if( itr != index.end() )
return *itr;
auto& unlinked_index = _unlinked_index.get<block_id>();
auto unlinked_itr = unlinked_index.find(id);
if( unlinked_itr != unlinked_index.end() )
return *unlinked_itr;
return item_ptr();
}
vector<item_ptr> fork_database::fetch_block_by_number(uint32_t num)const
{
try
{
vector<item_ptr> result;
auto const& block_num_idx = _index.get<block_num>();
auto itr = block_num_idx.lower_bound(num);
while( itr != block_num_idx.end() && itr->get()->num == num )
{
if( (*itr)->num == num )
result.push_back( *itr );
else
break;
++itr;
}
return result;
}
FC_LOG_AND_RETHROW()
try
{
vector<item_ptr> result;
auto const& block_num_idx = _index.get<block_num>();
auto itr = block_num_idx.lower_bound(num);
while( itr != block_num_idx.end() && itr->get()->num == num )
{
if( (*itr)->num == num )
result.push_back( *itr );
else
break;
++itr;
}
return result;
}
FC_LOG_AND_RETHROW()
}
pair<fork_database::branch_type,fork_database::branch_type>
fork_database::fetch_branch_from(block_id_type first, block_id_type second)const
{ try {
// This function gets a branch (i.e. vector<fork_item>) leading
// back to the most recent common ancestor.
pair<branch_type,branch_type> result;
auto first_branch_itr = _index.get<block_id>().find(first);
FC_ASSERT(first_branch_itr != _index.get<block_id>().end());
auto first_branch = *first_branch_itr;
// This function gets a branch (i.e. vector<fork_item>) leading
// back to the most recent common ancestor.
pair<branch_type,branch_type> result;
auto first_branch_itr = _index.get<block_id>().find(first);
FC_ASSERT(first_branch_itr != _index.get<block_id>().end());
auto first_branch = *first_branch_itr;
auto second_branch_itr = _index.get<block_id>().find(second);
FC_ASSERT(second_branch_itr != _index.get<block_id>().end());
auto second_branch = *second_branch_itr;
auto second_branch_itr = _index.get<block_id>().find(second);
FC_ASSERT(second_branch_itr != _index.get<block_id>().end());
auto second_branch = *second_branch_itr;
while( first_branch->data.block_num() > second_branch->data.block_num() )
{
result.first.push_back(first_branch);
first_branch = first_branch->prev.lock();
FC_ASSERT(first_branch);
}
while( second_branch->data.block_num() > first_branch->data.block_num() )
{
result.second.push_back( second_branch );
second_branch = second_branch->prev.lock();
FC_ASSERT(second_branch);
}
while( first_branch->data.previous != second_branch->data.previous )
{
result.first.push_back(first_branch);
result.second.push_back(second_branch);
first_branch = first_branch->prev.lock();
FC_ASSERT(first_branch);
second_branch = second_branch->prev.lock();
FC_ASSERT(second_branch);
}
if( first_branch && second_branch )
{
result.first.push_back(first_branch);
result.second.push_back(second_branch);
}
return result;
while( first_branch->data.block_num() > second_branch->data.block_num() )
{
result.first.push_back(first_branch);
first_branch = first_branch->prev.lock();
FC_ASSERT(first_branch);
}
while( second_branch->data.block_num() > first_branch->data.block_num() )
{
result.second.push_back( second_branch );
second_branch = second_branch->prev.lock();
FC_ASSERT(second_branch);
}
while( first_branch->data.previous != second_branch->data.previous )
{
result.first.push_back(first_branch);
result.second.push_back(second_branch);
first_branch = first_branch->prev.lock();
FC_ASSERT(first_branch);
second_branch = second_branch->prev.lock();
FC_ASSERT(second_branch);
}
if( first_branch && second_branch )
{
result.first.push_back(first_branch);
result.second.push_back(second_branch);
}
return result;
} FC_CAPTURE_AND_RETHROW( (first)(second) ) }
shared_ptr<fork_item> fork_database::walk_main_branch_to_num( uint32_t block_num )const
{
shared_ptr<fork_item> next = head();
if( block_num > next->num )
return shared_ptr<fork_item>();
shared_ptr<fork_item> next = head();
if( block_num > next->num )
return shared_ptr<fork_item>();
while( next.get() != nullptr && next->num > block_num )
next = next->prev.lock();
return next;
while( next.get() != nullptr && next->num > block_num )
next = next->prev.lock();
return next;
}
shared_ptr<fork_item> fork_database::fetch_block_on_main_branch_by_number( uint32_t block_num )const
{
vector<item_ptr> blocks = fetch_block_by_number(block_num);
if( blocks.size() == 1 )
return blocks[0];
if( blocks.size() == 0 )
return shared_ptr<fork_item>();
return walk_main_branch_to_num(block_num);
vector<item_ptr> blocks = fetch_block_by_number(block_num);
if( blocks.size() == 1 )
return blocks[0];
if( blocks.size() == 0 )
return shared_ptr<fork_item>();
return walk_main_branch_to_num(block_num);
}
void fork_database::set_head(shared_ptr<fork_item> h)
{
_head = h;
_head = h;
}
void fork_database::remove(block_id_type id)
{
_index.get<block_id>().erase(id);
_index.get<block_id>().erase(id);
}
} } // steem::chain
} } // hive::chain

View File

@ -1,12 +1,12 @@
#include <steem/chain/generic_custom_operation_interpreter.hpp>
#include <hive/chain/generic_custom_operation_interpreter.hpp>
namespace steem { namespace chain {
namespace hive { namespace chain {
std::string legacy_custom_name_from_type( const std::string& type_name )
{
auto start = type_name.find_last_of( ':' ) + 1;
auto end = type_name.find_last_of( '_' );
return type_name.substr( start, end-start );
auto start = type_name.find_last_of( ':' ) + 1;
auto end = type_name.find_last_of( '_' );
return type_name.substr( start, end-start );
}
} } // steem::chain
} } // hive::chain

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,8 @@
#include <hive/chain/hive_fwd.hpp>
#include <hive/chain/hive_objects.hpp>
#include <fc/uint128.hpp>
namespace hive { namespace chain {
} } // hive::chain

View File

@ -0,0 +1,587 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <fc/fixed_string.hpp>
#include <hive/protocol/authority.hpp>
#include <hive/protocol/hive_operations.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/chain/witness_objects.hpp>
#include <hive/chain/shared_authority.hpp>
#include <hive/chain/util/manabar.hpp>
#include <hive/chain/util/delayed_voting_processor.hpp>
#include <hive/chain/util/tiny_asset.hpp>
#include <numeric>
namespace hive { namespace chain {
using chainbase::t_vector;
using hive::protocol::authority;
// Core chain-state record for a single account: identity, keys, balances
// (liquid/savings/reward HIVE and HBD, VESTS), withdrawal schedule, voting
// state and delayed-voting bookkeeping.
class account_object : public object< account_object_type, account_object >
{
CHAINBASE_OBJECT( account_object );
public:
//constructor for creation of regular accounts
template< typename Allocator >
account_object( allocator< Allocator > a, uint64_t _id,
const account_name_type& _name, const public_key_type& _memo_key,
const time_point_sec& _creation_time, bool _mined,
const account_name_type& _recovery_account,
bool _fill_mana, const asset& incoming_delegation )
: id( _id ), name( _name ), memo_key( _memo_key ), created( _creation_time ), mined( _mined ),
recovery_account( _recovery_account ), delayed_votes( a )
{
received_vesting_shares += incoming_delegation;
voting_manabar.last_update_time = _creation_time.sec_since_epoch();
downvote_manabar.last_update_time = _creation_time.sec_since_epoch();
// When requested, the account starts with full voting mana.
if( _fill_mana )
voting_manabar.current_mana = HIVE_100_PERCENT;
}
//minimal constructor used for creation of accounts at genesis and in tests
template< typename Allocator >
account_object( allocator< Allocator > a, uint64_t _id,
const account_name_type& _name, const public_key_type& _memo_key = public_key_type() )
: id( _id ), name( _name ), memo_key( _memo_key ), delayed_votes( a )
{}
//liquid HIVE balance
asset get_balance() const { return balance; }
//HIVE balance in savings
asset get_savings() const { return savings_balance; }
//unclaimed HIVE rewards
asset get_rewards() const { return reward_hive_balance; }
//liquid HBD balance
asset get_hbd_balance() const { return hbd_balance; }
//HBD balance in savings
asset get_hbd_savings() const { return savings_hbd_balance; }
//unclaimed HBD rewards
asset get_hbd_rewards() const { return reward_hbd_balance; }
//all VESTS held by the account - use other routines to get active VESTS for specific uses
asset get_vesting() const { return vesting_shares; }
//VESTS that were delegated to other accounts
asset get_delegated_vesting() const { return delegated_vesting_shares; }
//VESTS that were borrowed from other accounts
asset get_received_vesting() const { return received_vesting_shares; }
//TODO: add routines for specific uses, f.e. get_witness_voting_power, get_proposal_voting_power, get_post_voting_power...
//unclaimed VESTS rewards
asset get_vest_rewards() const { return reward_vesting_balance; }
//value of unclaimed VESTS rewards in HIVE (HIVE held on global balance)
asset get_vest_rewards_as_hive() const { return reward_vesting_hive; }
account_name_type name;
public_key_type memo_key;
account_name_type proxy;
time_point_sec last_account_update;
time_point_sec created;
bool mined = true;
account_name_type recovery_account;
account_name_type reset_account = HIVE_NULL_ACCOUNT;
time_point_sec last_account_recovery;
uint32_t comment_count = 0;
uint32_t lifetime_vote_count = 0;
uint32_t post_count = 0;
bool can_vote = true;
util::manabar voting_manabar;
util::manabar downvote_manabar;
HIVE_asset balance = asset( 0, HIVE_SYMBOL ); ///< total liquid shares held by this account
HIVE_asset savings_balance = asset( 0, HIVE_SYMBOL ); ///< total HIVE held in savings by this account (comment fixed; was a copy-paste of the `balance` comment)
/**
* HBD Deposits pay interest based upon the interest rate set by witnesses. The purpose of these
* fields is to track the total (time * hbd_balance) that it is held. Then at the appointed time
* interest can be paid using the following equation:
*
* interest = interest_rate * hbd_seconds / seconds_per_year
*
* Every time the hbd_balance is updated the hbd_seconds is also updated. If at least
* HIVE_HBD_INTEREST_COMPOUND_INTERVAL_SEC has passed since hbd_last_interest_payment then
* interest is added to hbd_balance.
*
* @defgroup hbd_data HBD Balance Data
*/
///@{
HBD_asset hbd_balance = asset( 0, HBD_SYMBOL ); /// total HBD balance
uint128_t hbd_seconds; ///< total HBD * how long it has been held
time_point_sec hbd_seconds_last_update; ///< the last time the hbd_seconds was updated
time_point_sec hbd_last_interest_payment; ///< used to pay interest at most once per month
HBD_asset savings_hbd_balance = asset( 0, HBD_SYMBOL ); /// total HBD balance held in savings (comment fixed; was a copy-paste of the `hbd_balance` comment)
uint128_t savings_hbd_seconds; ///< total HBD * how long it has been held
time_point_sec savings_hbd_seconds_last_update; ///< the last time the hbd_seconds was updated
time_point_sec savings_hbd_last_interest_payment; ///< used to pay interest at most once per month
uint8_t savings_withdraw_requests = 0;
///@}
HBD_asset reward_hbd_balance = asset( 0, HBD_SYMBOL );
HIVE_asset reward_hive_balance = asset( 0, HIVE_SYMBOL );
VEST_asset reward_vesting_balance = asset( 0, VESTS_SYMBOL );
HIVE_asset reward_vesting_hive = asset( 0, HIVE_SYMBOL );
share_type curation_rewards = 0;
share_type posting_rewards = 0;
VEST_asset vesting_shares = asset( 0, VESTS_SYMBOL ); ///< total vesting shares held by this account, controls its voting power
VEST_asset delegated_vesting_shares = asset( 0, VESTS_SYMBOL );
VEST_asset received_vesting_shares = asset( 0, VESTS_SYMBOL );
VEST_asset vesting_withdraw_rate = asset( 0, VESTS_SYMBOL ); ///< at the time this is updated it can be at most vesting_shares/104
time_point_sec next_vesting_withdrawal = fc::time_point_sec::maximum(); ///< after every withdrawal this is incremented by 1 week
share_type withdrawn = 0; /// Track how many shares have been withdrawn
share_type to_withdraw = 0; /// Might be able to look this up with operation history.
uint16_t withdraw_routes = 0; //max 10, why is it 16bit?
uint16_t pending_transfers = 0; //for now max is 255, but it might change
fc::array<share_type, HIVE_MAX_PROXY_RECURSION_DEPTH> proxied_vsf_votes;// = std::vector<share_type>( HIVE_MAX_PROXY_RECURSION_DEPTH, 0 ); ///< the total VFS votes proxied to this account
uint16_t witnesses_voted_for = 0; //max 30, why is it 16bit?
time_point_sec last_post;
time_point_sec last_root_post = fc::time_point_sec::min();
time_point_sec last_post_edit;
time_point_sec last_vote_time;
uint32_t post_bandwidth = 0;
share_type pending_claimed_accounts = 0;
using t_delayed_votes = t_vector< delayed_votes_data >;
/*
Holds sum of VESTS per day.
VESTS from day `X` will be matured after `X` + 30 days ( because `HIVE_DELAYED_VOTING_TOTAL_INTERVAL_SECONDS` == 30 days )
*/
t_delayed_votes delayed_votes;
/*
Total sum of VESTS from `delayed_votes` collection.
It's a helper variable needed for better performance.
*/
ushare_type sum_delayed_votes = 0;
// Earliest entry of the delayed_votes collection; maximum() when empty
// (used as key of the by_delayed_voting index below).
time_point_sec get_the_earliest_time() const
{
if( !delayed_votes.empty() )
return ( delayed_votes.begin() )->time;
else
return time_point_sec::maximum();
}
// VESTS that already matured for voting: total vesting minus the delayed sum.
share_type get_real_vesting_shares() const
{
FC_ASSERT( sum_delayed_votes.value <= vesting_shares.amount, "",
( "sum_delayed_votes", sum_delayed_votes )
( "vesting_shares.amount", vesting_shares.amount )
( "account", name ) );
return asset( vesting_shares.amount - sum_delayed_votes.value, VESTS_SYMBOL ).amount;
}
/// This function should be used only when the account votes for a witness directly
share_type witness_vote_weight()const {
return std::accumulate( proxied_vsf_votes.begin(),
proxied_vsf_votes.end(),
get_real_vesting_shares()
);
}
// Sum of votes proxied to this account (excludes the account's own shares).
share_type proxied_vsf_votes_total()const {
return std::accumulate( proxied_vsf_votes.begin(),
proxied_vsf_votes.end(),
share_type() );
}
CHAINBASE_UNPACK_CONSTRUCTOR(account_object, (delayed_votes));
};
// JSON metadata of an account, kept out of account_object - presumably so the
// hot account record stays small; confirm against database.cpp usage.
class account_metadata_object : public object< account_metadata_object_type, account_metadata_object >
{
CHAINBASE_OBJECT( account_metadata_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( account_metadata_object, (json_metadata)(posting_json_metadata) )
account_id_type account;
shared_string json_metadata;
shared_string posting_json_metadata;
CHAINBASE_UNPACK_CONSTRUCTOR(account_metadata_object, (json_metadata)(posting_json_metadata));
};
// Owner/active/posting authorities of an account, stored separately from
// account_object and keyed by account name.
class account_authority_object : public object< account_authority_object_type, account_authority_object >
{
CHAINBASE_OBJECT( account_authority_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( account_authority_object, (owner)(active)(posting) )
account_name_type account;
shared_authority owner; ///< used for backup control, can set owner or active
shared_authority active; ///< used for all monetary operations, can set active or posting
shared_authority posting; ///< used for voting and posting
time_point_sec last_owner_update;
CHAINBASE_UNPACK_CONSTRUCTOR(account_authority_object, (owner)(active)(posting));
};
// An active VESTS delegation from `delegator` to `delegatee`.
class vesting_delegation_object : public object< vesting_delegation_object_type, vesting_delegation_object >
{
CHAINBASE_OBJECT( vesting_delegation_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( vesting_delegation_object )
//amount of delegated VESTS
const asset& get_vesting() const { return vesting_shares; }
account_name_type delegator;
account_name_type delegatee;
asset vesting_shares = asset( 0, VESTS_SYMBOL );
time_point_sec min_delegation_time;
CHAINBASE_UNPACK_CONSTRUCTOR(vesting_delegation_object);
};
// VESTS returning to `delegator` after a delegation was reduced/removed;
// the shares become usable again at `expiration`.
class vesting_delegation_expiration_object : public object< vesting_delegation_expiration_object_type, vesting_delegation_expiration_object >
{
CHAINBASE_OBJECT( vesting_delegation_expiration_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( vesting_delegation_expiration_object )
//amount of expiring delegated VESTS
const asset& get_vesting() const { return vesting_shares; }
account_name_type delegator;
asset vesting_shares = asset( 0, VESTS_SYMBOL );
time_point_sec expiration;
CHAINBASE_UNPACK_CONSTRUCTOR(vesting_delegation_expiration_object);
};
// Snapshot of a previous owner authority of an account, kept so the recovery
// process can verify a recent-enough old owner key (valid until last_valid_time).
class owner_authority_history_object : public object< owner_authority_history_object_type, owner_authority_history_object >
{
CHAINBASE_OBJECT( owner_authority_history_object );
public:
template< typename Allocator >
owner_authority_history_object( allocator< Allocator > a, uint64_t _id,
const account_object& _account, const shared_authority& _previous_owner, const time_point_sec& _creation_time )
: id( _id ), account( _account.name ), previous_owner_authority( allocator< shared_authority >( a ) ),
last_valid_time( _creation_time )
{
// shared_authority must be allocator-constructed first, then assigned.
previous_owner_authority = _previous_owner;
}
account_name_type account;
shared_authority previous_owner_authority;
time_point_sec last_valid_time;
CHAINBASE_UNPACK_CONSTRUCTOR(owner_authority_history_object, (previous_owner_authority));
};
// Pending request to replace the owner authority of `account_to_recover`;
// it is discarded after `expires`.
class account_recovery_request_object : public object< account_recovery_request_object_type, account_recovery_request_object >
{
CHAINBASE_OBJECT( account_recovery_request_object );
public:
template< typename Allocator >
account_recovery_request_object( allocator< Allocator > a, uint64_t _id,
const account_name_type& _account_to_recover, const authority& _new_owner_authority, const time_point_sec& _expiration_time )
: id( _id ), account_to_recover( _account_to_recover ), new_owner_authority( allocator< shared_authority >( a ) ),
expires( _expiration_time )
{
// shared_authority must be allocator-constructed first, then assigned.
new_owner_authority = _new_owner_authority;
}
account_name_type account_to_recover;
shared_authority new_owner_authority;
time_point_sec expires;
CHAINBASE_UNPACK_CONSTRUCTOR(account_recovery_request_object, (new_owner_authority));
};
// Pending change of the designated recovery account; takes effect at `effective_on`.
class change_recovery_account_request_object : public object< change_recovery_account_request_object_type, change_recovery_account_request_object >
{
CHAINBASE_OBJECT( change_recovery_account_request_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( change_recovery_account_request_object )
account_name_type account_to_recover;
account_name_type recovery_account;
time_point_sec effective_on;
CHAINBASE_UNPACK_CONSTRUCTOR(change_recovery_account_request_object);
};
// Index tags for account_index.
struct by_proxy;
struct by_next_vesting_withdrawal;
struct by_delayed_voting;
/**
* @ingroup object_index
*/
// Primary index over accounts: unique by id and name, plus composite keys
// for proxy resolution, the vesting-withdrawal schedule and delayed voting.
typedef multi_index_container<
account_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< account_object, account_object::id_type, &account_object::get_id > >,
ordered_unique< tag< by_name >,
member< account_object, account_name_type, &account_object::name > >,
ordered_unique< tag< by_proxy >,
composite_key< account_object,
member< account_object, account_name_type, &account_object::proxy >,
member< account_object, account_name_type, &account_object::name >
> /// composite key by proxy
>,
ordered_unique< tag< by_next_vesting_withdrawal >,
composite_key< account_object,
member< account_object, time_point_sec, &account_object::next_vesting_withdrawal >,
member< account_object, account_name_type, &account_object::name >
> /// composite key by_next_vesting_withdrawal
>,
ordered_unique< tag< by_delayed_voting >,
composite_key< account_object,
const_mem_fun< account_object, time_point_sec, &account_object::get_the_earliest_time >,
const_mem_fun< account_object, account_object::id_type, &account_object::get_id >
>
>
>,
allocator< account_object >
> account_index;
// Metadata records, unique per owning account id.
struct by_account;
typedef multi_index_container <
account_metadata_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< account_metadata_object, account_metadata_object::id_type, &account_metadata_object::get_id > >,
ordered_unique< tag< by_account >,
member< account_metadata_object, account_id_type, &account_metadata_object::account > >
>,
allocator< account_metadata_object >
> account_metadata_index;
// Owner-authority history ordered by (account, last_valid_time, id) so the
// recovery code can scan an account's recent owner keys chronologically.
typedef multi_index_container <
owner_authority_history_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< owner_authority_history_object, owner_authority_history_object::id_type, &owner_authority_history_object::get_id > >,
ordered_unique< tag< by_account >,
composite_key< owner_authority_history_object,
member< owner_authority_history_object, account_name_type, &owner_authority_history_object::account >,
member< owner_authority_history_object, time_point_sec, &owner_authority_history_object::last_valid_time >,
const_mem_fun< owner_authority_history_object, owner_authority_history_object::id_type, &owner_authority_history_object::get_id >
>,
composite_key_compare< std::less< account_name_type >, std::less< time_point_sec >, std::less< owner_authority_history_id_type > >
>
>,
allocator< owner_authority_history_object >
> owner_authority_history_index;
// Authorities by account, plus newest-first ordering on last_owner_update
// (note std::greater on the time component).
struct by_last_owner_update;
typedef multi_index_container <
account_authority_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< account_authority_object, account_authority_object::id_type, &account_authority_object::get_id > >,
ordered_unique< tag< by_account >,
composite_key< account_authority_object,
member< account_authority_object, account_name_type, &account_authority_object::account >,
const_mem_fun< account_authority_object, account_authority_object::id_type, &account_authority_object::get_id >
>,
composite_key_compare< std::less< account_name_type >, std::less< account_authority_id_type > >
>,
ordered_unique< tag< by_last_owner_update >,
composite_key< account_authority_object,
member< account_authority_object, time_point_sec, &account_authority_object::last_owner_update >,
const_mem_fun< account_authority_object, account_authority_object::id_type, &account_authority_object::get_id >
>,
composite_key_compare< std::greater< time_point_sec >, std::less< account_authority_id_type > >
>
>,
allocator< account_authority_object >
> account_authority_index;
// Delegations, unique per (delegator, delegatee) pair.
struct by_delegation;
typedef multi_index_container <
vesting_delegation_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< vesting_delegation_object, vesting_delegation_object::id_type, &vesting_delegation_object::get_id > >,
ordered_unique< tag< by_delegation >,
composite_key< vesting_delegation_object,
member< vesting_delegation_object, account_name_type, &vesting_delegation_object::delegator >,
member< vesting_delegation_object, account_name_type, &vesting_delegation_object::delegatee >
>,
composite_key_compare< std::less< account_name_type >, std::less< account_name_type > >
>
>,
allocator< vesting_delegation_object >
> vesting_delegation_index;
// Expiring delegation returns: globally by expiration time, and per delegator.
struct by_expiration;
struct by_account_expiration;
typedef multi_index_container <
vesting_delegation_expiration_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< vesting_delegation_expiration_object, vesting_delegation_expiration_object::id_type, &vesting_delegation_expiration_object::get_id > >,
ordered_unique< tag< by_expiration >,
composite_key< vesting_delegation_expiration_object,
member< vesting_delegation_expiration_object, time_point_sec, &vesting_delegation_expiration_object::expiration >,
const_mem_fun< vesting_delegation_expiration_object, vesting_delegation_expiration_object::id_type, &vesting_delegation_expiration_object::get_id >
>,
composite_key_compare< std::less< time_point_sec >, std::less< vesting_delegation_expiration_id_type > >
>,
ordered_unique< tag< by_account_expiration >,
composite_key< vesting_delegation_expiration_object,
member< vesting_delegation_expiration_object, account_name_type, &vesting_delegation_expiration_object::delegator >,
member< vesting_delegation_expiration_object, time_point_sec, &vesting_delegation_expiration_object::expiration >,
const_mem_fun< vesting_delegation_expiration_object, vesting_delegation_expiration_object::id_type, &vesting_delegation_expiration_object::get_id >
>,
composite_key_compare< std::less< account_name_type >, std::less< time_point_sec >, std::less< vesting_delegation_expiration_id_type > >
>
>,
allocator< vesting_delegation_expiration_object >
> vesting_delegation_expiration_index;
// NOTE(review): by_expiration is already forward-declared above (delegation
// expiration section); this redeclaration is harmless but redundant.
struct by_expiration;
// Recovery requests: unique per account, and by expiry for cleanup.
typedef multi_index_container <
account_recovery_request_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< account_recovery_request_object, account_recovery_request_object::id_type, &account_recovery_request_object::get_id > >,
ordered_unique< tag< by_account >,
member< account_recovery_request_object, account_name_type, &account_recovery_request_object::account_to_recover >
>,
ordered_unique< tag< by_expiration >,
composite_key< account_recovery_request_object,
member< account_recovery_request_object, time_point_sec, &account_recovery_request_object::expires >,
member< account_recovery_request_object, account_name_type, &account_recovery_request_object::account_to_recover >
>,
composite_key_compare< std::less< time_point_sec >, std::less< account_name_type > >
>
>,
allocator< account_recovery_request_object >
> account_recovery_request_index;
struct by_effective_date;

/// Pending requests to change an account's designated recovery account.
/// Indices:
///   by_id             - primary key
///   by_account        - unique: at most one pending change per account
///   by_effective_date - (effective_on, account) order, used to apply changes when due
typedef multi_index_container <
change_recovery_account_request_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< change_recovery_account_request_object, change_recovery_account_request_object::id_type, &change_recovery_account_request_object::get_id > >,
ordered_unique< tag< by_account >,
member< change_recovery_account_request_object, account_name_type, &change_recovery_account_request_object::account_to_recover >
>,
ordered_unique< tag< by_effective_date >,
composite_key< change_recovery_account_request_object,
member< change_recovery_account_request_object, time_point_sec, &change_recovery_account_request_object::effective_on >,
member< change_recovery_account_request_object, account_name_type, &change_recovery_account_request_object::account_to_recover >
>,
composite_key_compare< std::less< time_point_sec >, std::less< account_name_type > >
>
>,
allocator< change_recovery_account_request_object >
> change_recovery_account_request_index;
} }
#ifdef ENABLE_MIRA
namespace mira {
// Marks objects whose serialized form has a fixed byte size, allowing MIRA
// to use its fixed-length storage fast path for these types.
template<> struct is_static_length< hive::chain::vesting_delegation_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::vesting_delegation_expiration_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::change_recovery_account_request_object > : public boost::true_type {};
} // mira
#endif
// Reflection metadata: FC_REFLECT registers each object's serializable members
// with the fc serialization framework, and CHAINBASE_SET_INDEX_TYPE binds each
// object type to its multi-index container so chainbase can manage the index.
FC_REFLECT( hive::chain::account_object,
(id)(name)(memo_key)(proxy)(last_account_update)
(created)(mined)
(recovery_account)(last_account_recovery)(reset_account)
(comment_count)(lifetime_vote_count)(post_count)(can_vote)(voting_manabar)(downvote_manabar)
(balance)
(savings_balance)
(hbd_balance)(hbd_seconds)(hbd_seconds_last_update)(hbd_last_interest_payment)
(savings_hbd_balance)(savings_hbd_seconds)(savings_hbd_seconds_last_update)(savings_hbd_last_interest_payment)(savings_withdraw_requests)
(reward_hive_balance)(reward_hbd_balance)(reward_vesting_balance)(reward_vesting_hive)
(vesting_shares)(delegated_vesting_shares)(received_vesting_shares)
(vesting_withdraw_rate)(next_vesting_withdrawal)(withdrawn)(to_withdraw)(withdraw_routes)
(pending_transfers)(curation_rewards)
(posting_rewards)
(proxied_vsf_votes)(witnesses_voted_for)
(last_post)(last_root_post)(last_post_edit)(last_vote_time)(post_bandwidth)
(pending_claimed_accounts)
(delayed_votes)
(sum_delayed_votes)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_object, hive::chain::account_index )
FC_REFLECT( hive::chain::account_metadata_object,
(id)(account)(json_metadata)(posting_json_metadata) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_metadata_object, hive::chain::account_metadata_index )
FC_REFLECT( hive::chain::account_authority_object,
(id)(account)(owner)(active)(posting)(last_owner_update)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_authority_object, hive::chain::account_authority_index )
FC_REFLECT( hive::chain::vesting_delegation_object,
(id)(delegator)(delegatee)(vesting_shares)(min_delegation_time) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::vesting_delegation_object, hive::chain::vesting_delegation_index )
FC_REFLECT( hive::chain::vesting_delegation_expiration_object,
(id)(delegator)(vesting_shares)(expiration) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::vesting_delegation_expiration_object, hive::chain::vesting_delegation_expiration_index )
FC_REFLECT( hive::chain::owner_authority_history_object,
(id)(account)(previous_owner_authority)(last_valid_time)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::owner_authority_history_object, hive::chain::owner_authority_history_index )
FC_REFLECT( hive::chain::account_recovery_request_object,
(id)(account_to_recover)(new_owner_authority)(expires)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_recovery_request_object, hive::chain::account_recovery_request_index )
FC_REFLECT( hive::chain::change_recovery_account_request_object,
(id)(account_to_recover)(recovery_account)(effective_on)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::change_recovery_account_request_object, hive::chain::change_recovery_account_request_index )
namespace helpers
{

/// Statistics provider specialization for the account index. Besides the
/// static per-item data it accounts for the dynamically allocated storage
/// reserved by every account's delayed-votes vector.
template <>
class index_statistic_provider<hive::chain::account_index>
{
public:
  typedef hive::chain::account_index IndexType;
  typedef typename hive::chain::account_object::t_delayed_votes t_delayed_votes;

  index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
  {
    index_statistic_info result;
    gather_index_static_data(index, &result);

    if(onlyStaticInfo)
      return result;

    // Sum reserved capacity (not size) of each account's delayed-votes
    // vector, since capacity reflects the memory actually allocated.
    for(const auto& account : index)
      result._item_additional_allocation +=
        account.delayed_votes.capacity() * sizeof(t_delayed_votes::value_type);

    return result;
  }
};

} /// namespace helpers

View File

@ -0,0 +1,77 @@
#pragma once
#include <fc/filesystem.hpp>
#include <hive/protocol/block.hpp>
namespace hive { namespace chain {
using namespace hive::protocol;
namespace detail { class block_log_impl; }
/* The block log is an external append only log of the blocks. Blocks should only be written
* to the log after they are irreversible as the log is append only. The log is a doubly linked
* list of blocks. There is a secondary index file of only block positions that enables O(1)
* random access lookup by block number.
*
* +---------+----------------+---------+----------------+-----+------------+-------------------+
* | Block 1 | Pos of Block 1 | Block 2 | Pos of Block 2 | ... | Head Block | Pos of Head Block |
* +---------+----------------+---------+----------------+-----+------------+-------------------+
*
* +----------------+----------------+-----+-------------------+
* | Pos of Block 1 | Pos of Block 2 | ... | Pos of Head Block |
* +----------------+----------------+-----+-------------------+
*
* The block log can be walked in order by deserializing a block, skipping 8 bytes, deserializing a
* block, repeat... The head block of the file can be found by seeking to the position contained
* in the last 8 bytes of the file. The block log can be read backwards by jumping back 8 bytes, following
* the position, reading the block, jumping back 8 bytes, etc.
*
* Blocks can be accessed at random via block number through the index file. Seek to 8 * (block_num - 1)
* to find the position of the block in the main file.
*
* The main file is the only file that needs to persist. The index file can be reconstructed during a
* linear scan of the main file.
*/
class block_log {
public:
block_log();
~block_log();
/// Open (creating if necessary) the block log at the given path.
void open( const fc::path& file );
/// Copy blocks 1..maxBlockNo from inputFile into a fresh log at outputFile.
void rewrite(const fc::path& inputFile, const fc::path& outputFile, uint32_t maxBlockNo);
void close();
bool is_open()const;
/// Append an (irreversible) block to the log; returns its file position.
uint64_t append( const signed_block& b );
/// Flush buffered writes to disk.
void flush();
/// Read the block stored at the given file position; second element of the
/// returned pair is the position of the next block.
std::pair< signed_block, uint64_t > read_block( uint64_t file_pos )const;
/// Random-access read by block number (via the index file); empty optional
/// if the block is not in the log.
optional< std::pair< signed_block, uint64_t > > read_block_by_num( uint32_t block_num )const;
/**
* Return offset of block in file, or block_log::npos if it does not exist.
*/
uint64_t get_block_pos( uint32_t block_num ) const;
/// Read the last block stored in the log from disk.
signed_block read_head()const;
/// Cached in-memory copy of the head block (may be empty before first read).
const optional< signed_block >& head()const;
/*
* Used by the database to skip locking when reindexing
* APIs don't work at this point, so there is no danger.
*/
void set_locking( bool );
/// Sentinel "no position" value returned by get_block_pos().
static const uint64_t npos = std::numeric_limits<uint64_t>::max();
private:
/// Rebuild the index file by scanning the main log; with resume, continue
/// from index_pos instead of starting over.
void construct_index( bool resume = false, uint64_t index_pos = 0 );
std::pair< signed_block, uint64_t > read_block_helper( uint64_t file_pos )const;
uint64_t get_block_pos_helper( uint32_t block_num ) const;
// pimpl: hides file handles and locking details from this header.
std::unique_ptr<detail::block_log_impl> my;
};
} }

View File

@ -0,0 +1,46 @@
#pragma once
#include <hive/chain/hive_object_types.hpp>
namespace hive { namespace chain {
using hive::protocol::block_id_type;
/**
* @brief tracks minimal information about past blocks to implement TaPOS
* @ingroup object
*
* When attempting to calculate the validity of a transaction we need to
* lookup a past block and check its block hash and the time it occurred
* so we can calculate whether the current transaction is valid and at
* what time it should expire.
*/
class block_summary_object : public object< block_summary_object_type, block_summary_object >
{
CHAINBASE_OBJECT( block_summary_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( block_summary_object )
// id of the past block this summary slot refers to; checked against a
// transaction's TaPOS reference during validation.
block_id_type block_id;
};
/// Block summaries only need lookup by object id (the slot number), so the
/// container has just the primary by_id index.
typedef multi_index_container<
block_summary_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< block_summary_object, block_summary_object::id_type, &block_summary_object::get_id > >
>,
allocator< block_summary_object >
> block_summary_index;
} } // hive::chain
#ifdef ENABLE_MIRA
namespace mira {
// block_summary_object serializes to a fixed size (id + block hash), so MIRA
// can use its fixed-length storage path.
template<> struct is_static_length< hive::chain::block_summary_object > : public boost::true_type {};
} // mira
#endif
// Serialization metadata and index binding for the block summary object.
FC_REFLECT( hive::chain::block_summary_object, (id)(block_id) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::block_summary_object, hive::chain::block_summary_index )

View File

@ -5,38 +5,38 @@
#include <fc/io/datastream.hpp>
#include <fc/io/raw.hpp>
// NOTE(fix): a botched steem->hive rebrand diff left BOTH the old
// `namespace steem` and the new `namespace hive` open/close lines here,
// nesting buffer_type inside steem::chain::hive::chain. Keep only the
// rebranded namespace.
namespace hive { namespace chain {

/// Byte buffer for serialized data, allocated in chainbase shared memory.
typedef chainbase::t_vector< char > buffer_type;

} } // hive::chain
namespace fc { namespace raw {

// NOTE(fix): the previous text contained each function body twice (old and
// new lines of a diff applied together): pack_to_buffer packed the value
// twice, unpack_from_buffer unpacked twice, and the returning overload
// declared `T v;` twice (a redeclaration error) with code after `return v;`.
// Each helper now has a single body.

/// Serialize v into buffer raw, resizing it to exactly fit the packed data.
template< typename T, typename B > inline void pack_to_buffer( B& raw, const T& v )
{
  auto size = pack_size( v );
  raw.resize( size );
  datastream< char* > ds( raw.data(), size );
  pack( ds, v );
}

/// Deserialize v from the contents of buffer raw.
template< typename T, typename B > inline void unpack_from_buffer( const B& raw, T& v )
{
  datastream< const char* > ds( raw.data(), raw.size() );
  unpack( ds, v );
}

/// Deserialize and return a T from the contents of buffer raw.
template< typename T, typename B > inline T unpack_from_buffer( const B& raw )
{
  T v;
  datastream< const char* > ds( raw.data(), raw.size() );
  unpack( ds, v );
  return v;
}

} } // fc::raw
#ifndef ENABLE_MIRA
// NOTE(fix): removed leftover pre-rebrand line reflecting
// steem::chain::buffer_type — that namespace no longer exists after the
// steem->hive rename; only the hive alias is registered.
FC_REFLECT_TYPENAME( hive::chain::buffer_type )
#endif

View File

@ -0,0 +1,404 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/authority.hpp>
#include <hive/protocol/hive_operations.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/chain/witness_objects.hpp>
#include <hive/chain/account_object.hpp>
#include <hive/chain/util/tiny_asset.hpp>
#include <fc/crypto/ripemd160.hpp>
#define HIVE_ROOT_POST_PARENT_ID hive::chain::account_id_type::null_id()
namespace hive { namespace chain {
using protocol::beneficiary_route_type;
using chainbase::t_vector;
using chainbase::t_pair;
#ifdef HIVE_ENABLE_SMT
using protocol::votable_asset_info;
#endif
/// Consensus representation of a post or reply. A comment is identified by
/// the ripemd160 hash of its (author id, permlink) pair and keeps only the
/// tree-structure data (root, parent, depth) needed by consensus.
class comment_object : public object< comment_object_type, comment_object >
{
CHAINBASE_OBJECT( comment_object );
public:
using author_and_permlink_hash_type = fc::ripemd160;
template< typename Allocator >
comment_object( allocator< Allocator > a, uint64_t _id,
const account_object& _author, const std::string& _permlink,
fc::optional< std::reference_wrapper< const comment_object > > _parent_comment );
//returns comment identification hash
const author_and_permlink_hash_type& get_author_and_permlink_hash() const { return author_and_permlink_hash; }
static author_and_permlink_hash_type compute_author_and_permlink_hash(
account_id_type author_account_id, const std::string& permlink );
//returns id of root comment (self when top comment)
comment_id_type get_root_id() const { return root_comment; }
//returns id of parent comment (null_id() when top comment)
comment_id_type get_parent_id() const { return parent_comment; }
//tells if comment is top comment
bool is_root() const { return parent_comment == comment_id_type::null_id(); }
//returns comment depth (distance to root)
uint16_t get_depth() const { return depth; }
private:
comment_id_type root_comment;
comment_id_type parent_comment;
author_and_permlink_hash_type author_and_permlink_hash;
uint16_t depth = 0; //looks like a candidate for removal (see https://github.com/steemit/steem/issues/767 )
CHAINBASE_UNPACK_CONSTRUCTOR(comment_object);
};
/// Constructs a comment: derives the identification hash from the author and
/// permlink and links the comment into the reply tree via its parent (if any).
template< typename Allocator >
inline comment_object::comment_object( allocator< Allocator > a, uint64_t _id,
  const account_object& _author, const std::string& _permlink,
  fc::optional< std::reference_wrapper< const comment_object > > _parent_comment
  )
  : id ( _id )
{
  author_and_permlink_hash = compute_author_and_permlink_hash( _author.get_id(), _permlink );

  if( !_parent_comment.valid() )
  {
    // Top-level post: no parent, and the post is its own root (depth stays 0).
    parent_comment = comment_id_type::null_id();
    root_comment = id;
  }
  else
  {
    // Reply: inherit the root from the parent and sit one level deeper.
    const comment_object& parent = ( *_parent_comment ).get();
    parent_comment = parent.get_id();
    root_comment = parent.get_root_id();
    depth = parent.get_depth() + 1;
  }
}
/// Builds the comment lookup hash: ripemd160 over "<permlink>@<author id>".
inline comment_object::author_and_permlink_hash_type comment_object::compute_author_and_permlink_hash(
  account_id_type author_account_id, const std::string& permlink )
{
  std::string key( permlink );
  key.append( 1, '@' );
  key.append( std::to_string( author_account_id ) );
  return fc::ripemd160::hash( key );
}
/*
Helper class related to `comment_object` - members needed for payout calculation.
Objects of this class can be removed, it depends on `cashout_time`
when `cashout_time == fc::time_point_sec::maximum()`
*/
/// Payout-related companion of comment_object; shares its id (1->{0,1}
/// relation) and is removed once the comment is past its final cashout
/// (i.e. when cashout_time == fc::time_point_sec::maximum()).
class comment_cashout_object : public object< comment_cashout_object_type, comment_cashout_object >
{
CHAINBASE_OBJECT( comment_cashout_object );
public:
template< typename Allocator >
comment_cashout_object( allocator< Allocator > a, uint64_t _id,
const comment_object& _comment, const account_object& _author, const std::string& _permlink,
const time_point_sec& _creation_time, const time_point_sec& _cashout_time, uint16_t _reward_weight = 0 )
: id( _comment.get_id() ), //note that it is possible because relation is 1->{0,1} so we can share id
author_id( _author.get_id() ), permlink( a ), active( _creation_time ),
last_payout( time_point_sec::min() ), created( _creation_time ), cashout_time( _cashout_time ),
max_cashout_time( time_point_sec::maximum() ), reward_weight( _reward_weight ), beneficiaries( a )
#ifdef HIVE_ENABLE_SMT
, allowed_vote_assets( a )
#endif
{
from_string( permlink, _permlink );
FC_ASSERT( _creation_time <= _cashout_time );
}
//returns id of associated comment
comment_id_type get_comment_id() const { return comment_object::id_type( id ); }
//returns creation time
const time_point_sec& get_creation_time() const { return created; }
account_id_type author_id;
shared_string permlink;
time_point_sec active; ///< the last time this post was "touched" by voting or reply
time_point_sec last_payout;
/// index on pending_payout for "things happening now... needs moderation"
/// TRENDING = UNCLAIMED + PENDING
share_type net_rshares; // reward is proportional to rshares^2, this is the sum of all votes (positive and negative)
share_type abs_rshares; /// this is used to track the total abs(weight) of votes for the purpose of calculating cashout_time
share_type vote_rshares; /// Total positive rshares from all votes. Used to calculate delta weights. Needed to handle vote changing and removal.
share_type children_abs_rshares; /// this is used to calculate cashout time of a discussion.
uint32_t children = 0; ///< used to track the total number of children, grandchildren, etc...
private:
time_point_sec created;
public:
time_point_sec cashout_time; /// 24 hours from the weighted average of vote time
time_point_sec max_cashout_time;
uint64_t total_vote_weight = 0; /// the total weight of voting rewards, used to calculate pro-rata share of curation payouts
uint16_t reward_weight = 0;
/** tracks the total payout this comment has received over time, measured in HBD */
HBD_asset total_payout_value = asset(0, HBD_SYMBOL);
HBD_asset curator_payout_value = asset(0, HBD_SYMBOL);
HBD_asset beneficiary_payout_value = asset( 0, HBD_SYMBOL );
share_type author_rewards = 0;
int32_t net_votes = 0;
HBD_asset max_accepted_payout = asset( 1000000000, HBD_SYMBOL ); /// HBD value of the maximum payout this post will receive
uint16_t percent_hbd = HIVE_100_PERCENT; /// the percent of HBD to keep, unkept amounts will be received as VESTS
bool allow_votes = true; /// allows a post to receive votes;
bool allow_curation_rewards = true;
using t_beneficiaries = t_vector< beneficiary_route_type >;
t_beneficiaries beneficiaries;
#ifdef HIVE_ENABLE_SMT
using t_votable_assets = t_vector< t_pair< asset_symbol_type, votable_asset_info > >;
t_votable_assets allowed_vote_assets;
CHAINBASE_UNPACK_CONSTRUCTOR(comment_cashout_object, (permlink)(beneficiaries)(allowed_vote_assets));
#else
CHAINBASE_UNPACK_CONSTRUCTOR(comment_cashout_object, (permlink)(beneficiaries));
#endif
};
/// Non-consensus textual content of a comment (title, body, json metadata),
/// kept separate from comment_object so consensus state stays small.
class comment_content_object : public object< comment_content_object_type, comment_content_object >
{
CHAINBASE_OBJECT( comment_content_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( comment_content_object, (title)(body)(json_metadata) )
// id of the comment this content belongs to
comment_id_type comment;
shared_string title;
shared_string body;
shared_string json_metadata;
CHAINBASE_UNPACK_CONSTRUCTOR(comment_content_object, (title)(body)(json_metadata));
};
/**
* This index maintains the set of voter/comment pairs that have been used, voters cannot
* vote on the same comment more than once per payout period.
*/
class comment_vote_object : public object< comment_vote_object_type, comment_vote_object>
{
CHAINBASE_OBJECT( comment_vote_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( comment_vote_object )
account_id_type voter;
comment_id_type comment;
uint64_t weight = 0; ///< defines the score this vote receives, used by vote payout calc. 0 if a negative vote or changed votes.
int64_t rshares = 0; ///< The number of rshares this vote is responsible for
int16_t vote_percent = 0; ///< The percent weight of the vote
time_point_sec last_update; ///< The time of the last update of the vote
// how many times the vote has been edited
int8_t num_changes = 0;
CHAINBASE_UNPACK_CONSTRUCTOR(comment_vote_object);
};
struct by_comment_voter;
struct by_voter_comment;

/// Votes cast on comments.
/// Indices:
///   by_id            - primary key
///   by_comment_voter - unique (comment, voter): enforces one vote object per pair
///   by_voter_comment - unique (voter, comment): lookup of all votes by a voter
typedef multi_index_container<
comment_vote_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< comment_vote_object, comment_vote_object::id_type, &comment_vote_object::get_id > >,
ordered_unique< tag< by_comment_voter >,
composite_key< comment_vote_object,
member< comment_vote_object, comment_id_type, &comment_vote_object::comment>,
member< comment_vote_object, account_id_type, &comment_vote_object::voter>
>
>,
ordered_unique< tag< by_voter_comment >,
composite_key< comment_vote_object,
member< comment_vote_object, account_id_type, &comment_vote_object::voter>,
member< comment_vote_object, comment_id_type, &comment_vote_object::comment>
>
>
>,
allocator< comment_vote_object >
> comment_vote_index;
struct by_permlink; /// author, perm
struct by_root;
struct by_author_last_update;
/**
* @ingroup object_index
*
* Consensus comment index:
*   by_id       - primary key
*   by_permlink - unique lookup by the (author, permlink) hash
*   by_root     - (root id, id): iterate a whole discussion tree
*/
typedef multi_index_container<
comment_object,
indexed_by<
/// CONSENSUS INDICES - used by evaluators
ordered_unique< tag< by_id >,
const_mem_fun< comment_object, comment_object::id_type, &comment_object::get_id > >,
ordered_unique< tag< by_permlink >, /// used by consensus to find posts referenced in ops
const_mem_fun< comment_object, const comment_object::author_and_permlink_hash_type&, &comment_object::get_author_and_permlink_hash > >,
ordered_unique< tag< by_root >,
composite_key< comment_object,
const_mem_fun< comment_object, comment_id_type, &comment_object::get_root_id >,
const_mem_fun< comment_object, comment_object::id_type, &comment_object::get_id >
>
>
>,
allocator< comment_object >
> comment_index;
struct by_cashout_time; /// cashout_time

/// Cashout companion index:
///   by_id           - primary key (shared with the comment's id)
///   by_cashout_time - (cashout_time, id): process payouts in due order
typedef multi_index_container<
comment_cashout_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< comment_cashout_object, comment_cashout_object::id_type, &comment_cashout_object::get_id > >,
ordered_unique< tag< by_cashout_time >,
composite_key< comment_cashout_object,
member< comment_cashout_object, time_point_sec, &comment_cashout_object::cashout_time>,
const_mem_fun< comment_cashout_object, comment_cashout_object::id_type, &comment_cashout_object::get_id >
>
>
>,
allocator< comment_cashout_object >
> comment_cashout_index;
struct by_comment;

/// Comment content index:
///   by_id      - primary key
///   by_comment - unique: content record for a given comment id
typedef multi_index_container<
comment_content_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< comment_content_object, comment_content_object::id_type, &comment_content_object::get_id > >,
ordered_unique< tag< by_comment >,
member< comment_content_object, comment_id_type, &comment_content_object::comment > >
>,
allocator< comment_content_object >
> comment_content_index;
} } // hive::chain
#ifdef ENABLE_MIRA
namespace mira {
// comment_vote_object has only fixed-size members, so MIRA may use its
// fixed-length storage path for it.
template<> struct is_static_length< hive::chain::comment_vote_object > : public boost::true_type {};
} // mira
#endif
// Reflection metadata and index bindings for the comment-family objects.
FC_REFLECT( hive::chain::comment_object,
(id)(root_comment)(parent_comment)
(author_and_permlink_hash)(depth)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::comment_object, hive::chain::comment_index )
FC_REFLECT( hive::chain::comment_cashout_object,
(id)(author_id)(permlink)
(active)(last_payout)
(net_rshares)(abs_rshares)(vote_rshares)(children_abs_rshares)
(children)(created)(cashout_time)(max_cashout_time)
(total_vote_weight)(reward_weight)(total_payout_value)(curator_payout_value)(beneficiary_payout_value)
(author_rewards)(net_votes)
(max_accepted_payout)(percent_hbd)(allow_votes)(allow_curation_rewards)
(beneficiaries)
#ifdef HIVE_ENABLE_SMT
(allowed_vote_assets)
#endif
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::comment_cashout_object, hive::chain::comment_cashout_index )
FC_REFLECT( hive::chain::comment_content_object,
(id)(comment)(title)(body)(json_metadata) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::comment_content_object, hive::chain::comment_content_index )
FC_REFLECT( hive::chain::comment_vote_object,
(id)(voter)(comment)(weight)(rshares)(vote_percent)(last_update)(num_changes)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::comment_vote_object, hive::chain::comment_vote_index )
namespace helpers
{

using hive::chain::shared_string;

/// Statistics provider for the comment index: comment_object holds no
/// dynamically allocated members, so only static data is gathered.
template <>
class index_statistic_provider<hive::chain::comment_index>
{
public:
  typedef hive::chain::comment_index IndexType;

  index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
  {
    index_statistic_info result;
    gather_index_static_data(index, &result);
    return result;
  }
};

/// Statistics provider for the comment-cashout index: adds the reserved
/// storage of each object's permlink, beneficiary list and (when SMT
/// support is compiled in) votable-asset list.
template <>
class index_statistic_provider<hive::chain::comment_cashout_index>
{
public:
  typedef hive::chain::comment_cashout_index IndexType;
  typedef typename hive::chain::comment_cashout_object::t_beneficiaries t_beneficiaries;
#ifdef HIVE_ENABLE_SMT
  typedef typename hive::chain::comment_cashout_object::t_votable_assets t_votable_assets;
#endif

  index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
  {
    index_statistic_info result;
    gather_index_static_data(index, &result);

    if(onlyStaticInfo)
      return result;

    // Capacity (not size) reflects memory actually reserved by each object.
    for(const auto& item : index)
    {
      result._item_additional_allocation += item.permlink.capacity()*sizeof(shared_string::value_type);
      result._item_additional_allocation += item.beneficiaries.capacity()*sizeof(t_beneficiaries::value_type);
#ifdef HIVE_ENABLE_SMT
      result._item_additional_allocation += item.allowed_vote_assets.capacity()*sizeof(t_votable_assets::value_type);
#endif
    }

    return result;
  }
};

/// Statistics provider for the comment-content index: adds the reserved
/// storage of each object's title, body and json metadata strings.
template <>
class index_statistic_provider<hive::chain::comment_content_index>
{
public:
  typedef hive::chain::comment_content_index IndexType;

  index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
  {
    index_statistic_info result;
    gather_index_static_data(index, &result);

    if(onlyStaticInfo)
      return result;

    for(const auto& item : index)
    {
      result._item_additional_allocation += item.title.capacity()*sizeof(shared_string::value_type);
      result._item_additional_allocation += item.body.capacity()*sizeof(shared_string::value_type);
      result._item_additional_allocation += item.json_metadata.capacity()*sizeof(shared_string::value_type);
    }

    return result;
  }
};

} /// namespace helpers

View File

@ -0,0 +1,49 @@
#pragma once
#include <cstdint>
#include <hive/protocol/config.hpp>
#include <hive/protocol/types.hpp>
#include <fc/uint128.hpp>
namespace hive { namespace protocol {
/// Computes round( current_supply * percent * multiply_constant / 2^shift_constant )
/// in 128-bit arithmetic. The (multiply_constant, shift_constant) pair encodes
/// a per-period APR scaling factor; the static_asserts bound the template
/// arguments so the 128-bit intermediate cannot overflow.
template< uint16_t percent, uint64_t multiply_constant, uint64_t shift_constant >
share_type calc_percent_reward( share_type current_supply )
{
static_assert( shift_constant > 0, "shift constant cannot be zero" );
static_assert( shift_constant < 128, "shift constant is implausibly large, re-check your arguments" );
static_assert( multiply_constant > 256, "multiply constant is implausibly small, re-check your arguments" );
static_assert( multiply_constant < UINT64_MAX / (10 * uint64_t( HIVE_100_PERCENT )), "multiply constant is too large, we may be in danger of overflow" );
static_assert( (percent == 0) || (percent > HIVE_1_PERCENT), "percent is smaller than 1%, re-check your arguments" );
static_assert( percent <= HIVE_100_PERCENT, "percent is implausibly large, re-check your arguments (if you really mean to do this, you should revise the overflow check above accordingly)" );
// half of 2^shift_constant, added before shifting to achieve round-to-nearest
static const uint128_t half = uint128_t(1) << (shift_constant - 1);
uint128_t reward = current_supply.value;
reward *= (percent * multiply_constant); // compile-time constant, fits in 64 bits
reward += half; // round to nearest whole integer instead of truncating
reward >>= shift_constant;
return reward.to_uint64();
}
// Convenience wrappers selecting the precomputed (multiply, shift) constant
// pair matching the emission period (hour / block / witness round).
template< uint16_t percent >
inline share_type calc_percent_reward_per_hour( share_type current_supply )
{
return calc_percent_reward< percent, HIVE_APR_PERCENT_MULTIPLY_PER_HOUR, HIVE_APR_PERCENT_SHIFT_PER_HOUR >( current_supply );
}
template< uint16_t percent >
inline share_type calc_percent_reward_per_block( share_type current_supply )
{
return calc_percent_reward< percent, HIVE_APR_PERCENT_MULTIPLY_PER_BLOCK, HIVE_APR_PERCENT_SHIFT_PER_BLOCK >( current_supply );
}
template< uint16_t percent >
inline share_type calc_percent_reward_per_round( share_type current_supply )
{
return calc_percent_reward< percent, HIVE_APR_PERCENT_MULTIPLY_PER_ROUND, HIVE_APR_PERCENT_SHIFT_PER_ROUND >( current_supply );
}
} }

View File

@ -0,0 +1,27 @@
#pragma once
#include <memory>
#include <hive/protocol/types.hpp>
namespace hive { namespace schema {
struct abstract_schema;
} }
namespace hive { namespace protocol {
struct custom_json_operation;
} }
namespace hive { namespace chain {
/// Abstract interface implemented by plugins that interpret custom (json or
/// binary) operations embedded in blocks.
class custom_operation_interpreter
{
  public:
    /// NOTE(fix): interpreters are polymorphic and held/destroyed through
    /// base-class pointers; a virtual destructor is required so deletion
    /// dispatches to the derived class.
    virtual ~custom_operation_interpreter() = default;
    /// Process a custom json operation addressed to this interpreter's id.
    virtual void apply( const protocol::custom_json_operation& op ) = 0;
    /// Process a custom binary operation addressed to this interpreter's id.
    virtual void apply( const protocol::custom_binary_operation & op ) = 0;
    /// Identifier this interpreter is registered under (matched against ops).
    virtual hive::protocol::custom_id_type get_custom_id() = 0;
    /// Schema describing the payload this interpreter accepts.
    virtual std::shared_ptr< hive::schema::abstract_schema > get_operation_schema() = 0;
};
} } // hive::chain

View File

@ -0,0 +1,832 @@
/*
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
*/
#pragma once
#include <hive/chain/block_log.hpp>
#include <hive/chain/fork_database.hpp>
#include <hive/chain/global_property_object.hpp>
#include <hive/chain/hardfork_property_object.hpp>
#include <hive/chain/node_property_object.hpp>
#include <hive/chain/notifications.hpp>
#include <hive/chain/util/advanced_benchmark_dumper.hpp>
#include <hive/chain/util/signal.hpp>
#include <hive/chain/util/hf23_helper.hpp>
#include <hive/protocol/protocol.hpp>
#include <hive/protocol/hardfork.hpp>
#include <appbase/plugin.hpp>
#include <fc/signals.hpp>
#include <fc/log/logger.hpp>
#include <functional>
#include <map>
namespace hive { namespace chain {
using hive::protocol::signed_transaction;
using hive::protocol::operation;
using hive::protocol::authority;
using hive::protocol::asset;
using hive::protocol::asset_symbol_type;
using hive::protocol::price;
using abstract_plugin = appbase::abstract_plugin;
/// Per-hardfork activation schedule: parallel arrays of activation time and
/// protocol version, indexed by hardfork number (slot 0 = pre-hardfork state).
struct hardfork_versions
{
fc::time_point_sec times[ HIVE_NUM_HARDFORKS + 1 ];
protocol::hardfork_version versions[ HIVE_NUM_HARDFORKS + 1 ];
};
class database;
#ifdef ENABLE_MIRA
using set_index_type_func = std::function< void(database&, mira::index_type, const boost::filesystem::path&, const boost::any&) >;
#endif
/// Per-index customization hooks registered by name (see index_delegate_map);
/// currently only carries the MIRA backing-store switch when MIRA is enabled.
struct index_delegate {
#ifdef ENABLE_MIRA
set_index_type_func set_index_type;
#endif
};
using index_delegate_map = std::map< std::string, index_delegate >;
class database_impl;
class custom_operation_interpreter;
namespace util {
struct comment_reward_context;
}
namespace util {
class advanced_benchmark_dumper;
}
struct reindex_notification;
struct generate_optional_actions_notification {};
typedef std::function<void(uint32_t, const chainbase::database::abstract_index_cntr_t&)> TBenchmarkMidReport;
typedef std::pair<uint32_t, TBenchmarkMidReport> TBenchmark;
/// Parameters for database::open() / database::reindex().
struct open_args
{
fc::path data_dir;
fc::path shared_mem_dir;
// genesis supplies (HIVE and HBD)
uint64_t initial_supply = HIVE_INIT_SUPPLY;
uint64_t hbd_initial_supply = HIVE_HBD_INIT_SUPPLY;
// shared-memory file sizing; threshold/scale presumably control automatic
// growth when the file fills up -- TODO confirm semantics in database.cpp
uint64_t shared_file_size = 0;
uint16_t shared_file_full_threshold = 0;
uint16_t shared_file_scale_rate = 0;
int16_t sps_remove_threshold = -1;
uint32_t chainbase_flags = 0;
bool do_validate_invariants = false;
bool benchmark_is_enabled = false;
// backend-specific (MIRA) configuration blob
fc::variant database_cfg;
bool replay_in_memory = false;
std::vector< std::string > replay_memory_indices{};
// The following fields are only used on reindexing
uint32_t stop_replay_at = 0;
bool exit_after_replay = false;
bool force_replay = false;
// (interval, callback) pair: callback is invoked every `interval` blocks
// during replay to report progress; defaults to a no-op
TBenchmark benchmark = TBenchmark(0, [](uint32_t, const chainbase::database::abstract_index_cntr_t&) {});
};
/**
* @class database
* @brief tracks the blockchain state in an extensible manner
*/
class database : public chainbase::database
{
public:
database();
~database();
bool is_producing()const { return _is_producing; }
void set_producing( bool p ) { _is_producing = p; }
bool is_pending_tx()const { return _is_pending_tx; }
void set_pending_tx( bool p ) { _is_pending_tx = p; }
bool is_processing_block()const { return _currently_processing_block_id.valid(); }
bool _is_producing = false;
bool _is_pending_tx = false;
bool _log_hardforks = true;
enum validation_steps
{
skip_nothing = 0,
skip_witness_signature = 1 << 0, ///< used while reindexing
skip_transaction_signatures = 1 << 1, ///< used by non-witness nodes
skip_transaction_dupe_check = 1 << 2, ///< used while reindexing
skip_fork_db = 1 << 3, ///< used while reindexing
skip_block_size_check = 1 << 4, ///< used when applying locally generated transactions
skip_tapos_check = 1 << 5, ///< used while reindexing -- note this skips expiration check as well
skip_authority_check = 1 << 6, ///< used while reindexing -- disables any checking of authority on transactions
skip_merkle_check = 1 << 7, ///< used while reindexing
skip_undo_history_check = 1 << 8, ///< used while reindexing
skip_witness_schedule_check = 1 << 9, ///< used while reindexing
skip_validate = 1 << 10, ///< used prior to checkpoint, skips validate() call on transaction
skip_validate_invariants = 1 << 11, ///< used to skip database invariant check on block application
skip_undo_block = 1 << 12, ///< used to skip undo db on reindex
skip_block_log = 1 << 13 ///< used to skip block logging on reindex
};
/**
* @brief Open a database, creating a new one if necessary
*
* Opens a database in the specified directory. If no initialized database is found the database
* will be initialized with the default state.
*
* @param data_dir Path to open or create database in
*/
void open( const open_args& args );
private:
uint32_t reindex_internal( const open_args& args, std::pair< signed_block, uint64_t >& block_data );
public:
/**
* @brief Check if replaying was finished and all blocks from `block_log` were processed.
*
* This method is called from a chain plugin, if returns `true` then a synchronization is allowed.
* If returns `false`, then opening a node should be forbidden.
*
* There are output-type arguments: `head_block_num_origin`, `head_block_num_state` for information purposes only.
*
* @return information if replaying was finished
*/
bool is_reindex_complete( uint64_t* head_block_num_origin, uint64_t* head_block_num_state ) const;
/**
* @brief Rebuild object graph from block history and open database
*
* This method may be called after or instead of @ref database::open, and will rebuild the object graph by
* replaying blockchain history. When this method exits successfully, the database will be open.
*
* @return the last replayed block number.
*/
uint32_t reindex( const open_args& args );
/**
* @brief wipe Delete database from disk, and potentially the raw chain as well.
* @param include_blocks If true, delete the raw chain as well as the database.
*
* Will close the database before wiping. Database will be closed when this function returns.
*/
void wipe(const fc::path& data_dir, const fc::path& shared_mem_dir, bool include_blocks);
void close(bool rewind = true);
//////////////////// db_block.cpp ////////////////////
/**
* @return true if the block is in our fork DB or saved to disk as
* part of the official chain, otherwise return false
*/
bool is_known_block( const block_id_type& id )const;
bool is_known_transaction( const transaction_id_type& id )const;
fc::sha256 get_pow_target()const;
uint32_t get_pow_summary_target()const;
block_id_type find_block_id_for_num( uint32_t block_num )const;
block_id_type get_block_id_for_num( uint32_t block_num )const;
optional<signed_block> fetch_block_by_id( const block_id_type& id )const;
optional<signed_block> fetch_block_by_number( uint32_t num )const;
const signed_transaction get_recent_transaction( const transaction_id_type& trx_id )const;
std::vector<block_id_type> get_block_ids_on_fork(block_id_type head_of_fork) const;
/// Warning: to correctly process old blocks initially old chain-id should be set.
chain_id_type hive_chain_id = STEEM_CHAIN_ID;
chain_id_type get_chain_id() const;
void set_chain_id( const chain_id_type& chain_id );
/** Allows to visit all stored blocks until processor returns true. Caller is responsible for block disassembling
* const signed_block_header& - header of previous block
* const signed_block& - block to be processed currently
*/
void foreach_block(std::function<bool(const signed_block_header&, const signed_block&)> processor) const;
/// Allows to process all blocks and visit all transactions held there until processor returns true.
void foreach_tx(std::function<bool(const signed_block_header&, const signed_block&,
const signed_transaction&, uint32_t)> processor) const;
/// Allows to process all operations held in blocks and transactions until processor returns true.
void foreach_operation(std::function<bool(const signed_block_header&, const signed_block&,
const signed_transaction&, uint32_t, const operation&, uint16_t)> processor) const;
const witness_object& get_witness( const account_name_type& name )const;
const witness_object* find_witness( const account_name_type& name )const;
/// Gives name of account with NO authority which holds resources for payouts according to proposals (at a time of given hardfork)
std::string get_treasury_name( uint32_t hardfork )const;
/// Name of the treasury account as of the currently active hardfork.
std::string get_treasury_name()const { return get_treasury_name( get_hardfork() ); }
/// Treasury account object as of the currently active hardfork.
const account_object& get_treasury()const { return get_account( get_treasury_name() ); }
/// Returns true for any account name that was ever a treasury account
bool is_treasury( const account_name_type& name )const;
const account_object& get_account( const account_id_type id )const;
const account_object* find_account( const account_id_type& id )const;
const account_object& get_account( const account_name_type& name )const;
const account_object* find_account( const account_name_type& name )const;
const comment_object& get_comment( comment_id_type comment_id )const;
const comment_object& get_comment( const account_id_type& author, const shared_string& permlink )const;
const comment_object* find_comment( const account_id_type& author, const shared_string& permlink )const;
const comment_object& get_comment( const account_name_type& author, const shared_string& permlink )const;
const comment_object* find_comment( const account_name_type& author, const shared_string& permlink )const;
#ifndef ENABLE_MIRA
const comment_object& get_comment( const account_id_type& author, const string& permlink )const;
const comment_object* find_comment( const account_id_type& author, const string& permlink )const;
const comment_object& get_comment( const account_name_type& author, const string& permlink )const;
const comment_object* find_comment( const account_name_type& author, const string& permlink )const;
#endif
const escrow_object& get_escrow( const account_name_type& name, uint32_t escrow_id )const;
const escrow_object* find_escrow( const account_name_type& name, uint32_t escrow_id )const;
const limit_order_object& get_limit_order( const account_name_type& owner, uint32_t id )const;
const limit_order_object* find_limit_order( const account_name_type& owner, uint32_t id )const;
const savings_withdraw_object& get_savings_withdraw( const account_name_type& owner, uint32_t request_id )const;
const savings_withdraw_object* find_savings_withdraw( const account_name_type& owner, uint32_t request_id )const;
const dynamic_global_property_object& get_dynamic_global_properties()const;
const node_property_object& get_node_properties()const;
const feed_history_object& get_feed_history()const;
const witness_schedule_object& get_witness_schedule_object()const;
const hardfork_property_object& get_hardfork_property_object()const;
private:
const comment_object& get_comment_for_payout_time( const comment_object& comment )const;
public:
const time_point_sec calculate_discussion_payout_time( const comment_object& comment )const;
const time_point_sec calculate_discussion_payout_time( const comment_cashout_object& comment_cashout )const;
const reward_fund_object& get_reward_fund()const;
const comment_cashout_object* find_comment_cashout( const comment_object& comment ) const;
const comment_cashout_object* find_comment_cashout( comment_id_type comment_id ) const;
const comment_object& get_comment( const comment_cashout_object& comment_cashout ) const;
void remove_old_comments();
asset get_effective_vesting_shares( const account_object& account, asset_symbol_type vested_symbol )const;
void max_bandwidth_per_share()const;
/**
* Calculate the percent of block production slots that were missed in the
* past 128 blocks, not including the current block.
*/
uint32_t witness_participation_rate()const;
void add_checkpoints( const flat_map<uint32_t,block_id_type>& checkpts );
const flat_map<uint32_t,block_id_type> get_checkpoints()const { return _checkpoints; }
bool before_last_checkpoint()const;
bool push_block( const signed_block& b, uint32_t skip = skip_nothing );
void push_transaction( const signed_transaction& trx, uint32_t skip = skip_nothing );
void _maybe_warn_multiple_production( uint32_t height )const;
bool _push_block( const signed_block& b );
void _push_transaction( const signed_transaction& trx );
void pop_block();
void clear_pending();
void push_virtual_operation( const operation& op );
void pre_push_virtual_operation( const operation& op );
void post_push_virtual_operation( const operation& op );
/*
* Pushing an action without specifying an execution time will execute at head block.
* The execution time must be greater than or equal to head block.
*/
void push_required_action( const required_automated_action& a, time_point_sec execution_time );
void push_required_action( const required_automated_action& a );
void push_optional_action( const optional_automated_action& a, time_point_sec execution_time );
void push_optional_action( const optional_automated_action& a );
void notify_pre_apply_required_action( const required_action_notification& note );
void notify_post_apply_required_action( const required_action_notification& note );
void notify_pre_apply_optional_action( const optional_action_notification& note );
void notify_post_apply_optional_action( const optional_action_notification& note );
/**
* This method is used to track applied operations during the evaluation of a block, these
* operations should include any operation actually included in a transaction as well
* as any implied/virtual operations that resulted, such as filling an order.
* The applied operations are cleared after post_apply_operation.
*/
void notify_pre_apply_operation( const operation_notification& note );
void notify_post_apply_operation( const operation_notification& note );
void notify_pre_apply_block( const block_notification& note );
void notify_post_apply_block( const block_notification& note );
void notify_irreversible_block( uint32_t block_num );
void notify_pre_apply_transaction( const transaction_notification& note );
void notify_post_apply_transaction( const transaction_notification& note );
using apply_required_action_handler_t = std::function< void(const required_action_notification&) >;
using apply_optional_action_handler_t = std::function< void(const optional_action_notification&) >;
using apply_operation_handler_t = std::function< void(const operation_notification&) >;
using apply_transaction_handler_t = std::function< void(const transaction_notification&) >;
using apply_block_handler_t = std::function< void(const block_notification&) >;
using irreversible_block_handler_t = std::function< void(uint32_t) >;
using reindex_handler_t = std::function< void(const reindex_notification&) >;
using generate_optional_actions_handler_t = std::function< void(const generate_optional_actions_notification&) >;
using prepare_snapshot_handler_t = std::function < void(const database&, const database::abstract_index_cntr_t&)>;
private:
template <typename TSignal,
typename TNotification = std::function<typename TSignal::signature_type>>
boost::signals2::connection connect_impl( TSignal& signal, const TNotification& func,
const abstract_plugin& plugin, int32_t group, const std::string& item_name = "" );
template< bool IS_PRE_OPERATION >
boost::signals2::connection any_apply_operation_handler_impl( const apply_operation_handler_t& func,
const abstract_plugin& plugin, int32_t group );
public:
boost::signals2::connection add_pre_apply_required_action_handler ( const apply_required_action_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_post_apply_required_action_handler( const apply_required_action_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_pre_apply_optional_action_handler ( const apply_optional_action_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_post_apply_optional_action_handler( const apply_optional_action_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_pre_apply_operation_handler ( const apply_operation_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_post_apply_operation_handler ( const apply_operation_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_pre_apply_transaction_handler ( const apply_transaction_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_post_apply_transaction_handler ( const apply_transaction_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_pre_apply_block_handler ( const apply_block_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_post_apply_block_handler ( const apply_block_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_irreversible_block_handler ( const irreversible_block_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_pre_reindex_handler ( const reindex_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_post_reindex_handler ( const reindex_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_generate_optional_actions_handler ( const generate_optional_actions_handler_t& func, const abstract_plugin& plugin, int32_t group = -1 );
boost::signals2::connection add_prepare_snapshot_handler (const prepare_snapshot_handler_t& func, const abstract_plugin& plugin, int32_t group = -1);
//////////////////// db_witness_schedule.cpp ////////////////////
/**
* @brief Get the witness scheduled for block production in a slot.
*
* slot_num always corresponds to a time in the future.
*
* If slot_num == 1, returns the next scheduled witness.
* If slot_num == 2, returns the next scheduled witness after
* 1 block gap.
*
* Use the get_slot_time() and get_slot_at_time() functions
* to convert between slot_num and timestamp.
*/
account_name_type get_scheduled_witness(uint32_t slot_num)const;
/**
* Get the time at which the given slot occurs.
*
* If slot_num == 0, return time_point_sec().
*
* If slot_num == N for N > 0, return the Nth next
* block-interval-aligned time greater than head_block_time().
*/
fc::time_point_sec get_slot_time(uint32_t slot_num)const;
/**
* Get the last slot which occurs AT or BEFORE the given time.
*
* The return value is the greatest value N such that
* get_slot_time( N ) <= when.
*
* If no such N exists, return 0.
*/
uint32_t get_slot_at_time(fc::time_point_sec when)const;
/** @return the HBD created and deposited to_account, may return HIVE if there is no median feed */
std::pair< asset, asset > create_hbd( const account_object& to_account, asset hive, bool to_reward_balance=false );
using Before = std::function< void( const asset& ) >;
asset adjust_account_vesting_balance(const account_object& to_account, const asset& liquid, bool to_reward_balance, Before&& before_vesting_callback );
asset create_vesting( const account_object& to_account, asset hive, bool to_reward_balance=false );
void adjust_total_payout( const comment_cashout_object& a, const asset& hbd, const asset& curator_hbd_value, const asset& beneficiary_value );
void adjust_liquidity_reward( const account_object& owner, const asset& volume, bool is_hbd );
void adjust_balance( const account_object& a, const asset& delta );
void adjust_balance( const account_name_type& name, const asset& delta )
{
adjust_balance( get_account( name ), delta );
}
void adjust_savings_balance( const account_object& a, const asset& delta );
void adjust_reward_balance( const account_object& a, const asset& value_delta, const asset& share_delta = asset(0,VESTS_SYMBOL) );
void adjust_reward_balance( const account_name_type& name, const asset& value_delta, const asset& share_delta = asset(0,VESTS_SYMBOL) )
{
adjust_reward_balance( get_account( name ), value_delta, share_delta );
}
void adjust_supply( const asset& delta, bool adjust_vesting = false );
void adjust_rshares2( fc::uint128_t old_rshares2, fc::uint128_t new_rshares2 );
void update_owner_authority( const account_object& account, const authority& owner_authority );
asset get_balance( const account_object& a, asset_symbol_type symbol )const;
asset get_savings_balance( const account_object& a, asset_symbol_type symbol )const;
/// Convenience overload: resolves the account by name and delegates to the
/// account_object overload above.
asset get_balance( const account_name_type& aname, asset_symbol_type symbol )const
{
const account_object& account = get_account( aname );
return get_balance( account, symbol );
}
/** this updates the votes for witnesses as a result of account voting proxy changing */
void adjust_proxied_witness_votes( const account_object& a,
const std::array< share_type, HIVE_MAX_PROXY_RECURSION_DEPTH+1 >& delta,
int depth = 0 );
/** this updates the votes for all witnesses as a result of account VESTS changing */
void adjust_proxied_witness_votes( const account_object& a, share_type delta, int depth = 0 );
/** this is called by `adjust_proxied_witness_votes` when account proxy to self */
void adjust_witness_votes( const account_object& a, share_type delta );
/** this updates the vote of a single witness as a result of a vote being added or removed*/
void adjust_witness_vote( const witness_object& obj, share_type delta );
/** clears all vote records for a particular account but does not update the
* witness vote totals. Vote totals should be updated first via a call to
* adjust_proxied_witness_votes( a, -a.witness_vote_weight() )
*/
void clear_witness_votes( const account_object& a );
void process_vesting_withdrawals();
share_type pay_curators( const comment_object& comment, const comment_cashout_object& comment_cashout, share_type& max_rewards );
share_type cashout_comment_helper( util::comment_reward_context& ctx, const comment_object& comment, const comment_cashout_object& comment_cashout, bool forward_curation_remainder = true );
void process_comment_cashout();
void process_funds();
void process_conversions();
void process_savings_withdraws();
void process_subsidized_accounts();
void account_recovery_processing();
void expire_escrow_ratification();
void process_decline_voting_rights();
void update_median_feed();
asset get_liquidity_reward()const;
asset get_content_reward()const;
asset get_producer_reward();
asset get_curation_reward()const;
asset get_pow_reward()const;
uint16_t get_curation_rewards_percent() const;
share_type pay_reward_funds( share_type reward );
void pay_liquidity_reward();
/**
* Helper method to return the current HBD value of a given amount of
* HIVE. Return 0 HBD if there isn't a current_median_history
*/
asset to_hbd( const asset& hive )const;
asset to_hive( const asset& hbd )const;
time_point_sec head_block_time()const;
uint32_t head_block_num()const;
block_id_type head_block_id()const;
node_property_object& node_properties();
uint32_t last_non_undoable_block_num() const;
//////////////////// db_init.cpp ////////////////////
void initialize_evaluators();
void register_custom_operation_interpreter( std::shared_ptr< custom_operation_interpreter > interpreter );
std::shared_ptr< custom_operation_interpreter > get_custom_json_evaluator( const custom_id_type& id );
/// Reset the object graph in-memory
void initialize_indexes();
void resetState(const open_args& args);
void init_schema();
void init_genesis(uint64_t initial_supply = HIVE_INIT_SUPPLY, uint64_t hbd_initial_supply = HIVE_HBD_INIT_SUPPLY );
/**
* This method validates transactions without adding it to the pending state.
* @throw if an error occurs
*/
void validate_transaction( const signed_transaction& trx );
/** when popping a block, the transactions that were removed get cached here so they
* can be reapplied at the proper time */
std::deque< signed_transaction > _popped_tx;
vector< signed_transaction > _pending_tx;
bool apply_order( const limit_order_object& new_order_object );
bool fill_order( const limit_order_object& order, const asset& pays, const asset& receives );
void cancel_order( const limit_order_object& obj );
int match( const limit_order_object& bid, const limit_order_object& ask, const price& trade_price );
void perform_vesting_share_split( uint32_t magnitude );
void retally_comment_children();
void retally_witness_votes();
void retally_witness_vote_counts( bool force = false );
void retally_liquidity_weight();
void update_virtual_supply();
bool has_hardfork( uint32_t hardfork )const;
uint32_t get_hardfork()const;
/* For testing and debugging only. Given a hardfork
with id N, applies all hardforks with id <= N */
void set_hardfork( uint32_t hardfork, bool process_now = true );
void validate_invariants()const;
/**
* @}
*/
const std::string& get_json_schema() const;
void set_flush_interval( uint32_t flush_blocks );
void check_free_memory( bool force_print, uint32_t current_block_num );
void apply_transaction( const signed_transaction& trx, uint32_t skip = skip_nothing );
void apply_required_action( const required_automated_action& a );
void apply_optional_action( const optional_automated_action& a );
optional< chainbase::database::session >& pending_transaction_session();
void set_index_delegate( const std::string& n, index_delegate&& d );
const index_delegate& get_index_delegate( const std::string& n );
bool has_index_delegate( const std::string& n );
const index_delegate_map& index_delegates();
#ifdef IS_TEST_NET
bool liquidity_rewards_enabled = true;
bool skip_price_feed_limit_check = true;
bool skip_transaction_delta_check = true;
bool disable_low_mem_warning = true;
#endif
#ifdef HIVE_ENABLE_SMT
///Smart Media Tokens related methods
///@{
void validate_smt_invariants()const;
///@}
#endif
//Restores balances for some accounts, which were cleared by mistake during HF23
void restore_accounts( const hf23_helper::hf23_items& balances, const std::set< std::string >& restored_accounts );
//Clears all pending operations on account that involve balance, moves tokens to treasury account
void clear_accounts( hf23_helper::hf23_items& balances, const std::set< std::string >& cleared_accounts );
void clear_account( const account_object& account,
asset* transferred_hbd_ptr = nullptr, asset* transferred_hive_ptr = nullptr,
asset* converted_vests_ptr = nullptr, asset* hive_from_vests_ptr = nullptr );
protected:
//Mark pop_undo() as protected -- we do not want outside calling pop_undo(); it should call pop_block() instead
//void pop_undo() { object_database::pop_undo(); }
void notify_changed_objects();
private:
optional< chainbase::database::session > _pending_tx_session;
void apply_block( const signed_block& next_block, uint32_t skip = skip_nothing );
void _apply_block( const signed_block& next_block );
void _apply_transaction( const signed_transaction& trx );
void apply_operation( const operation& op );
void process_required_actions( const required_automated_actions& actions );
void process_optional_actions( const optional_automated_actions& actions );
///Steps involved in applying a new block
///@{
const witness_object& validate_block_header( uint32_t skip, const signed_block& next_block )const;
void create_block_summary(const signed_block& next_block);
//calculates sum of all balances stored on given account, returns true if any is nonzero
bool collect_account_total_balance( const account_object& account, asset* total_hive, asset* total_hbd,
asset* total_vests, asset* vesting_shares_hive_value );
//removes (burns) balances held on null account
void clear_null_account_balance();
//moves balances from old treasury account to current one
void consolidate_treasury_balance();
//locks given account by clearing its authorizations and removing pending recovery [account change] request (used for treasury in HF code)
void lock_account( const account_object& account );
void process_proposals( const block_notification& note );
void process_delayed_voting(const block_notification& note );
void update_global_dynamic_data( const signed_block& b );
void update_signing_witness(const witness_object& signing_witness, const signed_block& new_block);
void update_last_irreversible_block();
void migrate_irreversible_state();
void clear_expired_transactions();
void clear_expired_orders();
void clear_expired_delegations();
void process_header_extensions( const signed_block& next_block, required_automated_actions& req_actions, optional_automated_actions& opt_actions );
void generate_required_actions();
void generate_optional_actions();
void init_hardforks();
void process_hardforks();
void apply_hardfork( uint32_t hardfork );
///@}
#ifdef HIVE_ENABLE_SMT
template< typename smt_balance_object_type, typename modifier_type >
void adjust_smt_balance( const account_object& owner, const asset& delta, modifier_type&& modifier );
#endif
void modify_balance( const account_object& a, const asset& delta, bool check_balance );
void modify_reward_balance( const account_object& a, const asset& value_delta, const asset& share_delta, bool check_balance );
/// Builds a notification describing `op` together with a snapshot of the current
/// processing context (transaction id, block number, position of the transaction
/// in the block and of the operation in the transaction).
operation_notification create_operation_notification( const operation& op )const
{
operation_notification result( op );
result.trx_id       = _current_trx_id;
result.block        = _current_block_num;
result.trx_in_block = _current_trx_in_block;
result.op_in_trx    = _current_op_in_trx;
return result;
}
public:
/// @return id of the transaction currently being applied (default-constructed when outside transaction processing).
const transaction_id_type& get_current_trx() const
{
return _current_trx_id;
}
/// @return zero-based index of the operation currently being applied within the current transaction.
uint16_t get_current_op_in_trx() const
{
return _current_op_in_trx;
}
/// @return per-block removal threshold used by proposal (SPS) processing; initialized to -1 (presumably "no limit" -- confirm with the proposal processing code).
int16_t get_sps_remove_threshold() const
{
return _sps_remove_threshold;
}
/// Sets the per-block removal threshold used by proposal (SPS) processing.
void set_sps_remove_threshold( int16_t val )
{
_sps_remove_threshold = val;
}
/// @return dumper used to collect per-operation benchmark statistics.
util::advanced_benchmark_dumper& get_benchmark_dumper()
{
return _benchmark_dumper;
}
/// @return table of hardfork versions known to this node.
const hardfork_versions& get_hardfork_versions()
{
return _hardfork_versions;
}
private:
std::unique_ptr< database_impl > _my;
fork_database _fork_db;
hardfork_versions _hardfork_versions;
block_log _block_log;
// this function needs access to _plugin_index_signal
template< typename MultiIndexType >
friend void add_plugin_index( database& db );
transaction_id_type _current_trx_id;
uint32_t _current_block_num = 0;
int32_t _current_trx_in_block = 0;
uint16_t _current_op_in_trx = 0;
uint16_t _current_virtual_op = 0;
optional< block_id_type > _currently_processing_block_id;
flat_map<uint32_t,block_id_type> _checkpoints;
node_property_object _node_property_object;
uint32_t _flush_blocks = 0;
uint32_t _next_flush_block = 0;
uint32_t _last_free_gb_printed = 0;
uint16_t _shared_file_full_threshold = 0;
uint16_t _shared_file_scale_rate = 0;
int16_t _sps_remove_threshold = -1;
flat_map< custom_id_type, std::shared_ptr< custom_operation_interpreter > > _custom_operation_interpreters;
std::string _json_schema;
util::advanced_benchmark_dumper _benchmark_dumper;
index_delegate_map _index_delegate_map;
hf23_helper::hf23_items _hf23_items;
fc::signal<void(const required_action_notification&)> _pre_apply_required_action_signal;
fc::signal<void(const required_action_notification&)> _post_apply_required_action_signal;
fc::signal<void(const optional_action_notification&)> _pre_apply_optional_action_signal;
fc::signal<void(const optional_action_notification&)> _post_apply_optional_action_signal;
fc::signal<void(const operation_notification&)> _pre_apply_operation_signal;
/**
* This signal is emitted for plugins to process every operation after it has been fully applied.
*/
fc::signal<void(const operation_notification&)> _post_apply_operation_signal;
/**
* This signal is emitted when we start processing a block.
*
* You may not yield from this callback because the blockchain is holding
* the write lock and may be in an "inconsistent state" until after it is
* released.
*/
fc::signal<void(const block_notification&)> _pre_apply_block_signal;
fc::signal<void(uint32_t)> _on_irreversible_block;
/**
* This signal is emitted after all operations and virtual operation for a
* block have been applied but before the get_applied_operations() are cleared.
*
* You may not yield from this callback because the blockchain is holding
* the write lock and may be in an "inconsistent state" until after it is
* released.
*/
fc::signal<void(const block_notification&)> _post_apply_block_signal;
/**
* This signal is emitted any time a new transaction is about to be applied
* to the chain state.
*/
fc::signal<void(const transaction_notification&)> _pre_apply_transaction_signal;
/**
* This signal is emitted any time a new transaction has been applied to the
* chain state.
*/
fc::signal<void(const transaction_notification&)> _post_apply_transaction_signal;
/**
* Emitted when reindexing starts
*/
fc::signal<void(const reindex_notification&)> _pre_reindex_signal;
/**
* Emitted when reindexing finishes
*/
fc::signal<void(const reindex_notification&)> _post_reindex_signal;
fc::signal<void(const generate_optional_actions_notification& )> _generate_optional_actions_signal;
fc::signal<void(const database&, const database::abstract_index_cntr_t&)> _prepare_snapshot_signal;
/**
* Emitted After a block has been applied and committed. The callback
* should not yield and should execute quickly.
*/
//fc::signal<void(const vector< object_id_type >&)> changed_objects;
/** this signal is emitted any time an object is removed and contains a
* pointer to the last value of every object that was removed.
*/
//fc::signal<void(const vector<const object*>&)> removed_objects;
/**
* Internal signal to execute deferred registration of plugin indexes.
*/
fc::signal<void()> _plugin_index_signal;
};
struct reindex_notification
{
// Payload passed to pre-/post-reindex handlers (see add_pre_reindex_handler / add_post_reindex_handler).
reindex_notification( const open_args& a ) : args( a ) {}
bool force_replay = false; // set when a full replay was explicitly requested -- confirm with chain plugin usage
bool reindex_success = false; // true once reindexing completed without error (meaningful in the post-reindex notification)
uint32_t last_block_number = 0; // number of the last block processed during reindexing
const open_args& args; // options the database was opened with; referenced object must outlive this notification
};
} }

View File

@ -0,0 +1,123 @@
#pragma once
#include <hive/protocol/exceptions.hpp>
/** Declares the pair of base exception types (validate + evaluate) for one operation.
 *  Error codes are derived from the operation's tag inside the protocol::operation
 *  static variant: 4040000 + 100*tag for validation, 4050000 + 100*tag for evaluation,
 *  leaving up to 99 sequence numbers per operation for the more specific macros below.
 *  (No comments inside the macro body -- a // comment would swallow the line-continuation backslash.)
 */
#define HIVE_DECLARE_OP_BASE_EXCEPTIONS( op_name ) \
FC_DECLARE_DERIVED_EXCEPTION( \
op_name ## _validate_exception, \
hive::chain::operation_validate_exception, \
4040000 + 100 * protocol::operation::tag< protocol::op_name ## _operation >::value, \
#op_name "_operation validation exception" \
) \
FC_DECLARE_DERIVED_EXCEPTION( \
op_name ## _evaluate_exception, \
hive::chain::operation_evaluate_exception, \
4050000 + 100 * protocol::operation::tag< protocol::op_name ## _operation >::value, \
#op_name "_operation evaluation exception" \
)
/// Declares an operation-specific validation exception derived from the operation's base
/// validate exception; `seqnum` (1..99) offsets the code within that operation's 4040000-based range.
#define HIVE_DECLARE_OP_VALIDATE_EXCEPTION( exc_name, op_name, seqnum, msg ) \
FC_DECLARE_DERIVED_EXCEPTION( \
op_name ## _ ## exc_name, \
hive::chain::op_name ## _validate_exception, \
4040000 + 100 * protocol::operation::tag< protocol::op_name ## _operation >::value \
+ seqnum, \
msg \
)
/// Declares an operation-specific evaluation exception derived from the operation's base
/// evaluate exception; `seqnum` (1..99) offsets the code within that operation's 4050000-based range.
#define HIVE_DECLARE_OP_EVALUATE_EXCEPTION( exc_name, op_name, seqnum, msg ) \
FC_DECLARE_DERIVED_EXCEPTION( \
op_name ## _ ## exc_name, \
hive::chain::op_name ## _evaluate_exception, \
4050000 + 100 * protocol::operation::tag< protocol::op_name ## _operation >::value \
+ seqnum, \
msg \
)
/// Declares an internal (non-operation) exception `internal_<exc_name>` derived from
/// internal_exception, with code 4990000 + seqnum.
#define HIVE_DECLARE_INTERNAL_EXCEPTION( exc_name, seqnum, msg ) \
FC_DECLARE_DERIVED_EXCEPTION( \
internal_ ## exc_name, \
hive::chain::internal_exception, \
4990000 + seqnum, \
msg \
)
/** Invokes `signal( __VA_ARGS__ )` with a protective try/catch: a plugin_exception is
 *  re-thrown (it must abort processing), while any other exception escaping a plugin's
 *  handler is only logged, so a misbehaving plugin cannot halt the chain.
 *  (No comments inside the macro body -- a // comment would swallow the line-continuation backslash.)
 */
#define HIVE_TRY_NOTIFY( signal, ... ) \
try \
{ \
signal( __VA_ARGS__ ); \
} \
catch( const hive::chain::plugin_exception& e ) \
{ \
throw; \
} \
catch( const fc::exception& e ) \
{ \
elog( "Caught exception in plugin: ${e}", ("e", e.to_detail_string() ) ); \
} \
catch( const boost::exception& e ) \
{ \
elog( "Caught unexpected exception in plugin: ${e}", ("e", \
boost::diagnostic_information(e)) ); \
} \
catch( const std::exception& e ) \
{ \
elog( "Caught unexpected exception in plugin: ${e}", ("e", e.what())); \
} \
catch( ... ) \
{ \
wlog( "Caught unexpected exception in plugin" ); \
}
namespace hive { namespace chain {
// Root of the chain exception hierarchy; all chain error codes live in the 4xxxxxx range.
FC_DECLARE_EXCEPTION( chain_exception, 4000000, "blockchain exception" )
FC_DECLARE_DERIVED_EXCEPTION( database_query_exception, hive::chain::chain_exception, 4010000, "database query exception" )
FC_DECLARE_DERIVED_EXCEPTION( block_validate_exception, hive::chain::chain_exception, 4020000, "block validation exception" )
FC_DECLARE_DERIVED_EXCEPTION( transaction_exception, hive::chain::chain_exception, 4030000, "transaction validation exception" )
FC_DECLARE_DERIVED_EXCEPTION( operation_validate_exception, hive::chain::chain_exception, 4040000, "operation validation exception" )
FC_DECLARE_DERIVED_EXCEPTION( operation_evaluate_exception, hive::chain::chain_exception, 4050000, "operation evaluation exception" )
FC_DECLARE_DERIVED_EXCEPTION( utility_exception, hive::chain::chain_exception, 4060000, "utility method exception" )
FC_DECLARE_DERIVED_EXCEPTION( undo_database_exception, hive::chain::chain_exception, 4070000, "undo database exception" )
FC_DECLARE_DERIVED_EXCEPTION( unlinkable_block_exception, hive::chain::chain_exception, 4080000, "unlinkable block" )
FC_DECLARE_DERIVED_EXCEPTION( unknown_hardfork_exception, hive::chain::chain_exception, 4090000, "chain attempted to apply unknown hardfork" )
// plugin_exception is deliberately re-thrown by HIVE_TRY_NOTIFY while other exceptions are only logged.
FC_DECLARE_DERIVED_EXCEPTION( plugin_exception, hive::chain::chain_exception, 4100000, "plugin exception" )
FC_DECLARE_DERIVED_EXCEPTION( block_log_exception, hive::chain::chain_exception, 4110000, "block log exception" )
FC_DECLARE_DERIVED_EXCEPTION( market_exception, hive::chain::chain_exception, 4120000, "market exception" )
FC_DECLARE_DERIVED_EXCEPTION( order_match_exception, hive::chain::market_exception, 4120100, "order match exception" )
// NOTE(review): order_fill_exception reuses code 4120100, colliding with order_match_exception above;
// presumably it was meant to be 4120200 -- confirm before relying on numeric codes to distinguish the two.
FC_DECLARE_DERIVED_EXCEPTION( order_fill_exception, hive::chain::market_exception, 4120100, "order fill exception" )
FC_DECLARE_DERIVED_EXCEPTION( transaction_expiration_exception, hive::chain::transaction_exception, 4030100, "transaction expiration exception" )
FC_DECLARE_DERIVED_EXCEPTION( transaction_tapos_exception, hive::chain::transaction_exception, 4030200, "transaction tapos exception" )
FC_DECLARE_DERIVED_EXCEPTION( pop_empty_chain, hive::chain::undo_database_exception, 4070001, "there are no blocks to pop" )
// Per-operation exception families (validate + evaluate), generated from the macros above.
HIVE_DECLARE_OP_BASE_EXCEPTIONS( transfer );
// HIVE_DECLARE_OP_EVALUATE_EXCEPTION( from_account_not_whitelisted, transfer, 1, "owner mismatch" )
HIVE_DECLARE_OP_BASE_EXCEPTIONS( account_create );
HIVE_DECLARE_OP_EVALUATE_EXCEPTION( max_auth_exceeded, account_create, 1, "Exceeds max authority fan-out" )
HIVE_DECLARE_OP_EVALUATE_EXCEPTION( auth_account_not_found, account_create, 2, "Auth account not found" )
HIVE_DECLARE_OP_BASE_EXCEPTIONS( account_update );
HIVE_DECLARE_OP_EVALUATE_EXCEPTION( max_auth_exceeded, account_update, 1, "Exceeds max authority fan-out" )
HIVE_DECLARE_OP_EVALUATE_EXCEPTION( auth_account_not_found, account_update, 2, "Auth account not found" )
// Internal exceptions carry codes in the 4990000+ range (see HIVE_DECLARE_INTERNAL_EXCEPTION).
FC_DECLARE_DERIVED_EXCEPTION( internal_exception, hive::chain::chain_exception, 4990000, "internal exception" )
HIVE_DECLARE_INTERNAL_EXCEPTION( verify_auth_max_auth_exceeded, 1, "Exceeds max authority fan-out" )
HIVE_DECLARE_INTERNAL_EXCEPTION( verify_auth_account_not_found, 2, "Auth account not found" )
} } // hive::chain
#pragma once
#include <fc/exception/exception.hpp>
#include <hive/protocol/exceptions.hpp>
namespace hive { namespace chain {
} } // hive::chain

View File

@ -0,0 +1,168 @@
#pragma once
#include <hive/chain/database.hpp>
/*
* This file provides with() functions which modify the database
* temporarily, then restore it. These functions are mostly internal
* implementation detail of the database.
*
* Essentially, we want to be able to use "finally" to restore the
* database regardless of whether an exception is thrown or not, but there
* is no "finally" in C++. Instead, C++ requires us to create a struct
* and put the finally block in a destructor. Aagh!
*/
namespace hive { namespace chain { namespace detail {
/**
* Class used to help the with_skip_flags implementation.
* It must be defined in this header because it must be
* available to the with_skip_flags implementation,
* which is a template and therefore must also be defined
* in this header.
*/
struct skip_flags_restorer
{
  /// Remembers @p old_skip_flags; the destructor writes it back into @p npo.
  skip_flags_restorer( node_property_object& npo, uint32_t old_skip_flags )
    : _npo( npo ), _old_skip_flags( old_skip_flags )
  {}

  /// Restores the saved flags on every exit path (including exceptions).
  ~skip_flags_restorer()
  {
    _npo.skip_flags = _old_skip_flags;
  }

  node_property_object& _npo;  // node properties whose skip_flags are restored
  uint32_t _old_skip_flags; // initialized in ctor
};
/**
* Class used to help the without_pending_transactions
* implementation.
*
* TODO: Change the name of this class to better reflect the fact
* that it restores popped transactions as well as pending transactions.
*/
struct pending_transactions_restorer
{
  /// Takes ownership of the currently pending transactions and clears the
  /// database's pending state so the caller can work on a clean slate.
  pending_transactions_restorer( database& db, std::vector<signed_transaction>&& pending_transactions )
    : _db(db), _pending_transactions( std::move(pending_transactions) )
  {
    _db.clear_pending();
  }

  /// Re-applies first the popped transactions, then the saved pending ones.
  /// Transactions that no longer validate are culled; once the time budget
  /// (HIVE_PENDING_TRANSACTION_EXECUTION_LIMIT) is exhausted, the remainder
  /// is postponed back into _pending_tx without being executed.
  ~pending_transactions_restorer()
  {
    auto start = fc::time_point::now();
    bool apply_trxs = true;
    uint32_t applied_txs = 0;
    uint32_t postponed_txs = 0;

    for( const auto& tx : _db._popped_tx )
    {
      // stop applying (but keep queueing) once the time budget is spent
      if( apply_trxs && fc::time_point::now() - start > HIVE_PENDING_TRANSACTION_EXECUTION_LIMIT ) apply_trxs = false;
      if( apply_trxs )
      {
        try {
          if( !_db.is_known_transaction( tx.id() ) ) {
            // since push_transaction() takes a signed_transaction,
            // the operation_results field will be ignored.
            _db._push_transaction( tx );
            applied_txs++;
          }
        } catch ( const fc::exception& ) {} // popped transactions that fail are silently culled
      }
      else
      {
        _db._pending_tx.push_back( tx );
        postponed_txs++;
      }
    }
    _db._popped_tx.clear();

    for( const signed_transaction& tx : _pending_transactions )
    {
      if( apply_trxs && fc::time_point::now() - start > HIVE_PENDING_TRANSACTION_EXECUTION_LIMIT ) apply_trxs = false;
      if( apply_trxs )
      {
        try
        {
          if( !_db.is_known_transaction( tx.id() ) ) {
            // since push_transaction() takes a signed_transaction,
            // the operation_results field will be ignored.
            _db._push_transaction( tx );
            applied_txs++;
          }
        }
        catch( const transaction_exception& e )
        {
          dlog( "Pending transaction became invalid after switching to block ${b} ${n} ${t}",
              ("b", _db.head_block_id())("n", _db.head_block_num())("t", _db.head_block_time()) );
          dlog( "The invalid transaction caused exception ${e}", ("e", e.to_detail_string()) );
          dlog( "${t}", ("t", tx) );
        }
        catch( const fc::exception& e )
        {
          /*
          dlog( "Pending transaction became invalid after switching to block ${b} ${n} ${t}",
              ("b", _db.head_block_id())("n", _db.head_block_num())("t", _db.head_block_time()) );
          dlog( "The invalid pending transaction caused exception ${e}", ("e", e.to_detail_string() ) );
          dlog( "${t}", ("t", tx) );
          */
        }
      }
      else
      {
        _db._pending_tx.push_back( tx );
        postponed_txs++;
      }
    }

    // BUGFIX: was `if( postponed_txs++ )` — the stray post-increment inflated the
    // count reported in the log message below by one.
    if( postponed_txs > 0 )
    {
      wlog( "Postponed ${p} pending transactions. ${a} were applied.", ("p", postponed_txs)("a", applied_txs) );
    }
  }

  database& _db;
  std::vector< signed_transaction > _pending_transactions;
};
/**
* Set the skip_flags to the given value, call callback,
* then reset skip_flags to their previous value after
* callback is done.
*/
template< typename Lambda >
void with_skip_flags(
  database& db,
  uint32_t skip_flags,
  Lambda callback )
{
  node_property_object& props = db.node_properties();
  // The restorer's destructor writes the previous flags back on every exit
  // path, including when callback() throws.
  skip_flags_restorer restore_on_exit( props, props.skip_flags );
  props.skip_flags = skip_flags;
  callback();
}
/**
* Empty pending_transactions, call callback,
* then reset pending_transactions after callback is done.
*
* Pending transactions which no longer validate will be culled.
*/
template< typename Lambda >
void without_pending_transactions(
  database& db,
  std::vector<signed_transaction>&& pending_transactions,
  Lambda callback )
{
  // The restorer clears pending state now; its destructor re-applies (or
  // culls/postpones) the saved transactions on every exit path.
  pending_transactions_restorer restore_on_exit( db, std::move( pending_transactions ) );
  callback();
}
} } } // hive::chain::detail

View File

@ -0,0 +1,96 @@
#pragma once
#include <hive/protocol/exceptions.hpp>
#include <hive/protocol/operations.hpp>
namespace hive { namespace chain {
class database;
/// Abstract interface for applying a single alternative of the OperationType
/// static_variant to chain state.
template< typename OperationType=hive::protocol::operation >
class evaluator
{
public:
  virtual ~evaluator() {}

  /// Apply the concrete operation held by @p op to the database.
  virtual void apply(const OperationType& op) = 0;
  /// static_variant tag of the concrete operation type this evaluator handles.
  virtual int get_type()const = 0;
  /// Human-readable name of the concrete operation held by @p op.
  virtual std::string get_name( const OperationType& op ) = 0;
};
/// CRTP base implementing the evaluator interface for one concrete
/// EvaluatorType, which must provide `operation_type` and `do_apply()`.
template< typename EvaluatorType, typename OperationType=hive::protocol::operation >
class evaluator_impl : public evaluator<OperationType>
{
public:
  typedef OperationType operation_sv_type;
  // typedef typename EvaluatorType::operation_type op_type;

  evaluator_impl( database& d )
    : _db(d) {}

  virtual ~evaluator_impl() {}

  /// Extracts the concrete operation from the static_variant and forwards it
  /// to EvaluatorType::do_apply(). Presumably get<> rejects a mismatched
  /// alternative — verify against static_variant semantics.
  virtual void apply(const OperationType& o) final override
  {
    auto* eval = static_cast< EvaluatorType* >(this);
    const auto& op = o.template get< typename EvaluatorType::operation_type >();
    eval->do_apply(op);
  }

  /// static_variant tag of the operation type handled by EvaluatorType.
  virtual int get_type()const override { return OperationType::template tag< typename EvaluatorType::operation_type >::value; }

  /// Demangled C++ type name of the concrete operation held by @p o.
  virtual std::string get_name( const OperationType& o ) override
  {
    const auto& op = o.template get< typename EvaluatorType::operation_type >();
    return boost::core::demangle( typeid( op ).name() );
  }

  database& db() { return _db; }

protected:
  database& _db;
};
} }
// Declares X_evaluator handling X_operation; the do_apply() body is defined
// in a separate .cpp file. (Comments cannot appear inside the macro body —
// line splicing would merge them with the next line.)
#define HIVE_DEFINE_EVALUATOR( X ) \
class X ## _evaluator : public hive::chain::evaluator_impl< X ## _evaluator > \
{ \
public: \
typedef X ## _operation operation_type; \
\
X ## _evaluator( database& db ) \
: hive::chain::evaluator_impl< X ## _evaluator >( db ) \
{} \
\
void do_apply( const X ## _operation& o ); \
};
// Same as HIVE_DEFINE_EVALUATOR but for X_action evaluated within the
// ACTION static_variant instead of the default protocol operation variant.
#define HIVE_DEFINE_ACTION_EVALUATOR( X, ACTION ) \
class X ## _evaluator : public hive::chain::evaluator_impl< X ## _evaluator, ACTION > \
{ \
public: \
typedef X ## _action operation_type; \
\
X ## _evaluator( database& db ) \
: hive::chain::evaluator_impl< X ## _evaluator, ACTION >( db ) \
{} \
\
void do_apply( const X ## _action& o ); \
};
// Declares a plugin-owned evaluator for X_operation within the OPERATION
// variant; it additionally stores a raw (non-owning) PLUGIN* for use by
// do_apply().
#define HIVE_DEFINE_PLUGIN_EVALUATOR( PLUGIN, OPERATION, X ) \
class X ## _evaluator : public hive::chain::evaluator_impl< X ## _evaluator, OPERATION > \
{ \
public: \
typedef X ## _operation operation_type; \
\
X ## _evaluator( hive::chain::database& db, PLUGIN* plugin ) \
: hive::chain::evaluator_impl< X ## _evaluator, OPERATION >( db ), \
_plugin( plugin ) \
{} \
\
void do_apply( const X ## _operation& o ); \
\
PLUGIN* _plugin; \
};

View File

@ -0,0 +1,78 @@
#pragma once
#include <hive/chain/evaluator.hpp>
namespace hive { namespace chain {
/**
 * Holds one evaluator instance per alternative of the OperationType
 * static_variant, indexed by the operation's tag (which()).
 */
template< typename OperationType >
class evaluator_registry
{
public:
  evaluator_registry( database& d )
    : _db(d)
  {
    // pre-size the table with empty slots so tag values can index directly
    for( int i=0; i<OperationType::count(); i++ )
      _op_evaluators.emplace_back();
  }

  /// Construct an EvaluatorType (passing _db plus @p args) and install it in
  /// the slot matching its operation_type tag, replacing any previous one.
  template< typename EvaluatorType, typename... Args >
  void register_evaluator( Args... args )
  {
    _op_evaluators[ OperationType::template tag< typename EvaluatorType::operation_type >::value ].reset( new EvaluatorType(_db, args...) );
  }

private:
  /// Shared lookup. With CHECK==true a missing/out-of-range evaluator yields
  /// a reference to a static empty pointer; with CHECK==false it asserts.
  /// NOTE(review): assert() compiles out under NDEBUG, so an unchecked miss
  /// in a release build falls through — confirm this is acceptable.
  template< bool CHECK >
  unique_ptr< evaluator<OperationType> >& get_evaluator_impl( const OperationType& op )
  {
    static unique_ptr< evaluator<OperationType> > empty;
    int i_which = op.which();
    uint64_t u_which = uint64_t( i_which );
    if( i_which < 0 )
    {
      if( CHECK )
        return empty;
      else
        assert( "Negative operation tag" && false );
    }
    if( u_which >= _op_evaluators.size() )
    {
      if( CHECK )
        return empty;
      else
        assert( "No registered evaluator for this operation" && false );
    }
    unique_ptr< evaluator<OperationType> >& eval = _op_evaluators[ u_which ];
    if( !eval )
    {
      if( CHECK )
        return empty;
      else
        assert( "No registered evaluator for this operation" && false );
    }
    return eval;
  }

public:
  /// True iff an evaluator is registered for op's concrete type.
  bool is_evaluator( const OperationType& op )
  {
    return get_evaluator_impl< true/*CHECK*/ >( op ).get() != nullptr;
  }

  /// Unchecked lookup; the caller must ensure an evaluator is registered.
  evaluator<OperationType>& get_evaluator( const OperationType& op )
  {
    return *get_evaluator_impl< false/*CHECK*/ >( op );
  }

  std::vector< std::unique_ptr< evaluator<OperationType> > > _op_evaluators;
  database& _db;
};
} }

View File

@ -0,0 +1,107 @@
#pragma once
#include <hive/protocol/block.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/mem_fun.hpp>
namespace hive { namespace chain {
using hive::protocol::signed_block;
using hive::protocol::block_id_type;
/// One node of the fork database: a block plus its position in the block tree.
struct fork_item
{
private:
  fork_item(){}
public:
  fork_item( signed_block d )
    :num(d.block_num()),id(d.id()),data( std::move(d) ){}

  block_id_type previous_id()const { return data.previous; }

  /// Non-owning (weak) link — presumably the item for data.previous,
  /// maintained by fork_database; weak so pruned ancestors can be freed.
  weak_ptr< fork_item > prev;
  uint32_t num; // initialized in ctor
  /**
   * Used to flag a block as invalid and prevent other blocks from
   * building on top of it.
   */
  bool invalid = false;
  block_id_type id;
  signed_block data;
};
typedef shared_ptr<fork_item> item_ptr;
/**
* As long as blocks are pushed in order the fork
* database will maintain a linked tree of all blocks
* that branch from the start_block. The tree will
* have a maximum depth of 1024 blocks after which
* the database will start lopping off forks.
*
* Every time a block is pushed into the fork DB the
* block with the highest block_num will be returned.
*/
class fork_database
{
public:
  typedef vector<item_ptr> branch_type;
  /// The maximum number of blocks that may be skipped in an out-of-order push
  const static int MAX_BLOCK_REORDERING = 1024;

  fork_database();
  void reset();

  void start_block(signed_block b);
  void remove(block_id_type b);
  void set_head(shared_ptr<fork_item> h);
  bool is_known_block(const block_id_type& id)const;
  shared_ptr<fork_item> fetch_block(const block_id_type& id)const;
  /// May return several items: different forks can share a block number.
  vector<item_ptr> fetch_block_by_number(uint32_t n)const;

  /**
   * @return the new head block ( the longest fork )
   */
  shared_ptr<fork_item> push_block(const signed_block& b);
  shared_ptr<fork_item> head()const { return _head; }
  void pop_block();

  /**
   * Given two head blocks, return two branches of the fork graph that
   * end with a common ancestor (same prior block)
   */
  pair< branch_type, branch_type > fetch_branch_from(block_id_type first,
                            block_id_type second)const;
  shared_ptr<fork_item> walk_main_branch_to_num( uint32_t block_num )const;
  shared_ptr<fork_item> fetch_block_on_main_branch_by_number( uint32_t block_num )const;

  // index tags
  struct block_id;
  struct block_num;
  struct by_previous;
  /// Items are addressable by id (unique hash), by previous-id (non-unique
  /// hash — siblings share a parent) and by block number (non-unique ordered).
  typedef boost::multi_index_container<
    item_ptr,
    boost::multi_index::indexed_by<
      boost::multi_index::hashed_unique<boost::multi_index::tag<block_id>, boost::multi_index::member<fork_item, block_id_type, &fork_item::id>, std::hash<fc::ripemd160>>,
      boost::multi_index::hashed_non_unique<boost::multi_index::tag<by_previous>, boost::multi_index::const_mem_fun<fork_item, block_id_type, &fork_item::previous_id>, std::hash<fc::ripemd160>>,
      boost::multi_index::ordered_non_unique<boost::multi_index::tag<block_num>, boost::multi_index::member<fork_item,uint32_t,&fork_item::num>>
    >
  > fork_multi_index_type;

  void set_max_size( uint32_t s );

private:
  /// Insert b into the index. (A stale comment claimed a return value; the
  /// function returns void.)
  void _push_block(const item_ptr& b );
  void _push_next(const item_ptr& newly_inserted);

  uint32_t _max_size = 1024;

  fork_multi_index_type _unlinked_index;
  fork_multi_index_type _index;
  shared_ptr<fork_item> _head;
};
} } // hive::chain

View File

@ -0,0 +1,237 @@
#pragma once
#include <hive/protocol/schema_types.hpp>
#include <hive/chain/schema_types.hpp>
#include <hive/schema/schema.hpp>
#include <hive/protocol/hive_operations.hpp>
#include <hive/protocol/operation_util_impl.hpp>
#include <hive/protocol/types.hpp>
#include <hive/chain/evaluator.hpp>
#include <hive/chain/evaluator_registry.hpp>
#include <hive/chain/custom_operation_interpreter.hpp>
#include <fc/variant.hpp>
#include <string>
#include <vector>
namespace hive { namespace chain {
using protocol::operation;
using protocol::authority;
using protocol::account_name_type;
using protocol::custom_id_type;
class database;
std::string legacy_custom_name_from_type( const std::string& type_name );
/// static_variant visitor: writes the legacy name of the visited custom
/// operation type (via legacy_custom_name_from_type) into the bound string.
struct get_legacy_custom_operation_name
{
  string& name;  // output slot supplied by the caller
  get_legacy_custom_operation_name( string& dv )
    : name( dv ) {}

  typedef void result_type;
  template< typename T > void operator()( const T& v )const
  {
    name = legacy_custom_name_from_type( fc::get_typename< T >::name() );
  }
};
/// static_variant visitor: writes the namespace-trimmed type name of the
/// visited custom operation into the bound string.
struct get_custom_operation_name
{
  string& name;  // output slot supplied by the caller
  get_custom_operation_name( string& n )
    : name( n ) {}

  typedef void result_type;
  template< typename T > void operator()( const T& v )const
  {
    name = fc::trim_typename_namespace( fc::get_typename< T >::name() );
  }
};
/**
 * Deserialize a custom operation (static_variant) from a fc::variant,
 * accepting both serialization styles:
 *   - legacy array form:  [ <name-or-tag>, <value> ]
 *   - new object form:    { "type": <name-or-tag>, "value": <value> }
 *
 * @param var  input variant in one of the two forms above
 * @param vo   output static_variant; set to the decoded alternative
 * @throws fc assertion failures on unknown type names or malformed objects
 */
template< typename CustomOperationType >
void custom_op_from_variant( const fc::variant& var, CustomOperationType& vo )
{
  // name -> tag maps, built once per CustomOperationType instantiation by
  // visiting every alternative of the variant
  static std::map<string,int64_t> to_legacy_tag = []()
  {
    std::map<string,int64_t> name_map;
    for( int i = 0; i < CustomOperationType::count(); ++i )
    {
      CustomOperationType tmp;
      tmp.set_which(i);
      string n;
      tmp.visit( get_legacy_custom_operation_name(n) );
      name_map[n] = i;
    }
    return name_map;
  }();

  static std::map< string, int64_t > to_tag = []()
  {
    std::map< string, int64_t > name_map;
    for( int i = 0; i < CustomOperationType::count(); ++i )
    {
      CustomOperationType tmp;
      tmp.set_which(i);
      string n;
      tmp.visit( get_custom_operation_name( n ) );
      name_map[n] = i;
    }
    return name_map;
  }();

  if( var.is_array() ) // legacy serialization
  {
    auto ar = var.get_array();
    // tolerate a too-short array: leave vo unchanged (legacy behavior)
    if( ar.size() < 2 ) return;
    if( ar[0].is_uint64() )
      vo.set_which( ar[0].as_uint64() );
    else
    {
      auto itr = to_legacy_tag.find(ar[0].as_string());
      FC_ASSERT( itr != to_legacy_tag.end(), "Invalid operation name: ${n}", ("n", ar[0]) );
      vo.set_which( itr->second );
    }
    vo.visit( fc::to_static_variant( ar[1] ) );
  }
  else // new serialization
  {
    // BUGFIX: error message was ungrammatical ("Input data have to treated as object.")
    FC_ASSERT( var.is_object(), "Input data has to be treated as an object." );
    auto v_object = var.get_object();

    FC_ASSERT( v_object.contains( "type" ), "Type field doesn't exist." );
    FC_ASSERT( v_object.contains( "value" ), "Value field doesn't exist." );

    int64_t which = -1;

    if( v_object[ "type" ].is_integer() )
    {
      which = v_object[ "type" ].as_int64();
    }
    else
    {
      auto itr = to_tag.find( v_object[ "type" ].as_string() );
      FC_ASSERT( itr != to_tag.end(), "Invalid object name: ${n}", ("n", v_object[ "type" ]) );
      which = itr->second;
    }

    vo.set_which( which );
    vo.visit( fc::to_static_variant( v_object[ "value" ] ) );
  }
}
/// Interprets custom_json/custom_binary operations as a nested variant of
/// plugin-defined operations (CustomOperationType) and dispatches them to
/// registered evaluators.
template< typename CustomOperationType >
class generic_custom_operation_interpreter
  : public custom_operation_interpreter, public evaluator_registry< CustomOperationType >
{
public:
  generic_custom_operation_interpreter( database& db, const custom_id_type& cid )
    : evaluator_registry< CustomOperationType >(db), custom_id(cid) {}

  virtual ~generic_custom_operation_interpreter() = default;

  /// Validates each inner operation, requires the inner operations' combined
  /// required authorities to exactly equal those of the enclosing outer
  /// operation, then applies them inside an undo session that is squashed on
  /// success (presumably rolled back by the session destructor on exception —
  /// verify start_undo_session semantics).
  void apply_operations( const vector< CustomOperationType >& custom_operations, const operation& outer_o )
  {
    auto plugin_session = this->_db.start_undo_session();

    flat_set<account_name_type> outer_active;
    flat_set<account_name_type> outer_owner;
    flat_set<account_name_type> outer_posting;
    std::vector< authority > outer_other;

    flat_set<account_name_type> inner_active;
    flat_set<account_name_type> inner_owner;
    flat_set<account_name_type> inner_posting;
    std::vector< authority > inner_other;

    operation_get_required_authorities( outer_o, outer_active, outer_owner, outer_posting, outer_other );

    for( const CustomOperationType& inner_o : custom_operations )
    {
      operation_validate( inner_o );
      operation_get_required_authorities( inner_o, inner_active, inner_owner, inner_posting, inner_other );
    }

    // inner authorities must match the outer operation's exactly — this is
    // what makes the outer signature checks cover the inner operations
    FC_ASSERT( inner_owner == outer_owner );
    FC_ASSERT( inner_active == outer_active );
    FC_ASSERT( inner_posting == outer_posting );
    FC_ASSERT( inner_other == outer_other );

    for( const CustomOperationType& inner_o : custom_operations )
    {
      // gcc errors if this-> is not here
      // error message is "declarations in dependent base are not found by unqualified lookup"
      this->get_evaluator( inner_o ).apply( inner_o );
    }

    plugin_session.squash();
  }

  /// Parses the JSON payload as either a single custom operation or a list
  /// of them, then applies all under the outer operation's authorities.
  virtual void apply( const protocol::custom_json_operation& outer_o ) override
  {
    try
    {
      FC_TODO( "Should we hardfork out old serialization?" )
      fc::variant v = fc::json::from_string( outer_o.json );

      std::vector< CustomOperationType > custom_operations;
      if( v.is_array() && v.size() > 0 && v.get_array()[0].is_array() )
      {
        // it looks like a list
        for( auto& o : v.get_array() )
        {
          custom_operations.emplace_back();
          custom_op_from_variant( o, custom_operations.back() );
        }
      }
      else
      {
        custom_operations.emplace_back();
        custom_op_from_variant( v, custom_operations[0] );
      }

      apply_operations( custom_operations, operation( outer_o ) );
    } FC_CAPTURE_AND_RETHROW( (outer_o) )
  }

  /// Unpacks the binary payload, first as a vector of custom operations and,
  /// failing that, as a single one.
  virtual void apply( const protocol::custom_binary_operation& outer_o ) override
  {
    try
    {
      vector< CustomOperationType > custom_operations;
      try
      {
        custom_operations = fc::raw::unpack_from_vector< vector< CustomOperationType > >( outer_o.data );
      }
      catch ( fc::exception& )
      {
        custom_operations.push_back( fc::raw::unpack_from_vector< CustomOperationType >( outer_o.data ) );
      }

      apply_operations( custom_operations, operation( outer_o ) );
    }
    FC_CAPTURE_AND_RETHROW( (outer_o) )
  }

  virtual std::shared_ptr< hive::schema::abstract_schema > get_operation_schema() override
  {
    return hive::schema::get_schema_for_type< CustomOperationType >();
  }

  /// Id under which this interpreter is registered (matches the custom op's id field).
  virtual custom_id_type get_custom_id() override
  {
    return custom_id;
  }

private:
  custom_id_type custom_id;
};
} }

View File

@ -0,0 +1,238 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <fc/uint128.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/protocol/asset.hpp>
namespace hive { namespace chain {
using hive::protocol::asset;
using hive::protocol::price;
/**
* @class dynamic_global_property_object
* @brief Maintains global state information
* @ingroup object
* @ingroup implementation
*
* This is an implementation detail. The values here are calculated during normal chain operations and reflect the
* current values of global blockchain properties.
*/
class dynamic_global_property_object : public object< dynamic_global_property_object_type, dynamic_global_property_object>
{
  CHAINBASE_OBJECT( dynamic_global_property_object );
public:
  /// Chainbase constructor; seeds witness and supply counters at genesis.
  template< typename Allocator >
  dynamic_global_property_object( allocator< Allocator > a, uint64_t _id,
    const account_name_type& _initial_witness, const asset& _initial_hive_supply, const asset& _initial_hbd_supply )
    : id( _id ), current_witness( _initial_witness ), virtual_supply( _initial_hive_supply, HIVE_SYMBOL ),
    current_supply( _initial_hive_supply, HIVE_SYMBOL ), init_hbd_supply( _initial_hbd_supply, HBD_SYMBOL )
  {}

  //main HIVE token counter (see also get_full_hive_supply)
  const asset& get_current_supply() const { return current_supply; }

  //initial amount of HBD issued (see also get_full_hbd_supply)
  const asset& get_init_hbd_supply() const { return init_hbd_supply; }
  //main HBD token counter (see also get_full_hbd_supply)
  const asset& get_current_hbd_supply() const { return current_hbd_supply; }

  //rate of interest for holding HBD (in BPS - basis points)
  uint16_t get_hbd_interest_rate() const { return hbd_interest_rate; }
  //percentage of HIVE being converted to HBD during payouts (in BPS - basis points)
  uint16_t get_hbd_print_rate() const { return hbd_print_rate; }

  //pool of HIVE tokens vested normally
  const asset& get_total_vesting_fund_hive() const { return total_vesting_fund_hive; }
  //amount of VESTS produced from HIVE held in normal vested fund
  const asset& get_total_vesting_shares() const { return total_vesting_shares; }
  //pool of HIVE tokens for pending (liquid) rewards
  const asset& get_total_reward_fund_hive() const { return total_reward_fund_hive; }
  //pool of HIVE tokens for pending (vested) rewards
  const asset& get_pending_rewarded_vesting_hive() const { return pending_rewarded_vesting_hive; }
  //amount of VESTS produced from HIVE held in pending reward vested fund
  const asset& get_pending_rewarded_vesting_shares() const { return pending_rewarded_vesting_shares; }

  uint32_t head_block_number = 0;
  block_id_type head_block_id;
  time_point_sec time = HIVE_GENESIS_TIME;
  account_name_type current_witness; //< TODO: replace with account_id_type

  /**
   * The total POW accumulated, aka the sum of num_pow_witness at the time new POW is added
   */
  uint64_t total_pow = -1; // NOTE(review): wraps to uint64_t max; presumably so the first increment yields 0 — confirm

  /**
   * The current count of how many pending POW witnesses there are, determines the difficulty
   * of doing pow
   */
  uint32_t num_pow_witnesses = 0;

  asset virtual_supply = asset( 0, HIVE_SYMBOL ); //< TODO: replace with HIVE_asset
  asset current_supply = asset( 0, HIVE_SYMBOL ); //< TODO: replace with HIVE_asset
  asset init_hbd_supply = asset( 0, HBD_SYMBOL ); //< TODO: replace with HBD_asset
  asset current_hbd_supply = asset( 0, HBD_SYMBOL ); //< TODO: replace with HBD_asset
  asset total_vesting_fund_hive = asset( 0, HIVE_SYMBOL ); //< TODO: replace with HIVE_asset
  asset total_vesting_shares = asset( 0, VESTS_SYMBOL ); //< TODO: replace with VEST_asset
  asset total_reward_fund_hive = asset( 0, HIVE_SYMBOL ); //< TODO: replace with HIVE_asset
  fc::uint128 total_reward_shares2; ///< the running total of REWARD^2
  asset pending_rewarded_vesting_shares = asset( 0, VESTS_SYMBOL ); //< TODO: replace with VEST_asset
  asset pending_rewarded_vesting_hive = asset( 0, HIVE_SYMBOL ); //< TODO: replace with HIVE_asset

  // VESTS per HIVE for the normal vesting fund; falls back to the genesis
  // ratio (1M VESTS : 1000 HIVE) while either side of the pool is empty
  price get_vesting_share_price() const
  {
    if ( total_vesting_fund_hive.amount == 0 || total_vesting_shares.amount == 0 )
      return price ( asset( 1000, HIVE_SYMBOL ), asset( 1000000, VESTS_SYMBOL ) );

    return price( total_vesting_shares, total_vesting_fund_hive );
  }

  // VESTS per HIVE including the pending-reward portions of both pools
  price get_reward_vesting_share_price() const
  {
    return price( total_vesting_shares + pending_rewarded_vesting_shares,
      total_vesting_fund_hive + pending_rewarded_vesting_hive );
  }

  /**
   * This property defines the interest rate that HBD deposits receive.
   */
  uint16_t hbd_interest_rate = 0;

  uint16_t hbd_print_rate = HIVE_100_PERCENT;

  /**
   * Maximum block size is decided by the set of active witnesses which change every round.
   * Each witness posts what they think the maximum size should be as part of their witness
   * properties, the median size is chosen to be the maximum block size for the round.
   *
   * @note the minimum value for maximum_block_size is defined by the protocol to prevent the
   * network from getting stuck by witnesses attempting to set this too low.
   */
  uint32_t maximum_block_size = HIVE_MAX_BLOCK_SIZE;

  /**
   * The size of the block that is partitioned for actions.
   * Required actions can only be delayed if they take up more than this amount. More can be
   * included, but are not required. Block generation should only include transactions up
   * to maximum_block_size - required_actions_partition_size to ensure required actions are
   * not delayed when they should not be.
   */
  uint16_t required_actions_partition_percent = 0;

  /**
   * The current absolute slot number. Equal to the total
   * number of slots since genesis. Also equal to the total
   * number of missed slots plus head_block_number.
   */
  uint64_t current_aslot = 0;

  /**
   * used to compute witness participation.
   */
  fc::uint128_t recent_slots_filled = fc::uint128::max_value();
  uint8_t participation_count = 128; ///< Divide by 128 to compute participation percentage

  uint32_t last_irreversible_block_num = 0;

  /**
   * The number of votes regenerated per day. Any user voting slower than this rate will be
   * "wasting" voting power through spillover; any user voting faster than this rate will have
   * their votes reduced.
   */
  uint32_t vote_power_reserve_rate = HIVE_INITIAL_VOTE_POWER_RATE;

  uint32_t delegation_return_period = HIVE_DELEGATION_RETURN_PERIOD_HF0;

  uint64_t reverse_auction_seconds = HIVE_REVERSE_AUCTION_WINDOW_SECONDS_HF6;

  int64_t available_account_subsidies = 0;

  uint16_t hbd_stop_percent = HIVE_HBD_STOP_PERCENT_HF14;
  uint16_t hbd_start_percent = HIVE_HBD_START_PERCENT_HF14;

  //settings used to compute payments for every proposal
  time_point_sec next_maintenance_time = HIVE_GENESIS_TIME;
  time_point_sec last_budget_time = HIVE_GENESIS_TIME;

  uint16_t content_reward_percent = HIVE_CONTENT_REWARD_PERCENT_HF16;
  uint16_t vesting_reward_percent = HIVE_VESTING_FUND_PERCENT_HF16;
  uint16_t sps_fund_percent = HIVE_PROPOSAL_FUND_PERCENT_HF0;

  asset sps_interval_ledger = asset( 0, HBD_SYMBOL ); //< TODO: replace with HBD_asset

  uint16_t downvote_pool_percent = 0;

#ifdef HIVE_ENABLE_SMT
  asset smt_creation_fee = asset( 1000, HBD_SYMBOL ); //< TODO: replace with HBD_asset
#endif
  CHAINBASE_UNPACK_CONSTRUCTOR(dynamic_global_property_object);
};
// Only the by_id index is needed for this object.
typedef multi_index_container<
  dynamic_global_property_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< dynamic_global_property_object, dynamic_global_property_object::id_type, &dynamic_global_property_object::get_id > >
  >,
  allocator< dynamic_global_property_object >
> dynamic_global_property_index;
} } // hive::chain
#ifdef ENABLE_MIRA
namespace mira {

// Marks the object as fixed-size so MIRA can use static-length serialization.
template<> struct is_static_length< hive::chain::dynamic_global_property_object > : public boost::true_type {};

} // mira
#endif

// Reflection must list every persisted member — keep in sync with the class above.
FC_REFLECT( hive::chain::dynamic_global_property_object,
  (id)
  (head_block_number)
  (head_block_id)
  (time)
  (current_witness)
  (total_pow)
  (num_pow_witnesses)
  (virtual_supply)
  (current_supply)
  (init_hbd_supply)
  (current_hbd_supply)
  (total_vesting_fund_hive)
  (total_vesting_shares)
  (total_reward_fund_hive)
  (total_reward_shares2)
  (pending_rewarded_vesting_shares)
  (pending_rewarded_vesting_hive)
  (hbd_interest_rate)
  (hbd_print_rate)
  (maximum_block_size)
  (required_actions_partition_percent)
  (current_aslot)
  (recent_slots_filled)
  (participation_count)
  (last_irreversible_block_num)
  (vote_power_reserve_rate)
  (delegation_return_period)
  (reverse_auction_seconds)
  (available_account_subsidies)
  (hbd_stop_percent)
  (hbd_start_percent)
  (next_maintenance_time)
  (last_budget_time)
  (content_reward_percent)
  (vesting_reward_percent)
  (sps_fund_percent)
  (sps_interval_ledger)
  (downvote_pool_percent)
#ifdef HIVE_ENABLE_SMT
  (smt_creation_fee)
#endif
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::dynamic_global_property_object, hive::chain::dynamic_global_property_index )

View File

@ -0,0 +1,43 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/version.hpp>
#include <hive/chain/hive_object_types.hpp>
namespace hive { namespace chain {
using chainbase::t_vector;
/// Tracks which hardforks have been applied and which one is scheduled next
/// (appears to be a single-instance object: only a by_id index exists).
class hardfork_property_object : public object< hardfork_property_object_type, hardfork_property_object >
{
  CHAINBASE_OBJECT( hardfork_property_object );
public:
  CHAINBASE_DEFAULT_CONSTRUCTOR( hardfork_property_object, (processed_hardforks) )

  using t_processed_hardforks = t_vector< fc::time_point_sec >;

  // activation times of hardforks applied so far
  t_processed_hardforks processed_hardforks;
  // number of the most recently applied hardfork
  uint32_t last_hardfork = 0;
  protocol::hardfork_version current_hardfork_version;
  // version and scheduled time of the upcoming hardfork
  protocol::hardfork_version next_hardfork;
  fc::time_point_sec next_hardfork_time;

  CHAINBASE_UNPACK_CONSTRUCTOR(hardfork_property_object, (processed_hardforks));
};
// Only the by_id index is needed for this object.
typedef multi_index_container<
  hardfork_property_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< hardfork_property_object, hardfork_property_object::id_type, &hardfork_property_object::get_id > >
  >,
  allocator< hardfork_property_object >
> hardfork_property_index;
} } // hive::chain
// Reflection must list every persisted member — keep in sync with the class above.
FC_REFLECT( hive::chain::hardfork_property_object,
  (id)(processed_hardforks)(last_hardfork)(current_hardfork_version)
  (next_hardfork)(next_hardfork_time) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::hardfork_property_object, hive::chain::hardfork_property_index )

View File

@ -0,0 +1,155 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/authority.hpp>
#include <hive/protocol/operations.hpp>
#include <hive/protocol/hive_operations.hpp>
#include <hive/chain/buffer_type.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/chain/witness_objects.hpp>
namespace hive { namespace chain {
/// One archived operation, stored in packed (serialized) form together with
/// its position in the chain.
class operation_object : public object< operation_object_type, operation_object >
{
  CHAINBASE_OBJECT( operation_object );
public:
  CHAINBASE_DEFAULT_CONSTRUCTOR( operation_object, (serialized_op) )

  transaction_id_type trx_id;     // id of the containing transaction
  uint32_t block = 0;             // block number the operation appeared in
  uint32_t trx_in_block = 0;      // transaction position within that block
  uint32_t op_in_trx = 0;         // operation position within the transaction
  uint32_t virtual_op = 0;        // presumably nonzero for virtual operations — confirm against writer
  time_point_sec timestamp;
  buffer_type serialized_op;      // raw packed operation bytes

  uint64_t get_virtual_op() const { return virtual_op; }

  CHAINBASE_UNPACK_CONSTRUCTOR(operation_object, (serialized_op));
};
struct by_location;
struct by_transaction_id;

/// operation_object indices:
///  - by_id: primary key
///  - by_location: (block, id) — walk all operations of a block in insertion order
///  - by_transaction_id: (trx_id, id) — compiled out when SKIP_BY_TX_ID is defined
typedef multi_index_container<
  operation_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< operation_object, operation_object::id_type, &operation_object::get_id > >,
    ordered_unique< tag< by_location >,
      composite_key< operation_object,
        member< operation_object, uint32_t, &operation_object::block >,
        const_mem_fun< operation_object, operation_object::id_type, &operation_object::get_id >
      >
    >
#ifndef SKIP_BY_TX_ID
    ,
    ordered_unique< tag< by_transaction_id >,
      composite_key< operation_object,
        member< operation_object, transaction_id_type, &operation_object::trx_id> ,
        const_mem_fun< operation_object, operation_object::id_type, &operation_object::get_id >
      >
    >
#endif
  >,
  allocator< operation_object >
> operation_index;
/// Links one account to one archived operation, with a per-account sequence
/// number for paging through the account's history.
class account_history_object : public object< account_history_object_type, account_history_object >
{
  CHAINBASE_OBJECT( account_history_object );
public:
  CHAINBASE_DEFAULT_CONSTRUCTOR( account_history_object )

  account_name_type account;   // account this history entry belongs to
  uint32_t sequence = 0;       // per-account sequence number
  operation_id_type op;        // id of the referenced operation_object

  CHAINBASE_UNPACK_CONSTRUCTOR(account_history_object);
};
struct by_account;
struct by_account_rev;

/// account_history_object indices:
///  - by_id: primary key
///  - by_account: (account asc, sequence desc) — newest entries first per account
typedef multi_index_container<
  account_history_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< account_history_object, account_history_object::id_type, &account_history_object::get_id > >,
    ordered_unique< tag< by_account >,
      composite_key< account_history_object,
        member< account_history_object, account_name_type, &account_history_object::account>,
        member< account_history_object, uint32_t, &account_history_object::sequence>
      >,
      composite_key_compare< std::less< account_name_type >, std::greater< uint32_t > >
    >
  >,
  allocator< account_history_object >
> account_history_index;
} }
#ifdef ENABLE_MIRA
namespace mira {

// Marks the object as fixed-size so MIRA can use static-length serialization.
template<> struct is_static_length< hive::chain::account_history_object > : public boost::true_type {};

} // mira
#endif

// Reflection must list every persisted member — keep in sync with the classes above.
FC_REFLECT( hive::chain::operation_object, (id)(trx_id)(block)(trx_in_block)(op_in_trx)(virtual_op)(timestamp)(serialized_op) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::operation_object, hive::chain::operation_index )

FC_REFLECT( hive::chain::account_history_object, (id)(account)(sequence)(op) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_history_object, hive::chain::account_history_index )
namespace helpers
{
/// Statistics provider specialization for operation_index: beyond the static
/// data it accounts for each item's dynamically allocated serialized buffer.
template <>
class index_statistic_provider<hive::chain::operation_index>
{
public:
  typedef hive::chain::operation_index IndexType;

  index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
  {
    index_statistic_info info;
    gather_index_static_data(index, &info);
    if(!onlyStaticInfo)
    {
      // each item additionally owns the capacity of its serialized_op buffer
      for(const auto& obj : index)
        info._item_additional_allocation +=
          obj.serialized_op.capacity()*sizeof(hive::chain::buffer_type::value_type);
    }
    return info;
  }
};
template <>
class index_statistic_provider<hive::chain::account_history_index>
{
public:
  typedef hive::chain::account_history_index IndexType;

  /// Collects size/usage statistics for the account-history index. No
  /// per-item extra allocation is accumulated here; the disabled loop below
  /// is kept for reference.
  index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
  {
    index_statistic_info info;
    gather_index_static_data(index, &info);

    if(onlyStaticInfo)
      return info;

    //for(const auto& o : index)
    //  info._item_additional_allocation += o.get_ops().capacity()*
    //    sizeof(hive::chain::account_history_object::operation_container::value_type);

    return info;
  }
};
} /// namespace helpers

View File

@ -0,0 +1,71 @@
#pragma once
#include <hive/protocol/hive_operations.hpp>
#include <hive/chain/evaluator.hpp>
namespace hive { namespace chain {

using namespace hive::protocol;

// Declares the evaluator class for every supported operation.
// Each HIVE_DEFINE_EVALUATOR( x ) expands to the x_evaluator boilerplate
// (see hive/chain/evaluator.hpp).
HIVE_DEFINE_EVALUATOR( account_create )
HIVE_DEFINE_EVALUATOR( account_create_with_delegation )
HIVE_DEFINE_EVALUATOR( account_update )
HIVE_DEFINE_EVALUATOR( account_update2 )
HIVE_DEFINE_EVALUATOR( transfer )
HIVE_DEFINE_EVALUATOR( transfer_to_vesting )
HIVE_DEFINE_EVALUATOR( witness_update )
HIVE_DEFINE_EVALUATOR( account_witness_vote )
HIVE_DEFINE_EVALUATOR( account_witness_proxy )
HIVE_DEFINE_EVALUATOR( withdraw_vesting )
HIVE_DEFINE_EVALUATOR( set_withdraw_vesting_route )
HIVE_DEFINE_EVALUATOR( comment )
HIVE_DEFINE_EVALUATOR( comment_options )
HIVE_DEFINE_EVALUATOR( delete_comment )
HIVE_DEFINE_EVALUATOR( vote )
HIVE_DEFINE_EVALUATOR( custom )
HIVE_DEFINE_EVALUATOR( custom_json )
HIVE_DEFINE_EVALUATOR( custom_binary )
HIVE_DEFINE_EVALUATOR( pow )
HIVE_DEFINE_EVALUATOR( pow2 )
HIVE_DEFINE_EVALUATOR( feed_publish )
HIVE_DEFINE_EVALUATOR( convert )
HIVE_DEFINE_EVALUATOR( limit_order_create )
HIVE_DEFINE_EVALUATOR( limit_order_cancel )
HIVE_DEFINE_EVALUATOR( report_over_production )
HIVE_DEFINE_EVALUATOR( limit_order_create2 )
HIVE_DEFINE_EVALUATOR( escrow_transfer )
HIVE_DEFINE_EVALUATOR( escrow_approve )
HIVE_DEFINE_EVALUATOR( escrow_dispute )
HIVE_DEFINE_EVALUATOR( escrow_release )
HIVE_DEFINE_EVALUATOR( claim_account )
HIVE_DEFINE_EVALUATOR( create_claimed_account )
HIVE_DEFINE_EVALUATOR( request_account_recovery )
HIVE_DEFINE_EVALUATOR( recover_account )
HIVE_DEFINE_EVALUATOR( change_recovery_account )
HIVE_DEFINE_EVALUATOR( transfer_to_savings )
HIVE_DEFINE_EVALUATOR( transfer_from_savings )
HIVE_DEFINE_EVALUATOR( cancel_transfer_from_savings )
HIVE_DEFINE_EVALUATOR( decline_voting_rights )
HIVE_DEFINE_EVALUATOR( reset_account )
HIVE_DEFINE_EVALUATOR( set_reset_account )
HIVE_DEFINE_EVALUATOR( claim_reward_balance )
#ifdef HIVE_ENABLE_SMT
HIVE_DEFINE_EVALUATOR( claim_reward_balance2 )
#endif
HIVE_DEFINE_EVALUATOR( delegate_vesting_shares )
HIVE_DEFINE_EVALUATOR( witness_set_properties )
#ifdef HIVE_ENABLE_SMT
HIVE_DEFINE_EVALUATOR( smt_setup )
HIVE_DEFINE_EVALUATOR( smt_setup_emissions )
HIVE_DEFINE_EVALUATOR( smt_set_setup_parameters )
HIVE_DEFINE_EVALUATOR( smt_set_runtime_parameters )
HIVE_DEFINE_EVALUATOR( smt_create )
HIVE_DEFINE_EVALUATOR( smt_contribute )
#endif
HIVE_DEFINE_EVALUATOR( create_proposal )
HIVE_DEFINE_EVALUATOR( update_proposal )
HIVE_DEFINE_EVALUATOR( update_proposal_votes )
HIVE_DEFINE_EVALUATOR( remove_proposal )

} } // hive::chain

View File

@ -1,10 +1,11 @@
#pragma once
// This header forward-declares pack/unpack and to/from variant functions for Steem types.
// This header forward-declares pack/unpack and to/from variant functions for Hive types.
// These declarations need to be as early as possible to prevent compiler errors.
#include <chainbase/allocators.hpp>
#include <chainbase/util/object_id.hpp>
#include <chainbase/util/object_id_serialization.hpp>
#ifdef ENABLE_MIRA
#include <mira/multi_index_container_fwd.hpp>
@ -14,11 +15,6 @@ namespace fc {
namespace raw {
template<typename Stream, typename T>
inline void pack( Stream& s, const chainbase::oid<T>& id );
template<typename Stream, typename T>
inline void unpack( Stream& s, chainbase::oid<T>& id, uint32_t depth = 0 );
#ifndef ENABLE_MIRA
template<typename Stream>
inline void pack( Stream& s, const chainbase::shared_string& ss );

View File

@ -0,0 +1,372 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <chainbase/chainbase.hpp>
#include <chainbase/util/object_id_serialization.hpp>
#include <hive/protocol/types.hpp>
#include <hive/protocol/authority.hpp>
#include <hive/chain/buffer_type.hpp>
#include <hive/chain/multi_index_types.hpp>
namespace hive {
namespace protocol {
struct asset;
struct price;
}
namespace chain {
using chainbase::object;
using chainbase::oid;
using chainbase::oid_ref;
using chainbase::allocator;
using hive::protocol::block_id_type;
using hive::protocol::transaction_id_type;
using hive::protocol::chain_id_type;
using hive::protocol::account_name_type;
using hive::protocol::share_type;
using hive::protocol::ushare_type;
using chainbase::shared_string;
// Copy helpers between std::string and the shared-memory string type.
inline std::string to_string( const shared_string& str ) { return std::string( str.begin(), str.end() ); }
inline void from_string( shared_string& out, const string& in ){ out.assign( in.begin(), in.end() ); }
using chainbase::by_id;
struct by_name;
// Numeric tag for every chain-state object type. The enumerators are
// reflected below via FC_REFLECT_ENUM, so their order/values are persisted --
// NOTE(review): do not reorder without confirming state/serialization impact.
enum object_type
{
  dynamic_global_property_object_type,
  account_object_type,
  account_metadata_object_type,
  account_authority_object_type,
  witness_object_type,
  transaction_object_type,
  block_summary_object_type,
  witness_schedule_object_type,
  comment_object_type,
  comment_content_object_type,
  comment_vote_object_type,
  witness_vote_object_type,
  limit_order_object_type,
  feed_history_object_type,
  convert_request_object_type,
  liquidity_reward_balance_object_type,
  operation_object_type,
  account_history_object_type,
  hardfork_property_object_type,
  withdraw_vesting_route_object_type,
  owner_authority_history_object_type,
  account_recovery_request_object_type,
  change_recovery_account_request_object_type,
  escrow_object_type,
  savings_withdraw_object_type,
  decline_voting_rights_request_object_type,
  block_stats_object_type,
  reward_fund_object_type,
  vesting_delegation_object_type,
  vesting_delegation_expiration_object_type,
  pending_required_action_object_type,
  pending_optional_action_object_type,
  proposal_object_type,
  proposal_vote_object_type,
  comment_cashout_object_type,
#ifdef HIVE_ENABLE_SMT
  // SMT objects
  smt_token_object_type,
  account_regular_balance_object_type,
  account_rewards_balance_object_type,
  nai_pool_object_type,
  smt_token_emissions_object_type,
  smt_contribution_object_type,
  smt_ico_object_type,
#endif
};
class dynamic_global_property_object;
class account_object;
class account_metadata_object;
class account_authority_object;
class witness_object;
class transaction_object;
class block_summary_object;
class witness_schedule_object;
class comment_object;
class comment_content_object;
class comment_vote_object;
class witness_vote_object;
class limit_order_object;
class feed_history_object;
class convert_request_object;
class liquidity_reward_balance_object;
class operation_object;
class account_history_object;
class hardfork_property_object;
class withdraw_vesting_route_object;
class owner_authority_history_object;
class account_recovery_request_object;
class change_recovery_account_request_object;
class escrow_object;
class savings_withdraw_object;
class decline_voting_rights_request_object;
class block_stats_object;
class reward_fund_object;
class vesting_delegation_object;
class vesting_delegation_expiration_object;
class pending_required_action_object;
class pending_optional_action_object;
class comment_cashout_object;
#ifdef HIVE_ENABLE_SMT
class smt_token_object;
class account_regular_balance_object;
class account_rewards_balance_object;
class nai_pool_object;
class smt_token_emissions_object;
class smt_contribution_object;
class smt_ico_object;
#endif
class proposal_object;
class proposal_vote_object;
typedef oid_ref< dynamic_global_property_object > dynamic_global_property_id_type;
typedef oid_ref< account_object > account_id_type;
typedef oid_ref< account_metadata_object > account_metadata_id_type;
typedef oid_ref< account_authority_object > account_authority_id_type;
typedef oid_ref< witness_object > witness_id_type;
typedef oid_ref< transaction_object > transaction_object_id_type;
typedef oid_ref< block_summary_object > block_summary_id_type;
typedef oid_ref< witness_schedule_object > witness_schedule_id_type;
typedef oid_ref< comment_object > comment_id_type;
typedef oid_ref< comment_content_object > comment_content_id_type;
typedef oid_ref< comment_vote_object > comment_vote_id_type;
typedef oid_ref< witness_vote_object > witness_vote_id_type;
typedef oid_ref< limit_order_object > limit_order_id_type;
typedef oid_ref< feed_history_object > feed_history_id_type;
typedef oid_ref< convert_request_object > convert_request_id_type;
typedef oid_ref< liquidity_reward_balance_object > liquidity_reward_balance_id_type;
typedef oid_ref< operation_object > operation_id_type;
typedef oid_ref< account_history_object > account_history_id_type;
typedef oid_ref< hardfork_property_object > hardfork_property_id_type;
typedef oid_ref< withdraw_vesting_route_object > withdraw_vesting_route_id_type;
typedef oid_ref< owner_authority_history_object > owner_authority_history_id_type;
typedef oid_ref< account_recovery_request_object > account_recovery_request_id_type;
typedef oid_ref< change_recovery_account_request_object > change_recovery_account_request_id_type;
typedef oid_ref< escrow_object > escrow_id_type;
typedef oid_ref< savings_withdraw_object > savings_withdraw_id_type;
typedef oid_ref< decline_voting_rights_request_object > decline_voting_rights_request_id_type;
typedef oid_ref< block_stats_object > block_stats_id_type;
typedef oid_ref< reward_fund_object > reward_fund_id_type;
typedef oid_ref< vesting_delegation_object > vesting_delegation_id_type;
typedef oid_ref< vesting_delegation_expiration_object > vesting_delegation_expiration_id_type;
typedef oid_ref< pending_required_action_object > pending_required_action_id_type;
typedef oid_ref< pending_optional_action_object > pending_optional_action_id_type;
typedef oid_ref< comment_cashout_object > comment_cashout_id_type;
#ifdef HIVE_ENABLE_SMT
typedef oid_ref< smt_token_object > smt_token_id_type;
typedef oid_ref< account_regular_balance_object > account_regular_balance_id_type;
typedef oid_ref< account_rewards_balance_object > account_rewards_balance_id_type;
typedef oid_ref< nai_pool_object > nai_pool_id_type;
typedef oid_ref< smt_token_emissions_object > smt_token_emissions_id_type;
typedef oid_ref< smt_contribution_object > smt_contribution_id_type;
typedef oid_ref< smt_ico_object > smt_ico_id_type;
#endif
typedef oid_ref< proposal_object > proposal_id_type;
typedef oid_ref< proposal_vote_object > proposal_vote_id_type;
// Categories used to rate-limit different kinds of account activity.
enum bandwidth_type
{
  post,   ///< Rate limiting posting reward eligibility over time
  forum,  ///< Rate limiting for all forum related actions
  market  ///< Rate limiting for all other actions
};
} } //hive::chain
#ifdef ENABLE_MIRA
namespace mira {

// Marks types whose serialized representation has a fixed byte size so MIRA
// can store them without per-record length bookkeeping.
template< typename T > struct is_static_length< chainbase::oid< T > > : public boost::true_type {};
template< typename T > struct is_static_length< chainbase::oid_ref< T > > : public boost::true_type {};
template< typename T > struct is_static_length< fc::fixed_string< T > > : public boost::true_type {};

template<> struct is_static_length< hive::protocol::account_name_type > : public boost::true_type {};
template<> struct is_static_length< hive::protocol::asset_symbol_type > : public boost::true_type {};
template<> struct is_static_length< hive::protocol::asset > : public boost::true_type {};
template<> struct is_static_length< hive::protocol::price > : public boost::true_type {};

} // mira
#endif
namespace fc
{
class variant;
#ifndef ENABLE_MIRA
// shared_string <-> variant conversions; only needed in the non-MIRA build,
// where state strings live in shared memory.
inline void to_variant( const hive::chain::shared_string& s, variant& var )
{
  var = fc::string( hive::chain::to_string( s ) );
}

inline void from_variant( const variant& var, hive::chain::shared_string& s )
{
  auto str = var.as_string();
  s.assign( str.begin(), str.end() );
}
#endif
namespace raw
{
#ifndef ENABLE_MIRA
/// Serializes a shared-memory string using the std::string wire format.
template< typename Stream >
void pack( Stream& s, const chainbase::shared_string& ss )
{
  fc::raw::pack( s, hive::chain::to_string( ss ) );
}
/// Deserializes a std::string from the stream and copies it into the
/// shared-memory string.
template< typename Stream >
void unpack( Stream& s, chainbase::shared_string& ss, uint32_t depth )
{
  ++depth;
  std::string decoded;
  fc::raw::unpack( s, decoded, depth );
  hive::chain::from_string( ss, decoded );
}
#endif
/// Serializes an interprocess deque with the same wire format as std::vector.
template< typename Stream, typename E, typename A >
void pack( Stream& s, const boost::interprocess::deque<E, A>& dq )
{
  // This could be optimized
  std::vector<E> contiguous( dq.begin(), dq.end() );
  pack( s, contiguous );
}
/// Deserializes a std::vector payload and rebuilds the interprocess deque
/// from it, enforcing the recursion-depth limit.
template< typename Stream, typename E, typename A >
void unpack( Stream& s, boost::interprocess::deque<E, A>& dq, uint32_t depth )
{
  ++depth;
  FC_ASSERT( depth <= MAX_RECURSION_DEPTH );
  // This could be optimized
  std::vector<E> buffer;
  unpack( s, buffer, depth );
  dq.clear();
  for( const auto& e : buffer )
    dq.push_back( e );
}
/// Serializes an interprocess flat_map as a size prefix followed by each
/// key/value pair in order.
template< typename Stream, typename K, typename V, typename C, typename A >
void pack( Stream& s, const boost::interprocess::flat_map< K, V, C, A >& value )
{
  fc::raw::pack( s, unsigned_int((uint32_t)value.size()) );
  for( const auto& entry : value )
    fc::raw::pack( s, entry );
}
/// Deserializes a size-prefixed sequence of key/value pairs into the
/// interprocess flat_map, guarding against oversized allocations and deep
/// recursion.
template< typename Stream, typename K, typename V, typename C, typename A >
void unpack( Stream& s, boost::interprocess::flat_map< K, V, C, A >& value, uint32_t depth )
{
  ++depth;
  FC_ASSERT( depth <= MAX_RECURSION_DEPTH );

  unsigned_int size;
  unpack( s, size, depth );

  value.clear();
  // Reject declared sizes whose storage would exceed the allocation cap.
  FC_ASSERT( size.value*(sizeof(K)+sizeof(V)) < MAX_ARRAY_ALLOC_SIZE );

  for( uint32_t i = 0; i < size.value; ++i )
  {
    std::pair<K,V> entry;
    fc::raw::unpack( s, entry, depth );
    value.insert( std::move(entry) );
  }
}
#ifndef ENABLE_MIRA
// Deserializes a T from a raw byte buffer; an empty buffer yields a
// default-constructed T. Exceptions are rethrown with the type name attached.
template< typename T >
T unpack_from_vector( const hive::chain::buffer_type& s )
{
  try
  {
    T tmp;
    if( s.size() )
    {
      datastream<const char*> ds( s.data(), size_t(s.size()) );
      fc::raw::unpack(ds,tmp);
    }
    return tmp;
  } FC_RETHROW_EXCEPTIONS( warn, "error unpacking ${type}", ("type",fc::get_typename<T>::name() ) )
}
#endif
} } // namespace fc::raw
// fc reflection registrations for the enums declared above; the enumerator
// list must stay in sync with enum object_type.
FC_REFLECT_ENUM( hive::chain::object_type,
  (dynamic_global_property_object_type)
  (account_object_type)
  (account_metadata_object_type)
  (account_authority_object_type)
  (witness_object_type)
  (transaction_object_type)
  (block_summary_object_type)
  (witness_schedule_object_type)
  (comment_object_type)
  (comment_content_object_type)
  (comment_vote_object_type)
  (witness_vote_object_type)
  (limit_order_object_type)
  (feed_history_object_type)
  (convert_request_object_type)
  (liquidity_reward_balance_object_type)
  (operation_object_type)
  (account_history_object_type)
  (hardfork_property_object_type)
  (withdraw_vesting_route_object_type)
  (owner_authority_history_object_type)
  (account_recovery_request_object_type)
  (change_recovery_account_request_object_type)
  (escrow_object_type)
  (savings_withdraw_object_type)
  (decline_voting_rights_request_object_type)
  (block_stats_object_type)
  (reward_fund_object_type)
  (vesting_delegation_object_type)
  (vesting_delegation_expiration_object_type)
  (pending_required_action_object_type)
  (pending_optional_action_object_type)
  (proposal_object_type)
  (proposal_vote_object_type)
  (comment_cashout_object_type)
#ifdef HIVE_ENABLE_SMT
  (smt_token_object_type)
  (account_regular_balance_object_type)
  (account_rewards_balance_object_type)
  (nai_pool_object_type)
  (smt_token_emissions_object_type)
  (smt_contribution_object_type)
  (smt_ico_object_type)
#endif
)

#ifndef ENABLE_MIRA
FC_REFLECT_TYPENAME( hive::chain::shared_string )
#endif

FC_REFLECT_ENUM( hive::chain::bandwidth_type, (post)(forum)(market) )

View File

@ -0,0 +1,549 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/authority.hpp>
#include <hive/protocol/hive_operations.hpp>
#include <hive/protocol/misc_utilities.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <boost/multiprecision/cpp_int.hpp>
namespace hive { namespace chain {
using hive::protocol::asset;
using hive::protocol::price;
using hive::protocol::asset_symbol_type;
using chainbase::t_deque;
typedef protocol::fixed_string< 16 > reward_fund_name_type;
/**
  * This object is used to track pending requests to convert HBD to HIVE
  */
class convert_request_object : public object< convert_request_object_type, convert_request_object >
{
  CHAINBASE_OBJECT( convert_request_object );
  public:
    template< typename Allocator >
    convert_request_object( allocator< Allocator > a, uint64_t _id,
      const account_name_type& _owner, const asset& _amount, const time_point_sec& _conversion_time, uint32_t _requestid )
      : id( _id ), owner( _owner ), requestid( _requestid ), amount( _amount ), conversion_date( _conversion_time )
    {}

    //amount of HBD to be converted to HIVE
    const asset& get_convert_amount() const { return amount; }

    account_name_type owner; //< TODO: can be replaced with account_id_type
    uint32_t requestid = 0; ///< id set by owner, the owner,requestid pair must be unique
    asset amount; //< TODO: can be replaced with HBD_asset
    time_point_sec conversion_date; ///< at this time the conversion presumably executes at feed_history_median_price * amount -- confirm against processing code
    CHAINBASE_UNPACK_CONSTRUCTOR(convert_request_object);
};
// Tracks an in-flight escrow transfer between `from` and `to`, mediated by
// `agent`, holding HIVE and/or HBD plus the agent's pending fee.
class escrow_object : public object< escrow_object_type, escrow_object >
{
  CHAINBASE_OBJECT( escrow_object );
  public:
    template< typename Allocator >
    escrow_object( allocator< Allocator > a, uint64_t _id,
      const account_name_type& _from, const account_name_type& _to, const account_name_type& _agent,
      const asset& _hive_amount, const asset& _hbd_amount, const asset& _fee,
      const time_point_sec& _ratification_deadline, const time_point_sec& _escrow_expiration, uint32_t _escrow_transfer_id )
      : id( _id ), escrow_id( _escrow_transfer_id ), from( _from ), to( _to ), agent( _agent ),
      ratification_deadline( _ratification_deadline ), escrow_expiration( _escrow_expiration ),
      hbd_balance( _hbd_amount ), hive_balance( _hive_amount ), pending_fee( _fee )
    {}

    //HIVE portion of transfer balance
    const asset& get_hive_balance() const { return hive_balance; }
    //HBD portion of transfer balance
    const asset& get_hbd_balance() const { return hbd_balance; }
    //fee offered to escrow (can be either in HIVE or HBD)
    const asset& get_fee() const { return pending_fee; }

    // escrow becomes active only once both receiver and agent approved it
    bool is_approved() const { return to_approved && agent_approved; }

    uint32_t escrow_id = 20; // NOTE(review): default of 20 looks arbitrary (other ids default to 0) -- confirm intent; the constructor always overwrites it
    account_name_type from; //< TODO: can be replaced with account_id_type
    account_name_type to; //< TODO: can be replaced with account_id_type
    account_name_type agent; //< TODO: can be replaced with account_id_type
    time_point_sec ratification_deadline;
    time_point_sec escrow_expiration;
    asset hbd_balance; //< TODO: can be replaced with HBD_asset
    asset hive_balance; //< TODO: can be replaced with HIVE_asset
    asset pending_fee; //fee can use HIVE or HBD
    bool to_approved = false; //< TODO: can be replaced with bit field along with all flags
    bool agent_approved = false;
    bool disputed = false;
    CHAINBASE_UNPACK_CONSTRUCTOR(escrow_object);
};
// Tracks a pending withdrawal from savings; completes at `complete` time.
class savings_withdraw_object : public object< savings_withdraw_object_type, savings_withdraw_object >
{
  CHAINBASE_OBJECT( savings_withdraw_object );
  public:
    template< typename Allocator >
    savings_withdraw_object( allocator< Allocator > a, uint64_t _id,
      const account_name_type& _from, const account_name_type& _to, const asset& _amount,
      const string& _memo, const time_point_sec& _time_of_completion, uint32_t _request_id )
      : id( _id ), from( _from ), to( _to ), memo( a ), request_id( _request_id ),
      amount( _amount ), complete( _time_of_completion )
    {
      // memo text is dropped in low-memory builds
#ifndef IS_LOW_MEM
      from_string( memo, _memo );
#endif
    }

    //amount of savings to withdraw (HIVE or HBD)
    const asset& get_withdraw_amount() const { return amount; }

    account_name_type from; //< TODO: can be replaced with account_id_type
    account_name_type to; //< TODO: can be replaced with account_id_type
    shared_string memo;
    uint32_t request_id = 0;
    asset amount; //can be expressed in HIVE or HBD
    time_point_sec complete; // time when the withdrawal becomes payable
    CHAINBASE_UNPACK_CONSTRUCTOR(savings_withdraw_object, (memo));
};
/**
  * If last_update is greater than 1 week, then volume gets reset to 0
  *
  * When a user is a maker, their volume increases
  * When a user is a taker, their volume decreases
  *
  * Every 1000 blocks, the account that has the highest volume_weight() is paid the maximum of
  * 1000 HIVE or 1000 * virtual_supply / (100*blocks_per_year) aka 10 * virtual_supply / blocks_per_year
  *
  * After being paid volume gets reset to 0
  */
class liquidity_reward_balance_object : public object< liquidity_reward_balance_object_type, liquidity_reward_balance_object >
{
  CHAINBASE_OBJECT( liquidity_reward_balance_object );
  public:
    CHAINBASE_DEFAULT_CONSTRUCTOR( liquidity_reward_balance_object )

    int64_t get_hive_volume() const { return hive_volume; }
    int64_t get_hbd_volume() const { return hbd_volume; }

    account_id_type owner;
    int64_t hive_volume = 0;
    int64_t hbd_volume = 0;
    uint128_t weight = 0;

    time_point_sec last_update = fc::time_point_sec::min(); /// used to decay negative liquidity balances. block num

    /// this is the sort index
    // NOTE(review): hive_volume * hbd_volume multiplies two int64_t values
    // before widening to uint128_t -- can overflow for very large volumes;
    // confirm this matches consensus expectations before changing.
    uint128_t volume_weight()const
    {
      return hive_volume * hbd_volume * is_positive();
    }

    uint128_t min_volume_weight()const
    {
      return std::min(hive_volume,hbd_volume) * is_positive();
    }

    // HF9 switched the sort key from the product to the min of the volumes
    void update_weight( bool hf9 )
    {
      weight = hf9 ? min_volume_weight() : volume_weight();
    }

    // 1 when the account has both positive HIVE and HBD volume, else 0
    inline int is_positive()const
    {
      return ( hive_volume > 0 && hbd_volume > 0 ) ? 1 : 0;
    }

    CHAINBASE_UNPACK_CONSTRUCTOR(liquidity_reward_balance_object);
};
/**
  * This object gets updated once per hour, on the hour
  */
class feed_history_object : public object< feed_history_object_type, feed_history_object >
{
  CHAINBASE_OBJECT( feed_history_object );
  public:
    CHAINBASE_DEFAULT_CONSTRUCTOR( feed_history_object, (price_history) )

    price current_median_history; ///< the current median of the price history, used as the base for convert operations

    using t_price_history = t_deque< price >;

    t_deque< price > price_history; ///< tracks this last week of median_feed one per hour
    CHAINBASE_UNPACK_CONSTRUCTOR(feed_history_object, (price_history));
};
/**
  * @brief an offer to sell an amount of an asset at a specified exchange rate by a certain time
  * @ingroup object
  * @ingroup protocol
  * @ingroup market
  *
  * These limit_order_objects are indexed by @ref expiration and are automatically deleted on the first block after expiration.
  */
class limit_order_object : public object< limit_order_object_type, limit_order_object >
{
  CHAINBASE_OBJECT( limit_order_object );
  public:
    template< typename Allocator >
    limit_order_object( allocator< Allocator > a, uint64_t _id,
      const account_name_type& _seller, const asset& _amount_to_sell, const price& _sell_price,
      const time_point_sec& _creation_time, const time_point_sec& _expiration_time, uint32_t _orderid )
      : id( _id ), created( _creation_time ), expiration( _expiration_time ), seller( _seller ),
      orderid( _orderid ), for_sale( _amount_to_sell.amount ), sell_price( _sell_price )
    {
      // the amount being sold must be denominated in the price's base symbol
      FC_ASSERT( _amount_to_sell.symbol == _sell_price.base.symbol );
    }

    // canonical (smaller symbol, larger symbol) pair identifying the market
    pair< asset_symbol_type, asset_symbol_type > get_market() const
    {
      return sell_price.base.symbol < sell_price.quote.symbol ?
        std::make_pair( sell_price.base.symbol, sell_price.quote.symbol ) :
        std::make_pair( sell_price.quote.symbol, sell_price.base.symbol );
    }

    asset amount_for_sale() const { return asset( for_sale, sell_price.base.symbol ); }
    asset amount_to_receive() const { return amount_for_sale() * sell_price; }

    time_point_sec created;
    time_point_sec expiration;
    account_name_type seller; //< TODO: can be replaced with account_id_type
    uint32_t orderid = 0;
    share_type for_sale; ///< asset id is sell_price.base.symbol
    price sell_price;

    CHAINBASE_UNPACK_CONSTRUCTOR(limit_order_object);
};
/**
  * @brief a route to send withdrawn vesting shares.
  */
class withdraw_vesting_route_object : public object< withdraw_vesting_route_object_type, withdraw_vesting_route_object >
{
  CHAINBASE_OBJECT( withdraw_vesting_route_object, true );
  public:
    CHAINBASE_DEFAULT_CONSTRUCTOR( withdraw_vesting_route_object )

    account_name_type from_account; // account whose power-down is being routed
    account_name_type to_account;   // destination of the routed portion
    uint16_t percent = 0;           // share of the withdrawal sent along this route
    bool auto_vest = false;         // when true the routed amount stays vested -- presumably; confirm against evaluator

    CHAINBASE_UNPACK_CONSTRUCTOR(withdraw_vesting_route_object);
};
// Pending request by `account` to decline its voting rights, taking effect at
// `effective_date`.
class decline_voting_rights_request_object : public object< decline_voting_rights_request_object_type, decline_voting_rights_request_object >
{
  CHAINBASE_OBJECT( decline_voting_rights_request_object );
  public:
    CHAINBASE_DEFAULT_CONSTRUCTOR( decline_voting_rights_request_object )

    account_name_type account;
    time_point_sec effective_date;
    CHAINBASE_UNPACK_CONSTRUCTOR(decline_voting_rights_request_object);
};
// Named pool of HIVE from which author/curation rewards are paid, together
// with the parameters controlling how claims against it are valued.
class reward_fund_object : public object< reward_fund_object_type, reward_fund_object >
{
  CHAINBASE_OBJECT( reward_fund_object );
  public:
    template< typename Allocator >
    reward_fund_object( allocator< Allocator > a, uint64_t _id,
      const string& _name, const asset& _balance, const time_point_sec& _creation_time, const uint128_t& _claims = 0 )
      : id( _id ), name( _name ), reward_balance( _balance ), recent_claims( _claims ), last_update( _creation_time )
    {}

    //amount of HIVE in reward fund
    const asset& get_reward_balance() const { return reward_balance; }

    reward_fund_name_type name;
    asset reward_balance = asset( 0, HIVE_SYMBOL );
    uint128_t recent_claims = 0;
    time_point_sec last_update;
    uint128_t content_constant = HIVE_CONTENT_CONSTANT_HF0;
    uint16_t percent_curation_rewards = HIVE_1_PERCENT * 25;   // 25% of rewards go to curation by default
    uint16_t percent_content_rewards = HIVE_100_PERCENT;
    protocol::curve_id author_reward_curve = protocol::curve_id::quadratic;
    protocol::curve_id curation_reward_curve = protocol::curve_id::bounded_curation;
    CHAINBASE_UNPACK_CONSTRUCTOR(reward_fund_object);
};
struct by_price;
struct by_expiration;
struct by_account;

// Limit orders: by_expiration drives automatic cleanup; by_price sorts best
// (greatest) price first; by_account keys on (seller, orderid).
typedef multi_index_container<
  limit_order_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< limit_order_object, limit_order_object::id_type, &limit_order_object::get_id > >,
    ordered_unique< tag< by_expiration >,
      composite_key< limit_order_object,
        member< limit_order_object, time_point_sec, &limit_order_object::expiration >,
        const_mem_fun< limit_order_object, limit_order_object::id_type, &limit_order_object::get_id >
      >
    >,
    ordered_unique< tag< by_price >,
      composite_key< limit_order_object,
        member< limit_order_object, price, &limit_order_object::sell_price >,
        const_mem_fun< limit_order_object, limit_order_object::id_type, &limit_order_object::get_id >
      >,
      composite_key_compare< std::greater< price >, std::less< limit_order_id_type > >
    >,
    ordered_unique< tag< by_account >,
      composite_key< limit_order_object,
        member< limit_order_object, account_name_type, &limit_order_object::seller >,
        member< limit_order_object, uint32_t, &limit_order_object::orderid >
      >
    >
  >,
  allocator< limit_order_object >
> limit_order_index;

struct by_owner;
struct by_conversion_date;

// Conversion requests: by_conversion_date orders due requests for processing;
// by_owner enforces uniqueness of the (owner, requestid) pair.
typedef multi_index_container<
  convert_request_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< convert_request_object, convert_request_object::id_type, &convert_request_object::get_id > >,
    ordered_unique< tag< by_conversion_date >,
      composite_key< convert_request_object,
        member< convert_request_object, time_point_sec, &convert_request_object::conversion_date >,
        const_mem_fun< convert_request_object, convert_request_object::id_type, &convert_request_object::get_id >
      >
    >,
    ordered_unique< tag< by_owner >,
      composite_key< convert_request_object,
        member< convert_request_object, account_name_type, &convert_request_object::owner >,
        member< convert_request_object, uint32_t, &convert_request_object::requestid >
      >
    >
  >,
  allocator< convert_request_object >
> convert_request_index;
struct by_owner;
struct by_volume_weight;

// Liquidity reward balances: by_volume_weight sorts heaviest weight first
// (std::greater) so the top candidate for the reward is at the front.
typedef multi_index_container<
  liquidity_reward_balance_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< liquidity_reward_balance_object, liquidity_reward_balance_object::id_type, &liquidity_reward_balance_object::get_id > >,
    ordered_unique< tag< by_owner >,
      member< liquidity_reward_balance_object, account_id_type, &liquidity_reward_balance_object::owner > >,
    ordered_unique< tag< by_volume_weight >,
      composite_key< liquidity_reward_balance_object,
        member< liquidity_reward_balance_object, fc::uint128, &liquidity_reward_balance_object::weight >,
        member< liquidity_reward_balance_object, account_id_type, &liquidity_reward_balance_object::owner >
      >,
      composite_key_compare< std::greater< fc::uint128 >, std::less< account_id_type > >
    >
  >,
  allocator< liquidity_reward_balance_object >
> liquidity_reward_balance_index;

// Feed history is a singleton-style object: only the by_id index exists.
typedef multi_index_container<
  feed_history_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< feed_history_object, feed_history_object::id_type, &feed_history_object::get_id > >
  >,
  allocator< feed_history_object >
> feed_history_index;
struct by_withdraw_route;
struct by_destination;

// Withdraw routes: by_withdraw_route keys on (from, to); by_destination lets
// all routes into one account be enumerated.
typedef multi_index_container<
  withdraw_vesting_route_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< withdraw_vesting_route_object, withdraw_vesting_route_object::id_type, &withdraw_vesting_route_object::get_id > >,
    ordered_unique< tag< by_withdraw_route >,
      composite_key< withdraw_vesting_route_object,
        member< withdraw_vesting_route_object, account_name_type, &withdraw_vesting_route_object::from_account >,
        member< withdraw_vesting_route_object, account_name_type, &withdraw_vesting_route_object::to_account >
      >,
      composite_key_compare< std::less< account_name_type >, std::less< account_name_type > >
    >,
    ordered_unique< tag< by_destination >,
      composite_key< withdraw_vesting_route_object,
        member< withdraw_vesting_route_object, account_name_type, &withdraw_vesting_route_object::to_account >,
        const_mem_fun< withdraw_vesting_route_object, withdraw_vesting_route_object::id_type, &withdraw_vesting_route_object::get_id >
      >
    >
  >,
  allocator< withdraw_vesting_route_object >
> withdraw_vesting_route_index;

struct by_from_id;
struct by_ratification_deadline;

// Escrows: by_from_id keys on (from, escrow_id); by_ratification_deadline
// groups unapproved escrows first (is_approved asc) by deadline for expiry.
typedef multi_index_container<
  escrow_object,
  indexed_by<
    ordered_unique< tag< by_id >,
      const_mem_fun< escrow_object, escrow_object::id_type, &escrow_object::get_id > >,
    ordered_unique< tag< by_from_id >,
      composite_key< escrow_object,
        member< escrow_object, account_name_type, &escrow_object::from >,
        member< escrow_object, uint32_t, &escrow_object::escrow_id >
      >
    >,
    ordered_unique< tag< by_ratification_deadline >,
      composite_key< escrow_object,
        const_mem_fun< escrow_object, bool, &escrow_object::is_approved >,
        member< escrow_object, time_point_sec, &escrow_object::ratification_deadline >,
        const_mem_fun< escrow_object, escrow_object::id_type, &escrow_object::get_id >
      >,
      composite_key_compare< std::less< bool >, std::less< time_point_sec >, std::less< escrow_id_type > >
    >
  >,
  allocator< escrow_object >
> escrow_index;
struct by_from_rid;
struct by_to_complete;
struct by_complete_from_rid;
typedef multi_index_container<
savings_withdraw_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< savings_withdraw_object, savings_withdraw_object::id_type, &savings_withdraw_object::get_id > >,
ordered_unique< tag< by_from_rid >,
composite_key< savings_withdraw_object,
member< savings_withdraw_object, account_name_type, &savings_withdraw_object::from >,
member< savings_withdraw_object, uint32_t, &savings_withdraw_object::request_id >
>
>,
ordered_unique< tag< by_complete_from_rid >,
composite_key< savings_withdraw_object,
member< savings_withdraw_object, time_point_sec, &savings_withdraw_object::complete >,
member< savings_withdraw_object, account_name_type, &savings_withdraw_object::from >,
member< savings_withdraw_object, uint32_t, &savings_withdraw_object::request_id >
>
>,
ordered_unique< tag< by_to_complete >,
composite_key< savings_withdraw_object,
member< savings_withdraw_object, account_name_type, &savings_withdraw_object::to >,
member< savings_withdraw_object, time_point_sec, &savings_withdraw_object::complete >,
const_mem_fun< savings_withdraw_object, savings_withdraw_object::id_type, &savings_withdraw_object::get_id >
>
>
>,
allocator< savings_withdraw_object >
> savings_withdraw_index;
// Tags for the decline_voting_rights_request index below.
struct by_account;
struct by_effective_date;
// Pending requests to decline voting rights:
//  - by_account:        at most one pending request per account
//  - by_effective_date: processing order when requests mature; account name breaks ties
typedef multi_index_container<
decline_voting_rights_request_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< decline_voting_rights_request_object, decline_voting_rights_request_object::id_type, &decline_voting_rights_request_object::get_id > >,
ordered_unique< tag< by_account >,
member< decline_voting_rights_request_object, account_name_type, &decline_voting_rights_request_object::account >
>,
ordered_unique< tag< by_effective_date >,
composite_key< decline_voting_rights_request_object,
member< decline_voting_rights_request_object, time_point_sec, &decline_voting_rights_request_object::effective_date >,
member< decline_voting_rights_request_object, account_name_type, &decline_voting_rights_request_object::account >
>,
composite_key_compare< std::less< time_point_sec >, std::less< account_name_type > >
>
>,
allocator< decline_voting_rights_request_object >
> decline_voting_rights_request_index;
// Reward funds, addressable by primary id or by unique fund name
// (by_name is declared earlier in this header).
typedef multi_index_container<
reward_fund_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< reward_fund_object, reward_fund_object::id_type, &reward_fund_object::get_id > >,
ordered_unique< tag< by_name >,
member< reward_fund_object, reward_fund_name_type, &reward_fund_object::name > >
>,
allocator< reward_fund_object >
> reward_fund_index;
} } // hive::chain
#ifdef ENABLE_MIRA
namespace mira {
// Marks object types whose serialized representation has a fixed byte length.
// NOTE(review): presumably this lets MIRA use a fixed-size value format for
// these objects - confirm against mira's serialization code. Adding a
// variable-length field (string/vector) to any of these types would make the
// trait incorrect.
template<> struct is_static_length< hive::chain::convert_request_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::escrow_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::liquidity_reward_balance_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::limit_order_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::withdraw_vesting_route_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::decline_voting_rights_request_object > : public boost::true_type {};
template<> struct is_static_length< hive::chain::reward_fund_object > : public boost::true_type {};
} // mira
#endif
#include <hive/chain/comment_object.hpp>
#include <hive/chain/account_object.hpp>
// fc reflection + chainbase index registration for the objects above.
// The FC_REFLECT member list defines the serialized field order - do not
// reorder or remove entries, as that changes the binary/state format.
FC_REFLECT( hive::chain::limit_order_object,
(id)(created)(expiration)(seller)(orderid)(for_sale)(sell_price) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::limit_order_object, hive::chain::limit_order_index )
FC_REFLECT( hive::chain::feed_history_object,
(id)(current_median_history)(price_history) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::feed_history_object, hive::chain::feed_history_index )
FC_REFLECT( hive::chain::convert_request_object,
(id)(owner)(requestid)(amount)(conversion_date) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::convert_request_object, hive::chain::convert_request_index )
FC_REFLECT( hive::chain::liquidity_reward_balance_object,
(id)(owner)(hive_volume)(hbd_volume)(weight)(last_update) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::liquidity_reward_balance_object, hive::chain::liquidity_reward_balance_index )
FC_REFLECT( hive::chain::withdraw_vesting_route_object,
(id)(from_account)(to_account)(percent)(auto_vest) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::withdraw_vesting_route_object, hive::chain::withdraw_vesting_route_index )
FC_REFLECT( hive::chain::savings_withdraw_object,
(id)(from)(to)(memo)(request_id)(amount)(complete) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::savings_withdraw_object, hive::chain::savings_withdraw_index )
FC_REFLECT( hive::chain::escrow_object,
(id)(escrow_id)(from)(to)(agent)
(ratification_deadline)(escrow_expiration)
(hbd_balance)(hive_balance)(pending_fee)
(to_approved)(agent_approved)(disputed) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::escrow_object, hive::chain::escrow_index )
FC_REFLECT( hive::chain::decline_voting_rights_request_object,
(id)(account)(effective_date) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::decline_voting_rights_request_object, hive::chain::decline_voting_rights_request_index )
FC_REFLECT( hive::chain::reward_fund_object,
(id)
(name)
(reward_balance)
(recent_claims)
(last_update)
(content_constant)
(percent_curation_rewards)
(percent_content_rewards)
(author_reward_curve)
(curation_reward_curve)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::reward_fund_object, hive::chain::reward_fund_index )

View File

@ -0,0 +1,100 @@
#pragma once
#include <mira/index_converter.hpp>
#include <hive/schema/schema.hpp>
#include <hive/protocol/schema_types.hpp>
#include <hive/chain/schema_types.hpp>
#include <hive/chain/database.hpp>
namespace hive { namespace chain {
using hive::schema::abstract_schema;
// Chainbase index extension exposing a runtime schema descriptor for the
// value type stored in the index. Concrete per-index implementations are
// generated by index_info_impl below.
struct index_info
: public chainbase::index_extension
{
index_info();
virtual ~index_info();
// Returns the schema describing the index's value type.
virtual std::shared_ptr< abstract_schema > get_schema() = 0;
};
// Binds a concrete multi-index type to its value-type schema; the schema is
// resolved once at construction and cached in _schema.
template< typename MultiIndexType >
struct index_info_impl
: public index_info
{
typedef typename MultiIndexType::value_type value_type;
index_info_impl()
: _schema( hive::schema::get_schema_for_type< value_type >() ) {}
virtual ~index_info_impl() {}
virtual std::shared_ptr< abstract_schema > get_schema() override
{ return _schema; }
std::shared_ptr< abstract_schema > _schema;
};
// Registers MultiIndexType with the database and attaches an
// index_info_impl extension so the index can report its value-type schema.
template< typename MultiIndexType >
void _add_index_impl( database& db )
{
   db.add_index< MultiIndexType >();
   auto schema_ext = std::make_shared< index_info_impl< MultiIndexType > >();
   db.add_index_extension< MultiIndexType >( schema_ext );
}
// Registers a consensus (core) index immediately.
template< typename MultiIndexType >
void add_core_index( database& db )
{
_add_index_impl< MultiIndexType >( db );
}
// Defers registration of a plugin index until the database fires
// _plugin_index_signal (i.e. after core indices are in place).
template< typename MultiIndexType >
void add_plugin_index( database& db )
{
db._plugin_index_signal.connect( [&db](){ _add_index_impl< MultiIndexType >(db); } );
}
} }
#ifdef ENABLE_MIRA
// MIRA builds additionally wire up a set_index_type delegate so callers can
// switch an index's backing store at runtime (see mira::index_type).
// Note: no comments inside the macro bodies - they would break the
// backslash line continuations.
#define HIVE_ADD_CORE_INDEX(db, index_name) \
do { \
hive::chain::add_core_index< index_name >( db ); \
hive::chain::index_delegate delegate; \
delegate.set_index_type = \
[]( database& _db, mira::index_type type, const boost::filesystem::path& p, const boost::any& cfg ) \
{ _db.get_mutable_index< index_name >().mutable_indices().set_index_type( type, p, cfg ); }; \
db.set_index_delegate( #index_name, std::move( delegate ) ); \
} while( false )
#define HIVE_ADD_PLUGIN_INDEX(db, index_name) \
do { \
hive::chain::add_plugin_index< index_name >( db ); \
hive::chain::index_delegate delegate; \
delegate.set_index_type = \
[]( database& _db, mira::index_type type, const boost::filesystem::path& p, const boost::any& cfg ) \
{ _db.get_mutable_index< index_name >().mutable_indices().set_index_type( type, p, cfg ); }; \
db.set_index_delegate( #index_name, std::move( delegate ) ); \
} while( false )
#else
// Non-MIRA builds register the index with an empty (default) delegate.
#define HIVE_ADD_CORE_INDEX(db, index_name) \
do { \
hive::chain::add_core_index< index_name >( db ); \
hive::chain::index_delegate delegate; \
db.set_index_delegate( #index_name, std::move( delegate ) ); \
} while( false )
#define HIVE_ADD_PLUGIN_INDEX(db, index_name) \
do { \
hive::chain::add_plugin_index< index_name >( db ); \
hive::chain::index_delegate delegate; \
db.set_index_delegate( #index_name, std::move( delegate ) ); \
} while( false )
#endif

View File

@ -23,7 +23,7 @@
#include <type_traits>
#include <typeinfo>
namespace steem { namespace chain {
namespace hive { namespace chain {
#ifndef ENABLE_MIRA
using boost::multi_index::multi_index_container;
@ -38,7 +38,7 @@ using boost::multi_index::const_mem_fun;
template< class Iterator >
inline boost::reverse_iterator< Iterator > make_reverse_iterator( Iterator iterator )
{
return boost::reverse_iterator< Iterator >( iterator );
return boost::reverse_iterator< Iterator >( iterator );
}
#else
@ -56,22 +56,22 @@ template< typename T1, typename T2, typename T3 >
class bmic_type : public ::mira::boost_multi_index_adapter< T1, T2, T3 >
{
public:
using mira::boost_multi_index_adapter< T1, T2, T3 >::boost_multi_index_adapter;
using mira::boost_multi_index_adapter< T1, T2, T3 >::boost_multi_index_adapter;
};
template< typename T1, typename T2, typename T3 >
class mira_type : public ::mira::multi_index_container< T1, T2, T3 >
{
public:
using mira::multi_index_container< T1, T2, T3 >::multi_index_container;
using mira::multi_index_container< T1, T2, T3 >::multi_index_container;
};
template< class Iterator >
inline Iterator make_reverse_iterator( Iterator iterator )
{
return iterator.reverse();
return iterator.reverse();
}
#endif
} } // steem::chain
} } // hive::chain

View File

@ -1,8 +1,8 @@
#pragma once
namespace steem { namespace chain {
namespace hive { namespace chain {
/**
/**
* @brief Contains per-node database configuration.
*
* Transactions are evaluated differently based on per-node state.
@ -12,12 +12,12 @@ namespace steem { namespace chain {
* from the p2p network. Or configuration-specified tradeoffs of
* performance/hardfork resilience vs. paranoia.
*/
class node_property_object
{
public:
node_property_object(){}
~node_property_object(){}
class node_property_object
{
public:
node_property_object(){}
~node_property_object(){}
uint32_t skip_flags = 0;
};
} } // steem::chain
uint32_t skip_flags = 0;
};
} } // hive::chain

View File

@ -0,0 +1,57 @@
#pragma once
#include <hive/protocol/block.hpp>
namespace hive { namespace chain {
// Payload for block-applied signals. Caches the block id and number so
// observers do not recompute them. Holds only a reference to the block:
// the notification must not outlive the signed_block it was built from.
struct block_notification
{
   block_notification( const hive::protocol::signed_block& b )
      : block_id( b.id() ),
        // block_id is declared (and therefore initialized) first, so it is
        // safe to derive the number from it here; b.id() is computed once.
        block_num( hive::protocol::block_header::num_from_id( block_id ) ),
        block( b )
   {}

   hive::protocol::block_id_type block_id;
   uint32_t block_num = 0;
   const hive::protocol::signed_block& block;
};
// Payload for transaction-applied signals; caches the transaction id.
// Holds only a reference - must not outlive the signed_transaction.
struct transaction_notification
{
   transaction_notification( const hive::protocol::signed_transaction& tx )
      : transaction_id( tx.id() ),
        transaction( tx )
   {}

   hive::protocol::transaction_id_type transaction_id;
   const hive::protocol::signed_transaction& transaction;
};
// Payload for operation-applied signals. Position fields (trx_id, block,
// trx_in_block, op_in_trx, virtual_op) default to zero and are filled in by
// the emitter after construction; only the operation reference is set here.
// NOTE(review): virtual_op presumably is non-zero for virtual operations -
// confirm against the code that emits these notifications.
struct operation_notification
{
operation_notification( const hive::protocol::operation& o ) : op(o) {}
transaction_id_type trx_id;
uint32_t block = 0;
uint32_t trx_in_block = 0;
uint32_t op_in_trx = 0;
uint32_t virtual_op = 0;
const hive::protocol::operation& op;
};
// Payload for required automated action signals; wraps a reference only,
// so it must not outlive the action it refers to.
struct required_action_notification
{
required_action_notification( const hive::protocol::required_automated_action& a ) : action(a) {}
const hive::protocol::required_automated_action& action;
};
// Payload for optional automated action signals; same lifetime caveat.
struct optional_action_notification
{
optional_action_notification( const hive::protocol::optional_automated_action& a ) : action(a) {}
const hive::protocol::optional_automated_action& action;
};
} }

View File

@ -0,0 +1,15 @@
#pragma once
#include <hive/protocol/hive_optional_actions.hpp>
#include <hive/chain/evaluator.hpp>
namespace hive { namespace chain {
using namespace hive::protocol;
// The example optional-action evaluator exists only on test networks; it is
// compiled out of mainnet builds.
#ifdef IS_TEST_NET
HIVE_DEFINE_ACTION_EVALUATOR( example_optional, optional_automated_action )
#endif
} } //hive::chain

View File

@ -0,0 +1,43 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/required_automated_actions.hpp>
#include <hive/chain/hive_object_types.hpp>
namespace hive { namespace chain {
using hive::protocol::optional_automated_action;
// An optional automated action queued for execution at execution_time.
// Unlike required actions, optional actions may be dropped by the chain
// NOTE(review): "may be dropped" inferred from the required/optional naming -
// confirm against the processing code in database.cpp.
class pending_optional_action_object : public object< pending_optional_action_object_type, pending_optional_action_object >
{
CHAINBASE_OBJECT( pending_optional_action_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( pending_optional_action_object )
time_point_sec execution_time;
optional_automated_action action;
CHAINBASE_UNPACK_CONSTRUCTOR(pending_optional_action_object);
};
// Tag for the execution-time ordering. Forward-declared here so this header
// is self-contained: previously the typedef below used by_execution without
// any declaration in this file and compiled only when
// pending_required_action_object.hpp (which declares the same tag) happened
// to be included first. Re-declaring a forward declaration is harmless.
struct by_execution;

// Pending optional actions, by primary id and by due time (id breaks ties).
typedef multi_index_container<
pending_optional_action_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< pending_optional_action_object, pending_optional_action_object::id_type, &pending_optional_action_object::get_id > >,
ordered_unique< tag< by_execution >,
composite_key< pending_optional_action_object,
member< pending_optional_action_object, time_point_sec, &pending_optional_action_object::execution_time >,
const_mem_fun< pending_optional_action_object, pending_optional_action_object::id_type, &pending_optional_action_object::get_id >
>
>
>,
allocator< pending_optional_action_object >
> pending_optional_action_index;
} } //hive::chain
// Serialized field order - do not reorder.
FC_REFLECT( hive::chain::pending_optional_action_object,
(id)(execution_time)(action) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::pending_optional_action_object, hive::chain::pending_optional_action_index )

View File

@ -0,0 +1,44 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/required_automated_actions.hpp>
#include <hive/chain/hive_object_types.hpp>
namespace hive { namespace chain {
using hive::protocol::required_automated_action;
// A required automated action queued for execution at execution_time; the
// chain must eventually apply these (contrast with the optional variant).
class pending_required_action_object : public object< pending_required_action_object_type, pending_required_action_object >
{
CHAINBASE_OBJECT( pending_required_action_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( pending_required_action_object )
time_point_sec execution_time;
required_automated_action action;
CHAINBASE_UNPACK_CONSTRUCTOR(pending_required_action_object);
};
// Tag for the execution-time ordering below.
struct by_execution;
// Pending required actions, by primary id and by due time (id breaks ties).
typedef multi_index_container<
pending_required_action_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< pending_required_action_object, pending_required_action_object::id_type, &pending_required_action_object::get_id > >,
ordered_unique< tag< by_execution >,
composite_key< pending_required_action_object,
member< pending_required_action_object, time_point_sec, &pending_required_action_object::execution_time >,
const_mem_fun< pending_required_action_object, pending_required_action_object::id_type, &pending_required_action_object::get_id >
>
>
>,
allocator< pending_required_action_object >
> pending_required_action_index;
} } //hive::chain
// Serialized field order - do not reorder.
FC_REFLECT( hive::chain::pending_required_action_object,
(id)(execution_time)(action) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::pending_required_action_object, hive::chain::pending_required_action_index )

View File

@ -0,0 +1,15 @@
#pragma once
#include <hive/protocol/hive_required_actions.hpp>
#include <hive/chain/evaluator.hpp>
namespace hive { namespace chain {
using namespace hive::protocol;
// The example required-action evaluator exists only on test networks; it is
// compiled out of mainnet builds.
#ifdef IS_TEST_NET
HIVE_DEFINE_ACTION_EVALUATOR( example_required, required_automated_action )
#endif
} } //hive::chain

View File

@ -0,0 +1,4 @@
#pragma once
#include <hive/chain/schema_types/oid.hpp>

View File

@ -0,0 +1,69 @@
#pragma once
#include <hive/schema/abstract_schema.hpp>
#include <hive/schema/schema_impl.hpp>
#include <chainbase/util/object_id.hpp>
namespace hive { namespace schema { namespace detail {
//////////////////////////////////////////////
// oid //
//////////////////////////////////////////////
// Schema describer for chainbase::oid< T > / oid_ref< T >; the standard
// member set (get_deps, get_str_schema, ...) is generated by the macro and
// the two non-trivial members are defined out-of-line below.
template< typename T >
struct schema_oid_impl
: public abstract_schema
{
HIVE_SCHEMA_TEMPLATE_CLASS_BODY( schema_oid_impl )
};
// An oid< T > schema depends only on the schema of T itself.
template< typename T >
void schema_oid_impl< T >::get_deps( std::vector< std::shared_ptr< abstract_schema > >& deps )
{
   deps.emplace_back( get_schema_for_type< T >() );
}
// Returns (building and caching on first call) the JSON schema string for
// oid< T >: { "name": <our name>, "type": "oid", "etype": <name of T> }.
template< typename T >
void schema_oid_impl< T >::get_str_schema( std::string& s )
{
   if( str_schema.empty() )
   {
      // Sole dependency is the schema of T; use it to resolve T's name.
      std::vector< std::shared_ptr< abstract_schema > > deps;
      get_deps( deps );
      std::string e_name;
      deps[0]->get_name( e_name );

      std::string my_name;
      get_name( my_name );

      fc::mutable_variant_object mvo;
      mvo( "name", my_name );
      mvo( "type", "oid" );
      mvo( "etype", e_name );

      str_schema = fc::json::to_string( mvo );
   }
   s = str_schema;
}
}
// Route both owning ids (oid) and non-owning references (oid_ref) to the
// same schema implementation - they serialize to the same logical type.
template< typename T >
struct schema_reflect< chainbase::oid< T > >
{
typedef detail::schema_oid_impl< T > schema_impl_type;
};
template< typename T >
struct schema_reflect< chainbase::oid_ref< T > >
{
typedef detail::schema_oid_impl< T > schema_impl_type;
};
} }

View File

@ -0,0 +1,105 @@
#pragma once
#include <hive/chain/hive_object_types.hpp>
#include <hive/protocol/authority.hpp>
#include <chainbase/chainbase.hpp>
#include <boost/interprocess/managed_mapped_file.hpp>
namespace hive { namespace chain {
using hive::protocol::authority;
using hive::protocol::public_key_type;
using hive::protocol::account_name_type;
using hive::protocol::weight_type;
using chainbase::t_flat_map;
using chainbase::t_allocator_pair;
/**
* The purpose of this class is to represent an authority object in a manner compatiable with
* shared memory storage. This requires all dynamic fields to be allocated with the same allocator
* that allocated the shared_authority.
*/
struct shared_authority
{
#ifdef ENABLE_MIRA
shared_authority() {}
#endif
// Deep-copies a protocol authority into shared-memory-allocated containers.
template< typename Allocator >
shared_authority( const authority& a, const Allocator& alloc ) :
account_auths( account_pair_allocator_type( alloc ) ),
key_auths( key_pair_allocator_type( alloc ) )
{
account_auths.reserve( a.account_auths.size() );
key_auths.reserve( a.key_auths.size() );
for( const auto& item : a.account_auths )
account_auths.insert( item );
for( const auto& item : a.key_auths )
key_auths.insert( item );
weight_threshold = a.weight_threshold;
}
// Copy keeps the source's allocator (flat_map copy ctor propagates it).
shared_authority( const shared_authority& cpy ) :
weight_threshold( cpy.weight_threshold ),
account_auths( cpy.account_auths ), key_auths( cpy.key_auths ) {}
// Empty authority bound to the given shared-memory allocator.
template< typename Allocator >
explicit shared_authority( const Allocator& alloc ) :
account_auths( account_pair_allocator_type( alloc ) ),
key_auths( key_pair_allocator_type( alloc ) ) {}
// Variadic form: alternating (key-or-account, weight) pairs.
template< typename Allocator, class ...Args >
shared_authority( const Allocator& alloc, uint32_t weight_threshold, Args... auths ) :
weight_threshold( weight_threshold ),
account_auths( account_pair_allocator_type( alloc ) ),
key_auths( key_pair_allocator_type( alloc ) )
{
add_authorities( auths... );
}
operator authority()const;
shared_authority& operator=( const authority& a );
// Overloads dispatch to key_auths vs account_auths by argument type.
void add_authority( const public_key_type& k, weight_type w );
void add_authority( const account_name_type& k, weight_type w );
template<typename AuthType>
void add_authorities(AuthType k, weight_type w)
{
add_authority(k, w);
}
// Recursive unpacking of the variadic (auth, weight) pair list.
template<typename AuthType, class ...Args>
void add_authorities(AuthType k, weight_type w, Args... auths)
{
add_authority(k, w);
add_authorities(auths...);
}
vector<public_key_type> get_keys()const;
bool is_impossible()const;
uint32_t num_auths()const;
void clear();
void validate()const;
using account_pair_allocator_type = t_allocator_pair< account_name_type, weight_type >;
using key_pair_allocator_type = t_allocator_pair< public_key_type, weight_type >;
typedef t_flat_map< account_name_type, weight_type> account_authority_map;
typedef t_flat_map< public_key_type, weight_type> key_authority_map;
// Field order matters: it matches the FC_REFLECT list (serialized order).
uint32_t weight_threshold = 0;
account_authority_map account_auths;
key_authority_map key_auths;
};
// Equality across the shared and protocol representations (defined in .cpp).
bool operator == ( const shared_authority& a, const shared_authority& b );
bool operator == ( const authority& a, const shared_authority& b );
bool operator == ( const shared_authority& a, const authority& b );
} } //hive::chain
FC_REFLECT_TYPENAME( hive::chain::shared_authority::account_authority_map)
// Serialized field order - must match the member declaration order above.
FC_REFLECT( hive::chain::shared_authority, (weight_threshold)(account_auths)(key_auths) )

View File

@ -0,0 +1,22 @@
#include <hive/protocol/types.hpp>
namespace hive { namespace chain {
// Hard-coded per-block merkle root overrides.
// NOTE(review): presumably these are historic blocks whose recomputed merkle
// root differs from the signed one due to old serialization/replay bugs, so
// validation substitutes the known-good value - confirm against the caller.
// The checksum literals are consensus data: never edit them.
inline static const map< uint32_t, checksum_type >& get_shared_db_merkle()
{
static const map< uint32_t, checksum_type > shared_db_merkle
{
{ 3705111, checksum_type( "0a8f0fd5450c3706ec8b8cbad795cd0b3679bf35" ) },
{ 3705120, checksum_type( "2027edb72b671f7011c8cc4c7a8b59c39b305093" ) },
{ 3713940, checksum_type( "bf8a1d516927c506ebdbb7b38bef2e992435435f" ) },
{ 3714132, checksum_type( "e8b77773d268b72c8d650337b8cce360bbe64779" ) },
{ 3714567, checksum_type( "45af59a8c2d7d4a606151ef5dae03d2dfe13fbdd" ) },
{ 3714588, checksum_type( "e64275443bdc82f104ac936486d367af8f6d1584" ) },
{ 4138790, checksum_type( "f65a3a788a2ef52406d8ba5705d7288be228403f" ) },
{ 5435426, checksum_type( "0b32538b2d22bd3146d54b6e3cb5ae8b9780e8a5" ) }
};
return shared_db_merkle;
}
} } //hive::chain

View File

@ -0,0 +1,7 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/chain/smt_objects/smt_token_object.hpp>
#include <hive/chain/smt_objects/account_balance_object.hpp>
#include <hive/chain/smt_objects/smt_market_maker.hpp>
#include <hive/chain/smt_objects/nai_pool_object.hpp>
#include <hive/chain/smt_objects/nai_pool.hpp>

View File

@ -0,0 +1,135 @@
#pragma once
#include <hive/chain/account_object.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/protocol/smt_operations.hpp>
#ifdef HIVE_ENABLE_SMT
namespace hive { namespace chain {
/**
* Class responsible for holding regular (i.e. non-reward) balance of SMT for given account.
* It has not been unified with reward balance object counterpart, due to different number
* of fields needed to hold balances (2 for regular, 3 for reward).
*/
class account_regular_balance_object : public object< account_regular_balance_object_type, account_regular_balance_object >
{
CHAINBASE_OBJECT( account_regular_balance_object );
public:
// Allocator parameter `a` is unused (no dynamic members) but required by
// the chainbase construction protocol.
template < typename Allocator>
account_regular_balance_object( allocator< Allocator > a, uint64_t _id,
const account_object& _owner, asset_symbol_type liquid_symbol )
: id( _id ), owner( _owner.get_id() ), liquid( 0, liquid_symbol ), vesting( 0, liquid_symbol.get_paired_symbol() )
{}
// True when both balances are zero - the object can then be removed.
bool is_empty() const
{
return ( liquid.amount == 0 ) && ( vesting.amount == 0 );
}
asset_symbol_type get_liquid_symbol() const
{
return liquid.symbol;
}
/// Id of the account the balance is held for (not the name).
account_id_type owner;
asset liquid; /// 'balance' for HIVE
asset vesting; /// 'vesting_shares' for VESTS
CHAINBASE_UNPACK_CONSTRUCTOR(account_regular_balance_object);
};
/**
* Class responsible for holding reward balance of SMT for given account.
* It has not been unified with regular balance object counterpart, due to different number
* of fields needed to hold balances (2 for regular, 3 for reward).
*/
class account_rewards_balance_object : public object< account_rewards_balance_object_type, account_rewards_balance_object >
{
CHAINBASE_OBJECT( account_rewards_balance_object );
public:
// Allocator parameter `a` is unused (no dynamic members) but required by
// the chainbase construction protocol.
template < typename Allocator >
account_rewards_balance_object( allocator< Allocator > a, uint64_t _id,
const account_object& _owner, asset_symbol_type _liquid_symbol )
: id( _id ), owner( _owner.get_id() ), pending_liquid( 0, _liquid_symbol ),
pending_vesting_shares( 0, _liquid_symbol.get_paired_symbol() ), pending_vesting_value( 0, _liquid_symbol )
{}
// True when nothing is pending; note pending_vesting_value is derived and
// intentionally not part of this emptiness check.
bool is_empty() const
{
return ( pending_liquid.amount == 0 ) && ( pending_vesting_shares.amount == 0 );
}
asset_symbol_type get_liquid_symbol() const
{
return pending_liquid.symbol;
}
/// Id of the account the balance is held for (not the name).
account_id_type owner;
asset pending_liquid; /// 'reward_hive_balance' for pending HIVE
asset pending_vesting_shares; /// 'reward_vesting_balance' for pending VESTS
asset pending_vesting_value; /// 'reward_vesting_hive' for pending VESTS
CHAINBASE_UNPACK_CONSTRUCTOR(account_rewards_balance_object);
};
// Tag shared by both balance indices: unique (owner id, liquid symbol) pair,
// i.e. one balance object per account per SMT.
struct by_owner_liquid_symbol;
typedef multi_index_container <
account_regular_balance_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< account_regular_balance_object, account_regular_balance_object::id_type, &account_regular_balance_object::get_id>
>,
ordered_unique<tag<by_owner_liquid_symbol>,
composite_key<account_regular_balance_object,
member< account_regular_balance_object, account_id_type, &account_regular_balance_object::owner >,
const_mem_fun< account_regular_balance_object, asset_symbol_type, &account_regular_balance_object::get_liquid_symbol >
>
>
>,
allocator< account_regular_balance_object >
> account_regular_balance_index;
typedef multi_index_container <
account_rewards_balance_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< account_rewards_balance_object, account_rewards_balance_object::id_type, &account_rewards_balance_object::get_id>
>,
ordered_unique<tag<by_owner_liquid_symbol>,
composite_key<account_rewards_balance_object,
member< account_rewards_balance_object, account_id_type, &account_rewards_balance_object::owner >,
const_mem_fun< account_rewards_balance_object, asset_symbol_type, &account_rewards_balance_object::get_liquid_symbol >
>
>
>,
allocator< account_rewards_balance_object >
> account_rewards_balance_index;
} } // namespace hive::chain
// Reflection + index registration; member order is the serialized order.
FC_REFLECT( hive::chain::account_regular_balance_object,
(id)
(owner)
(liquid)
(vesting)
)
FC_REFLECT( hive::chain::account_rewards_balance_object,
(id)
(owner)
(pending_liquid)
(pending_vesting_shares)
(pending_vesting_value)
)
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_regular_balance_object, hive::chain::account_regular_balance_index )
CHAINBASE_SET_INDEX_TYPE( hive::chain::account_rewards_balance_object, hive::chain::account_rewards_balance_index )
#endif

View File

@ -0,0 +1,14 @@
#pragma once
#include <hive/chain/database.hpp>
#include <hive/protocol/asset_symbol.hpp>
#ifdef HIVE_ENABLE_SMT
namespace hive { namespace chain {
// Refills the pool of available NAIs (numeric asset identifiers).
// NOTE(review): semantics inferred from the names; confirm in nai_pool.cpp.
void replenish_nai_pool( database& db );
// Removes symbol `a` from the pool (e.g. once it has been claimed).
void remove_from_nai_pool( database &db, const asset_symbol_type& a );
} } // hive::chain
#endif

View File

@ -0,0 +1,50 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/protocol/asset_symbol.hpp>
#ifdef HIVE_ENABLE_SMT
namespace hive { namespace chain {
// Singleton holding the currently claimable NAIs. `nais` is a fixed-size
// array of which only the first num_available_nais entries are valid.
class nai_pool_object : public object< nai_pool_object_type, nai_pool_object >
{
CHAINBASE_OBJECT( nai_pool_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( nai_pool_object )
uint8_t num_available_nais = 0;
fc::array< asset_symbol_type, SMT_MAX_NAI_POOL_COUNT > nais;
// Copies the valid prefix of `nais` into a plain vector.
std::vector< asset_symbol_type > pool() const
{
return std::vector< asset_symbol_type >{ nais.begin(), nais.begin() + num_available_nais };
}
// Membership test; `a` is normalized first (precision bits stripped) so
// any precision variant of the same NAI matches the stored symbol.
bool contains( const asset_symbol_type& a ) const
{
const auto end = nais.begin() + num_available_nais;
return std::find( nais.begin(), end, asset_symbol_type::from_asset_num( a.get_stripped_precision_smt_num() ) ) != end;
}
CHAINBASE_UNPACK_CONSTRUCTOR(nai_pool_object);
};
// Single-object index: the pool is addressed only by primary id.
typedef multi_index_container <
nai_pool_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< nai_pool_object, nai_pool_object::id_type, &nai_pool_object::get_id > >
>,
allocator< nai_pool_object >
> nai_pool_index;
} } // namespace hive::chain
// Serialized field order - do not reorder.
FC_REFLECT( hive::chain::nai_pool_object, (id)(num_available_nais)(nais) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::nai_pool_object, hive::chain::nai_pool_index )
#endif

View File

@ -0,0 +1,31 @@
#pragma once
#ifdef HIVE_ENABLE_SMT
#include <cstdint>
#include <utility>
#include <vector>
#include <fc/reflect/reflect.hpp>
namespace hive { namespace chain {
/// Plain numerator/denominator pair used for market-maker tick ratios.
struct rational_u64
{
   rational_u64() {}

   /// Construct from a (numerator, denominator) pair.
   rational_u64( const std::pair< uint64_t, uint64_t >& p )
      : numerator( p.first ),
        denominator( p.second )
   {}

   uint64_t numerator   = 0;
   uint64_t denominator = 0;
};
// Market-maker tick schedule; the table itself lives in the .cpp.
const std::vector< rational_u64 >& get_mm_ticks();
} }
// Serialized field order - do not reorder.
FC_REFLECT( hive::chain::rational_u64,
(numerator)
(denominator)
)
#endif

View File

@ -0,0 +1,342 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <hive/chain/util/tiny_asset.hpp>
#include <hive/protocol/smt_operations.hpp>
#ifdef HIVE_ENABLE_SMT
namespace hive { namespace chain {
using protocol::curve_id;
enum class smt_phase : uint8_t
{
account_elevated,
setup_completed,
contribution_begin_time_completed,
contribution_end_time_completed,
launch_failed, /// launch window closed with either not enough contributions or some cap not revealed
launch_success /// enough contributions were declared and caps revealed before launch windows closed
};
/**Note that the object represents both liquid and vesting variant of SMT.
* The same object is returned by indices when searched by liquid/vesting symbol/nai.
*/
class smt_token_object : public object< smt_token_object_type, smt_token_object >
{
CHAINBASE_OBJECT( smt_token_object );
public:
// Automated market maker state for this token.
struct smt_market_maker_state
{
uint32_t reserve_ratio = 0;
HIVE_asset hive_balance;
asset token_balance;
};
public:
// Allocator parameter `a` is unused but required by chainbase construction.
template< typename Allocator >
smt_token_object( allocator< Allocator > a, uint64_t _id,
asset_symbol_type _liquid_symbol, const account_name_type& _control_account )
: id( _id ), liquid_symbol( _liquid_symbol ), control_account( _control_account )
{
market_maker.token_balance = asset( 0, liquid_symbol );
}
// 1:1 price between the vesting and liquid variants of this token.
// Note: std::pow returns double; exact here as long as decimals() stays
// within double's integer-exact range, which asset precisions do.
price one_vesting_to_one_liquid() const
{
int64_t one_smt = std::pow(10, liquid_symbol.decimals());
return price ( asset( one_smt, liquid_symbol.get_paired_symbol() ), asset( one_smt, liquid_symbol ) );
// ^ On the assumption that liquid and vesting SMT have the same precision. See issue 2212
}
// Vesting share price; falls back to 1:1 while the fund is empty.
price get_vesting_share_price() const
{
if ( total_vesting_fund_smt == 0 || total_vesting_shares == 0 )
return one_vesting_to_one_liquid();
// ^ In original method of global_property_object it was one liquid to one vesting which seems to be a bug.
return price( asset( total_vesting_shares, liquid_symbol.get_paired_symbol() ), asset( total_vesting_fund_smt, liquid_symbol ) );
}
// Vesting share price including not-yet-claimed rewards.
price get_reward_vesting_share_price() const
{
share_type reward_vesting_shares = total_vesting_shares + pending_rewarded_vesting_shares;
share_type reward_vesting_smt = total_vesting_fund_smt + pending_rewarded_vesting_smt;
if( reward_vesting_shares == 0 || reward_vesting_smt == 0 )
return one_vesting_to_one_liquid();
// ^ Additional check not found in original get_reward_vesting_share_price. See issue 2212
return price( asset( reward_vesting_shares, liquid_symbol.get_paired_symbol() ), asset( reward_vesting_smt, liquid_symbol ) );
}
/**The object represents both liquid and vesting variant of SMT
* To get vesting symbol, call liquid_symbol.get_paired_symbol()
*/
asset_symbol_type liquid_symbol;
account_name_type control_account;
smt_phase phase = smt_phase::account_elevated;
// Supply / vesting accounting. Member order below is consensus-relevant
// (matches the FC_REFLECT list elsewhere) - do not reorder.
share_type current_supply = 0;
share_type total_vesting_fund_smt = 0;
share_type total_vesting_shares = 0;
share_type pending_rewarded_vesting_shares = 0;
share_type pending_rewarded_vesting_smt = 0;
smt_market_maker_state market_maker;
/// set_setup_parameters
bool allow_voting = true;
/// set_runtime_parameters
uint32_t cashout_window_seconds = HIVE_CASHOUT_WINDOW_SECONDS;
uint32_t reverse_auction_window_seconds = HIVE_REVERSE_AUCTION_WINDOW_SECONDS_HF20;
uint32_t vote_regeneration_period_seconds = HIVE_VOTING_MANA_REGENERATION_SECONDS;
uint32_t votes_per_regeneration_period = SMT_DEFAULT_VOTES_PER_REGEN_PERIOD;
uint128_t content_constant = HIVE_CONTENT_CONSTANT_HF0;
uint16_t percent_curation_rewards = SMT_DEFAULT_PERCENT_CURATION_REWARDS;
protocol::curve_id author_reward_curve = curve_id::linear;
protocol::curve_id curation_reward_curve = curve_id::square_root;
bool allow_downvotes = true;
///parameters for 'smt_setup_operation'
int64_t max_supply = 0;
CHAINBASE_UNPACK_CONSTRUCTOR(smt_token_object);
};
// ICO (initial contribution offering) parameters and progress for one SMT.
class smt_ico_object : public object< smt_ico_object_type, smt_ico_object >
{
CHAINBASE_OBJECT( smt_ico_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( smt_ico_object )
asset_symbol_type symbol;
hive::protocol::
smt_capped_generation_policy capped_generation_policy;
time_point_sec contribution_begin_time;
time_point_sec contribution_end_time;
time_point_sec launch_time;
// -1 means the cap has not been revealed yet.
share_type hive_units_soft_cap = -1;
share_type hive_units_hard_cap = -1;
asset contributed = asset( 0, HIVE_SYMBOL );
CHAINBASE_UNPACK_CONSTRUCTOR(smt_ico_object);
};
// One scheduled token-emission rule for an SMT: starting at schedule_time,
// interval_count emissions spaced interval_seconds apart, interpolating
// between the left (lep_*) and right (rep_*) endpoint parameters.
class smt_token_emissions_object : public object< smt_token_emissions_object_type, smt_token_emissions_object >
{
CHAINBASE_OBJECT( smt_token_emissions_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( smt_token_emissions_object )
asset_symbol_type symbol;
time_point_sec schedule_time = HIVE_GENESIS_TIME;
hive::protocol::smt_emissions_unit emissions_unit;
uint32_t interval_seconds = 0;
uint32_t interval_count = 0;
time_point_sec lep_time = HIVE_GENESIS_TIME;
time_point_sec rep_time = HIVE_GENESIS_TIME;
asset lep_abs_amount = asset();
asset rep_abs_amount = asset();
// Relative amounts are numerator / 2^rel_amount_denom_bits.
uint32_t lep_rel_amount_numerator = 0;
uint32_t rep_rel_amount_numerator = 0;
uint8_t rel_amount_denom_bits = 0;
CHAINBASE_UNPACK_CONSTRUCTOR(smt_token_emissions_object);
};
// A single ICO contribution: (symbol, contributor, contribution_id) is
// unique (see smt_contribution_index below).
class smt_contribution_object : public object< smt_contribution_object_type, smt_contribution_object >
{
CHAINBASE_OBJECT( smt_contribution_object );
public:
// Allocator parameter `a` is unused but required by chainbase construction.
// Note: contribution_id has no default member initializer; the only
// constructor always sets it.
template< typename Allocator >
smt_contribution_object( allocator< Allocator > a, uint64_t _id,
const account_name_type& _contributor, const asset& _contribution, const asset_symbol_type& _smt_symbol, uint32_t _contribution_id )
: id( _id ), symbol( _smt_symbol ), contributor( _contributor ), contribution_id( _contribution_id ), contribution( _contribution )
{}
asset_symbol_type symbol;
account_name_type contributor;
uint32_t contribution_id;
HIVE_asset contribution;
CHAINBASE_UNPACK_CONSTRUCTOR(smt_contribution_object);
};
// Index tags for smt_contribution_index.
struct by_symbol_contributor;
struct by_contributor;
struct by_symbol_id;
// Contributions indexed by id, by (symbol, contributor, contribution_id) and by (symbol, id);
// the by_contributor permutation is only built on non-LOW_MEM nodes.
typedef multi_index_container <
smt_contribution_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< smt_contribution_object, smt_contribution_object::id_type, &smt_contribution_object::get_id > >,
ordered_unique< tag< by_symbol_contributor >,
composite_key< smt_contribution_object,
member< smt_contribution_object, asset_symbol_type, &smt_contribution_object::symbol >,
member< smt_contribution_object, account_name_type, &smt_contribution_object::contributor >,
member< smt_contribution_object, uint32_t, &smt_contribution_object::contribution_id >
>
>,
ordered_unique< tag< by_symbol_id >,
composite_key< smt_contribution_object,
member< smt_contribution_object, asset_symbol_type, &smt_contribution_object::symbol >,
const_mem_fun< smt_contribution_object, smt_contribution_object::id_type, &smt_contribution_object::get_id >
>
>
#ifndef IS_LOW_MEM
,
ordered_unique< tag< by_contributor >,
composite_key< smt_contribution_object,
member< smt_contribution_object, account_name_type, &smt_contribution_object::contributor >,
member< smt_contribution_object, asset_symbol_type, &smt_contribution_object::symbol >,
member< smt_contribution_object, uint32_t, &smt_contribution_object::contribution_id >
>
>
#endif
>,
allocator< smt_contribution_object >
> smt_contribution_index;
// Index tags shared by the SMT containers below.
struct by_symbol;
struct by_control_account;
// Tokens indexed by id, unique liquid symbol, and (control_account, liquid_symbol).
typedef multi_index_container <
smt_token_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< smt_token_object, smt_token_object::id_type, &smt_token_object::get_id > >,
ordered_unique< tag< by_symbol >,
member< smt_token_object, asset_symbol_type, &smt_token_object::liquid_symbol > >,
ordered_unique< tag< by_control_account >,
composite_key< smt_token_object,
member< smt_token_object, account_name_type, &smt_token_object::control_account >,
member< smt_token_object, asset_symbol_type, &smt_token_object::liquid_symbol >
>
>
>,
allocator< smt_token_object >
> smt_token_index;
// ICO state indexed by id and by unique SMT symbol (at most one ICO record per SMT).
typedef multi_index_container <
smt_ico_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< smt_ico_object, smt_ico_object::id_type, &smt_ico_object::get_id > >,
ordered_unique< tag< by_symbol >,
member< smt_ico_object, asset_symbol_type, &smt_ico_object::symbol > >
>,
allocator< smt_ico_object >
> smt_ico_index;
struct by_symbol_time;
// Emission schedule entries indexed by id and by (symbol, schedule_time); uniqueness of the
// composite key means a token cannot have two emissions scheduled at the same instant.
typedef multi_index_container <
smt_token_emissions_object,
indexed_by <
ordered_unique< tag< by_id >,
const_mem_fun< smt_token_emissions_object, smt_token_emissions_object::id_type, &smt_token_emissions_object::get_id > >,
ordered_unique< tag< by_symbol_time >,
composite_key< smt_token_emissions_object,
member< smt_token_emissions_object, asset_symbol_type, &smt_token_emissions_object::symbol >,
member< smt_token_emissions_object, time_point_sec, &smt_token_emissions_object::schedule_time >
>
>
>,
allocator< smt_token_emissions_object >
> smt_token_emissions_index;
} } // namespace hive::chain
// fc serialization: the reflected member lists below must stay in sync with the
// corresponding class definitions above — adding/removing a field requires updating both.
FC_REFLECT_ENUM( hive::chain::smt_phase,
(account_elevated)
(setup_completed)
(contribution_begin_time_completed)
(contribution_end_time_completed)
(launch_failed)
(launch_success)
)
FC_REFLECT( hive::chain::smt_token_object::smt_market_maker_state,
(reserve_ratio)
(hive_balance)
(token_balance)
)
FC_REFLECT( hive::chain::smt_token_object,
(id)
(liquid_symbol)
(control_account)
(phase)
(current_supply)
(total_vesting_fund_smt)
(total_vesting_shares)
(pending_rewarded_vesting_shares)
(pending_rewarded_vesting_smt)
(allow_downvotes)
(market_maker)
(allow_voting)
(cashout_window_seconds)
(reverse_auction_window_seconds)
(vote_regeneration_period_seconds)
(votes_per_regeneration_period)
(content_constant)
(percent_curation_rewards)
(author_reward_curve)
(curation_reward_curve)
(max_supply)
)
FC_REFLECT( hive::chain::smt_ico_object,
(id)
(symbol)
(capped_generation_policy)
(contribution_begin_time)
(contribution_end_time)
(launch_time)
(hive_units_soft_cap)
(hive_units_hard_cap)
(contributed)
)
FC_REFLECT( hive::chain::smt_token_emissions_object,
(id)
(symbol)
(schedule_time)
(emissions_unit)
(interval_seconds)
(interval_count)
(lep_time)
(rep_time)
(lep_abs_amount)
(rep_abs_amount)
(lep_rel_amount_numerator)
(rep_rel_amount_numerator)
(rel_amount_denom_bits)
)
FC_REFLECT( hive::chain::smt_contribution_object,
(id)
(symbol)
(contributor)
(contribution_id)
(contribution)
)
// Bind each chain object type to its multi-index container for chainbase.
CHAINBASE_SET_INDEX_TYPE( hive::chain::smt_token_object, hive::chain::smt_token_index )
CHAINBASE_SET_INDEX_TYPE( hive::chain::smt_ico_object, hive::chain::smt_ico_index )
CHAINBASE_SET_INDEX_TYPE( hive::chain::smt_token_emissions_object, hive::chain::smt_token_emissions_index )
CHAINBASE_SET_INDEX_TYPE( hive::chain::smt_contribution_object, hive::chain::smt_contribution_index )
#endif

View File

@ -0,0 +1,156 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/chain/hive_object_types.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <hive/protocol/asset.hpp>
namespace hive { namespace chain {
using hive::protocol::asset;
// A funding proposal: requests `daily_pay` (HBD) be paid to `receiver` between start_date and end_date.
class proposal_object : public object< proposal_object_type, proposal_object >
{
CHAINBASE_OBJECT( proposal_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( proposal_object, (subject)(permlink) )
//additional key, at this moment has the same value as `id` member
uint32_t proposal_id;
// account that created the proposal
account_name_type creator;
// account being funded
account_name_type receiver;
// start_date (when the proposal will begin paying out if it gets enough vote weight)
time_point_sec start_date;
// end_date (when the proposal expires and can no longer pay out)
time_point_sec end_date;
//daily_pay (the amount of HBD that is being requested to be paid out daily)
asset daily_pay;
//subject (a very brief description or title for the proposal)
shared_string subject;
//permlink (a link to a page describing the work proposal in depth, generally this will probably be to a Hive post).
shared_string permlink;
//This will be recalculated every maintenance period
uint64_t total_votes = 0;
// NOTE(review): presumably a soft-delete flag kept until cleanup — confirm against removal logic
bool removed = false;
// End date extended by the maintenance-cleanup grace period; used as the by_end_date index key.
time_point_sec get_end_date_with_delay() const
{
time_point_sec ret = end_date;
ret += HIVE_PROPOSAL_MAINTENANCE_CLEANUP;
return ret;
}
CHAINBASE_UNPACK_CONSTRUCTOR(proposal_object, (subject)(permlink));
};
// One (voter, proposal) approval; uniqueness is enforced by the by_voter_proposal index.
class proposal_vote_object : public object< proposal_vote_object_type, proposal_vote_object>
{
CHAINBASE_OBJECT( proposal_vote_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( proposal_vote_object )
//account that made voting
account_name_type voter;
//the voter voted for this proposal number
uint32_t proposal_id; //note: it cannot be proposal_id_type because we are searching using proposal_object::proposal_id, not proposal_object::id
CHAINBASE_UNPACK_CONSTRUCTOR(proposal_vote_object);
};
// Index tags for proposal_index.
struct by_proposal_id;
struct by_start_date;
struct by_end_date;
struct by_creator;
struct by_total_votes;
// Proposals indexed by id, external proposal_id, start/end dates, creator and total votes;
// every composite key appends proposal_id to make the ordering unique and deterministic.
typedef multi_index_container<
proposal_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< proposal_object, proposal_object::id_type, &proposal_object::get_id > >,
ordered_unique< tag< by_proposal_id >,
member< proposal_object, uint32_t, &proposal_object::proposal_id > >,
ordered_unique< tag< by_start_date >,
composite_key< proposal_object,
member< proposal_object, time_point_sec, &proposal_object::start_date >,
member< proposal_object, uint32_t, &proposal_object::proposal_id >
>,
composite_key_compare< std::less< time_point_sec >, std::less< uint32_t > >
>,
// note: keyed on get_end_date_with_delay(), not the raw end_date
ordered_unique< tag< by_end_date >,
composite_key< proposal_object,
const_mem_fun< proposal_object, time_point_sec, &proposal_object::get_end_date_with_delay >,
member< proposal_object, uint32_t, &proposal_object::proposal_id >
>,
composite_key_compare< std::less< time_point_sec >, std::less< uint32_t > >
>,
ordered_unique< tag< by_creator >,
composite_key< proposal_object,
member< proposal_object, account_name_type, &proposal_object::creator >,
member< proposal_object, uint32_t, &proposal_object::proposal_id >
>,
composite_key_compare< std::less< account_name_type >, std::less< uint32_t > >
>,
ordered_unique< tag< by_total_votes >,
composite_key< proposal_object,
member< proposal_object, uint64_t, &proposal_object::total_votes >,
member< proposal_object, uint32_t, &proposal_object::proposal_id >
>,
composite_key_compare< std::less< uint64_t >, std::less< uint32_t > >
>
>,
allocator< proposal_object >
> proposal_index;
// Index tags for proposal_vote_index.
struct by_voter_proposal;
struct by_proposal_voter;
// Votes indexed by id and by both orderings of the (voter, proposal_id) pair, so lookups
// are efficient whether iterating a voter's proposals or a proposal's voters.
typedef multi_index_container<
proposal_vote_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< proposal_vote_object, proposal_vote_object::id_type, &proposal_vote_object::get_id > >,
ordered_unique< tag< by_voter_proposal >,
composite_key< proposal_vote_object,
member< proposal_vote_object, account_name_type, &proposal_vote_object::voter >,
member< proposal_vote_object, uint32_t, &proposal_vote_object::proposal_id >
>
>,
ordered_unique< tag< by_proposal_voter >,
composite_key< proposal_vote_object,
member< proposal_vote_object, uint32_t, &proposal_vote_object::proposal_id >,
member< proposal_vote_object, account_name_type, &proposal_vote_object::voter >
>
>
>,
allocator< proposal_vote_object >
> proposal_vote_index;
} } // hive::chain
#ifdef ENABLE_STD_ALLOCATOR
namespace mira {
// proposal_vote_object contains only fixed-size members (name + uint32_t), so mira
// may serialize it as a static-length record.
template<> struct is_static_length< hive::chain::proposal_vote_object > : public boost::true_type {};
} // mira
#endif
// fc reflection + chainbase index binding; member lists must match the classes above.
FC_REFLECT( hive::chain::proposal_object, (id)(proposal_id)(creator)(receiver)(start_date)(end_date)(daily_pay)(subject)(permlink)(total_votes)(removed) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::proposal_object, hive::chain::proposal_index )
FC_REFLECT( hive::chain::proposal_vote_object, (id)(voter)(proposal_id) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::proposal_vote_object, hive::chain::proposal_vote_index )

View File

@ -0,0 +1,18 @@
#pragma once
#include <hive/protocol/block.hpp>
namespace hive { namespace chain {
/// Bundles a signed transaction with its precomputed id for signal handlers.
struct transaction_notification
{
  /// Captures a reference to @p tx and caches its id; @p tx must outlive this object.
  transaction_notification( const hive::protocol::signed_transaction& tx )
    : transaction_id( tx.id() ), transaction( tx )
  {}

  hive::protocol::transaction_id_type transaction_id;
  const hive::protocol::signed_transaction& transaction; ///< non-owning reference
};
} }

View File

@ -0,0 +1,84 @@
#pragma once
#include <hive/chain/hive_fwd.hpp>
#include <hive/protocol/transaction.hpp>
#include <hive/chain/buffer_type.hpp>
#include <hive/chain/hive_object_types.hpp>
//#include <boost/multi_index/hashed_index.hpp>
namespace hive { namespace chain {
using hive::protocol::signed_transaction;
using chainbase::t_vector;
/**
* The purpose of this object is to enable the detection of duplicate transactions. When a transaction is included
* in a block a transaction_object is added. At the end of block processing all transaction_objects that have
* expired can be removed from the index.
*/
// Duplicate-transaction guard record; pruned once `expiration` passes (see by_expiration index).
class transaction_object : public object< transaction_object_type, transaction_object >
{
CHAINBASE_OBJECT( transaction_object );
public:
CHAINBASE_DEFAULT_CONSTRUCTOR( transaction_object, (packed_trx) )
typedef buffer_type t_packed_trx;
// serialized transaction bytes
t_packed_trx packed_trx;
transaction_id_type trx_id;
time_point_sec expiration;
// NOTE(review): other chain objects in this codebase declare CHAINBASE_UNPACK_CONSTRUCTOR;
// this one does not — confirm that is intentional.
};
// Index tags for transaction_index.
struct by_expiration;
struct by_trx_id;
// Pending-duplicate records indexed by id, unique trx_id, and (expiration, id) for pruning.
typedef multi_index_container<
transaction_object,
indexed_by<
ordered_unique< tag< by_id >,
const_mem_fun< transaction_object, transaction_object::id_type, &transaction_object::get_id > >,
ordered_unique< tag< by_trx_id >,
member< transaction_object, transaction_id_type, &transaction_object::trx_id > >,
ordered_unique< tag< by_expiration >,
composite_key< transaction_object,
member<transaction_object, time_point_sec, &transaction_object::expiration >,
const_mem_fun<transaction_object, transaction_object::id_type, &transaction_object::get_id >
>
>
>,
allocator< transaction_object >
> transaction_index;
} } // hive::chain
// fc reflection + chainbase index binding for transaction_object.
FC_REFLECT( hive::chain::transaction_object, (id)(packed_trx)(trx_id)(expiration) )
CHAINBASE_SET_INDEX_TYPE( hive::chain::transaction_object, hive::chain::transaction_index )
namespace helpers
{
  /// Statistics specialization for transaction_index: besides the generic static data,
  /// it accounts for the heap buffer each stored transaction keeps in `packed_trx`.
  template <>
  class index_statistic_provider<hive::chain::transaction_index>
  {
  public:
    typedef hive::chain::transaction_index IndexType;
    typedef typename hive::chain::transaction_object::t_packed_trx t_packed_trx;

    index_statistic_info gather_statistics(const IndexType& index, bool onlyStaticInfo) const
    {
      index_statistic_info result;
      gather_index_static_data(index, &result);

      if(!onlyStaticInfo)
      {
        // Sum the capacity (not size) of every packed-transaction buffer.
        for(const auto& obj : index)
          result._item_additional_allocation +=
            obj.packed_trx.capacity() * sizeof(t_packed_trx::value_type);
      }

      return result;
    }
  };
} /// namespace helpers

View File

@ -0,0 +1,117 @@
#pragma once
#include <fc/time.hpp>
#include <fc/variant.hpp>
#include <fc/reflect/variant.hpp>
#include <fc/exception/exception.hpp>
#include <fc/io/json.hpp>
#include <sys/time.h>
#include <utility>
namespace hive { namespace chain { namespace util {
// Metafunction yielding the return type of TCntr::emplace(value_type); needed because
// emplace returns pair<iterator,bool> for unique containers but a bare iterator for multisets.
template <typename TCntr>
struct emplace_ret_value
{
using type = decltype(std::declval<TCntr>().emplace(std::declval<typename TCntr::value_type>()));
};
// Accumulates per-operation timing samples and dumps them to JSON files
// (member functions are defined in the corresponding .cpp).
class advanced_benchmark_dumper
{
public:
// Time accumulated for one operation name; ordered by name so std::set de-duplicates.
// `time` is mutable so it can be bumped through the set's const references via inc().
struct item
{
std::string op_name;
mutable uint64_t time;
item( std::string _op_name, uint64_t _time ): op_name( _op_name ), time( _time ) {}
bool operator<( const item& obj ) const { return op_name < obj.op_name; }
void inc( uint64_t _time ) const { time += _time; }
};
// Same data ordered by descending time (note operator< compares with `>`), used for ranked output.
struct ritem
{
std::string op_name;
uint64_t time;
ritem( std::string _op_name, uint64_t _time ): op_name( _op_name ), time( _time ){}
bool operator<( const ritem& obj ) const { return time > obj.time; }
};
// A collection of items plus the grand-total time across all of them.
template< typename COLLECTION >
struct total_info
{
uint64_t total_time = 0;
COLLECTION items;
total_info(){}
total_info( uint64_t _total_time ): total_time( _total_time ) {}
void inc( uint64_t _time ) { total_time += _time; }
// Forwards to the underlying collection's emplace (return type varies; see emplace_ret_value).
template <typename... TArgs>
typename emplace_ret_value<COLLECTION>::type emplace(TArgs&&... args)
{ return items.emplace(std::forward<TArgs>(args)...); }
};
private:
// static members are defined in the .cpp
static uint32_t cnt;
static std::string virtual_operation_name;
static std::string apply_context_name;
bool enabled = false;
// flush counters: dump is triggered after flush_max recorded measurements
uint32_t flush_cnt = 0;
uint32_t flush_max = 500000;
uint64_t time_begin = 0;
std::string file_name;
total_info< std::set< item > > info;
template< typename COLLECTION >
void dump_impl( const total_info< COLLECTION >& src, const std::string& src_file_name );
public:
advanced_benchmark_dumper();
~advanced_benchmark_dumper();
static std::string& get_virtual_operation_name(){ return virtual_operation_name; }
// Builds a "pre--->desc1--->desc2" / "post--->desc1--->desc2" label for a measurement.
template< bool IS_PRE_OPERATION >
static std::string generate_desc( const std::string& desc1, const std::string& desc2 )
{
std::stringstream s;
s << ( IS_PRE_OPERATION ? "pre--->" : "post--->" ) << desc1 << "--->" << desc2;
return s.str();
}
void set_enabled( bool val ) { enabled = val; }
bool is_enabled() { return enabled; }
// begin()/end() bracket a measurement; end() files the elapsed time under `str`.
void begin();
template< bool APPLY_CONTEXT = false >
void end( const std::string& str );
void dump();
};
} } } // hive::chain::util
// fc reflection so the dumper's records and both report collections serialize to JSON.
FC_REFLECT( hive::chain::util::advanced_benchmark_dumper::item, (op_name)(time) )
FC_REFLECT( hive::chain::util::advanced_benchmark_dumper::ritem, (op_name)(time) )
FC_REFLECT( hive::chain::util::advanced_benchmark_dumper::total_info< std::set< hive::chain::util::advanced_benchmark_dumper::item > >, (total_time)(items) )
FC_REFLECT( hive::chain::util::advanced_benchmark_dumper::total_info< std::multiset< hive::chain::util::advanced_benchmark_dumper::ritem > >, (total_time)(items) )

View File

@ -0,0 +1,26 @@
#pragma once
#include <hive/protocol/asset.hpp>
namespace hive { namespace chain { namespace util {
using hive::protocol::asset;
using hive::protocol::price;
/// Converts a HIVE amount to HBD at price @p p; a null price yields 0 HBD.
/// @throws fc::exception if @p hive is not denominated in HIVE.
inline asset to_hbd( const price& p, const asset& hive )
{
  FC_ASSERT( hive.symbol == HIVE_SYMBOL );
  return p.is_null() ? asset( 0, HBD_SYMBOL ) : hive * p;
}
/// Converts an HBD amount to HIVE at price @p p; a null price yields 0 HIVE.
/// @throws fc::exception if @p hbd is not denominated in HBD.
inline asset to_hive( const price& p, const asset& hbd )
{
  FC_ASSERT( hbd.symbol == HBD_SYMBOL );
  return p.is_null() ? asset( 0, HIVE_SYMBOL ) : hbd * p;
}
} } }

View File

@ -0,0 +1,52 @@
#pragma once
#include <chainbase/allocators.hpp>
#include <hive/chain/account_object.hpp>
#include <hive/chain/database.hpp>
#include <hive/chain/index.hpp>
namespace hive { namespace chain {
// Per-account vote delta accumulated during delayed-voting processing.
// `val` is mutable so it can be adjusted through a std::set's const references.
struct votes_update_data
{
bool withdraw_executor = false;
mutable share_type val = 0;
const account_object* account = nullptr;
};
// Orders votes_update_data entries by account id; asserts both sides carry a valid account.
struct votes_update_data_less
{
bool operator()( const votes_update_data& obj1, const votes_update_data& obj2 ) const
{
FC_ASSERT( obj1.account && obj2.account, "unexpected error: ${error}", ("error", delayed_voting_messages::object_is_null ) );
return obj1.account->get_id() < obj2.account->get_id();
}
};
// Facade over the database for delayed-voting bookkeeping (definitions in the .cpp).
class delayed_voting
{
public:
using votes_update_data_items = std::set< votes_update_data, votes_update_data_less >;
using opt_votes_update_data_items = fc::optional< votes_update_data_items >;
private:
chain::database& db;
void erase_delayed_value( const account_object& account, const ushare_type val );
public:
delayed_voting( chain::database& _db ) : db( _db ){}
void add_delayed_value( const account_object& account, const time_point_sec& head_time, const ushare_type val );
void add_votes( opt_votes_update_data_items& items, const bool withdraw_executor, const share_type val, const account_object& account );
fc::optional< ushare_type > update_votes( const opt_votes_update_data_items& items, const time_point_sec& head_time );
// processes all pending delayed votes up to head_time
void run( const fc::time_point_sec& head_time );
};
} } // namespace hive::chain

View File

@ -0,0 +1,132 @@
#pragma once
#include <hive/chain/hive_object_types.hpp>
namespace hive { namespace chain {
// Shared error strings used in FC_ASSERT messages across the delayed-voting code.
namespace delayed_voting_messages
{
constexpr const char* incorrect_head_time = "head time must be greater or equal to last delayed voting time";
constexpr const char* incorrect_sum_greater_equal = "unexpected error: sum of delayed votings must be greater or equal to zero";
constexpr const char* incorrect_sum_equal = "unexpected error: sum of delayed votings must be equal to zero";
constexpr const char* incorrect_erased_votes = "unexpected error: number votes to be erased must be greater or equal to sum of delayed votings";
constexpr const char* object_is_null = "unexpected error: objects are empty";
constexpr const char* incorrect_votes_update = "unexpected error: votes updating is incorrect";
constexpr const char* incorrect_withdraw_data = "unexpected error: withdraw data is inconsistent";
}
// One bucket of delayed votes: the amount `val` accumulated at/around `time`.
struct delayed_votes_data
{
time_point_sec time;
ushare_type val = 0;
bool operator==( const delayed_votes_data& obj ) const
{
return ( time == obj.time ) && ( val == obj.val );
}
};
// Stateless helpers maintaining the invariant: `sum` always equals the sum of `val`
// over every bucket in `items` (a chronologically ordered sequence of delayed_votes_data).
struct delayed_voting_processor
{
// Adds `val` at `head_time`: either merged into the newest bucket or appended as a new
// bucket once HIVE_DELAYED_VOTING_INTERVAL_SECONDS have elapsed since that bucket's time.
template< typename COLLECTION_TYPE >
static void add( COLLECTION_TYPE& items, ushare_type& sum, const time_point_sec& head_time, const ushare_type val )
{
/*
A collection is filled gradually - every item in `items` is created each HIVE_DELAYED_VOTING_INTERVAL_SECONDS time.
Input data:
2020-03-10 00:30:00 1000
2020-03-10 05:00:00 2000
2020-03-10 11:00:00 7000
2020-03-11 01:00:00 6000
2020-03-12 02:00:00 8000
2020-03-12 02:00:00 200000
Result:
items[0] = {2020-03-10, 10000}
items[1] = {2020-03-11, 6000}
items[2] = {2020-03-12, 208000}
*/
if( val == 0 )
return;
if( items.empty() )
{
sum += val;
items.emplace_back( delayed_votes_data{ head_time, val } );
return;
}
delayed_votes_data& back_obj = items.back();
// time must be monotonic: a new entry can never predate the newest bucket
FC_ASSERT( head_time >= back_obj.time, "unexpected error: ${error}", ("error", delayed_voting_messages::incorrect_head_time ) );
sum += val;
if( head_time >= back_obj.time + HIVE_DELAYED_VOTING_INTERVAL_SECONDS )
{
items.emplace_back( delayed_votes_data{ head_time, val } );
}
else
{
back_obj.val += val;
}
}
// Drops the oldest bucket, deducting its value from `sum`; on an empty collection
// only asserts that `sum` is already zero.
template< typename COLLECTION_TYPE >
static void erase_front( COLLECTION_TYPE& items, ushare_type& sum )
{
if( !items.empty() )
{
auto _begin = items.begin();
FC_ASSERT( sum >= _begin->val, "unexpected error: ${error}", ("error", delayed_voting_messages::incorrect_sum_greater_equal ) );
sum -= _begin->val;
items.erase( items.begin() );
}
else
FC_ASSERT( sum == 0, "unexpected error: ${error}", ("error", delayed_voting_messages::incorrect_sum_equal ) );
}
// Removes `count` votes starting from the NEWEST bucket (back of the collection),
// shrinking or popping buckets until the amount is consumed. `count` must not exceed `sum`.
template< typename COLLECTION_TYPE >
static void erase( COLLECTION_TYPE& items, ushare_type& sum, ushare_type count )
{
if( count == 0 )
return;
FC_ASSERT( count <= sum, "unexpected error: ${error}", ("error", delayed_voting_messages::incorrect_erased_votes ) );
if( sum == count )
{
// removing everything: reset in O(1) instead of looping
sum = 0;
items.clear();
}
else
{
sum -= count;
while( true )
{
auto& obj = items.back();
if( count >= obj.val )
{
count -= obj.val;
items.pop_back();
}
else
{
obj.val -= count;
break;
}
}
}
}
};
} } // namespace hive::chain
// fc reflection for delayed_votes_data (must match the member list above).
FC_REFLECT( hive::chain::delayed_votes_data,
(time)(val)
)

View File

@ -0,0 +1,38 @@
#pragma once
#include <hive/protocol/asset.hpp>

#include <set>
#include <string>
namespace hive { namespace chain {
using hive::protocol::asset;
// Helper for hardfork-23 processing: gathers per-account balance snapshots
// into a set keyed by account name (gather_balance is defined in the .cpp).
class hf23_helper
{
public:
  // Snapshot of one account's liquid balances (hbd_balance in HBD; `balance`
  // presumably the HIVE balance — confirm against gather_balance's callers).
  struct hf23_item
  {
    std::string name;
    asset balance;
    asset hbd_balance;
  };

  // Orders snapshots by account name so hf23_items acts as a name-keyed set.
  struct cmp_hf23_item
  {
    bool operator()( const hf23_item& a, const hf23_item& b ) const
    {
      // std::string's operator< gives the same lexicographic order as the previous
      // std::strcmp( a.name.c_str(), b.name.c_str() ) call for NUL-free names, while
      // not depending on <cstring> (which this header never included) and comparing
      // the full string even if it contained embedded NUL characters.
      return a.name < b.name;
    }
  };

  using hf23_items = std::set< hf23_item, cmp_hf23_item >;

public:
  // Inserts a snapshot for `name` with the given balances into `source`.
  static void gather_balance( hf23_items& source, const std::string& name, const asset& balance, const asset& hbd_balance );
};
} } // namespace hive::chain

Some files were not shown because too many files have changed in this diff Show More