initial commit

This commit is contained in:
Ben Teitelbaum 2019-01-16 10:22:44 -08:00
commit 19a59fbb47
48 changed files with 3038 additions and 0 deletions

3
.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
terraform/.terraform
db/messages
db/*.sqlite3

118
.gitlab-ci.yml Normal file
View file

@ -0,0 +1,118 @@
variables:
GIT_SUBMODULE_STRATEGY: none
CI_DISPOSABLE_ENVIRONMENT: "true"
image: blockstream/gcloud-docker@sha256:31c1a01d143558f0ba5677d121891a958fa600195679fe325980ec72e5264f2a
stages:
- build
- plan
- deploy
before_script:
- TMPF=$(mktemp) || exit 1
- echo $GCLOUD_KEY > $TMPF
- export GOOGLE_APPLICATION_CREDENTIALS=$TMPF
- gcloud auth activate-service-account --key-file=$TMPF
- gcloud auth list
- gcloud --version
build:
stage: build
only:
- master@satellite/ionosphere
script:
- docker build -f Dockerfile -t us.gcr.io/blockstream-store/ionosphere:latest -t us.gcr.io/blockstream-store/ionosphere:$CI_COMMIT_SHA .
- docker push us.gcr.io/blockstream-store/ionosphere:$CI_COMMIT_SHA
- docker push us.gcr.io/blockstream-store/ionosphere:latest
- echo "Building Ionosphere SSE image"
- docker build -f Dockerfile.sse -t us.gcr.io/blockstream-store/ionosphere-sse:latest -t us.gcr.io/blockstream-store/ionosphere-sse:$CI_COMMIT_SHA sse/
- docker push us.gcr.io/blockstream-store/ionosphere-sse:$CI_COMMIT_SHA
- docker push us.gcr.io/blockstream-store/ionosphere-sse:latest
plan:
stage: plan
only:
- branches@satellite/ionosphere
except:
- master@satellite/ionosphere
- production@satellite/ionosphere
- cleanup_staging@satellite/ionosphere
script:
- (cd terraform
&& terraform init -input=false
&& terraform workspace select staging
&& terraform plan
-var "ionosphere_docker=us.gcr.io/blockstream-store/ionosphere:$CI_COMMIT_SHA"
-var "ionosphere_sse_docker=us.gcr.io/blockstream-store/ionosphere-sse:$CI_COMMIT_SHA"
-var "region=$REGION"
-var "zone=$ZONE"
-var "instance_type=$INSTANCE_TYPE"
-var "host=$HOST_STAGING"
-var "ssl_cert=$SSL_CERT_STAGING"
-var "timeout=$TIMEOUT"
-var "prom_service_acct=$PROM_SA"
-var "opsgenie_key=$OPSGENIE_KEY"
-var "rpcuser=$RPCUSER"
-var "rpcpass=$RPCPASS"
-input=false)
deploy_staging:
stage: deploy
only:
- master@satellite/ionosphere
script:
- (cd terraform
&& terraform init -input=false
&& terraform workspace select staging
&& terraform apply
-var "ionosphere_docker=us.gcr.io/blockstream-store/ionosphere:$CI_COMMIT_SHA"
-var "ionosphere_sse_docker=us.gcr.io/blockstream-store/ionosphere-sse:$CI_COMMIT_SHA"
-var "region=$REGION"
-var "zone=$ZONE"
-var "instance_type=$INSTANCE_TYPE"
-var "host=$HOST_STAGING"
-var "ssl_cert=$SSL_CERT_STAGING"
-var "timeout=$TIMEOUT"
-var "prom_service_acct=$PROM_SA"
-var "opsgenie_key=$OPSGENIE_KEY"
-var "rpcuser=$RPCUSER"
-var "rpcpass=$RPCPASS"
-input=false -auto-approve)
deploy_production:
stage: deploy
only:
- production@satellite/ionosphere
script:
- (cd terraform
&& terraform init -input=false
&& terraform workspace select prod
&& terraform apply
-var "ionosphere_docker=us.gcr.io/blockstream-store/ionosphere:$CI_COMMIT_SHA"
-var "ionosphere_sse_docker=us.gcr.io/blockstream-store/ionosphere-sse:$CI_COMMIT_SHA"
-var "region=$REGION"
-var "zone=$ZONE"
-var "instance_type=$INSTANCE_TYPE"
-var "host=$HOST"
-var "ssl_cert=$SSL_CERT"
-var "timeout=$TIMEOUT"
-var "prom_service_acct=$PROM_SA"
-var "opsgenie_key=$OPSGENIE_KEY"
-var "rpcuser=$RPCUSER"
-var "rpcpass=$RPCPASS"
-input=false -auto-approve)
cleanup_staging:
stage: deploy
image:
name: hashicorp/terraform:light
entrypoint: [""]
only:
- cleanup_staging@satellite/ionosphere
script:
- (cd terraform && terraform init -input=false &&
terraform workspace select staging &&
terraform destroy
-target module.blc.google_compute_instance_group_manager.blc
-auto-approve)

32
Dockerfile Normal file
View file

@ -0,0 +1,32 @@
FROM alpine:latest

# Ruby runtime packages and shared-library/system dependencies.
ENV RUBY_PACKAGES ruby ruby-io-console ruby-irb ruby-rake ruby-bundler ruby-bigdecimal ruby-json
ENV RUBY_DEPS libstdc++ tzdata bash ca-certificates openssl sqlite sqlite-dev

# Run the app as an unprivileged user (uid/gid 1000).
RUN addgroup -g 1000 ionosphere \
    && adduser -u 1000 -D -G ionosphere ionosphere

RUN apk update && \
    apk upgrade && \
    apk --update add $RUBY_PACKAGES $RUBY_DEPS && \
    echo 'gem: --no-document' > /etc/gemrc

# /data/ionosphere is the persistent volume mount point (db + messages).
RUN mkdir /app && \
    mkdir -p /data/ionosphere

# Copy only the gem manifests first so the `bundle install` layer is cached
# independently of application-code changes.
COPY Gemfile /app
COPY Gemfile.lock /app
WORKDIR /app

# install packages needed for building compiled gems; install gems; then delete build dependencies to keep Docker image small
ENV BUILD_PACKAGES sudo build-base ruby-dev libc-dev linux-headers openssl-dev
RUN apk --update add --virtual build_deps $BUILD_PACKAGES && \
    bundle install && \
    apk del build_deps && \
    rm -rf /var/cache/apk/*

COPY . /app
RUN chown -R ionosphere:ionosphere /app
USER ionosphere

CMD ./docker_entrypoint.sh

14
Dockerfile.sse Normal file
View file

@ -0,0 +1,14 @@
FROM node:8
# Create app directory
WORKDIR /usr/src/app
# Install app dependencies
COPY package*.json ./
RUN npm install
# Bundle app source
COPY . .
CMD [ "npm", "start" ]

17
Gemfile Normal file
View file

@ -0,0 +1,17 @@
source 'https://rubygems.org'

gem "sinatra" # lightweight web application framework
gem "sinatra-param", require: "sinatra/param" # declarative request-parameter validation
gem "sinatra-activerecord" # ActiveRecord integration for Sinatra
gem "sqlite3" # database adapter (see config/database.yml)
gem "faraday" # HTTP client library
gem 'daemons' # process daemonization (see daemons/)
gem 'puma' # Rack web server
gem 'rake'
gem 'aasm' # state-machine DSL
gem 'redis' # Redis client (pub/sub; see REDIS_URI in constants.rb)

group :test do
  gem 'minitest'
  gem 'rack-test'
end

69
Gemfile.lock Normal file
View file

@ -0,0 +1,69 @@
GEM
remote: https://rubygems.org/
specs:
aasm (5.0.1)
concurrent-ruby (~> 1.0)
activemodel (5.2.2)
activesupport (= 5.2.2)
activerecord (5.2.2)
activemodel (= 5.2.2)
activesupport (= 5.2.2)
arel (>= 9.0)
activesupport (5.2.2)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 0.7, < 2)
minitest (~> 5.1)
tzinfo (~> 1.1)
arel (9.0.0)
concurrent-ruby (1.1.3)
daemons (1.3.0)
faraday (0.15.4)
multipart-post (>= 1.2, < 3)
i18n (1.2.0)
concurrent-ruby (~> 1.0)
minitest (5.11.3)
multipart-post (2.0.0)
mustermann (1.0.3)
puma (3.12.0)
rack (2.0.6)
rack-protection (2.0.4)
rack
rack-test (1.1.0)
rack (>= 1.0, < 3)
rake (12.3.2)
redis (4.0.3)
sinatra (2.0.4)
mustermann (~> 1.0)
rack (~> 2.0)
rack-protection (= 2.0.4)
tilt (~> 2.0)
sinatra-activerecord (2.0.13)
activerecord (>= 3.2)
sinatra (>= 1.0)
sinatra-param (1.5.0)
sinatra (>= 1.3)
sqlite3 (1.3.13)
thread_safe (0.3.6)
tilt (2.0.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
PLATFORMS
ruby
DEPENDENCIES
aasm
daemons
faraday
minitest
puma
rack-test
rake
redis
sinatra
sinatra-activerecord
sinatra-param
sqlite3
BUNDLED WITH
1.17.1

19
LICENSE Normal file
View file

@ -0,0 +1,19 @@
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

163
README.md Normal file
View file

@ -0,0 +1,163 @@
# Satellite API
A lightning app (Lapp) based on c-lightning. Presents an API to submit messages for global broadcast over Blockstream Satellite and pay for them with Bitcoin Lightning payments.
<!-- markdown-toc start - Don't edit this section. Run M-x markdown-toc-generate-toc again -->
## Contents
- [Setup](#setup)
- [Run](#run)
- [Example Applications](#example-applications)
- [REST API](#rest-api)
- [POST /order](#post-order)
- [POST /order/:uuid/bump](#post-orderuuidbump)
- [GET /order/:uuid](#get-orderuuid)
- [GET /order/:uuid/sent_message](#get-orderuuidsentmessage)
- [DELETE /order/:uuid](#delete-orderuuid)
- [GET /orders/queued](#get-ordersqueued)
- [GET /orders/sent](#get-orderssent)
- [GET /info](#get-info)
- [GET /subscribe/:channels](#get-subscribechannels)
- [Debugging](#debugging)
- [Queue Page](#queue-page)
- [Future Work](#future-work)
<!-- markdown-toc end -->
## Setup
The Blockstream Satellite API is dependent on [lightning-charge](https://github.com/ElementsProject/lightning-charge), which itself is dependent on [c-lightning](https://github.com/ElementsProject/lightning) and [bitcoin](https://github.com/bitcoin/bitcoin). To bring up charged, lightningd, and bitcoind, a [handy docker-compose](https://github.com/DeviaVir/blc-docker) script is available.
The satellite API itself comprises a RESTful API server and a transmitter daemon. The API server speaks JSON and is used for creating and managing message transmission orders and for processing lightning-charge payment callbacks. The transmitter daemon dequeues paid orders and writes each uploaded message to a named pipe, where it is subsequently processed by the Blockstream Satellite GNU Radio transmitter.
## Run ##
The included `Dockerfile` builds a Docker file with the necessary gem dependencies, directory structure, and permissions. The included `docker_entrypoint.sh` runs the API server and transmitter daemon.
After building a Docker image (`satellite_api` in the example below), decide where you are going to keep your persisted data (`~/docker/data` in the example below) and run it like this:
```bash
docker run -e CHARGE_ROOT=http://api-token:mySecretToken@localhost:9112 -e CALLBACK_URI_ROOT=http://my.public.ip:9292 -u `id -u` -v ~/docker/data:/data -p 9292:9292 -it satellite_api
```
To run in developer mode, set the `RACK_ENV` environment variable like this:
```bash
docker run -e CHARGE_ROOT=http://api-token:mySecretToken@localhost:9112 -e CALLBACK_URI_ROOT=http://my.public.ip:9292 -e RACK_ENV=development -u `id -u` -v ~/docker/data:/data -p 9292:9292 -it satellite_api
```
## Example Applications
Example Python applications are available at the [Blockstream Satellite examples directory](https://github.com/Blockstream/satellite/tree/master/examples) as a reference regarding how to implement the interaction with the API. There is one application specifically for sending data to the API, called "API data sender", and another application for reading the API data acquired by the Blockstream Satellite receiver, called "API data reader". Additionally, there is one application that allows testing API data reception directly through the internet, without the actual satellite receiver hardware, called "demo receiver". Refer to the documentation in the given link.
## REST API ##
Each call to an API endpoint responds with a JSON object, whether the call is successful or results in an error.
The code samples below assume that you've set `SATELLITE_API` in your shell to the public base URL of your server.
### POST /order ###
Place an order for a message transmission. The body of the POST must provide a `file` containing the message and a `bid` in millisatoshis. If the bid is below an allowed minimum millisatoshis per byte, an error is returned.
For example, to place an order to transmit the file `hello_world.png` with an initial bid of 10,000 millisatoshi, issue an HTTP POST request like this:
```bash
curl -F "bid=10000" -F "file=@/path/to/upload/file/hello_world.png" $SATELLITE_API/order
```
If successful, the response includes the JSON Lightning invoice as returned by Lightning Charge's [POST /invoice](https://github.com/ElementsProject/lightning-charge#post-invoice) and an authentication token that can be used to modify the order. Within the metadata of the Lightning invoice, metadata is included providing: the bid (in millisatoshis), the SHA256 digest of the uploaded message file, and a UUID for the order.
```bash
{"auth_token":"d784e322dad7ec2671086ce3ad94e05108f2501180d8228577fbec4115774750","uuid":"409348bc-6af0-4999-b715-4136753979df","lightning_invoice":{"id":"N0LOTYc9j0gWtQVjVW7pK","msatoshi":"514200","description":"BSS Test","rhash":"5e5c9d111bc76ce4bf9b211f12ca2d9b66b81ae9839b4e530b16cedbef653a3a","payreq":"lntb5142n1pd78922pp5tewf6ygmcakwf0umyy039j3dndntsxhfswd5u5ctzm8dhmm98gaqdqdgff4xgz5v4ehgxqzjccqp286gfgrcpvzl04sdg2f9sany7ptc5aracnd6kvr2nr0e0x5ajpmfhsjkqzw679ytqgnt6w4490jjrgcvuemz790salqyz9far68cpqtgq3q23el","expires_at":1541642146,"created_at":1541641546,"metadata":{"sha256_message_digest":"0e2bddf3bba1893b5eef660295ef12d6fc72870da539c328cf24e9e6dbb00f00","uuid":"409348bc-6af0-4999-b715-4136753979df"},"status":"unpaid"}}
```
### POST /order/:uuid/bump ###
Increase the bid for an order sitting in the transmission queue. The `bid_increase` must be provided in the body of the POST. A Lightning invoice is returned for it and, when it is paid, the increase is added to the current bid. An `auth_token` must also be provided. For example, to increase the bid on the order placed above by 100,000 millisatoshis, issue a POST like this:
```bash
curl -v -F "bid_increase=100000" -F "auth_token=d784e322dad7ec2671086ce3ad94e05108f2501180d8228577fbec4115774750" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df/bump
```
Response object is in the same format as for `POST /order`.
As shown below for DELETE, the `auth_token` may alternatively be provided using the `X-Auth-Token` HTTP header.
### GET /order/:uuid ###
Retrieve an order by UUID. Must provide the corresponding auth token to prove that it is yours.
```bash
curl -v -H "X-Auth-Token: 5248b13a722cd9b2e17ed3a2da8f7ac6bd9a8fe7130357615e074596e3d5872f" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df
```
### DELETE /order/:uuid ###
To cancel an order, issue an HTTP DELETE request to the API endpoint `/order/:uuid/` providing the UUID of the order. An `auth_token` must also be provided. For example, to cancel the order above, issue a request like this:
```bash
curl -v -X DELETE -F "auth_token=5248b13a722cd9b2e17ed3a2da8f7ac6bd9a8fe7130357615e074596e3d5872f" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df
```
The `auth_token` may be provided as a parameter in the DELETE body as above or may be provided using the `X-Auth-Token` HTTP header, like this:
```bash
curl -v -X DELETE -H "X-Auth-Token: 5248b13a722cd9b2e17ed3a2da8f7ac6bd9a8fe7130357615e074596e3d5872f" $SATELLITE_API/order/409348bc-6af0-4999-b715-4136753979df
```
### GET /orders/queued ###
Retrieve a list of paid, but unsent orders. Both pending orders and the order currently being transmitted are returned. Optionally, accepts a parameter specifying how many queued orders to return.
```bash
curl $SATELLITE_API/orders/queued
```
```bash
curl $SATELLITE_API/orders/queued?limit=18
```
The response is a JSON array of records (one for each queued message). The revealed fields for each record include: `uuid`, `bid`, `bid_per_byte`, `message_size`, `message_digest`, `status`, `created_at`, `started_transmission_at`, and `ended_transmission_at`.
### GET /orders/sent ###
Retrieves a list of up to 20 sent orders in reverse chronological order. Optionally, accepts the parameter `before` (a timestamp in ISO 8601 format) specifying that only orders before the given time are to be returned.
```bash
curl $SATELLITE_API/orders/sent
```
```bash
curl $SATELLITE_API/orders/sent?before=2019-01-16T00:00:00Z
```
The response is a JSON array of records (one for each sent message). The revealed fields for each record include: `uuid`, `bid`, `bid_per_byte`, `message_size`, `message_digest`, `status`, `created_at`, `started_transmission_at`, and `ended_transmission_at`.
### GET /info
Returns information about the c-lightning node where satellite API payments are terminated. The response is a JSON object consisting of the node ID, port, IP addresses, and other information useful for opening payment channels. For example:
```bash
{"id":"032c6ba19a2141c5fee6ac8b6ff6cf24456fd4e8e206716a39af3300876c3a4835","port":42259,"address":[],"version":"v0.5.2-2016-11-21-1937-ge97ee3d","blockheight":434,"network":"regtest"}
```
### GET /subscribe/:channels
Subscribe to one or more [server-sent events](https://en.wikipedia.org/wiki/Server-sent_events) channels. The `channels` parameter is a comma-separated list of event channels. Currently, only one channel is available: `transmissions`, to which an event is pushed each time a message transmission begins and ends. Event data includes a JSON representation of the order, including its current status.
```bash
curl $SATELLITE_API/subscribe/:channels
```
## Debugging ##
### Queue Page ###
For debugging and as an example of how to build a web front-end to the satellite API, there is a simple table view of queued, pending, and sent messages at `$SATELLITE_API/queue.html`
## Future Work ##
* Configure `Rack::Attack` or similar to block and throttle abusive requests.
* Support bids priced in fiat currencies.
* Report the top bid_per_byte, queue depth, and estimated time to transmit in the response to `POST /order`.

11
Rakefile Normal file
View file

@ -0,0 +1,11 @@
require 'sinatra/activerecord'
require "sinatra/activerecord/rake"
require_relative 'constants'

# Keep the on-disk message store in sync with the database lifecycle:
# dropping the db also removes the stored message files, and creating
# the db (re)creates the message directory.
Rake::Task['db:drop'].enhance do
  FileUtils.remove_entry_secure(MESSAGE_STORE_PATH) if File.exist?(MESSAGE_STORE_PATH)
end

Rake::Task['db:create'].enhance do
  FileUtils.mkdir_p(MESSAGE_STORE_PATH)
end

9
config.ru Normal file
View file

@ -0,0 +1,9 @@
# To launch: bundle exec rackup
require 'rubygems'
require 'bundler'
Bundler.require
require './main'
run Sinatra::Application

20
config/database.yml Normal file
View file

@ -0,0 +1,20 @@
development:
adapter: sqlite3
database: db/ionosphere_development.sqlite3
pool: 5
timeout: 5000
# Warning: The database defined as "test" will be erased and
# re-generated from your development database when you run "rake".
# Do not set this db to the same as development or production.
test:
adapter: sqlite3
database: db/ionosphere_test.sqlite3
pool: 5
timeout: 5000
production:
adapter: sqlite3
database: /data/ionosphere/ionosphere_production.sqlite3
pool: 5
timeout: 5000

36
constants.rb Normal file
View file

@ -0,0 +1,36 @@
# Application-wide constants, overridable via environment variables.
ENV['RACK_ENV'] ||= 'development'

# size and time units
KILO_BYTE = 2 ** 10
MEGA_BYTE = 2 ** 20
ONE_HOUR = 60 * 60
ONE_DAY = 24 * ONE_HOUR
ONE_MONTH = 31 * ONE_DAY

require 'yaml'

# Derive the message-store path from the configured database location so
# messages live alongside the sqlite file for the current RACK_ENV.
yaml_path = File.join(File.expand_path(File.dirname(__FILE__)), 'config', 'database.yml')
conf = YAML.load_file(yaml_path)
DB_ROOT = File.dirname(conf[ENV['RACK_ENV']]['database'])
MESSAGE_STORE_PATH = File.join(DB_ROOT, 'messages')

# external endpoints: payment-callback base URL and Lightning Charge root
CALLBACK_URI_ROOT = ENV['CALLBACK_URI_ROOT'] || "http://localhost:4567"
CHARGE_API_TOKEN = ENV['CHARGE_API_TOKEN'] || 'mySecretToken'
CHARGE_ROOT = ENV['CHARGE_ROOT'] || "http://api-token:#{CHARGE_API_TOKEN}@localhost:9112"

# bidding and message-size limits
MIN_PER_BYTE_BID = Integer(ENV['MIN_PER_BYTE_BID'] || 50) # minimum price per byte in millisatoshis
MIN_MESSAGE_SIZE = Integer(ENV['MIN_MESSAGE_SIZE'] || KILO_BYTE)
TRANSMIT_RATE = Integer(ENV['TRANSMIT_RATE'] || KILO_BYTE) # bytes per second
MAX_HEAD_OF_LINE_BLOCKING_TIME = 10 # more than 10 seconds and it doesn't feel "instant"
MAX_MESSAGE_SIZE = MAX_HEAD_OF_LINE_BLOCKING_TIME * TRANSMIT_RATE

# Lightning invoice parameters
LN_INVOICE_EXPIRY = ONE_HOUR
LN_INVOICE_DESCRIPTION = (ENV['RACK_ENV'] == 'production') ? "Blockstream Satellite Transmission" : "BSS Test"
MAX_LIGHTNING_INVOICE_SIZE = 1024

# retention and paging
EXPIRE_PENDING_ORDERS_AFTER = ONE_DAY
MESSAGE_FILE_RETENTION_TIME = ONE_MONTH
PAGE_SIZE = 20
MAX_QUEUED_ORDERS_REQUEST = 100

REDIS_URI = ENV['REDIS_URI'] || "redis://127.0.0.1:6379"

61
daemons/transmitter.rb Normal file
View file

@ -0,0 +1,61 @@
# Transmitter daemon: dequeues the highest-bidding paid order and marks it
# as transmitting/sent, pacing transmissions at TRANSMIT_RATE bytes/second.
require 'active_record'
require 'logger'

logger = Logger.new(STDOUT)
logger.level = Logger::INFO

require_relative '../constants'
require_relative '../models/init'

# complete any old transmissions that could be stuck (e.g. by early termination of the transmitter daemon)
Order.transmitting.each do |order|
  logger.info "completing stuck transmission #{order.uuid}"
  order.end_transmission!
end

# NB: no mutex is needed around max_tx_seq_num because it is assumed that there is only one transmitter
max_tx_seq_num = Order.maximum(:tx_seq_num) || 0

CLEANUP_DUTY_CYCLE = 5 * 60 # five minutes
# start one full cycle in the past so cleanup runs on the first iteration
last_cleanup_at = Time.now - CLEANUP_DUTY_CYCLE

# loop forever dequeuing the highest-priced paid order and piping it to the GNU radio FIFO
loop do
  sendable_order = nil
  while sendable_order.nil? do
    # periodic housekeeping, at most once per CLEANUP_DUTY_CYCLE
    if Time.now > last_cleanup_at + CLEANUP_DUTY_CYCLE
      # expire any unpaid invoices that have reached their expiration time (orders may be auto-expired as a result)
      Invoice.where(status: :pending).where("expires_at < ?", Time.now).each { |i| i.expire! }
      # expire old pending orders
      Order.where(status: :pending).where("created_at < ?", Time.now - EXPIRE_PENDING_ORDERS_AFTER).each { |o| o.expire! }
      # delete message files for messages sent long ago
      Order.where(status: :sent).where("ended_transmission_at < ?", Time.now - MESSAGE_FILE_RETENTION_TIME).each { |o| o.delete_message_file }
      last_cleanup_at = Time.now
    end
    sleep 1
    # look for an eligible order to transmit and, if one is found, begin transmitting it;
    # the transaction keeps the sequence-number assignment and state change atomic
    Order.transaction do
      sendable_order = Order.where(status: :paid).order(bid_per_byte: :desc).first
      if sendable_order
        logger.info "transmission start #{sendable_order.uuid}"
        max_tx_seq_num += 1
        sendable_order.update(tx_seq_num: max_tx_seq_num)
        sendable_order.transmit!
      end
    end
  end

  # pace the transmission: sleep for the time the message takes to send at TRANSMIT_RATE
  if TRANSMIT_RATE
    transmit_time = Float(sendable_order.message_size) / TRANSMIT_RATE
    logger.info "sleeping for #{transmit_time} while #{sendable_order.uuid} transmits"
    sleep transmit_time
  end
  logger.info "transmission end #{sendable_order.uuid}"
  sendable_order.end_transmission!
end

0
db/messages/.gitkeep Normal file
View file

View file

@ -0,0 +1,6 @@
# Adds a monotonically increasing transmission sequence number to orders;
# the unique index guarantees no two orders share a slot in the send order.
class AddTxSeqNumToOrders < ActiveRecord::Migration[5.2]
  def change
    add_column :orders, :tx_seq_num, :integer
    add_index :orders, :tx_seq_num, unique: true
  end
end

View file

@ -0,0 +1,7 @@
# Adds a status enum column (indexed, for status-based cleanup queries)
# and an amount column (millisatoshis) to invoices.
class AddStatusAndAmountToInvoices < ActiveRecord::Migration[5.2]
  def change
    add_column :invoices, :status, :integer
    add_column :invoices, :amount, :integer
    add_index :invoices, :status
  end
end

View file

@ -0,0 +1,5 @@
# Adds unpaid_bid to orders — presumably the portion of the bid not yet
# covered by paid invoices; NOTE(review): confirm against the Order model.
class AddUnpaidBidToOrders < ActiveRecord::Migration[5.2]
  def change
    add_column :orders, :unpaid_bid, :integer
  end
end

View file

@ -0,0 +1,6 @@
# Adds an indexed expiration timestamp to invoices so the transmitter's
# cleanup pass can find expired unpaid invoices efficiently.
class AddExpiresAtToInvoices < ActiveRecord::Migration[5.2]
  def change
    add_column :invoices, :expires_at, :datetime
    add_index :invoices, :expires_at
  end
end

View file

@ -0,0 +1,6 @@
# Renames the upload_* timestamps to *_transmission_at: the timestamps mark
# satellite transmission, not the client's upload.
class ChangeTransmissionTimestampNamesInOrders < ActiveRecord::Migration[5.2]
  def change
    rename_column :orders, :upload_started_at, :started_transmission_at
    rename_column :orders, :upload_ended_at, :ended_transmission_at
  end
end

48
db/schema.rb Normal file
View file

@ -0,0 +1,48 @@
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 2019_01_06_221008) do
create_table "invoices", force: :cascade do |t|
t.string "lid"
t.string "invoice", limit: 1024
t.datetime "paid_at"
t.datetime "created_at"
t.integer "order_id"
t.integer "status"
t.integer "amount"
t.datetime "expires_at"
t.index ["expires_at"], name: "index_invoices_on_expires_at"
t.index ["lid"], name: "index_invoices_on_lid", unique: true
t.index ["order_id"], name: "index_invoices_on_order_id"
t.index ["status"], name: "index_invoices_on_status"
end
create_table "orders", force: :cascade do |t|
t.integer "bid"
t.integer "message_size"
t.float "bid_per_byte"
t.string "message_digest", limit: 64
t.integer "status"
t.string "uuid", limit: 36
t.datetime "created_at"
t.datetime "cancelled_at"
t.datetime "started_transmission_at"
t.datetime "ended_transmission_at"
t.integer "tx_seq_num"
t.integer "unpaid_bid"
t.index ["bid_per_byte"], name: "index_orders_on_bid_per_byte"
t.index ["tx_seq_num"], name: "index_orders_on_tx_seq_num", unique: true
t.index ["uuid"], name: "index_orders_on_uuid", unique: true
end
end

61
docker-compose.yml Normal file
View file

@ -0,0 +1,61 @@
version: '3'
services:
blc:
image: "blockstream/lightning-charge"
ports:
- "9112:9112"
environment:
- API_TOKEN=mySecretToken
volumes:
- blc:/data
ionosphere:
image: ionosphere
build:
context: .
dockerfile: Dockerfile
depends_on:
- blc
ports:
- "9292:9292"
links:
- blc
environment:
- CHARGE_ROOT=http://api-token:mySecretToken@blc:9112
- CALLBACK_URI_ROOT=http://localhost:9292
- RACK_ENV=production
volumes:
- ~/docker/data:/data
ionosphere-tx:
image: ionosphere
depends_on:
- ionosphere
- redis
links:
- redis
environment:
- REDIS_URI=redis://redis:6379
- RACK_ENV=production
volumes:
- ~/docker/data:/data
command: ./docker_entrypoint_transmitter.sh
ionosphere-sse:
image: ionosphere-sse
build:
context: sse/
dockerfile: ../Dockerfile.sse
depends_on:
- redis
- ionosphere-tx
ports:
- "4500:4500"
links:
- redis
environment:
- SUB_CHANNELS=transmissions
- REDIS_URI=redis://redis:6379
redis:
image: "redis:latest"
ports:
- "6379:6379"
volumes:
blc:

15
docker_entrypoint.sh Executable file
View file

@ -0,0 +1,15 @@
#!/bin/bash
set -eo pipefail

# create or migrate the db (if needed); the production sqlite file lives on
# the /data volume, so its absence means a fresh deployment
if [ ! -f /data/ionosphere/ionosphere_production.sqlite3 ]; then
  bundle exec rake db:create
  bundle exec rake db:schema:load
fi
bundle exec rake db:migrate

# Run the API server in the background so `wait -n` below has a job to
# observe. (Previously rackup ran in the foreground, which made the
# wait/kill lines unreachable dead code.)
bundle exec rackup --host 0.0.0.0 &

# shutdown the entire process when any of the background jobs exits (even if successfully)
wait -n
kill -TERM $$

View file

@ -0,0 +1,14 @@
#!/bin/bash
set -eo pipefail

# create or load the db schema on first boot (the production sqlite file
# lives on the shared /data volume)
if [ ! -f /data/ionosphere/ionosphere_production.sqlite3 ]; then
  bundle exec rake db:create
  bundle exec rake db:schema:load
fi

echo "starting transmitter"
# Run the transmitter in the background so `wait -n` below has a job to
# observe. (Previously it ran in the foreground, which made the wait/kill
# lines unreachable dead code.)
bundle exec ruby daemons/transmitter.rb &

# shutdown the entire process when any of the background jobs exits (even if successfully)
wait -n
kill -TERM $$

View file

@ -0,0 +1,6 @@
require 'openssl'

# Returns the hex-encoded HMAC of +data+ keyed by +key+, using the named
# digest algorithm (e.g. "sha256").
def hash_hmac(digest, key, data)
  OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new(digest), key, data)
end

5
helpers/init.rb Normal file
View file

@ -0,0 +1,5 @@
require 'sinatra/param'
require_relative './digest_helpers'
require_relative './invoice_helpers'
require_relative './order_helpers'

View file

@ -0,0 +1,50 @@
require "faraday"

module Sinatra
  # Helpers for looking up, authorizing, and creating Lightning Charge invoices.
  module InvoiceHelpers

    # Finds the invoice by its Lightning Charge id (params[:lid]); halts 404 if absent.
    def fetch_invoice_by_lid
      Invoice.where(lid: params[:lid]).first || halt(404, {:message => "Not found", :errors => ["Invalid invoice id"]}.to_json)
    end

    # Halts 401 unless the caller presented the invoice's charged auth token.
    def authorize_invoice!(invoice)
      if invoice.charged_auth_token != params[:charged_auth_token]
        halt 401, {:message => "Unauthorized", :errors => ["Invalid authentication token"]}.to_json
      else
        invoice
      end
    end

    def get_and_authenticate_invoice
      authorize_invoice!(fetch_invoice_by_lid)
    end

    # Creates a Lightning invoice for +bid+ millisatoshis via Lightning Charge,
    # registers a payment webhook for it, and returns the (unsaved) Invoice.
    # Halts 400 if either charged call does not return 201.
    def new_invoice(order, bid)
      bid = Integer(bid)

      # generate Lightning invoice
      charged_response = $lightning_charge.post '/invoice', {
        msatoshi: bid,
        description: LN_INVOICE_DESCRIPTION,
        expiry: LN_INVOICE_EXPIRY,
        metadata: {uuid: order.uuid, sha256_message_digest: order.message_digest}
      }
      unless charged_response.status == 201
        # BUGFIX: previously interpolated the undefined local `response`, which
        # raised NameError instead of reporting the upstream status.
        halt 400, {:message => "Lightning Charge invoice creation error", :errors => ["received #{charged_response.status} from charged"]}.to_json
      end

      lightning_invoice = JSON.parse(charged_response.body)
      invoice = Invoice.new(order: order, amount: bid, lid: lightning_invoice["id"], invoice: charged_response.body, expires_at: Time.now + LN_INVOICE_EXPIRY)

      # register the webhook
      webhook_registration_response = $lightning_charge.post "/invoice/#{invoice.lid}/webhook", {
        url: invoice.callback_url
      }
      unless webhook_registration_response.status == 201
        # BUGFIX: same undefined `response` defect as above.
        halt 400, {:message => "Lightning Charge webhook registration error", :errors => ["received #{webhook_registration_response.status} from charged"]}.to_json
      end

      invoice
    end

  end
  helpers InvoiceHelpers
end

22
helpers/order_helpers.rb Normal file
View file

@ -0,0 +1,22 @@
module Sinatra
  # Helpers for looking up and authorizing message transmission orders.
  module OrderHelpers

    # Finds the order whose UUID was given in the route; halts 404 if absent.
    def fetch_order_by_uuid
      order = Order.where(uuid: params[:uuid]).first
      return order if order
      halt(404, {:message => "Not found", :errors => ["Invalid order id"]}.to_json)
    end

    # Halts 401 unless the caller supplied the order's user auth token;
    # returns the order otherwise.
    def authorize_order!(order)
      return order if order.user_auth_token == params[:auth_token]
      halt 401, {:message => "Unauthorized", :errors => ["Invalid authentication token"]}.to_json
    end

    def get_and_authenticate_order
      authorize_order!(fetch_order_by_uuid)
    end

  end
  helpers OrderHelpers
end

206
main.rb Normal file
View file

@ -0,0 +1,206 @@
require 'sinatra'
require "sinatra/activerecord"
require "faraday"
require 'securerandom'
require 'openssl'
require 'time'
require_relative 'constants'
require_relative './models/init'
require_relative 'helpers/init'
configure do
  # Let the app's own error handlers render exceptions instead of Rack.
  set :raise_errors, false
  set :show_exceptions, :after_handler
  # Shared HTTP client for the Lightning Charge REST API.
  $lightning_charge = Faraday.new(:url => CHARGE_ROOT)
end
# Every endpoint in this API responds with JSON.
before do
  content_type :json
end
# Test/development only: download a sent (or currently transmitting) order's
# message file without authentication.
configure :test, :development do
  get '/order/:uuid/sent_message' do
    (order = Order.find_by(uuid: params[:uuid], status: [:sent, :transmitting])) || halt(404, {:message => "Not found", :errors => ["Sent order with that id not found"]}.to_json)
    send_file order.message_path, :disposition => 'attachment'
  end
end
# GET /info
#
# returns:
#   information about the c-lightning node where satellite API payments are terminated
#
get '/info' do
  # Proxy lightning-charge's /info endpoint (which invokes lightning-cli
  # getinfo) straight through to the client.
  $lightning_charge.get('/info').body
end
# GET /orders/queued
# params:
#   limit - return top limit orders (optional)
# returns:
#   array of JSON orders sorted by bid-per-byte descending
get '/orders/queued' do
  param :limit, Integer, default: PAGE_SIZE, max: MAX_QUEUED_ORDERS_REQUEST, message: "can't display more than top #{MAX_QUEUED_ORDERS_REQUEST} orders"
  # Paid orders waiting for (or in) transmission, best-paying first.
  # Only PUBLIC_FIELDS are exposed to clients.
  Order.where(status: [:paid, :transmitting])
    .select(Order::PUBLIC_FIELDS)
    .order(bid_per_byte: :desc)
    .limit(params[:limit]).to_json(:only => Order::PUBLIC_FIELDS)
end
# GET /orders/sent
# params:
#   before - return the previous PAGE_SIZE orders sent before the given time (time should be sent as in ISO 8601 format and defaults to now)
# returns:
#   array of JSON orders sorted in reverse chronological order
get '/orders/sent' do
  param :before, String, required: false, default: lambda { Time.now.utc.iso8601 }
  before = DateTime.iso8601(params[:before])
  # NOTE(review): the filter is on created_at even though rows are ordered by
  # ended_transmission_at -- confirm "before" is meant to bound creation time
  # rather than transmission time.
  Order.where(status: :sent).where("created_at < ?", before)
    .select(Order::PUBLIC_FIELDS)
    .order(ended_transmission_at: :desc)
    .limit(PAGE_SIZE).to_json(:only => Order::PUBLIC_FIELDS)
end
# GET /orders/pending
# params:
#   before - return the previous PAGE_SIZE orders created before the given time (time should be sent as in ISO 8601 format and defaults to now)
# returns:
#   array of JSON orders sorted in reverse chronological order
get '/orders/pending' do
  param :before, String, required: false, default: lambda { Time.now.utc.iso8601 }
  before = DateTime.iso8601(params[:before])
  # Unpaid (pending) orders, newest first; only public fields are exposed.
  Order.where(status: :pending).where("created_at < ?", before)
    .select(Order::PUBLIC_FIELDS)
    .order(created_at: :desc)
    .limit(PAGE_SIZE).to_json(:only => Order::PUBLIC_FIELDS)
end
# GET /message/:tx_seq_num
#
# download the message file for the order with the given transmission
# sequence number (sent or currently transmitting orders only)
get '/message/:tx_seq_num' do
  (order = Order.find_by(tx_seq_num: params[:tx_seq_num], status: [:sent, :transmitting])) || halt(404, {:message => "Not found", :errors => ["Sent order with that tx sequence number not found"]}.to_json)
  send_file order.message_path, :disposition => 'attachment'
end
# POST /order
#
# upload a message, along with a bid (in millisatoshis)
# return JSON object with status, uuid, and lightning payment invoice
post '/order' do
  param :bid, Integer, required: true, min: 0, message: "must be a positive integer number of msatoshis"
  param :file, Hash, required: true
  bid = Integer(params[:bid])
  # process the upload
  unless tmpfile = params[:file][:tempfile]
    halt 400, {:message => "Message upload problem", :errors => ["No tempfile received"]}.to_json
  end
  unless name = params[:file][:filename]
    halt 400, {:message => "Message upload problem", :errors => ["Filename missing"]}.to_json
  end
  order = Order.new(uuid: SecureRandom.uuid)
  message_size = 0
  sha256 = OpenSSL::Digest::SHA256.new
  # Stream the upload to disk in 64 KiB chunks, hashing as we go. The block
  # form of File.open guarantees the handle is closed even when halt throws
  # mid-copy (previously the handle leaked and the partial file was left on
  # disk on the oversize path).
  File.open(order.message_path, "wb") do |message_file|
    while block = tmpfile.read(65536)
      message_size += block.size
      if message_size > MAX_MESSAGE_SIZE
        # Discard the partial file before rejecting the upload.
        message_file.close
        FileUtils.rm(order.message_path)
        halt 413, {:message => "Message upload problem", :errors => ["Message size exceeds max size #{MAX_MESSAGE_SIZE}"]}.to_json
      end
      sha256 << block
      message_file.write(block)
    end
  end
  if message_size < MIN_MESSAGE_SIZE
    FileUtils.rm(order.message_path)
    halt 400, {:message => "Message upload problem", :errors => ["Message too small. Minimum message size is #{MIN_MESSAGE_SIZE}"]}.to_json
  end
  order.message_size = message_size
  order.message_digest = sha256.to_s
  if bid.to_f / message_size.to_f < MIN_PER_BYTE_BID
    # The order is never saved on this path; remove the stored message so it
    # doesn't orphan on disk.
    FileUtils.rm(order.message_path)
    halt 413, {:message => "Bid too low", :errors => ["Per byte bid cannot be below #{MIN_PER_BYTE_BID} millisatoshis per byte. The minimum bid for this message is #{order.message_size * MIN_PER_BYTE_BID} millisatoshis." ]}.to_json
  end
  invoice = new_invoice(order, bid)
  order.invoices << invoice
  order.save
  {:auth_token => order.user_auth_token, :uuid => order.uuid, :lightning_invoice => JSON.parse(invoice.invoice)}.to_json
end
# POST /order/:uuid/bump
#
# increase an existing order's bid by bid_increase millisatoshis; returns a
# new lightning invoice for the increase
post '/order/:uuid/bump' do
  param :uuid, String, required: true
  param :bid_increase, Integer, required: true, min: 0, message: "must be a positive integer number of msatoshis"
  # Fixed copy-pasted "DELETE body" wording -- this is a POST route.
  param :auth_token, String, required: true, default: lambda { env['HTTP_X_AUTH_TOKEN'] },
        message: "auth_token must be provided either in the request body or in an X-Auth-Token header"
  bid_increase = Integer(params[:bid_increase])
  order = get_and_authenticate_order
  # bump is a no-op transition allowed only from pending/paid.
  unless order.bump
    halt 400, {:message => "Cannot bump order", :errors => ["Order already #{order.status}"]}.to_json
  end
  invoice = new_invoice(order, bid_increase)
  order.invoices << invoice
  order.save
  {:auth_token => order.user_auth_token, :uuid => order.uuid, :lightning_invoice => JSON.parse(invoice.invoice)}.to_json
end
# GET /order/:uuid
#
# fetch an order's public fields; requires the order's auth token
get '/order/:uuid' do
  param :uuid, String, required: true
  # Fixed copy-pasted "DELETE body" wording -- this is a GET route, so the
  # token arrives as a query parameter or via the X-Auth-Token header.
  param :auth_token, String, required: true, default: lambda { env['HTTP_X_AUTH_TOKEN'] },
        message: "auth_token must be provided either as a request parameter or in an X-Auth-Token header"
  get_and_authenticate_order.as_sanitized_json
end
# DELETE /order/:uuid
#
# cancel a pending or paid order; requires the order's auth token
delete '/order/:uuid' do
  param :uuid, String, required: true
  param :auth_token, String, required: true, default: lambda { env['HTTP_X_AUTH_TOKEN'] },
        message: "auth_token must be provided either in the DELETE body or in an X-Auth-Token header"
  order = get_and_authenticate_order
  # cancel! only succeeds from pending/paid; otherwise report current status.
  unless order.cancel!
    halt 400, {:message => "Cannot cancel order", :errors => ["Order already #{order.status}"]}.to_json
  end
  {:message => "order cancelled"}.to_json
end
# invoice paid callback from charged
#
# charged calls this webhook (registered in new_invoice) when an invoice is
# paid; the per-invoice charged_auth_token authenticates the caller.
post '/callback/:lid/:charged_auth_token' do
  param :lid, String, required: true
  param :charged_auth_token, String, required: true
  invoice = get_and_authenticate_invoice
  # NOTE(review): get_and_authenticate_invoice halts on lookup/auth failure,
  # so this nil check looks unreachable -- kept as a defensive guard.
  if invoice.nil?
    halt 404, {:message => "Payment problem", :errors => ["Invoice not found"]}.to_json
  end
  unless invoice.order
    halt 404, {:message => "Payment problem", :errors => ["Orphaned invoice"]}.to_json
  end
  if invoice.paid?
    halt 400, {:message => "Payment problem", :errors => ["Order already paid"]}.to_json
  end
  # AASM transition: marks the invoice paid and may mark the order paid.
  invoice.pay!
  {:message => "invoice #{invoice.lid} paid"}.to_json
end
# subscribe to one or more SSE channels
# params:
#   channels - comma-separated list of channels to subscribe to
# returns:
#   SSE event stream
# available channels:
#   transmissions - an event is pushed to this channel when each message transmission begins and ends
get '/subscribe/:channels' do
  # Hand the client off to the redis-to-sse sidecar (see sse/server.js).
  # NOTE(review): scheme and port 4500 are hard-coded -- confirm this matches
  # the deployed SSE service.
  redirect "http://#{request.host}:4500/stream?channels=#{params[:channels]}"
end

3
models/init.rb Normal file
View file

@ -0,0 +1,3 @@
require "sinatra/activerecord"
require_relative './orders'
require_relative './invoices'

40
models/invoices.rb Normal file
View file

@ -0,0 +1,40 @@
require 'aasm'
require "sinatra/activerecord"
require_relative '../constants'
require_relative './orders'
require_relative '../helpers/digest_helpers'
# A Lightning Charge invoice funding (part of) an order's bid. Status is
# driven by AASM: pending -> paid, or pending -> expired.
class Invoice < ActiveRecord::Base
  include AASM
  enum status: [:pending, :paid, :expired]
  validates :lid, presence: true
  validates :invoice, presence: true
  validates :expires_at, presence: true
  belongs_to :order
  # no_direct_assignment: status may only change through pay!/expire! events.
  aasm :column => :status, :enum => true, :whiny_transitions => false, :no_direct_assignment => true do
    state :pending, initial: true
    # Expiring the last pending invoice may expire the whole order.
    state :expired, after_enter: Proc.new { self.order.expire_if_pending_and_no_pending_invoices }
    # Record the payment time, then let the order check if it is fully paid.
    state :paid, before_enter: Proc.new { self.paid_at = Time.now }, after_enter: Proc.new { self.order.maybe_mark_as_paid }
    event :pay do
      transitions :from => :pending, :to => :paid
    end
    event :expire do
      transitions :from => :pending, :to => :expired
    end
  end
  # HMAC key used to derive per-invoice webhook tokens from the charged API
  # token (see helpers/digest_helpers).
  LIGHTNING_WEBHOOK_KEY = hash_hmac('sha256', 'charged-token', CHARGE_API_TOKEN)
  # Per-invoice token charged must present on the payment callback URL.
  def charged_auth_token
    hash_hmac('sha256', LIGHTNING_WEBHOOK_KEY, self.lid)
  end
  # URL registered with charged as this invoice's payment webhook.
  def callback_url
    "#{CALLBACK_URI_ROOT}/callback/#{self.lid}/#{self.charged_auth_token}"
  end
end

126
models/orders.rb Normal file
View file

@ -0,0 +1,126 @@
require 'aasm'
require 'redis'
require 'json'
require_relative '../constants'
require_relative './invoices'
require_relative '../helpers/digest_helpers'
# A message upload with an attached bid, funded by one or more Invoices and
# queued for satellite transmission. AASM lifecycle:
#   pending -> paid -> transmitting -> sent, with cancelled/expired exits.
class Order < ActiveRecord::Base
  include AASM
  # Fields safe to expose to API clients.
  PUBLIC_FIELDS = [:uuid, :unpaid_bid, :bid, :bid_per_byte, :message_size, :message_digest, :status, :created_at, :started_transmission_at, :ended_transmission_at, :tx_seq_num]
  # Shared connection used to publish transmission events for the SSE bridge.
  @@redis = Redis.new(url: REDIS_URI)
  # NOTE(review): the aasm block below declares an :expired state that is
  # missing from this enum -- the expire! transition looks unable to persist.
  # Confirm and add :expired here if so.
  enum status: [:pending, :paid, :transmitting, :sent, :cancelled]
  before_validation :adjust_bids
  validates :bid, presence: true, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
  validates :unpaid_bid, presence: true, numericality: { only_integer: true, greater_than_or_equal_to: 0 }
  validates :message_size, presence: true, numericality: { only_integer: true, greater_than_or_equal_to: MIN_MESSAGE_SIZE }
  validates :message_digest, presence: true
  validates :bid_per_byte, numericality: { greater_than_or_equal_to: 0 }
  validates :status, presence: true
  validates :uuid, presence: true
  has_many :invoices, after_add: :adjust_bids_and_save, after_remove: :adjust_bids_and_save
  aasm :column => :status, :enum => true, :whiny_transitions => false, :no_direct_assignment => true do
    state :pending, initial: true
    state :paid
    state :expired
    state :transmitting, before_enter: Proc.new { self.started_transmission_at = Time.now }
    state :sent, before_enter: Proc.new { self.ended_transmission_at = Time.now }
    state :cancelled, before_enter: Proc.new { self.cancelled_at = Time.now }
    event :pay do
      # Move to paid only once the per-byte bid clears the minimum; further
      # payments on an already-paid order are a no-op transition.
      transitions :from => :pending, :to => :paid, :guard => :paid_enough?
      transitions :from => :paid, :to => :paid
    end
    event :transmit, :after => :notify_transmissions_channel do
      transitions :from => :paid, :to => :transmitting
    end
    event :end_transmission, :after => :notify_transmissions_channel do
      transitions :from => :transmitting, :to => :sent
    end
    event :cancel, :after => :delete_message_file do
      transitions :from => [:pending, :paid], :to => :cancelled
    end
    # Bumping just attaches a new invoice; the state itself does not change.
    event :bump do
      transitions :from => :pending, :to => :pending
      transitions :from => :paid, :to => :paid
    end
    event :expire, :after => :delete_message_file do
      transitions :from => :pending, :to => :expired
    end
  end
  # has_many callback: recompute bid totals whenever an invoice is added or
  # removed.
  def adjust_bids_and_save(invoice)
    self.adjust_bids
    self.save
  end
  # Recompute bid (paid total), bid_per_byte and unpaid_bid from invoices.
  def adjust_bids
    self.bid = paid_invoices_total
    self.bid_per_byte = (self.bid.to_f / self.message_size.to_f).round(2)
    self.unpaid_bid = unpaid_invoices_total
  end
  # Called by Invoice's paid after_enter hook.
  def maybe_mark_as_paid
    self.pay! if self.paid_enough?
  end
  # Has the paid total reached the minimum per-byte bid?
  def paid_enough?
    self.adjust_bids
    self.bid_per_byte >= MIN_PER_BYTE_BID
  end
  # Sum of paid invoice amounts (millisatoshis); 0 when there are none.
  def paid_invoices_total
    self.invoices.where(status: :paid).pluck(:amount).reduce(:+) || 0
  end
  # Sum of pending invoice amounts (millisatoshis); 0 when there are none.
  def unpaid_invoices_total
    self.pending_invoices.pluck(:amount).reduce(:+) || 0
  end
  def pending_invoices
    self.invoices.where(status: :pending)
  end
  # Invoice expiry hook: an order with no live invoices left expires too.
  def expire_if_pending_and_no_pending_invoices
    self.expire! if self.pending? and self.pending_invoices.empty?
  end
  # Publish this order's public fields on the 'transmissions' redis channel.
  def notify_transmissions_channel
    @@redis.publish 'transmissions', self.to_json(:only => Order::PUBLIC_FIELDS)
  end
  # Absolute path of the uploaded message file for this order.
  def message_path
    File.join(MESSAGE_STORE_PATH, self.uuid)
  end
  # have all invoices been paid?
  # NOTE(review): reduce(:&) yields nil (not true) when there are no
  # invoices -- confirm callers treat nil as falsy intentionally.
  def invoices_all_paid?
    self.invoices.pluck(:paid_at).map {|i| not i.nil?}.reduce(:&)
  end
  # HMAC key for deriving per-order user auth tokens from the charged token.
  USER_AUTH_KEY = hash_hmac('sha256', 'user-token', CHARGE_API_TOKEN)
  # Token the client must present to view/bump/cancel this order.
  def user_auth_token
    hash_hmac('sha256', USER_AUTH_KEY, self.uuid)
  end
  # JSON representation restricted to PUBLIC_FIELDS.
  def as_sanitized_json
    self.to_json(:only => Order::PUBLIC_FIELDS)
  end
  # cancel!/expire! hook: remove the stored message from disk.
  def delete_message_file
    File.delete(self.message_path)
  end
end

162
public/queue.html Normal file
View file

@ -0,0 +1,162 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Ionosphere Queues</title>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<!-- Optional theme -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap-theme.min.css" integrity="sha384-rHyoN1iRsVXV4nD0JutlnGaslCJuC7uwjduW9SVrLvRYooPp2bWYgmgJQIXwl/Sp" crossorigin="anonymous">
<style>
table {
width: 100%;
table-layout: fixed;
margin: 5px;
}
td {
overflow-wrap: break-word;
vertical-align: top;
}
td, th {
padding: 10px;
}
</style>
<!-- Latest compiled and minified jQuery -->
<script src="https://code.jquery.com/jquery-3.3.1.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
<!-- Latest compiled and minified Bootstrap JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" crossorigin="anonymous"></script>
<!-- Latest compiled and minified moment.js JavaScript -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.22.2/moment-with-locales.min.js" crossorigin="anonymous"></script>
</head>
<body id="home">
<ul class="nav nav-pills mb-3" id="pills-tab" role="tablist">
<li class="nav-item active">
<a class="nav-link" id="pills-queued-tab" data-toggle="pill" href="#pills-queued" role="tab" aria-controls="pills-queued" aria-selected="true">Queued</a>
</li>
<li class="nav-item">
<a class="nav-link" id="pills-pending-tab" data-toggle="pill" href="#pills-pending" role="tab" aria-controls="pills-pending" aria-selected="false">Pending</a>
</li>
<li class="nav-item">
<a class="nav-link" id="pills-sent-tab" data-toggle="pill" href="#pills-sent" role="tab" aria-controls="pills-sent" aria-selected="false">Sent</a>
</li>
</ul>
<div class="tab-content" id="pills-tabContent">
<div class="tab-pane fade active in" id="pills-queued" role="tabpanel" aria-labelledby="pills-queued-tab">
<div class="panel panel-default">
<table id="queued_table">
<thead>
<tr>
<th class="created_at">Created</th>
<th class="bid_per_byte">Bid per Byte (millisatoshis)</th>
<th class="message_size">Message Size</th>
<th class="unpaid_bid">Unpaid Bid</th>
<th class="uuid">Order ID</th>
<th class="status">Status</th>
<th class="started_transmission_at">Transmission Started</th>
</tr>
</thead>
</table>
</div>
</div>
<div class="tab-pane fade" id="pills-pending" role="tabpanel" aria-labelledby="pills-pending-tab">
<div class="panel panel-default">
<table id="pending_table">
<thead>
<tr>
<th class="created_at">Created</th>
<th class="bid_per_byte">Bid per Byte (millisatoshis)</th>
<th class="message_size">Message Size</th>
<th class="unpaid_bid">Unpaid Bid</th>
<th class="uuid">Order ID</th>
<th class="status">Status</th>
</tr>
</thead>
</table>
</div>
</div>
<div class="tab-pane fade" id="pills-sent" role="tabpanel" aria-labelledby="pills-sent-tab">
<div class="panel panel-default">
<table id="sent_table">
<thead>
<tr>
<th class="created_at">Created</th>
<th class="bid_per_byte">Bid per Byte (millisatoshis)</th>
<th class="message">Message</th>
<th class="message_size">Message Size</th>
<th class="unpaid_bid">Unpaid Bid</th>
<th class="uuid">Order ID</th>
<th class="status">Status</th>
<th class="started_transmission_at">Transmission Started</th>
<th class="ended_transmission_at">Transmission Ended</th>
</tr>
</thead>
</table>
</div>
</div>
</div>
<script type="text/javascript">
$(function() {
  // Render the queued orders table. (Removed unused ended_transmission_at
  // local -- queued orders have no transmission-end column.)
  $.getJSON( "orders/queued", function( data ) {
    $.each( data, function( key, val ) {
      var started_transmission_at = moment(val.started_transmission_at);
      $("#queued_table").append($('<tr>').append(
        $('<td>').text(moment(val.created_at).fromNow()),
        $('<td>').text(val.bid_per_byte),
        $('<td>').text(val.message_size),
        $('<td>').text(val.unpaid_bid),
        $('<td>').text(val.uuid),
        $('<td>').text(val.status),
        $('<td>').text(started_transmission_at.isValid() ? started_transmission_at.fromNow() : "")
      ));
    });
  });
  // Render the pending (unpaid) orders table. (Removed both unused
  // transmission-time locals -- pending orders show neither column.)
  $.getJSON( "orders/pending", function( data ) {
    $.each( data, function( key, val ) {
      $("#pending_table").append($('<tr>').append(
        $('<td>').text(moment(val.created_at).fromNow()),
        $('<td>').text(val.bid_per_byte),
        $('<td>').text(val.message_size),
        $('<td>').text(val.unpaid_bid),
        $('<td>').text(val.uuid),
        $('<td>').text(val.status)
      ));
    });
  });
  // Render the sent orders table, including a message download link.
  $.getJSON( "orders/sent", function( data ) {
    $.each( data, function( key, val ) {
      var started_transmission_at = moment(val.started_transmission_at);
      var ended_transmission_at = moment(val.ended_transmission_at);
      $("#sent_table").append($('<tr>').append(
        $('<td>').text(moment(val.created_at).fromNow()),
        $('<td>').text(val.bid_per_byte),
        $('<td>').html('<a href="order/' + val.uuid + '/sent_message">download</a>'),
        $('<td>').text(val.message_size),
        $('<td>').text(val.unpaid_bid),
        $('<td>').text(val.uuid),
        $('<td>').text(val.status),
        $('<td>').text(started_transmission_at.isValid() ? started_transmission_at.fromNow() : ""),
        $('<td>').text(ended_transmission_at.isValid() ? ended_transmission_at.fromNow() : "")
      ));
    });
  });
});
</script>
</body>
</html>

19
sse/LICENSE Normal file
View file

@ -0,0 +1,19 @@
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

14
sse/README.md Normal file
View file

@ -0,0 +1,14 @@
# Redis to SSE
Subscribes to a redis pub/sub channel and broadcasts messages
over HTTP server-sent events.
To start the server:
```bash
$ git clone git@github.com:shesek/redis-to-sse && cd redis-to-sse
$ npm install
$ REDIS_URI=redis://127.0.0.1:6379 SUB_CHANNELS=foobar PORT=4500 npm start
```
To subscribe to events, send a GET request to `/stream`.

408
sse/package-lock.json generated Normal file
View file

@ -0,0 +1,408 @@
{
"name": "redis-to-sse",
"version": "0.1.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
"accepts": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
"integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
"requires": {
"mime-types": "~2.1.18",
"negotiator": "0.6.1"
}
},
"array-flatten": {
"version": "1.1.1",
"resolved": "http://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
"integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
},
"basic-auth": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
"integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
"requires": {
"safe-buffer": "5.1.2"
}
},
"body-parser": {
"version": "1.18.3",
"resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz",
"integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=",
"requires": {
"bytes": "3.0.0",
"content-type": "~1.0.4",
"debug": "2.6.9",
"depd": "~1.1.2",
"http-errors": "~1.6.3",
"iconv-lite": "0.4.23",
"on-finished": "~2.3.0",
"qs": "6.5.2",
"raw-body": "2.3.3",
"type-is": "~1.6.16"
}
},
"bytes": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
"integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
},
"content-disposition": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
"integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ="
},
"content-type": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
"integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
},
"cookie": {
"version": "0.3.1",
"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz",
"integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s="
},
"cookie-signature": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
"integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
},
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"requires": {
"ms": "2.0.0"
}
},
"depd": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
"integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
},
"destroy": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
"integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
},
"double-ended-queue": {
"version": "2.1.0-0",
"resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz",
"integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw="
},
"ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
"integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
},
"encodeurl": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
"integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
},
"escape-html": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
"integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
},
"etag": {
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
"integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
},
"express": {
"version": "4.16.4",
"resolved": "https://registry.npmjs.org/express/-/express-4.16.4.tgz",
"integrity": "sha512-j12Uuyb4FMrd/qQAm6uCHAkPtO8FDTRJZBDd5D2KOL2eLaz1yUNdUB/NOIyq0iU4q4cFarsUCrnFDPBcnksuOg==",
"requires": {
"accepts": "~1.3.5",
"array-flatten": "1.1.1",
"body-parser": "1.18.3",
"content-disposition": "0.5.2",
"content-type": "~1.0.4",
"cookie": "0.3.1",
"cookie-signature": "1.0.6",
"debug": "2.6.9",
"depd": "~1.1.2",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"finalhandler": "1.1.1",
"fresh": "0.5.2",
"merge-descriptors": "1.0.1",
"methods": "~1.1.2",
"on-finished": "~2.3.0",
"parseurl": "~1.3.2",
"path-to-regexp": "0.1.7",
"proxy-addr": "~2.0.4",
"qs": "6.5.2",
"range-parser": "~1.2.0",
"safe-buffer": "5.1.2",
"send": "0.16.2",
"serve-static": "1.13.2",
"setprototypeof": "1.1.0",
"statuses": "~1.4.0",
"type-is": "~1.6.16",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
}
},
"finalhandler": {
"version": "1.1.1",
"resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
"integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==",
"requires": {
"debug": "2.6.9",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"on-finished": "~2.3.0",
"parseurl": "~1.3.2",
"statuses": "~1.4.0",
"unpipe": "~1.0.0"
}
},
"forwarded": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
"integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
},
"fresh": {
"version": "0.5.2",
"resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
"integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
},
"http-errors": {
"version": "1.6.3",
"resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
"integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
"requires": {
"depd": "~1.1.2",
"inherits": "2.0.3",
"setprototypeof": "1.1.0",
"statuses": ">= 1.4.0 < 2"
}
},
"iconv-lite": {
"version": "0.4.23",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
"integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
"requires": {
"safer-buffer": ">= 2.1.2 < 3"
}
},
"inherits": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
"integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
},
"ipaddr.js": {
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.8.0.tgz",
"integrity": "sha1-6qM9bd16zo9/b+DJygRA5wZzix4="
},
"media-typer": {
"version": "0.3.0",
"resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
"integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
},
"merge-descriptors": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
"integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
},
"methods": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
"integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
},
"mime": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz",
"integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
},
"mime-db": {
"version": "1.37.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
"integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg=="
},
"mime-types": {
"version": "2.1.21",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
"integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
"requires": {
"mime-db": "~1.37.0"
}
},
"morgan": {
"version": "1.9.1",
"resolved": "https://registry.npmjs.org/morgan/-/morgan-1.9.1.tgz",
"integrity": "sha512-HQStPIV4y3afTiCYVxirakhlCfGkI161c76kKFca7Fk1JusM//Qeo1ej2XaMniiNeaZklMVrh3vTtIzpzwbpmA==",
"requires": {
"basic-auth": "~2.0.0",
"debug": "2.6.9",
"depd": "~1.1.2",
"on-finished": "~2.3.0",
"on-headers": "~1.0.1"
}
},
"ms": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
},
"negotiator": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
"integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
},
"on-finished": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
"integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
"requires": {
"ee-first": "1.1.1"
}
},
"on-headers": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz",
"integrity": "sha1-ko9dD0cNSTQmUepnlLCFfBAGk/c="
},
"parseurl": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
"integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M="
},
"path-to-regexp": {
"version": "0.1.7",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
"integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
},
"proxy-addr": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz",
"integrity": "sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==",
"requires": {
"forwarded": "~0.1.2",
"ipaddr.js": "1.8.0"
}
},
"qs": {
"version": "6.5.2",
"resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
"integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
},
"range-parser": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
"integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4="
},
"raw-body": {
"version": "2.3.3",
"resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
"integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
"requires": {
"bytes": "3.0.0",
"http-errors": "1.6.3",
"iconv-lite": "0.4.23",
"unpipe": "1.0.0"
}
},
"redis": {
"version": "2.8.0",
"resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz",
"integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==",
"requires": {
"double-ended-queue": "^2.1.0-0",
"redis-commands": "^1.2.0",
"redis-parser": "^2.6.0"
}
},
"redis-commands": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.4.0.tgz",
"integrity": "sha512-cu8EF+MtkwI4DLIT0x9P8qNTLFhQD4jLfxLR0cCNkeGzs87FN6879JOJwNQR/1zD7aSYNbU0hgsV9zGY71Itvw=="
},
"redis-parser": {
"version": "2.6.0",
"resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz",
"integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs="
},
"safe-buffer": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
"integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
},
"safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"send": {
"version": "0.16.2",
"resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz",
"integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==",
"requires": {
"debug": "2.6.9",
"depd": "~1.1.2",
"destroy": "~1.0.4",
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
"http-errors": "~1.6.2",
"mime": "1.4.1",
"ms": "2.0.0",
"on-finished": "~2.3.0",
"range-parser": "~1.2.0",
"statuses": "~1.4.0"
}
},
"serve-static": {
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz",
"integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==",
"requires": {
"encodeurl": "~1.0.2",
"escape-html": "~1.0.3",
"parseurl": "~1.3.2",
"send": "0.16.2"
}
},
"setprototypeof": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
"integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
},
"statuses": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz",
"integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew=="
},
"type-is": {
"version": "1.6.16",
"resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
"integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
"requires": {
"media-typer": "0.3.0",
"mime-types": "~2.1.18"
}
},
"unpipe": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
"integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
},
"utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
"integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
},
"vary": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
"integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
}
}
}

15
sse/package.json Normal file
View file

@ -0,0 +1,15 @@
{
"name": "redis-to-sse",
"version": "0.1.0",
"description": "Redis to SSE",
"scripts": {
"start": "node server.js"
},
"author": "Nadav Ivgi",
"license": "MIT",
"dependencies": {
"express": "^4.16.4",
"morgan": "^1.9.1",
"redis": "^2.8.0"
}
}

46
sse/server.js Normal file
View file

@ -0,0 +1,46 @@
// Bridge Redis pub/sub channels to Server-Sent Events (SSE) over HTTP.
//
// Env vars:
//   REDIS_URI    - redis connection string
//   SUB_CHANNELS - comma-separated channels to subscribe to (required)
//   PROXIED      - express "trust proxy" setting (default 'loopback')
//   PORT         - listen port (default 4500)

// Setup redis
const redis = require('redis').createClient(process.env.REDIS_URI),
      channels = process.env.SUB_CHANNELS.split(',')

// BUGFIX: without an 'error' listener, any redis failure (connection refused,
// timeout, reconnect) is an uncaught exception that kills the whole process.
// Log it and let the client's built-in reconnection logic recover.
redis.on('error', err => console.error('Redis error:', err))

// Each SSE subscriber adds its own 'message' listener below; lift the default
// EventEmitter cap of 10 so normal load doesn't trigger leak warnings.
redis.setMaxListeners(0)

console.log(`Subscribing to Redis on ${channels.join(',')}`)
channels.forEach(chan => redis.subscribe(chan))

// Log messages and number of SSE subscribers
redis.on('message', (chan, msg) => console.log(`Broadcasting ${chan}: ${msg}`))
// The logger above accounts for one 'message' listener; subtract it.
setInterval(_ => console.log(`Total subscribers: ${ redis.listenerCount('message') - 1 }`), 60000)

// Setup express server
const app = require('express')()
app.set('trust proxy', process.env.PROXIED || 'loopback')
app.use(require('morgan')('dev'))

// SSE endpoint: streams redis messages, optionally filtered with
// ?channels=a,b (default: everything we are subscribed to).
app.get('/stream', (req, res) => {
  const subscriptions = req.query.channels && req.query.channels.split(',')
  console.log(`New subscriber for ${ subscriptions ? subscriptions.join(',') : 'all channels' }`)

  // Headers that keep intermediaries (nginx) from buffering the stream.
  res.set({
    'X-Accel-Buffering': 'no',
    'Cache-Control': 'no-cache',
    'Content-Type': 'text/event-stream',
    'Connection': 'keep-alive'
  }).flushHeaders()

  function onMsg (chan, msg) {
    if (!subscriptions || subscriptions.includes(chan)) {
      res.write(`event:${chan}\ndata:${msg}\n\n`)
    }
  }
  redis.on('message', onMsg)

  // SSE comment line every 25s keeps idle connections from being timed out.
  const keepAlive = setInterval(_ => res.write(': keepalive\n\n'), 25000)

  // Detach this subscriber's listener and timer when the client goes away.
  req.once('close', _ => (redis.removeListener('message', onMsg),
                          clearInterval(keepAlive),
                          console.log('Subscriber disconnected')))
})

app.listen(
  process.env.PORT || 4500,
  function() { console.log(`HTTP server running on ${this.address().address}:${this.address().port}`) }
)

6
sse/test-producer.js Normal file
View file

@ -0,0 +1,6 @@
// Test helper: publishes a JSON payload with an incrementing counter to
// PUB_CHANNEL once a second, so SSE subscribers have something to consume.
const redis = require('redis').createClient(process.env.REDIS_URI)
const chan = process.env.PUB_CHANNEL
let i = 0
setInterval(_ => redis.publish(chan, JSON.stringify({ foo: 'bar', i: ++i })), 1000)

57
terraform/main.tf Normal file
View file

@ -0,0 +1,57 @@
# Root configuration: remote state in GCS; the terraform workspace
# (staging/prod) selects the environment via locals in variables.tf.
terraform {
  required_version = "> 0.11.0"
  backend "gcs" {
    bucket  = "tf-state-ionosphere"
    prefix  = "terraform/state"
    project = "blockstream-store"
  }
}

# Pull the prometheus service account from the lightning-store state so its
# scrapers can be allowed through the firewall (see the blc module).
data "terraform_remote_state" "lightning-store-prod" {
  backend = "gcs"
  config {
    bucket  = "tf-state-lightning-store"
    prefix  = "terraform/state"
    project = "blockstream-store"
  }
  workspace = "staging"
  defaults {
    prometheus_service_account = "${var.prom_service_acct}"
  }
}

provider "google" {
  project = "${var.project}"
}

# Single app module: bitcoind + lightningd + charge + ionosphere (+ SSE bridge).
module "blc" {
  source  = "modules/blc"
  project = "${var.project}"
  name    = "satellite-api"
  network = "default"

  bitcoin_docker        = "${var.bitcoin_docker}"
  lightning_docker      = "${var.lightning_docker}"
  charge_docker         = "${var.charge_docker}"
  ionosphere_docker     = "${var.ionosphere_docker}"
  ionosphere_sse_docker = "${var.ionosphere_sse_docker}"
  node_exporter_docker  = "${var.node_exporter_docker}"
  net                   = "testnet"
  env                   = "${local.env}"

  # CI vars
  region            = "${var.region}"
  zone              = "${var.zone}"
  instance_type     = "${var.instance_type}"
  host              = "${var.host}"
  ssl_cert          = "${var.ssl_cert}"
  timeout           = "${var.timeout}"
  prom_service_acct = "${data.terraform_remote_state.lightning-store-prod.prometheus_service_account}"
  opsgenie_key      = "${var.opsgenie_key}"
  rpcuser           = "${var.rpcuser}"
  rpcpass           = "${var.rpcpass}"
}

View file

@ -0,0 +1,419 @@
# cloud-init for the satellite-api host: prepares the persistent data disk,
# writes app/service config files, then (later sections) defines systemd
# units and starts them. ${...} placeholders are Terraform template vars.
bootcmd:
  # Format the data disk only on first boot (blkid fails when unformatted).
  - blkid /dev/disk/by-id/google-data || mkfs.ext4 -L data /dev/disk/by-id/google-data
  - mkdir -p /mnt/disks/data
mounts:
  # NOTE(review): "nobarrier" is deprecated in newer kernels — confirm the
  # COS image in use still accepts this mount option.
  - [ /dev/disk/by-id/google-data, /mnt/disks/data, auto, "rw,noatime,discard,nobarrier,nodev" ]
users:
  - name: bs
    uid: 2000
write_files:
# bitcoind config; RPC credentials injected by Terraform's template_file.
- path: /home/bs/bitcoin.conf
  permissions: 0644
  owner: root
  content: |
    rpcuser=${rpcuser}
    rpcpassword=${rpcpass}
    rpcport=${rpcport}
    txindex=1
    dbcache=4000
# c-lightning config, pointing at the local bitcoind over RPC.
- path: /home/bs/lightning.conf
  permissions: 0644
  owner: root
  content: |
    alias=ionosphere
    bitcoin-rpcuser=${rpcuser}
    bitcoin-rpcpassword=${rpcpass}
    bitcoin-rpcport=${rpcport}
    announce-addr=${announce_addr}
    bind-addr=0.0.0.0
# Nginx vhost: receives plain HTTP from the GCP load balancer and proxies to
# the ionosphere API (:9292) and the SSE bridge (:4500).
- path: /home/bs/default.conf
  permissions: 0644
  owner: root
  content: |
    log_format withtime '$remote_addr - $remote_user [$time_local] '
                        '"$request" $status $body_bytes_sent '
                        '"$http_referer" "$http_user_agent" '
                        'rt="$request_time" uct="$upstream_connect_time" uht="$upstream_header_time" urt="$upstream_response_time"';
    server {
      access_log /var/log/nginx/access.log withtime;
      error_log /var/log/nginx/error.log;
      server_name ${host};
      listen 80 default_server;
      server_tokens off;
      set_real_ip_from 130.211.0.0/22;
      set_real_ip_from 35.191.0.0/16;
      real_ip_recursive on;
      proxy_set_header Host $host;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      # Proxy to main ionosphere API
      location /api/ {
        add_header 'Access-Control-Allow-Origin' null always;
        add_header 'Access-Control-Allow-Headers' null always;
        if ($request_method = 'OPTIONS')
        {
          add_header 'Access-Control-Allow-Origin' '*';
          add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE';
          add_header 'Access-Control-Allow-Headers' 'X-Auth-Token,DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range';
          add_header 'X-XSS-Protection' '1; mode=block' always;
          return 200;
        }
        if ($http_origin ~ '(preview.)?blockstream.com')
        {
          add_header 'Access-Control-Allow-Origin' '$http_origin' always;
          add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE' always;
          add_header 'Access-Control-Allow-Headers' 'X-Auth-Token,DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range' always;
          add_header 'X-XSS-Protection' '1; mode=block' always;
        }
        proxy_pass http://0.0.0.0:9292/;
      }
      # Proxy SSE container
      location /api/subscribe/ {
        chunked_transfer_encoding off;
        proxy_buffering off;
        proxy_request_buffering off;
        proxy_cache off;
        proxy_http_version 1.1;
        proxy_pass http://0.0.0.0:4500/stream?channels=;
      }
    }
# Watchdog script: alerts OpsGenie when fewer than 10 containers are running.
# ($$ is the Terraform template escape for a literal shell $.)
- path: /home/bs/check_containers.sh
  permissions: 0744
  owner: root
  content: |
    #!/bin/bash
    # Save # and names of running containers
    NUM_CONT=$$(docker ps -q | wc -l)
    RUNNING_CONT="$$(docker ps --format '{{.Names}}' | tr '\n' ', ' | sed -e 's/,$//g')"
    # If less than 10 are running, send alert to opsgenie
    if [ $${NUM_CONT} != '10' ]
    then
      curl -s -X POST https://api.opsgenie.com/v2/alerts \
        -H "Content-Type: application/json" \
        -H "Authorization: GenieKey ${opsgenie_key}" \
        -d \
        '{
          "message": "Satellite API instance does not have all 10 containers running",
          "alias": "satapi-missing-containers",
          "description":"Currently running '$${NUM_CONT}'/10: '$${RUNNING_CONT}'",
          "tags": ["SatAPI","Critical"],
          "entity":"satellite.blockstream.com/api",
          "priority":"P3"
        }'
    else
      echo "'$${NUM_CONT}'/10 containers are running"
    fi
# Oneshot service invoked by the timer below.
- path: /etc/systemd/system/check-containers.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Check # of containers every 10 mins
    Wants=check-containers.timer
    After=charge.service
    [Service]
    ExecStart=/bin/bash /home/bs/check_containers.sh
# Fires the check every 10 minutes, first run 7 minutes after boot.
- path: /etc/systemd/system/check-containers.timer
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Run check-containers service every 10 minutes (7 min delay)
    [Timer]
    OnBootSec=420s
    OnUnitActiveSec=10m
    Persistent=true
    [Install]
    WantedBy=timers.target
# Nginx container on host networking; vhost config mounted read-only.
# Each unit opens its iptables port on start and closes it on stop.
- path: /etc/systemd/system/nginx.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Nginx proxy
    Wants=gcr-online.target
    After=ionosphere.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 80 -j ACCEPT
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=nginx \
        -v /home/bs/default.conf:/etc/nginx/conf.d/default.conf:ro \
        "nginx:latest"
    ExecStop=/usr/bin/docker stop nginx
    ExecStopPost=/usr/bin/docker rm nginx
    ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 80 -j ACCEPT
# Prometheus node-exporter with host /proc, /sys and rootfs mounted read-only.
- path: /etc/systemd/system/node-exporter.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Prometheus node-exporter
    Wants=gcr-online.target docker.service
    After=gcr-online.service docker.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/usr/bin/docker pull ${node_exporter_docker}
    ExecStartPre=/sbin/iptables -A INPUT -m tcp -p tcp --dport 9100 -j ACCEPT
    ExecStart=/usr/bin/docker run \
        --name=node-exporter \
        --network=host \
        --read-only \
        -v /proc:/host/proc:ro \
        -v /sys:/host/sys:ro \
        -v /:/rootfs:ro \
        -v metrics:/metrics:ro \
        -v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket:ro \
        "${node_exporter_docker}" --path.procfs /host/proc --path.sysfs /host/sys --collector.textfile.directory /metrics --collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc($|/))" --collector.systemd
    ExecStop=/usr/bin/docker stop node-exporter
    ExecStopPost=/usr/bin/docker rm node-exporter
    ExecStopPost=/sbin/iptables -D INPUT -m tcp -p tcp --dport 9100 -j ACCEPT
# bitcoind with datadir on the persistent disk; graceful stop via bitcoin-cli.
- path: /etc/systemd/system/bitcoin.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Bitcoin node
    Wants=gcr-online.target
    After=gcr-online.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/usr/bin/docker pull ${bitcoin_docker}
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=bitcoin \
        -v /home/bs/bitcoin.conf:/root/.bitcoin/bitcoin.conf:ro \
        -v /mnt/disks/data/testnet:/root/.bitcoin:rw \
        "${bitcoin_docker}" ${bitcoin_cmd}
    ExecStop=/usr/bin/docker exec bitcoin bitcoin-cli stop
    ExecStop=/usr/bin/sleep 10
    ExecStop=/usr/bin/docker stop bitcoin
    ExecStopPost=/usr/bin/docker rm bitcoin
# c-lightning; p2p port opened/closed with the unit lifecycle.
- path: /etc/systemd/system/lightning.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Lightning node
    Wants=gcr-online.target
    After=bitcoin.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/usr/bin/docker pull ${lightning_docker}
    ExecStartPre=/sbin/iptables -A INPUT -p tcp --dport ${lightning_port} -j ACCEPT
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=lightning \
        -v /home/bs/lightning.conf:/root/.lightning/lightning.conf:ro \
        -v /mnt/disks/data/lightning:/root/.lightning:rw \
        "${lightning_docker}" ${lightning_cmd}
    ExecStop=/usr/bin/docker exec lightning lightning-cli stop
    ExecStop=/usr/bin/sleep 10
    ExecStop=/usr/bin/docker stop lightning
    ExecStopPost=/usr/bin/docker rm lightning
    ExecStopPost=/sbin/iptables -D INPUT -p tcp --dport ${lightning_port} -j ACCEPT
# Redis backing the SSE bridge; only reachable from localhost.
- path: /etc/systemd/system/redis.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Redis db for server-side events
    Wants=gcr-online.target
    After=gcr-online.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker pull redis:latest
    ExecStartPre=/sbin/iptables -A INPUT -p tcp -s localhost --dport ${redis_port} -j ACCEPT
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=sse-redis-db \
        "redis:latest"
    ExecStop=/usr/bin/docker stop sse-redis-db
    ExecStopPost=/usr/bin/docker rm sse-redis-db
    ExecStopPost=/sbin/iptables -D INPUT -p tcp -s localhost --dport ${redis_port} -j ACCEPT
# Main ionosphere API; data dir ownership is fixed in a pre-start container
# run before the real service starts.
- path: /etc/systemd/system/ionosphere.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Ionosphere daemon
    Wants=gcr-online.target
    After=lightning.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/usr/bin/docker pull ${ionosphere_docker}
    ExecStartPre=/sbin/iptables -A INPUT -p tcp -s localhost --dport 9292 -j ACCEPT
    ExecStartPre=/usr/bin/docker run \
        --user root \
        -v /mnt/disks/data/ionosphere:/data \
        --entrypoint bash \
        --rm \
        "${ionosphere_docker}" \
        -c 'chown -R ionosphere:ionosphere /data'
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=ionosphere \
        -v /mnt/disks/data/ionosphere:/data \
        -e "RACK_ENV=production" \
        -e "CHARGE_ROOT=http://api-token:${rpcpass}@localhost:9112" \
        -e "CALLBACK_URI_ROOT=http://localhost:9292" \
        "${ionosphere_docker}"
    ExecStop=/usr/bin/docker stop ionosphere
    ExecStopPost=/usr/bin/docker rm ionosphere
    ExecStopPost=/sbin/iptables -D INPUT -p tcp -s localhost --dport 9292 -j ACCEPT
# Transmitter worker, sharing the API image with a different entrypoint.
- path: /etc/systemd/system/ionosphere-tx.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Ionosphere Transmitter daemon
    Wants=gcr-online.target
    After=ionosphere.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=ionosphere-tx \
        -v /mnt/disks/data/ionosphere:/data \
        -e "RACK_ENV=production" \
        "${ionosphere_docker}" ./docker_entrypoint_transmitter.sh
    ExecStop=/usr/bin/docker stop ionosphere-tx
    ExecStopPost=/usr/bin/docker rm ionosphere-tx
# SSE bridge (sse/server.js): relays the "transmissions" redis channel.
- path: /etc/systemd/system/ionosphere-sse.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Ionosphere Server-Side Events Server
    Wants=gcr-online.target
    After=redis.service
    [Service]
    Restart=always
    RestartSec=3
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/usr/bin/docker pull ${ionosphere_sse_docker}
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=ionosphere-sse \
        -e "SUB_CHANNELS=transmissions" \
        -e "REDIS_URI=redis://localhost:6379" \
        "${ionosphere_sse_docker}"
    ExecStop=/usr/bin/docker stop ionosphere-sse
    ExecStopPost=/usr/bin/docker rm ionosphere-sse
# Charge (lightning payments); reads lightning-rpc from the shared data disk.
- path: /etc/systemd/system/charge.service
  permissions: 0644
  owner: root
  content: |
    [Unit]
    Description=Charge instance
    Wants=gcr-online.target
    After=ionosphere.service
    [Service]
    Restart=always
    RestartSec=200
    Environment=HOME=/home/bs
    ExecStartPre=/usr/bin/docker-credential-gcr configure-docker
    ExecStartPre=/usr/bin/docker pull ${charge_docker}
    ExecStartPre=/sbin/iptables -A INPUT -p tcp -s localhost --dport 9112 -j ACCEPT
    ExecStart=/usr/bin/docker run \
        --network=host \
        --pid=host \
        --name=charge \
        -v /mnt/disks/data/lightning:/etc/lightning:ro \
        -v /mnt/disks/data/charge:/data:rw \
        -e "API_TOKEN=${rpcpass}" \
        "${charge_docker}"
    ExecStop=/usr/bin/docker stop charge
    ExecStopPost=/usr/bin/docker rm charge
    ExecStopPost=/sbin/iptables -D INPUT -p tcp -s localhost --dport 9112 -j ACCEPT
# Enable + start every unit; daemon-reload first so the files written above
# are picked up. The watchdog counts on all 10 containers coming up here.
runcmd:
  - systemctl daemon-reload
  - systemctl start bitcoin.service
  - systemctl enable bitcoin.service
  - systemctl start lightning.service
  - systemctl enable lightning.service
  - systemctl start redis.service
  - systemctl enable redis.service
  - systemctl start ionosphere.service
  - systemctl enable ionosphere.service
  - systemctl start ionosphere-tx.service
  - systemctl enable ionosphere-tx.service
  - systemctl start ionosphere-sse.service
  - systemctl enable ionosphere-sse.service
  - systemctl start charge.service
  - systemctl enable charge.service
  - systemctl start nginx.service
  - systemctl enable nginx.service
  - systemctl enable node-exporter.service
  - systemctl start node-exporter.service
  - systemctl start check-containers.timer
  - systemctl enable check-containers.timer

View file

@ -0,0 +1,41 @@
# Default VPC used by all instance resources in this module.
data "google_compute_network" "blc" {
  name = "default"
}

# Latest data-disk image for this environment (family satapi-data-<env>).
data "google_compute_image" "blc" {
  family  = "satapi-data-${var.env}"
  project = "${var.project}"
}

# Render the cloud-init template with per-environment values.
data "template_file" "blc" {
  template = "${file("${path.module}/cloud-init/blc.yaml")}"

  vars {
    rpcuser               = "${var.rpcuser}"
    rpcpass               = "${var.rpcpass}"
    rpcport               = "${var.net == "testnet" ? "18332" : "8332"}"
    bitcoin_cmd           = "bitcoind ${var.net == "testnet" ? "-testnet" : ""} -printtoconsole"
    lightning_cmd         = "lightningd ${var.net == "testnet" ? "--testnet" : "--mainnet"} --conf=/root/.lightning/lightning.conf"
    announce_addr         = "${google_compute_address.blc.address}"
    lightning_port        = 9735
    bitcoin_docker        = "${var.bitcoin_docker}"
    lightning_docker      = "${var.lightning_docker}"
    charge_docker         = "${var.charge_docker}"
    redis_port            = 6379
    ionosphere_docker     = "${var.ionosphere_docker}"
    ionosphere_sse_docker = "${var.ionosphere_sse_docker}"
    node_exporter_docker  = "${var.node_exporter_docker}"
    opsgenie_key          = "${var.opsgenie_key}"
    host                  = "${var.host}"
  }
}

# Wrap the rendered template as instance user-data (plain, not gzipped).
data "template_cloudinit_config" "blc" {
  gzip          = false
  base64_encode = false

  part {
    content_type = "text/cloud-config"
    content      = "${data.template_file.blc.rendered}"
  }
}

View file

@ -0,0 +1,10 @@
# Per-environment service account the instances run as.
resource "google_service_account" "blc" {
  account_id   = "${var.name}-${var.env}"
  display_name = "${var.name}-${var.env}"
}

# NOTE(review): roles/editor is very broad for an instance service account;
# consider narrowing to the specific roles the VM actually needs.
resource "google_project_iam_member" "blc" {
  project = "${var.project}"
  role    = "roles/editor"
  member  = "serviceAccount:${google_service_account.blc.email}"
}

View file

@ -0,0 +1,69 @@
# Forwarding rules
# Global LB entry points: HTTPS (443) and HTTP (80), both on the same
# static global address.
resource "google_compute_global_forwarding_rule" "rule-https" {
  name        = "${var.name}-https-forwarding-rule-${var.env}"
  target      = "${google_compute_target_https_proxy.https-proxy.self_link}"
  port_range  = "443"
  ip_protocol = "TCP"
  ip_address  = "${google_compute_global_address.lb.address}"
}

resource "google_compute_global_forwarding_rule" "rule-http" {
  name        = "${var.name}-http-forwarding-rule-${var.env}"
  target      = "${google_compute_target_http_proxy.http-proxy.self_link}"
  port_range  = "80"
  ip_protocol = "TCP"
  ip_address  = "${google_compute_global_address.lb.address}"
}

# Target proxies
resource "google_compute_target_http_proxy" "http-proxy" {
  name    = "${var.name}-http-proxy-${var.env}"
  url_map = "${google_compute_url_map.http.self_link}"
}

resource "google_compute_target_https_proxy" "https-proxy" {
  name             = "${var.name}-https-proxy-${var.env}"
  url_map          = "${google_compute_url_map.https.self_link}"
  ssl_certificates = ["${var.ssl_cert}"]
}

# URL maps
# NOTE(review): the HTTP and HTTPS maps are identical and both route straight
# to the backend — plain HTTP is served rather than redirected to HTTPS;
# confirm this is intentional.
resource "google_compute_url_map" "http" {
  name            = "${var.name}-http-urlmap-${var.env}"
  default_service = "${google_compute_backend_service.blc.self_link}"

  host_rule {
    hosts        = ["${var.host}"]
    path_matcher = "allpaths"
  }

  path_matcher {
    name            = "allpaths"
    default_service = "${google_compute_backend_service.blc.self_link}"

    path_rule {
      paths   = ["/*"]
      service = "${google_compute_backend_service.blc.self_link}"
    }
  }
}

resource "google_compute_url_map" "https" {
  name            = "${var.name}-https-urlmap-${var.env}"
  default_service = "${google_compute_backend_service.blc.self_link}"

  host_rule {
    hosts        = ["${var.host}"]
    path_matcher = "allpaths"
  }

  path_matcher {
    name            = "allpaths"
    default_service = "${google_compute_backend_service.blc.self_link}"

    path_rule {
      paths   = ["/*"]
      service = "${google_compute_backend_service.blc.self_link}"
    }
  }
}

View file

@ -0,0 +1,83 @@
# Instance group
# Managed instance group of size 1; rolling PROACTIVE/REPLACE updates
# recreate the instance whenever the template changes.
resource "google_compute_instance_group_manager" "blc" {
  name               = "${var.name}-ig-${var.env}"
  # NOTE(review): count.index is referenced but this resource declares no
  # "count"; in TF 0.11 this resolves to 0 — confirm that is intended.
  base_instance_name = "${var.name}-ig-${var.env}-${count.index}"
  instance_template  = "${google_compute_instance_template.blc.self_link}"
  zone               = "${var.zone}"
  target_size        = 1

  update_strategy = "ROLLING_UPDATE"

  rolling_update_policy {
    type                  = "PROACTIVE"
    minimal_action        = "REPLACE"
    max_surge_fixed       = 0
    max_unavailable_fixed = 1
    min_ready_sec         = 60
  }
}

# Persistent data disk, seeded from the per-env image; protected from destroy
# and image drift so chain/app data survives re-applies.
resource "google_compute_disk" "blc" {
  name  = "${var.name}-data-${var.env}"
  type  = "pd-standard"
  image = "${data.google_compute_image.blc.self_link}"
  zone  = "${var.zone}"

  lifecycle {
    prevent_destroy = true
    ignore_changes  = ["image"]
  }
}

# Instance template
resource "google_compute_instance_template" "blc" {
  name_prefix  = "${var.name}-${var.env}-template-"
  description  = "This template is used to create ${var.name} ${var.env} instances."
  machine_type = "${var.instance_type}"
  region       = "${var.region}"

  labels {
    type = "lightning-app"
    name = "${var.name}"
  }

  scheduling {
    automatic_restart   = true
    on_host_maintenance = "MIGRATE"
  }

  # Ephemeral boot disk (COS image from var.boot_image).
  disk {
    source_image = "${var.boot_image}"
    disk_type    = "pd-ssd"
    auto_delete  = true
    boot         = true
  }

  # Persistent data disk; surfaced to cloud-init as "google-data".
  disk {
    source      = "${google_compute_disk.blc.name}"
    auto_delete = false
    device_name = "data"
  }

  network_interface {
    network = "${data.google_compute_network.blc.self_link}"

    access_config {
      nat_ip = "${google_compute_address.blc.address}"
    }
  }

  metadata {
    google-logging-enabled = "true"
    user-data              = "${data.template_cloudinit_config.blc.rendered}"
  }

  service_account {
    email  = "${google_service_account.blc.email}"
    scopes = ["compute-ro", "storage-ro"]
  }

  lifecycle {
    create_before_destroy = true
  }
}

View file

@ -0,0 +1,82 @@
# IP addresses
# Static external IP for the instance NIC (also the lightning announce-addr).
resource "google_compute_address" "blc" {
  name    = "${var.name}-external-ip-${var.env}-${count.index}"
  project = "${var.project}"
  region  = "${var.region}"
  count   = 1
}

# Global anycast address for the HTTP(S) load balancer.
resource "google_compute_global_address" "lb" {
  name    = "${var.name}-client-lb-${var.env}"
  project = "${var.project}"
}

# FW rules
# Open bitcoin testnet p2p, lightning p2p, and HTTP to the instances.
resource "google_compute_firewall" "blc" {
  name    = "${var.name}-fw-rule-${var.env}"
  network = "${data.google_compute_network.blc.self_link}"

  allow {
    protocol = "tcp"
    ports    = ["18333", "9735", "80"]
  }

  target_service_accounts = [
    "${google_service_account.blc.email}",
  ]
}

# Allow the prometheus scraper (by service account) to reach node-exporter.
resource "google_compute_firewall" "blc-prom" {
  name    = "${var.name}-prometheus-access-${var.env}"
  network = "${data.google_compute_network.blc.self_link}"

  allow {
    protocol = "tcp"
    ports    = ["9100"]
  }

  source_service_accounts = [
    "${var.prom_service_acct}",
  ]

  target_service_accounts = [
    "${google_service_account.blc.email}",
  ]
}

# Backend service
resource "google_compute_backend_service" "blc" {
  name        = "${var.name}-backend-service-${var.env}"
  description = "Satellite API"
  protocol    = "HTTP"
  port_name   = "http"
  timeout_sec = "${var.timeout}"

  backend {
    group = "${google_compute_instance_group_manager.blc.instance_group}"
  }

  health_checks = ["${google_compute_health_check.blc.self_link}"]
}

# Health checks
resource "google_compute_health_check" "blc" {
  name               = "${var.name}-health-check-${var.env}"
  check_interval_sec = 5
  timeout_sec        = 3

  tcp_health_check {
    port = "80"
  }
}

# NOTE(review): this legacy HTTP health check reuses the exact name of the
# TCP health check above and is not referenced by the backend service —
# confirm it is needed and that the duplicate name cannot collide.
resource "google_compute_http_health_check" "blc-http" {
  name               = "${var.name}-health-check-${var.env}"
  timeout_sec        = 5
  check_interval_sec = 10
  port               = "80"
  request_path       = "/"
}

View file

@ -0,0 +1,91 @@
# Module inputs. All are plain strings (TF 0.11 has no richer types here).
variable "project" {
  type    = "string"
  default = "blockstream-store"
}

variable "boot_image" {
  type    = "string"
  default = "cos-cloud/cos-stable"
}

# Bitcoind/lightningd RPC credentials (also reused as the charge API token).
variable "rpcuser" {
  type    = "string"
  default = ""
}

variable "rpcpass" {
  type    = "string"
  default = ""
}

# Deployment identity / placement.
variable "env" {
  type = "string"
}

variable "name" {
  type = "string"
}

variable "network" {
  type = "string"
}

variable "region" {
  type = "string"
}

variable "zone" {
  type = "string"
}

variable "instance_type" {
  type = "string"
}

# "testnet" or anything else for mainnet (see data.tf ternaries).
variable "net" {
  type = "string"
}

variable "ssl_cert" {
  type = "string"
}

variable "host" {
  type = "string"
}

variable "timeout" {
  type = "string"
}

variable "opsgenie_key" {
  type = "string"
}

variable "prom_service_acct" {
  type = "string"
}

# Container images, injected by the root module / CI.
variable "bitcoin_docker" {
  type = "string"
}

variable "charge_docker" {
  type = "string"
}

variable "lightning_docker" {
  type = "string"
}

variable "ionosphere_docker" {
  type = "string"
}

variable "ionosphere_sse_docker" {
  type = "string"
}

variable "node_exporter_docker" {
  type = "string"
}

100
terraform/variables.tf Normal file
View file

@ -0,0 +1,100 @@
# Map the terraform workspace (staging/prod) to an env label used in names.
locals {
  context_variables = {
    "staging" = {
      env = "staging"
    }

    "prod" = {
      env = "prod"
    }
  }

  env = "${lookup(local.context_variables[terraform.workspace], "env")}"
}

variable "project" {
  type    = "string"
  default = "blockstream-store"
}

# The following are supplied per-environment by CI (-var flags).
variable "ssl_cert" {
  type    = "string"
  default = ""
}

variable "rpcuser" {
  type    = "string"
  default = ""
}

variable "rpcpass" {
  type    = "string"
  default = ""
}

variable "host" {
  type    = "string"
  default = ""
}

variable "region" {
  type    = "string"
  default = ""
}

variable "zone" {
  type    = "string"
  default = ""
}

variable "instance_type" {
  type    = "string"
  default = ""
}

# Backend service timeout in seconds.
variable "timeout" {
  type    = "string"
  default = 15
}

variable "prom_service_acct" {
  type    = "string"
  default = ""
}

variable "opsgenie_key" {
  type    = "string"
  default = ""
}

# Overwritten by CI
variable "ionosphere_docker" {
  type    = "string"
  default = ""
}

variable "ionosphere_sse_docker" {
  type    = "string"
  default = ""
}

# Less frequently updated images
variable "node_exporter_docker" {
  type    = "string"
  default = "prom/node-exporter@sha256:55302581333c43d540db0e144cf9e7735423117a733cdec27716d87254221086"
}

variable "bitcoin_docker" {
  type    = "string"
  default = "us.gcr.io/blockstream-store/bitcoind@sha256:d385d5455000b85b0e2103cdbc69e642c46872b698ff807892ba4c4a40e72ca7"
}

variable "lightning_docker" {
  type    = "string"
  default = "us.gcr.io/blockstream-store/lightningd@sha256:ca00792c25f4af420db94501d37bf8570d642ae21b7fd30792364aa9a617ec87"
}

variable "charge_docker" {
  type    = "string"
  default = "us.gcr.io/blockstream-store/charged@sha256:669893e02a14863f469498a40626e46de3ec67ff2ee4d7443cd56bc6ba3a8f3a"
}

218
tests/tests.rb Normal file
View file

@ -0,0 +1,218 @@
ENV['RACK_ENV'] = 'test'
ENV['CALLBACK_URI_ROOT'] = 'http://localhost:9292'

require 'minitest/autorun'
require 'rack/test'
require 'json'
require_relative '../main'

TEST_FILE = "test.random"
TINY_TEST_FILE = "tiny_test.txt"

# Generate fixture files once per checkout.
# - File.exist? replaces the deprecated File.exists? (removed in Ruby 3.2).
# - /dev/urandom replaces /dev/random, which can block for long periods when
#   the kernel entropy pool is low; cryptographic quality is irrelevant here.
# - && replaces `and` (same meaning here, without the precedence trap).
unless File.exist?(TEST_FILE) && File.exist?(TINY_TEST_FILE)
  `dd if=/dev/urandom of=#{TEST_FILE} bs=1k count=#{MAX_MESSAGE_SIZE / KILO_BYTE}`
  `echo "abcdefghijklmnopqrstuvwxyz" > #{TINY_TEST_FILE}`
end

# A bid comfortably above the per-byte minimum for the standard test file.
DEFAULT_BID = File.stat(TEST_FILE).size * (MIN_PER_BYTE_BID + 1)
# Integration tests for the satellite API: order creation, invoice payment
# callbacks, bid bumping, queue listing, deletion and transmission.
# (The class-closing `end` follows after this block in the file.)
class MainAppTest < Minitest::Test
  include Rack::Test::Methods

  def app
    Sinatra::Application
  end

  # Upload TEST_FILE with the given bid; returns the created Order record.
  def place_order(bid = DEFAULT_BID)
    post '/order', params={"bid" => bid, "file" => Rack::Test::UploadedFile.new(TEST_FILE, "image/png")}
    r = JSON.parse(last_response.body)
    Order.find_by_uuid(r['uuid'])
  end

  # Raise an order's bid; returns the new lightning invoice from the response.
  def bump_order(order, amount)
    header 'X-Auth-Token', order.user_auth_token
    post "/order/#{order.uuid}/bump", params={"bid_increase" => amount}
    r = JSON.parse(last_response.body)
    r['lightning_invoice']
  end

  # Every test starts with one freshly placed (unpaid) order.
  def setup
    @order = place_order
  end

  # Simulate the charged payment webhook for an invoice.
  def pay_invoice(invoice)
    post "/callback/#{invoice.lid}/#{invoice.charged_auth_token}"
    assert last_response.ok?
  end

  # Debug helper: dump the last response body to disk for inspection.
  def write_response
    File.open('response.html', 'w') { |file| file.write(last_response.body) }
  end

  # True when the given uuid appears in the queued-orders listing.
  def order_is_queued(uuid)
    get "/orders/queued?limit=#{MAX_QUEUED_ORDERS_REQUEST}"
    assert last_response.ok?
    r = JSON.parse(last_response.body)
    uuids = r.map {|o| o['uuid']}
    uuids.include?(uuid)
  end

  # Paying an order's invoice adds exactly one order to the queue.
  def test_get_orders_queued
    get "/orders/queued?limit=#{MAX_QUEUED_ORDERS_REQUEST}"
    assert last_response.ok?
    r = JSON.parse(last_response.body)
    queued_before = r.count
    @order = place_order
    pay_invoice(@order.invoices.last)
    assert order_is_queued(@order.uuid)
    get "/orders/queued?limit=#{MAX_QUEUED_ORDERS_REQUEST}"
    assert last_response.ok?
    r = JSON.parse(last_response.body)
    queued_after = r.count
    assert_equal queued_after, queued_before + 1
  end

  def test_get_orders_sent
    get '/orders/sent'
    assert last_response.ok?
  end

  # Fetching an order requires the auth token returned at creation.
  def test_get_order
    place_order
    assert last_response.ok?
    r = JSON.parse(last_response.body)
    header 'X-Auth-Token', r['auth_token']
    get %Q(/order/#{r['uuid']})
    assert last_response.ok?
  end

  def test_order_creation
    place_order
    assert last_response.ok?
    r = JSON.parse(last_response.body)
    refute_nil r['auth_token']
    refute_nil r['uuid']
    refute_nil r['lightning_invoice']
  end

  def test_bid_too_low
    post '/order', params={"bid" => 1, "file" => Rack::Test::UploadedFile.new(TEST_FILE, "image/png")}
    refute last_response.ok?
    r = JSON.parse(last_response.body)
    assert_equal r['message'], 'Bid too low'
    refute_nil r['errors']
  end

  def test_no_file_uploaded
    post '/order', params={"bid" => DEFAULT_BID}
    refute last_response.ok?
  end

  def test_uploaded_file_too_large
    skip "test later"
  end

  def test_uploaded_file_too_small
    post '/order', params={"bid" => DEFAULT_BID, "file" => Rack::Test::UploadedFile.new(TINY_TEST_FILE, "text/plain")}
    refute last_response.ok?
    r = JSON.parse(last_response.body)
    assert_match "too small", r["errors"][0]
  end

  # Full bump lifecycle: unpaid bid -> paid -> bumped (unpaid) -> bump paid.
  def test_bump
    # place an order
    @order = place_order
    refute order_is_queued(@order.uuid)
    @order.reload
    assert_equal 0, @order.bid
    assert_equal DEFAULT_BID, @order.unpaid_bid
    first_invoice = @order.invoices.first
    pay_invoice(first_invoice)
    assert order_is_queued(@order.uuid)
    @order.reload
    assert_equal DEFAULT_BID, @order.bid
    assert @order.bid_per_byte > 0
    assert_equal 0, @order.unpaid_bid
    bid_pre = @order.bid
    bid_per_byte_pre = @order.bid_per_byte
    # NOTE(review): unpaid_bid_pre is captured but never asserted against.
    unpaid_bid_pre = @order.unpaid_bid
    # bump it
    bump_order(@order, DEFAULT_BID / 2)
    assert last_response.ok?
    @order.reload
    # Paid bid is unchanged until the bump invoice is actually paid.
    assert_equal bid_pre, @order.bid
    assert_equal bid_per_byte_pre, @order.bid_per_byte
    assert_equal DEFAULT_BID / 2, @order.unpaid_bid
    bid_pre = @order.bid
    bid_per_byte_pre = @order.bid_per_byte
    r = JSON.parse(last_response.body)
    refute_nil r['auth_token']
    refute_nil r['uuid']
    refute_nil r['lightning_invoice']
    lid = r['lightning_invoice']['id']
    assert order_is_queued(@order.uuid)
    # pay the bump
    second_invoice = Invoice.find_by_lid(lid)
    pay_invoice(second_invoice)
    @order.reload
    assert_equal DEFAULT_BID + DEFAULT_BID / 2, @order.bid
    assert @order.bid_per_byte > bid_per_byte_pre
    assert_equal 0, @order.unpaid_bid
  end

  # An order becomes paid only when its original invoice is paid, not when
  # a smaller bump invoice is.
  # NOTE(review): place_order's return value is discarded here; @order still
  # refers to the order created in setup — confirm that is intended.
  def test_paying_small_invoices_doesnt_result_in_paid_order
    place_order
    refute @order.paid?
    first_invoice = @order.invoices.first
    bump_order(@order, 123)
    refute @order.paid?
    second_invoice = @order.invoices.where(amount: 123).first
    pay_invoice(second_invoice)
    @order.reload
    refute @order.paid?
    pay_invoice(first_invoice)
    @order.reload
    assert @order.paid?
  end

  def test_that_bumping_down_fails
    bump_order(@order, -1)
    refute last_response.ok?
  end

  # Deleting an order removes its message file and marks it cancelled;
  # deleting it a second time fails.
  def test_order_deletion
    @order = place_order
    assert File.file?(@order.message_path)
    header 'X-Auth-Token', @order.user_auth_token
    cancelled_before = Order.where(status: :cancelled).count
    delete "/order/#{@order.uuid}"
    refute File.file?(@order.message_path)
    cancelled_after = Order.where(status: :cancelled).count
    assert last_response.ok?
    assert_equal cancelled_after, cancelled_before + 1
    delete "/order/#{@order.uuid}"
    refute last_response.ok?
  end

  # sent_message is only served once transmission has started.
  def test_get_sent_message
    @order = place_order
    get "/order/#{@order.uuid}/sent_message"
    refute last_response.ok?
    pay_invoice(@order.invoices.last)
    @order.reload
    @order.transmit!
    get "/order/#{@order.uuid}/sent_message"
    assert last_response.ok?
    @order.end_transmission!
    get "/order/#{@order.uuid}/sent_message"
    assert last_response.ok?
  end
end