diff --git a/.travis.yml b/.travis.yml index fdc9692b..422cd932 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,7 @@ sudo: false python: - '2.7' + - '3.4' - '3.5' - '3.6' @@ -13,4 +14,3 @@ install: script: - flake8 . - diff --git a/README.md b/README.md index 7585acc4..bcbd2d9a 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ [![Coverage Status](https://coveralls.io/repos/github/dhs-ncats/pshtt/badge.svg)](https://coveralls.io/github/dhs-ncats/pshtt) +[![Build Status](https://travis-ci.org/dhs-ncats/pshtt.svg?branch=master)](https://travis-ci.org/dhs-ncats/pshtt) + `pshtt` (_"pushed"_) is a tool to scan domains for HTTPS best practices. It saves its results to a CSV (or JSON). `pshtt` was developed to _push_ organizations— especially large ones like the US Federal Government :us: — to adopt HTTPS across the enterprise. Federal .gov domains must comply with [M-15-13](https://https.cio.gov), a 2015 memorandum from the White House Office of Management and Budget that requires federal agencies to enforce HTTPS on their public web sites and services by the end of 2016. Much has been done, and [still more yet to do](https://18f.gsa.gov/2017/01/04/tracking-the-us-governments-progress-on-moving-https/). diff --git a/gce-scripts/README.md b/gce-scripts/README.md new file mode 100644 index 00000000..78f63c94 --- /dev/null +++ b/gce-scripts/README.md @@ -0,0 +1,203 @@ +# Pshtt as an HTTPS status checker + +Welcome! This is the documentation on how to run pshtt to scan sites for their +HTTPS status. These instructions are mostly about how to run it at scale, but at +the end, there are instructions on how to run on a local instance. + +This document goes over how to both run pshtt on multiple instances on google +cloud engine and also how to run it as a singular instance on your local +machine. It takes about 30 minutes to set up from start to finish. + +Running pshtt on 150 instances takes about 12 - 15 hours for a million sites. 
+Assume at worst that each site will take 10 seconds (which is the default +timeout) and scale up to whatever timeframe you want to run in based off of +that. + +Example: 1000 sites in 2 hours would take 2 instances. + +# How to run Pshtt on Google Cloud Engine + +## Before you run + +1. Set up a [google compute engine + account](https://cloud.google.com/compute/docs/access/user-accounts/). + +2. Make sure you have the correct quota allowances. + + * Go to the [quotas page](https://cloud.google.com/compute/quotas) + and select the project that you want to run this under. + * Request quotas --- click on the following items in the list and click + "edit quotas" at the top of the page: + * CPUS (all regions) --> 150 + * In use IP addresses --> 150 + * One Region's in use IPs (ex us-west1) --> 150 + * Same Region's CPUs (ex. us-west1) --> 150 + +3. Create Instance Group Template. + + You will want to run multiple instances (presumably), and creating an + Instance Group template allows you to make up to 150 machines under the same + template. + + * Go to Compute Engine, then click on the Instance templates + tab and click "Create Instance Template". + * Name --> "pshtt-template" + * Machine type -- 1 CPU (n1-standard-1 (1 vCPU, 3.75 GB memory)). + * Check allow HTTP and HTTPS traffic. + * Boot Disk --- Ubuntu 14.04 LTS. + * automatic restart (under management tab) -- off. + * Hit create. + +# How to run Pshtt on Google Cloud Engine + +1. Create an ssh key ONLY for the google cloud instances and upload to your + profile. + + This is a security measure. ***DO NOT USE YOUR REGULAR SSH KEY.*** + + * `cd ~/.ssh && ssh-keygen -t rsa -f gce_pshtt_key` + * Go to the [metadata + tab](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys) and hit edit. + * `cd ~/.ssh && cat gce_pshtt_key.pub` + * Copy the output of the above command and paste it into the console. + +2. Create the instance group. 
+ + It is important to name your instance group something identifiable, + especially if you are sharing a project with others. Remember this instance + group name for a later step. ***We recommend that you try 1 instance at + first to make sure it works***. + + * Go to the instance group tab. + * Click Multi-Zone, and select the region that you requested your + instances for. + * Choose "pshtt-template" under instance template. + + * Hit create. + + * Welcome to your new instance group! + +## Updating Data Files and Setting up to Run + +The following is a set of commands to run to make your running directory. + +1. Download the gcloud command line tool. + + * follow the [download + link](https://cloud.google.com/sdk/docs/#install_the_latest_cloud_tools_version_cloudsdk_current_version) + and install the correct sdk for your OS. + * If this is your first time installing the gcloud command line tool, + follow the instructions on the page. Do not set any default zones. + * If you already have this installed, follow these + instructions: + * `gcloud init` + * Click `2` create a new configuration. + * Enter `pshtt-configuration` + * Choose the appropriate account + * Click the appropriate number corresponding to your google project + * If it complains that the API is not enabled, hit enabled and retry. + * Do not set default zone or region + * at this point, your default project should be this google project. + You can switch to any of your previous projects by running `gcloud + config set project PROJECTNAME` + +2. Setting up your directory. + + * `mkdir ~/pshtt_run` + * Creates the dir that you will run your program out of. + * `gcloud compute instances list | sed -n '1!p' | grep + "" | awk '{print $5}' > ~/pshtt_run/hosts.txt` + * `` is what you named the instance group you created + above. + +3. Copy all .sh scripts from this directory: + + * Keep the name of the scripts the same. 
+ * `chmod +x ~/pshtt_run/*.sh` + * which will make all the scripts executable. + * `touch domains.csv` + * Your domain list, one domain per line, with the input list ending in + `.csv`. + * domains must have the scheme stripped from them and no trailing '/', + such as: + * domain.tld + * subdomain.domain.tld + * www.subdomain.domain.tld + * `mkdir ~/pshtt_run/data_results/` + * `mv ~/pshtt_run/combine_shards.py ~/pshtt_run/data_results` + * Places combine_shards.py into data_results/. + * `mkdir ~/pshtt_run/input_files/` + +4. roots.pem + + We want to use our own CA file when running pshtt. We use the mozilla root + store for this purpose. Follow instructions on this + [PR](https://github.com/agl/extract-nss-root-certs). + +5. Updating ssh key + + * If your new ssh key is called "gce_pshtt_key", skip this step. + * If you did not name your ***new*** ssh key gce_pshtt_key, then you will + need to go through and rename the gce_pshtt_key in all the .sh files to + whatever you named your key. + * in vim, this is ":%s/gce_pshtt_key/yourkeynamehere/g ". + +## How to run + +1. `screen -S pshtt_running` +2. `cd ~/pshtt_run/` +3. `./run_all_scripts > + log.out` + * number of shards == number of hosts + * each machine will contain a shard of the data to run. + * This is the script that sets up all machines and puts all datafiles on + the machines for running. + * `./run_all_scripts top-1m.nocommas.8.31.2017 100 alexa` + * will produce 100 shards all starting with "alexa" in the input_files + dir. + * ex. alexa000.csv + * NOTE: you can ONLY create 999 shards. If you need more than 999 shards, + you will need to change the split_up_dataset.sh file. +4. exit screen `ctrl+a+d` + +## During the run + +* `./check_instances.sh` + * will print the ip of each host, as well as FINISHED or NOT FINISHED. + +## After the run + +* `./grab_and_combine_data.sh` + + * will grab all log and result data files, combine data files into one + large result file, and put these into data_results/. 
+ +* Delete your instance group. If you want to run data analysis, jump down to + the data analysis portion. + +# Running Pshtt on your local machine + +1. Copy packages_to_install.sh and install the packages_to_install.sh. + * `sudo ./packages_to_install.sh` +2. Clone pshtt. + * `git clone https://github.com/dhs-ncats/pshtt.git` +3. Put roots.pem, running_script.sh, and your input file in the same dir as + pshtt. + * Follow directions under Updating data files above on how to get a + roots.pem. + * domains must have the scheme stripped from them and no trailing '/', such + as: + * domain.tld + * subdomain.domain.tld + * www.subdomain.domain.tld + * `chmod +x running_script.sh` to make it executable. +4. Run `./running_script.sh ` +5. Results and profit. + * Results can be found in `.json`. + * If you want to be able to use this json file with any of the colab + notebooks (like the one listed below), you will also need to run + combine_shards.py in the same dir as the json file. + * Copy combine_shards.py into the same dir as the json file. + * `echo .json > to_combine.txt` + * `python combine_shards.py to_combine.txt > final_results.json` + * Log can be found in `time_.txt`. diff --git a/gce-scripts/check_instances.sh b/gce-scripts/check_instances.sh new file mode 100755 index 00000000..ff13db77 --- /dev/null +++ b/gce-scripts/check_instances.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Checks all the instances in hosts and checks the end of the log file +# to see if it's finished. The script prints out FINISHED or NOT FINISHED +# for each host respectively. + +hosts_file='hosts.txt' +list_of_files=$(ls -1q input_files) +i=1 + +# Grab the correct input file for the corresponding machine. +for z in $list_of_files; +do + machine=$(sed "${i}q;d" $hosts_file) + # Check if the file has 'Wrote results', which indicates that it's finished. + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" tail pshtt/time_"${z}".txt | grep -q 'Wrote results' + finished=$(echo $?) 
+ if [[ "${finished}" -eq 0 ]]; then + echo 'server '"${machine}"' FINISHED' + else + echo 'server '"${machine}"' NOT FINISHED' + fi + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" cat pshtt/time_"${z}".txt | grep -q 'Traceback' + error=$(echo $?) + if [[ "${error}" -eq 0 ]]; then + echo 'server '"${machine}"' ERROR ON THIS MACHINE. CHECK INSTANCE.' + else + echo 'server '"${machine}"' NO ERROR.' + fi + ((i=i+1)) +done diff --git a/gce-scripts/combine_shards.py b/gce-scripts/combine_shards.py new file mode 100644 index 00000000..b9cc9189 --- /dev/null +++ b/gce-scripts/combine_shards.py @@ -0,0 +1,29 @@ +"""Combines pshtt shards into one final data file.""" +import json +import sys + + +def main(): + if (len(sys.argv)) < 2: + print('you need a filename!') + exit(1) + # Master file is the file with the list of filenames to intake. + # Fileception. + master_file = sys.argv[1] + filenames = [] + + # Read in the filenames that are the different shards. + with open(master_file, 'r') as input_file: + for line in input_file: + filenames.append(line.rstrip()) + # For each shard, read it in and append to the final list to + # print out. + for item in filenames: + with open(item, 'r') as input_file: + json_data = json.load(input_file) + for item in json_data: + print(json.dumps(item)) + + +if __name__ == '__main__': + main() diff --git a/gce-scripts/grab_and_combine_data.sh b/gce-scripts/grab_and_combine_data.sh new file mode 100755 index 00000000..e42679ae --- /dev/null +++ b/gce-scripts/grab_and_combine_data.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# If pshtt is done on all machines, it grabs both +# the log file and the output file from the machines and +# places them in the data_results/ directory. + +# This script also sets up the files to be combined by +# the combine_shards script. Because pshtt outputs the results +# as a list of dicts, we need to combine all of those lists. +# We output the dicts as a file of dicts, one per line. 
+hosts_file='hosts.txt' +list_of_files=$(ls -1q input_files) +i=1 + +for z in $list_of_files; +do + machine=$(sed "${i}q;d" $hosts_file) + echo 'Kicking off '"${machine}"' number '$i + # Grab the actual result file. + echo 'grabbing result file' + scp -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}":~/pshtt/"${z}".json data_results/ + echo $? + # Grab the log file from that machine. + echo 'grabbing log file' + scp -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}":~/pshtt/time_"${z}".txt data_results/ + echo $? + echo 'creating to_combine.txt' + touch data_results/to_combine.txt + echo $? + echo 'putting file name into combine script' + echo "${z}"'.json' >> data_results/to_combine.txt + echo $? + ((i=i+1)) +done + +cd data_results +python combine_shards.py to_combine.txt > final_results.json diff --git a/gce-scripts/packages_to_install.sh b/gce-scripts/packages_to_install.sh new file mode 100755 index 00000000..d145d6ee --- /dev/null +++ b/gce-scripts/packages_to_install.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Installs all the necessary packages for pshtt to run. +# Logs which package it is installing as well as it's success (0) or failure +# (1). +echo 'UPDATE' +apt-get -y update -qq +echo $? ' ERROR CODE' +echo 'GIT' +apt-get -y install git -qq +echo $? ' ERROR CODE' +echo 'PYTHON3-PIP' +apt-get -y install python3-pip -qq +echo $? ' ERROR CODE' +echo 'LIBFFI6' +apt-get -y install libffi6 libffi-dev -qq +echo $? ' ERROR CODE' +echo 'LIBSSL' +apt-get -y install build-essential libssl-dev libffi-dev python3-dev -qq +echo $? ' ERROR CODE' +echo 'SETUPTOOLS' +pip3 install --upgrade setuptools -qq +echo $? ' ERROR CODE' +echo 'CFFI' +pip3 install cffi -qq +echo $? ' ERROR CODE' +echo 'SSLYZE' +pip3 install sslyze -qq +echo $? ' ERROR CODE' +echo 'PUBLIC SUFFIX' +pip3 install publicsuffix -qq +echo $? ' ERROR CODE' +echo 'REQUESTS' +pip3 install --upgrade requests -qq +echo $? ' ERROR CODE' +echo 'DOCOPT' +pip3 install docopt -qq +echo $? 
' ERROR CODE' +echo 'PYOPENSSL' +pip3 install pyopenssl -qq +echo $? ' ERROR CODE' +echo 'PYTABLEWRITER' +pip3 install pytablewriter -qq +echo $? ' ERROR CODE' +echo 'TYPING' +pip3 install typing -qq +echo $? ' ERROR CODE' +echo 'FINISHED INSTALLING PACKAGES' diff --git a/gce-scripts/run_all_scripts.sh b/gce-scripts/run_all_scripts.sh new file mode 100755 index 00000000..0d5f0bed --- /dev/null +++ b/gce-scripts/run_all_scripts.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# This is the first script to run. This script calls +# all the other pertinent scripts for setting up +# and kicking off runs. + +# ./run_all_scripts.sh <#_of_shards> +# Ex: ./run_all_scripts.sh top-1m.nocommas.8.31.2017 100 alexa + +# Only the first input argument is required. The other two will default +# to 10 and shard respectively. + +# will split up the file top-1m.nocommas.8.31.2017 into 100 files +# into a dir called input_files, and all the files will start with +# alexa_. So the shard files will be alexa000.csv, alexa001.csv +# etc. + +# If any of the scripts fails, this hard fails and tells the user what script +# went wrong. + + +input_file=$1 +number_of_shards=${2-10} +output_file_name=${3-shard_} + +echo 'Splitting dataset' +./split_up_dataset.sh "${1}" "${2}" "${3}" +error=$(echo $?) + +if [[ "${error}" -eq 1 ]]; then + echo 'ERROR WITH SPLIT DATASET SCRIPT' + exit 1 +fi + +echo 'Scp and setup' +./scp_and_setup.sh "${3}" +error=$(echo $?) +if [[ "${error}" -eq 1 ]]; then + echo 'ERROR WITH SCP AND SETUP SCRIPT' + exit 1 +fi + +echo 'Running instances' +./run_instances.sh +error=$(echo $?) +if [[ "${error}" -eq 1 ]]; then + echo 'ERROR WITH RUNNING INSTANCES SCRIPT' + exit 1 +fi diff --git a/gce-scripts/run_instances.sh b/gce-scripts/run_instances.sh new file mode 100755 index 00000000..b0be57b0 --- /dev/null +++ b/gce-scripts/run_instances.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Runs pshtt on all instances, using the correct input file. 
+ +hosts_file='hosts.txt' +list_of_files=$(ls -1q input_files/) +i=1 + +# For each file, find the corresponding machine it's been uploaded to, +# check if the screen exists (create if not) and kick off pshtt on that screen. + +for z in $list_of_files; +do + machine=$(sed "${i}q;d" $hosts_file) + # Check if screen exists. + echo 'Kicking off '"${machine}"' number '$i + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" screen -list | grep -q "pshtt_screen" + answer=$(echo $?) + # If screen does not exist, then create it. + if [[ "${answer}" -eq 1 ]] ; then + echo 'Creating screen' + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" screen -S pshtt_screen -d -m + echo $? + fi + + # Run script in screen. + echo 'Kicking off script' + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" "screen -S pshtt_screen -X -p 0 stuff $'cd pshtt && ./running_script.sh $z\n'" + echo $? + ((i=i+1)) +done diff --git a/gce-scripts/running_script.sh b/gce-scripts/running_script.sh new file mode 100755 index 00000000..895b2f8e --- /dev/null +++ b/gce-scripts/running_script.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Runs pshtt with a 10 second timeout, with roots.pem as the CA file, +# and debug on. Logging goes to time_.txt + +# ./running_script.sh test_file.csv +# output files: test_file.csv.json, time_test_file.csv.txt + +input_file=$1 +(time python3 -m pshtt.cli "${input_file}" -t 10 -u -j -o "${input_file}".json -f "roots.pem" --debug) 2> time_"${input_file}".txt diff --git a/gce-scripts/scp_and_setup.sh b/gce-scripts/scp_and_setup.sh new file mode 100755 index 00000000..a9a99a7b --- /dev/null +++ b/gce-scripts/scp_and_setup.sh @@ -0,0 +1,139 @@ +#!/bin/bash + +# This file is broken up into three distinct parts. +# The first part is uploading the packages to install +# script to all machines, and kicking it off. +# We do this first because 1) we need those packages to do anything else +# and 2) it takes about 10 - 15 seconds per machine, so we parallelize it. 
+ +# The second part is simply a check to see if the packages are finished +# installing. We test the last machine in the list first because if that is +# finished then all the other machines SHOULD also be finished. After we verify +# that the last machine is finished, loop back through all of the machines and +# make sure that they've all finished. If they haven't print out an error +# warning for that machine and stop the whole process. +# Takes the host file and the list of shards and +# scps shards to hosts. +# Also scps various scripts and installs pshtt +# and all the necessary packages. +# List of IPs, separated by line +hosts_file='hosts.txt' +# number of files that we need to cycle through +num_files=$(ls -1q input_files/ | wc -l) +# list of files; we do this deterministically +# because then we can run this command across +# other scripts and expect the same order of files. +list_of_files=$(ls -1q input_files) +# counter to keep track of which machine we're on (for logging purposes). +i=1 +# We flip this bit if we find an error with any of the machines. This tells us +# to stop the process so that the user can go by hand and fix the machine. +error_with_packages=1 + +# Upload script and install packages on all machines. +# parallelized. +################################################################ +for x in $list_of_files; +do + # Grab the ip from hosts.txt that corresponds to the file number we are + # uploading. + # If we are uploading file #3 in the list, go to line 3 in the hosts file + # and upload to that ip. + + machine=$(sed "${i}q;d" $hosts_file) + echo 'Now on '"${machine}"' number '$i + # Do not do strict host key checking so that you dont have to type "yes" for + # each machine. + echo 'Uploading packages_to_install.sh' + scp -i ~/.ssh/gce_pshtt_key -o "StrictHostKeyChecking no" packages_to_install.sh ubuntu@"${machine}":~/ + echo $? + # We echo after each command to ensure that it worked. 0 means success. 
+ # The Log file is how we can tell if the packages have all been uploaded. + echo 'Creating packages log file' + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" touch package_log_file.txt + echo $? + # Check to see if this screen exists already. + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" screen -list | grep -q "package_screen" + answer=$(echo $?) + # If the screen exists, then we won't create another one. Otherwise, create. + if [[ "${answer}" -eq 1 ]] ; then + echo 'Creating screen' + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" screen -S package_screen -d -m + echo $? + fi + # Run packages_to_install and pipe to packages_log_file.txt on each machine. + ssh -i ~/.ssh/gce_pshtt_key -t ubuntu@"${machine}" "screen -S package_screen -X -p 0 stuff $'sudo ./packages_to_install.sh > package_log_file.txt\n'" + echo $? + ((i=i+1)) +done + + +# Check that all machines have finished installing packages. +################################################################### +# Grab the last machine in the hosts file. This was the last one to +# be uploaded and kicked off, so presumably it will be the last one +# to finish. +machine=$(sed "${num_files}q;d" $hosts_file) +while true +do + echo 'Waiting on packages to install' + # Wait 10 seconds before checking the file again. + sleep 10 + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" tail package_log_file.txt | grep -q 'FINISHED INSTALLING PACKAGES' + finished=$(echo $?) + if [[ "${finished}" -eq 0 ]]; then + break + fi +done + +# Since the last machine is finished, go check the other machines. +i=1 +for z in $list_of_files; +do + machine=$(sed "${i}q;d" $hosts_file) + echo 'Now on '"${machine}"' number '$i + echo 'Checking packages finished installing' + ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" tail package_log_file.txt | grep -q 'FINISHED INSTALLING PACKAGES' + finished=$(echo $?) + if [[ "${finished}" -eq 0 ]]; then + # Check if any of the machines had a problem installing packages. 
+ ssh -i ~/.ssh/gce_pshtt_key ubuntu@"${machine}" cat package_log_file.txt | grep -q '1 ERROR CODE' + error=$(echo $?) + if [[ "${error}" -eq 0 ]]; then + echo 'ERROR WITH '"${machine}" + error_with_packages=0 + fi + fi + ((i=i+1)) +done + +# If any of the machines had an error with a package, stop the entire process, +# inform the user. +if [[ "${error_with_packages}" -eq 0 ]]; then + echo 'ERROR FOUND WITH PACKAGES' + exit 1 +fi + +# Upload remaining data files. +##################################################################### +i=1 +for y in $list_of_files; +do + machine=$(sed "${i}q;d" $hosts_file) + echo 'Now on '"${machine}"' number '$i + echo 'Cloning github repo file' + ssh -i ~/.ssh/gce_pshtt_key -t ubuntu@"${machine}" git clone https://github.com/dhs-ncats/pshtt.git + echo $? + echo 'copying data file to pshtt directory' + scp -i ~/.ssh/gce_pshtt_key input_files/"${y}" ubuntu@"${machine}":~/pshtt/ + echo $? + echo 'Copying roots.pem into pshtt directory' + scp -i ~/.ssh/gce_pshtt_key "roots.pem" ubuntu@"${machine}":~/pshtt/ + echo $? + echo 'Copying running script into pshtt directory' + scp -i ~/.ssh/gce_pshtt_key running_script.sh ubuntu@"${machine}":~/pshtt/ + echo $? + echo "${y}"; + ((i=i+1)) +done + diff --git a/gce-scripts/split_up_dataset.sh b/gce-scripts/split_up_dataset.sh new file mode 100755 index 00000000..3b70473e --- /dev/null +++ b/gce-scripts/split_up_dataset.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# ./split_up_dataset +# Ex: ./split_up_dataset.sh top-1m.nocommas.8.31.2017 100 alexa + +# Uses split to break up the input file into N shards. +# Because of how split works, some files will be larger or smaller +# than others, but the sum of the files will equal the length of the +# original file. + +# Add .csv suffix because that's what pshtt takes in. + +# Place all files into input_files dir for posterity. 
+ +input_file=$1 +number_of_shards=${2-10} +output_file_name=${3-shard_} + +split -a 3 --number=l/"${number_of_shards}" -d "${input_file}" input_files/"${output_file_name}" --additional-suffix=.csv diff --git a/pshtt/__init__.py b/pshtt/__init__.py index d93b5b24..2385e834 100644 --- a/pshtt/__init__.py +++ b/pshtt/__init__.py @@ -1 +1 @@ -__version__ = '0.2.3' +__version__ = '0.3.0-dev' diff --git a/pshtt/models.py b/pshtt/models.py index 47b0c0fc..1ae71a39 100644 --- a/pshtt/models.py +++ b/pshtt/models.py @@ -1,5 +1,5 @@ -class Domain: +class Domain(object): def __init__(self, domain): self.domain = domain @@ -23,7 +23,7 @@ def to_object(self): } -class Endpoint: +class Endpoint(object): def __init__(self, protocol, host, base_domain): # Basic endpoint description diff --git a/requirements.txt b/requirements.txt index 9371b6ea..28ad4e89 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -requests==2.14.2 +requests>=2.18.4 sslyze==1.1.0 wget==3.2 docopt diff --git a/setup.cfg b/setup.cfg index b88034e4..e5cc9829 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,5 @@ +[bdist_wheel] +universal = true + [metadata] description-file = README.md diff --git a/setup.py b/setup.py index 07c33eb9..179b2645 100755 --- a/setup.py +++ b/setup.py @@ -49,6 +49,7 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', ], # What does your project relate to? @@ -57,7 +58,7 @@ packages=['pshtt'], install_requires=[ - 'requests>=2.14.2', + 'requests>=2.18.4', 'sslyze>=1.1.0', 'wget>=3.2', 'docopt',