
Commit 1fb1fdd

cclauss and github-actions authored
requirements.txt: Unpin numpy (TheAlgorithms#2287)
* requirements.txt: Unpin numpy
* fixup! Format Python code with psf/black push
* Less clutter
* fixup! Format Python code with psf/black push

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
1 parent f0d7879 commit 1fb1fdd
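Apart from the numpy unpin in requirements.txt, the changes below are mechanical re-formats produced by the repository's psf/black workflow. For reference, a typical invocation (assuming black is installed; not part of this commit):

    black graphs/karger.py other/scoring_algorithm.py web_programming/world_covid19_stats.py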

File tree

4 files changed: +29 −25 lines changed


graphs/karger.py

+19-15
@@ -5,20 +5,19 @@
 import random
 from typing import Dict, List, Set, Tuple
 
-
 # Adjacency list representation of this graph:
 # https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
 TEST_GRAPH = {
-    '1': ['2', '3', '4', '5'],
-    '2': ['1', '3', '4', '5'],
-    '3': ['1', '2', '4', '5', '10'],
-    '4': ['1', '2', '3', '5', '6'],
-    '5': ['1', '2', '3', '4', '7'],
-    '6': ['7', '8', '9', '10', '4'],
-    '7': ['6', '8', '9', '10', '5'],
-    '8': ['6', '7', '9', '10'],
-    '9': ['6', '7', '8', '10'],
-    '10': ['6', '7', '8', '9', '3']
+    "1": ["2", "3", "4", "5"],
+    "2": ["1", "3", "4", "5"],
+    "3": ["1", "2", "4", "5", "10"],
+    "4": ["1", "2", "3", "5", "6"],
+    "5": ["1", "2", "3", "4", "7"],
+    "6": ["7", "8", "9", "10", "4"],
+    "7": ["6", "8", "9", "10", "5"],
+    "8": ["6", "7", "9", "10"],
+    "9": ["6", "7", "8", "10"],
+    "10": ["6", "7", "8", "9", "3"],
 }
 
 
@@ -61,8 +60,9 @@ def partition_graph(graph: Dict[str, List[str]]) -> Set[Tuple[str, str]]:
     for neighbor in uv_neighbors:
         graph_copy[neighbor].append(uv)
 
-    contracted_nodes[uv] = {contracted_node for contracted_node in
-                            contracted_nodes[u].union(contracted_nodes[v])}
+    contracted_nodes[uv] = {
+        node for node in contracted_nodes[u].union(contracted_nodes[v])
+    }
 
     # Remove nodes u and v.
     del graph_copy[u]
@@ -75,8 +75,12 @@ def partition_graph(graph: Dict[str, List[str]]) -> Set[Tuple[str, str]]:
 
     # Find cutset.
     groups = [contracted_nodes[node] for node in graph_copy]
-    return {(node, neighbor) for node in groups[0]
-            for neighbor in graph[node] if neighbor in groups[1]}
+    return {
+        (node, neighbor)
+        for node in groups[0]
+        for neighbor in graph[node]
+        if neighbor in groups[1]
+    }
 
 
 if __name__ == "__main__":
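The karger.py hunks are pure psf/black re-formats; behaviour is unchanged. Since partition_graph performs one randomized contraction run of Karger's algorithm, callers typically repeat it and keep the smallest cut. A minimal sketch, not part of this commit, assuming the script runs from the repository root so graphs.karger is importable:

# Repeat the randomized contraction and keep the smallest cutset found;
# more repetitions raise the probability of hitting the true minimum cut.
from graphs.karger import TEST_GRAPH, partition_graph

best_cut = min((partition_graph(TEST_GRAPH) for _ in range(100)), key=len)
print(len(best_cut), sorted(best_cut))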

other/scoring_algorithm.py

+5-5
@@ -1,4 +1,4 @@
-'''
+"""
 developed by: markmelnic
 original repo: https://github.com/markmelnic/Scoring-Algorithm
 
@@ -23,17 +23,17 @@
 
 >>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
 [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
-'''
+"""
 
 
-def procentual_proximity(source_data : list, weights : list) -> list:
+def procentual_proximity(source_data: list, weights: list) -> list:
 
-    '''
+    """
     weights - int list
     possible values - 0 / 1
     0 if lower values have higher weight in the data set
     1 if higher values have higher weight in the data set
-    '''
+    """
 
     # getting data
     data_lists = []
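The scoring_algorithm.py hunks likewise only swap quote styles and annotation spacing. The doctest in the diff already shows the intended behaviour; a matching usage sketch, assuming the module is importable from the repository root:

# weights: 0 means lower values score better, 1 means higher values score
# better (per the docstring); each row gains a trailing combined score.
from other.scoring_algorithm import procentual_proximity

scored = procentual_proximity(
    [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1]
)
print(scored)  # [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]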

requirements.txt

+1-1
@@ -5,7 +5,7 @@ flake8
 keras
 matplotlib
 mypy
-numpy>=1.17.4
+numpy
 opencv-python
 pandas
 pillow
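With the pin removed, pip is free to resolve any numpy release rather than requiring 1.17.4 or newer. A quick check of what actually got installed (a sketch, not part of the commit):

import numpy
print(numpy.__version__)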

web_programming/world_covid19_stats.py

+4-4
@@ -1,9 +1,9 @@
 #!/usr/bin/env python3
 
-'''
+"""
 Provide the current worldwide COVID-19 statistics.
 This data is being scrapped from 'https://www.worldometers.info/coronavirus/'.
-'''
+"""
 
 import requests
 from bs4 import BeautifulSoup
@@ -13,8 +13,8 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus")
     """
     Return a dict of current worldwide COVID-19 statistics
     """
-    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
-    keys = soup.findAll('h1')
+    soup = BeautifulSoup(requests.get(url).text, "html.parser")
+    keys = soup.findAll("h1")
     values = soup.findAll("div", {"class": "maincounter-number"})
     keys += soup.findAll("span", {"class": "panel-title"})
     values += soup.findAll("div", {"class": "number-table-main"})
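The world_covid19_stats.py hunks are again quote-style re-formats. A usage sketch, not part of this commit; it needs network access and assumes worldometers.info still serves the same markup:

# world_covid19_stats returns a dict of headline figures scraped from the page.
from web_programming.world_covid19_stats import world_covid19_stats

for key, value in world_covid19_stats().items():
    print(f"{key}\n{value}\n")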
