112 changes: 103 additions & 9 deletions hash_practice/exercises.py
@@ -1,29 +1,123 @@

def grouped_anagrams(strings):
""" This method will return an array of arrays.
Each subarray will have strings which are anagrams of each other
Time Complexity: ?
Space Complexity: ?

Missing complexity calculations here, but I haven't been dinging people for it; simply calling attention to it.

"""
pass
if not strings:
return []
res_list = [[strings[0]]]
dict_list = [create_dict(strings[0])]

for i in range(1, len(strings)):
cur_str = strings[i]
cur_str_dict = create_dict(cur_str)
if cur_str_dict in dict_list:

Calling `in` on a list takes O(n) time, so by using the list dict_list instead of a dictionary, your entire solution takes O(n^2) time. You can achieve O(n) time by replacing dict_list with a dictionary; that is the intended solution given the material, so I'm going to mark this as Yellow and encourage you to attempt it.

            match_index = dict_list.index(cur_str_dict)
            res_list[match_index].append(cur_str)
        else:
            res_list.append([cur_str])
            dict_list.append(cur_str_dict)
    return res_list
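As a sketch of the dictionary-based approach described in the review comment above: keying a dict on each word's sorted letters gives average O(1) lookups instead of the O(n) list search. The helper name grouped_anagrams_dict is hypothetical.

def grouped_anagrams_dict(strings):
    # Anagrams share the same sorted-letter key, so they land in the same group.
    groups = {}
    for word in strings:
        key = tuple(sorted(word))
        groups.setdefault(key, []).append(word)
    return list(groups.values())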



def create_dict(word):

create_dict is a bit too generic of a name; I think I would call this something like create_frequency_dict, because otherwise my expectation is that this just creates an arbitrary dictionary rather than the specific frequency dictionary that is created here.

    word_dict = {}
    for letter in word:
        if letter in word_dict:
            word_dict[letter] += 1
        else:
            word_dict[letter] = 1
    return word_dict
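For comparison, a minimal sketch of the same frequency mapping using the standard library's collections.Counter, renamed along the lines of the suggestion above (create_frequency_dict is a hypothetical name):

from collections import Counter

def create_frequency_dict(word):
    # Counter maps each letter to the number of times it appears in word.
    return Counter(word)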


def top_k_frequent_elements(nums, k):
""" This method will return the k most common elements
In the case of a tie it will select the first occuring element.
Time Complexity: ?
Space Complexity: ?
Time Complexity: O(nlogn)
Space Complexity: O(n)
"""
pass

    nums_dict = create_dict(nums)
    num_freq_tuple_list = []
    for num, freq in nums_dict.items():
        num_freq_tuple_list.append((num, freq))
    num_freq_tuple_list.sort(key=sort_by_tuple_freq, reverse=True)
    res_list = []
    i = 0
    while k > 0 and i < len(num_freq_tuple_list):
        res_list.append(num_freq_tuple_list[i][0])
        i += 1
        k -= 1
    return res_list


def sort_by_tuple_freq(x):
    return x[1]
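An alternative sketch using collections.Counter with heapq.nlargest, which runs in roughly O(n log k); note that ties are not guaranteed to keep first-occurrence order here, and the name top_k_frequent_heap is hypothetical.

import heapq
from collections import Counter

def top_k_frequent_heap(nums, k):
    # Keep only the k entries with the highest counts.
    counts = Counter(nums)
    largest = heapq.nlargest(k, counts.items(), key=lambda item: item[1])
    return [num for num, _ in largest]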


def valid_sudoku(table):
""" This method will return the true if the table is still
a valid sudoku table.
Each element can either be a ".", or a digit 1-9
The same digit cannot appear twice or more in the same
row, column or 3x3 subgrid
Time Complexity: ?
Space Complexity: ?
"""
pass
Time Complexity: O(1)
Space Complexity: O(1)
"""
    for row in range(len(table)):
        if valid_row(row, table) == False:
            return False

    for col in range(len(table[0])):
        if valid_col(col, table) == False:
            return False

    row = 0
    while row < 9:
        col = 0
        while col < 9:
            if valid_grid(row, col, table) == False:
                return False
            col += 3
        row += 3
    return True


def valid_grid(start_row, start_col, table):
    num_set = set()
    for i in range(start_row, start_row + 3):
        for j in range(start_col, start_col + 3):
            digit = table[i][j]
            if check_valid(digit, num_set) is False:
                return False
    return True



def valid_row(cur_row, table):
    num_set = set()
    for digit in table[cur_row]:
        if check_valid(digit, num_set) is False:
            return False
    return True

def valid_col(cur_col, table):
    num_set = set()
    for row in range(len(table)):
        digit = table[row][cur_col]
        if check_valid(digit, num_set) is False:
            return False
    return True


def check_valid(digit, num_set):
    if digit != ".":
        if digit in num_set:
            return False
        else:
            num_set.add(digit)
    return True
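A quick usage sketch with a hypothetical partially filled board (nine rows of nine strings, "." marking empty cells):

sample_board = [
    ["5", "3", ".", ".", "7", ".", ".", ".", "."],
    ["6", ".", ".", "1", "9", "5", ".", ".", "."],
    [".", "9", "8", ".", ".", ".", ".", "6", "."],
    ["8", ".", ".", ".", "6", ".", ".", ".", "3"],
    ["4", ".", ".", "8", ".", "3", ".", ".", "1"],
    ["7", ".", ".", ".", "2", ".", ".", ".", "6"],
    [".", "6", ".", ".", ".", ".", "2", "8", "."],
    [".", ".", ".", "4", "1", "9", ".", ".", "5"],
    [".", ".", ".", ".", "8", ".", ".", "7", "9"],
]
print(valid_sudoku(sample_board))  # True: no digit repeats in any row, column, or subgrid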