-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
0 parents
commit f455092
Showing
35 changed files
with
84,709 additions
and
0 deletions.
There are no files selected for viewing
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
time,user id,movie id |
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
time,user id,ip,recs,ms |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,29 @@ | ||
from pyspark.sql import SparkSession
from pyspark.sql.functions import max, col, when

spark = SparkSession.builder.appName("mpg1").getOrCreate()

# Load the raw minutes-played ("mpg") events.
# assumes columns: time, user id, movie id, val (watch duration) — TODO confirm
mpg1 = spark.read.csv(
    "/home/team14/Downloads/kafkaPipe/Data/mpg_from_raw_mpg.csv",
    header=True,
    inferSchema=True,
)

# Maximum watch duration per (user, movie) pair, computed entirely on the
# executors.  The previous version collect()ed every aggregated row to the
# driver to build a Python dict, printed it, and re-parallelized it back
# into a DataFrame — that round trip does not scale and added nothing, so
# it was removed.
max_timing = (
    mpg1.groupBy("user id", "movie id")
    .agg(max("val").alias("timing"))
)

# Attach the per-pair maximum back onto the events.
# NOTE(review): the original join also tested col("timing") == col("timing"),
# which is always true and therefore a no-op; the effective join key was only
# the (user id, movie id) pair, which is exactly what we join on here.
mpg1 = mpg1.join(max_timing, on=["user id", "movie id"])

# Bucket the maximum watch duration into a 1-5 rating
# (<=20 min -> 1, <=50 -> 2, <=100 -> 3, <=150 -> 4, else 5).
mpg1 = mpg1.withColumn(
    "rating",
    when(col("timing") <= 20, 1)
    .when(col("timing") <= 50, 2)
    .when(col("timing") <= 100, 3)
    .when(col("timing") <= 150, 4)
    .otherwise(5),
)

# Persist the derived ratings as a zipped CSV for downstream use.
df = mpg1.select("timing", "user id", "movie id", "rating").toPandas()
compression_opts = dict(
    method="zip",
    archive_name="spark_additional_rating_from_mpg_rating_score.csv",
)
df.to_csv(
    "spark_additional_rating_from_mpg_rating_score.zip",
    index=False,
    compression=compression_opts,
)
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,26 @@ | ||
# Image for the movie-recommendation Flask service (movies/recommend.py).
# Use an official Python runtime as the parent image.
FROM python:3.9

# All subsequent paths are relative to /app.
WORKDIR /app

# Copy only the requirements file first so the dependency layer is cached
# and reinstallation is skipped when just the application code changes.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Copy the Flask app files into the container.
COPY movies/ ./movies/

# Tell Flask which module contains the application.
ENV FLASK_APP=movies/recommend.py

# Expose the Flask port.
EXPOSE 5000

# Run the Flask app; bind to all interfaces so the published
# container port is reachable from the host.
CMD ["flask", "run", "--host=0.0.0.0"]
Binary file not shown.
Oops, something went wrong.