Compare commits

2 Commits

3 changed files with 49 additions and 35 deletions

README.md

@@ -59,6 +59,7 @@ Additionally, after organising submissions, you can inspect all submitted files
 - For this feature you also need to install the pandas package
 - `python -m pip install pandas`
 - Usage: `python inspect_submissions.py GRADEBOOK_DIR_NAME`
+- Note: run *after* organising a gradebook with `organise_gradebook.py`
 - Generated CSV files can be found in directory *csv*, with *GRADEBOOK_DIR_NAME* as file name prefix
 - e.g. inspecting submissions for *AssignmentX* will create 2 csv files:
   - AssignmentX_file_hashes_[datetime].csv
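
The changes to `utils/inspector.py` further down also add support for an optional list of file names to exclude from hashing, loaded from `csv/GRADEBOOK_DIR_NAME_excluded.csv` by the new `load_excluded_filenames` helper. A minimal sketch of how such a file could be prepared, assuming pandas and the repository's *csv* directory; the file names used here are placeholders:

```python
# Sketch only (not part of these commits): prepare csv/AssignmentX_excluded.csv
# with the single 'exclude_filename' column that load_excluded_filenames reads.
import os
import pandas as pd

CSV_DIR = os.path.join(os.getcwd(), 'csv')   # same 'csv' directory the scripts use
os.makedirs(CSV_DIR, exist_ok=True)

# placeholder file names - anything listed here is skipped during hashing/inspection
excluded = pd.DataFrame({'exclude_filename': ['assignment.pdf', 'cover_sheet.docx']})
excluded.to_csv(os.path.join(CSV_DIR, 'AssignmentX_excluded.csv'), index=False)
```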

inspect_submissions.py

@@ -1,7 +1,5 @@
 import os, sys
-import pandas as pd
-from datetime import datetime
-from utils.inspector import hash_submissions, suspicious_by_hash
+from utils.inspector import hash_submissions, inspect_for_duplicate_hashes

 CSV_DIR = os.path.join(os.getcwd(), 'csv')
@@ -9,17 +7,10 @@ def main():
     submissions_dir_name = ' '.join(sys.argv[1:]) if len(sys.argv) > 1 else exit(f'\nNo submissions dir name given. Provide the name as an argument.\n\nUsage: python {sys.argv[0]} [submissions dir name]\nExample: python {sys.argv[0]} AssignmentX\n')
     submissions_dir_path = os.path.join('BB_submissions', submissions_dir_name)
     if not os.path.isdir(submissions_dir_path):
-        exit(f'Directory {submissions_dir_path} does not exist.\nMake sure "{submissions_dir_name}" exists in "BB_submissions".')
+        exit(f'Directory {submissions_dir_path} does not exist.\nMake sure "{submissions_dir_name}" exists in "BB_submissions".\n')
     else:
-        hashes_csv_file_path = hash_submissions(submissions_dir_path) # generate hashes for all files and return output csv file to load & find duplicate/suspicious hashes
-        csv = pd.read_csv(hashes_csv_file_path)
-        df = pd.DataFrame(csv) # df with all files and their hashes
-        df_suspicious = suspicious_by_hash(df) # df with all files with duplicate/suspicious hash, excludes files from the same student id
-        csv_name = f'{submissions_dir_name}_suspicious_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
-        csv_out = os.path.join(CSV_DIR, csv_name)
-        df_suspicious.to_csv(csv_out, index=False)
-        print(f'[INFO] Created CSV file with duplicate/suspicious hashes in {submissions_dir_name}\nCSV file: {csv_out}')
+        hashes_csv_file_path = hash_submissions(submissions_dir_path) # generate CSV file with hashes for all files (except for any 'excluded') & return path to CSV file for finding duplicate/suspicious hashes
+        inspect_for_duplicate_hashes(hashes_csv_file_path) # generate CSV file with files having duplicate/suspicious hashes

 if __name__ == '__main__':
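
With this refactor, `main()` reduces to two calls into `utils.inspector`. A minimal sketch of the same two-step flow driven directly from Python, assuming a gradebook has already been organised into `BB_submissions/AssignmentX` (placeholder name):

```python
# Sketch only (not part of these commits): run the two-step inspection without the CLI.
import os
from utils.inspector import hash_submissions, inspect_for_duplicate_hashes

submissions_dir_path = os.path.join('BB_submissions', 'AssignmentX')  # placeholder assignment name
hashes_csv = hash_submissions(submissions_dir_path)   # writes csv/AssignmentX_file_hashes_<datetime>.csv
inspect_for_duplicate_hashes(hashes_csv)              # writes csv/AssignmentX_suspicious_<datetime>.csv
```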

utils/inspector.py

@@ -6,21 +6,41 @@ import pandas as pd
 CSV_DIR = os.path.join(os.getcwd(), 'csv')

-def get_hashes_in_dir(dir_path: str) -> list:
+def load_excluded_filenames(submissions_dir_name: str) -> list[str]: # helper function for hashing all files
+    csv_file_path = os.path.join(CSV_DIR, f'{submissions_dir_name}_excluded.csv')
+    if not os.path.exists(csv_file_path): # if csv file with excluded file names for submission does not exist
+        print(f'[WARNING] Cannot find CSV file with list of excluded file names: {csv_file_path}\n[INFO] All files will be hashed & inspected')
+        return [] # return empty list to continue without any excluded file names
+    else: # if csv file with excluded file names for submission exists
+        try:
+            df = pd.read_csv(csv_file_path)
+            filename_list = df['exclude_filename'].tolist() # get the values of the 'exclude_filename' column as a list
+            print(f'[INFO] Using CSV file with list of excluded file names: {csv_file_path}')
+            return filename_list
+        except Exception as e: # any exception, print error and return empty list to continue without any excluded file names
+            print(f'[WARNING] Unable to load / read CSV file with list of excluded file names: {csv_file_path}\n[INFO] All files will be hashed & inspected')
+            print(f'[INFO] Error message: {e}')
+            return []
+
+def get_hashes_in_dir(dir_path: str, excluded_filenames: list = []) -> list: # helper function for hashing all files
     hash_list = []
     for subdir, dirs, files in os.walk(dir_path): # loop through all files in the directory and generate hashes
-        for file in files:
-            filepath = os.path.join(subdir, file)
-            with open(filepath, 'rb') as f:
-                filehash = hashlib.sha256(f.read()).hexdigest()
-                hash_list.append({ 'filepath': filepath, 'filename': file, 'sha256 hash': filehash})
+        for filename in files:
+            if filename not in excluded_filenames: # do not hash/inspect file names in the excluded list
+                filepath = os.path.join(subdir, filename)
+                with open(filepath, 'rb') as f:
+                    filehash = hashlib.sha256(f.read()).hexdigest()
+                    hash_list.append({ 'filepath': filepath, 'filename': filename, 'sha256 hash': filehash})
     return hash_list

-def hash_submissions(submissions_dir_path: str) -> str:
+def hash_submissions(submissions_dir_path: str) -> str: # main function for hashing all files
     os.makedirs(CSV_DIR, exist_ok=True)
     submissions_dir_name = os.path.abspath(submissions_dir_path).split(os.path.sep)[-1] # get name of submission/assignment by separating path and use rightmost part
+    excluded_filenames = load_excluded_filenames(submissions_dir_name)
     csv_file_name = f'{submissions_dir_name}_file_hashes_{datetime.now().strftime("%Y%m%d-%H%M%S")}.csv'
     csv_file_path = os.path.join(CSV_DIR, csv_file_name)
     with open(csv_file_path, 'w', newline='') as csvfile: # open the output CSV file for writing
@@ -30,28 +50,30 @@ def hash_submissions(submissions_dir_path: str) -> str:
         for student_dir_name in os.listdir(submissions_dir_path): # loop through each student dir to get hashes for all files per student
             student_dir_path = os.path.join(submissions_dir_path, student_dir_name)
-            hashes_dict = get_hashes_in_dir(student_dir_path) # dict with hashes for all student files
+            hashes_dict = get_hashes_in_dir(student_dir_path, excluded_filenames) # dict with hashes for all student files - except for 'excluded' file names
             for d in hashes_dict:
                 d.update({'Student ID': student_dir_name}) # update hash records with student id
             writer.writerows(hashes_dict)
     print(f'[INFO] Created CSV file with all files & hashes in {submissions_dir_name}\nCSV file: {csv_file_path}')
     return csv_file_path

-def get_suspicious_hashes(df: pd.DataFrame) -> list:
+def inspect_for_duplicate_hashes(hashes_csv_file_path: str): # main function for finding duplicate / suspicious hashes
+    csv = pd.read_csv(hashes_csv_file_path)
+    df = pd.DataFrame(csv) # df with all files and their hashes
     drop_columns = ['filepath', 'filename'] # only need to keep 'student id' and 'sha256 hash' for groupby later
-    df = df.drop(columns=drop_columns).sort_values('sha256 hash') # clear not needed colums & sort by hash
+    df = df.drop(columns=drop_columns) # clear not needed columns
     duplicate_hash = df.loc[df.duplicated(subset=['sha256 hash'], keep=False), :] # all files with duplicate hash - incl. files from the same student id
     hash_with_multiple_student_ids = duplicate_hash.groupby('sha256 hash').agg(lambda x: len(x.unique())>1) # true if more than 1 unique student ids (= files with the same hash by multiple student ids), false if unique student id (= files from the same student id with the same hash)
     suspicious_hashes_list = hash_with_multiple_student_ids[hash_with_multiple_student_ids['Student ID']==True].index.to_list() # list with duplicate hashes - only if different student id (doesn't include files from same student id)
-    return suspicious_hashes_list
-
-def suspicious_by_hash(df: pd.DataFrame) -> pd.DataFrame:
-    suspicious_hashes_list = get_suspicious_hashes(df)
-    files_with_suspicious_hash = df[df['sha256 hash'].isin(suspicious_hashes_list)] # excluding duplicate from same student id
-    return files_with_suspicious_hash.sort_values(['sha256 hash', 'Student ID'])
+    files_with_suspicious_hash = df[df['sha256 hash'].isin(suspicious_hashes_list)] # df with all files with duplicate/suspicious hash, excludes files from the same student id
+    df_suspicious = files_with_suspicious_hash.sort_values(['sha256 hash', 'Student ID']) # sort before output to csv
+    try:
+        submissions_dir_name = os.path.basename(hashes_csv_file_path).split('_file_hashes_')[0]
+        csv_out = hashes_csv_file_path.rsplit('_', 1)[0].replace('file_hashes', 'suspicious_') + datetime.now().strftime("%Y%m%d-%H%M%S") + '.csv'
+        df_suspicious.to_csv(csv_out, index=False)
+        print(f'[INFO] Created CSV file with duplicate/suspicious hashes in {submissions_dir_name}\nCSV file: {csv_out}')
+    except Exception as e:
+        exit(f'[ERROR] Something went wrong while trying to save csv file with suspicious hashes\nError message: {e}')
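
For reference, a toy example (not from the repository) of the pandas pattern used in `inspect_for_duplicate_hashes`: `duplicated()` keeps every hash that occurs more than once, and the `groupby`/`unique` step keeps only hashes shared across different student IDs, so identical files resubmitted by the same student are not flagged:

```python
# Sketch only: the duplicate/suspicious-hash logic on a tiny hand-made DataFrame.
import pandas as pd

df = pd.DataFrame({
    'Student ID':  ['s1', 's1', 's2', 's3', 's3'],
    'sha256 hash': ['aaa', 'bbb', 'aaa', 'ccc', 'ccc'],   # shortened fake hashes
})

# all rows whose hash occurs more than once (still includes duplicates within one student)
duplicate_hash = df.loc[df.duplicated(subset=['sha256 hash'], keep=False), :]

# True for hashes that belong to more than one distinct student id
shared = duplicate_hash.groupby('sha256 hash').agg(lambda x: len(x.unique()) > 1)
suspicious_hashes = shared[shared['Student ID'] == True].index.to_list()

print(suspicious_hashes)                               # ['aaa'] - shared by s1 and s2
print(df[df['sha256 hash'].isin(suspicious_hashes)])   # only the flagged rows; 'ccc' (s3 only) is dropped
```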