diff --git a/TF00-initial-exploration.ipynb b/TF00-initial-exploration.ipynb index b7818fd..2813dea 100644 --- a/TF00-initial-exploration.ipynb +++ b/TF00-initial-exploration.ipynb @@ -62,26 +62,10 @@ "metadata": {}, "outputs": [], "source": [ - "meanie = filters.Mean(maskSize=5)\n", - "print(meanie)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "imgFiltered = meanie.convolve(img)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "handler.plotFigs([img, imgFiltered])" + "for maskSize in [3,5,7,9,11,21,51]:\n", + " meanie = filters.Mean(maskSize=maskSize)\n", + " imgFiltered = meanie.convolve(img)\n", + " handler.plotFigs([img, imgFiltered], meanie)" ] }, { @@ -275,6 +259,86 @@ "handler.saveAll(imgFiltered, lowP)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create and Apply Adaptive Weighted Median Filter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "awMed = filters.AdaptiveWeightedMedian(maskSize=7, constant=20, centralWeight=100)\n", + "print(awMed)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "imgFiltered = awMed.convolve(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgFiltered])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "handler.saveAll(imgFiltered, awMed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create and Apply Trimmed Mean Filter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " for trim in [maskSize, maskSize*2, maskSize*3]:\n", + " trimmedMean = filters.TrimmedMean(maskSize=5, trimStart=4, trimEnd=4)\n", + " imgFiltered = trimmedMean.convolve(img)\n", + " filteredTitle = trimmedMean.name + \"_maskSize\" + str(trimmedMean.maskSize) + \"_trim\" + str(trim)\n", + " handler.plotFigs([img, imgFiltered], trimmedMean, filteredTitle)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "handler.saveAll(imgFiltered, trimmedMean)" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/TF02-histogram-exploration.ipynb b/TF02-histogram-exploration.ipynb index fdc9ca6..f9b7959 100644 --- a/TF02-histogram-exploration.ipynb +++ b/TF02-histogram-exploration.ipynb @@ -65,7 +65,7 @@ "metadata": {}, "outputs": [], "source": [ - "filtr = filters.Equalise()" + "eq = filters.Equalise()" ] }, { @@ -74,7 +74,7 @@ "metadata": {}, "outputs": [], "source": [ - "imgNew = filtr.filter(img)" + "imgNew = eq.filter(img)" ] }, { @@ -83,7 +83,7 @@ "metadata": {}, "outputs": [], "source": [ - "handler.plotFigs([img, imgNew])" + "handler.plotFigs([img, imgNew], eq, filteredTitle=eq.name)" ] }, { @@ -99,7 +99,7 @@ "metadata": {}, "outputs": [], "source": [ - "filtr = filters.AHE(maskSize=126)" + "ahe = filters.AHE(maskSize=21)" ] }, { @@ -108,7 +108,7 @@ "metadata": {}, "outputs": [], "source": [ - "imgNew = filtr.filter(img)" + "imgNew = ahe.filter(img)" ] }, { @@ -117,60 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "handler.plotFigs([img, imgNew])" - ] 
- }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Save Results" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "import os.path\n", - "from PIL import Image" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "currentDir = Path().absolute()\n", - "root = str(currentDir) + '\\\\..\\outputs\\\\hist_Adaptive_Equalise\\\\'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "if not os.path.exists(root):\n", - " os.makedirs(root)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create pillow image object from filtered image array\n", - "img_PIL = Image.fromarray(imgNew, 'L')\n", - "\n", - "# Save filtered image from pillow image object\n", - "filePath = root+'filtered_foetus_maskSize_126.png'\n", - "img_PIL.save(filePath, 'PNG')\n", - "print(\"Saved filtered image to... \\n{}\\n\\n\".format(filePath))" + "handler.plotFigs([img, imgNew], ahe)" ] }, { diff --git a/TF03-histogram-exploration-sliding-window.ipynb b/TF03-histogram-exploration-sliding-window.ipynb new file mode 100644 index 0000000..15e069b --- /dev/null +++ b/TF03-histogram-exploration-sliding-window.ipynb @@ -0,0 +1,221 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Histogram Exploration" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import filters\n", + "import handler" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np # for debugging purposes\n", + "import matplotlib.pyplot as plt\n", + "from sklearn.preprocessing import normalize\n", + "from math import ceil" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img, _ = handler.getImageAsArray(handler.FOETUS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create and Apply SWAHE Filter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "swahe = filters.SWAHE(maskSize = 127)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgNew = swahe.filter(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgNew], swahe)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img.shape" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Save Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "import os.path\n", + "from PIL import Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "currentDir = Path().absolute()\n", + "root = str(currentDir) + '\\\\..\\outputs\\\\hist_Adaptive_Equalise\\\\'" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if not os.path.exists(root):\n", + " os.makedirs(root)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create pillow image object from filtered image array\n", + "img_PIL = Image.fromarray(imgNew[0], 'L')\n", + "\n", + "# Save filtered image from pillow image object\n", + "filePath = root+'filtered_foetus_maskSize_52.png'\n", + "img_PIL.save(filePath, 'PNG')\n", + "print(\"Saved filtered image to... \\n{}\\n\\n\".format(filePath))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3rd Part Check" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use CV2 library built-in equalise and CLAHe functions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import third_party_filters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "third_party_filters.equalise(handler.FOETUS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "third_party_filters.CLAHE(handler.FOETUS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/TF04-spatial-filters-results-generation-foetus.ipynb b/TF04-spatial-filters-results-generation-foetus.ipynb new file mode 100644 index 0000000..e7a9fff --- /dev/null +++ b/TF04-spatial-filters-results-generation-foetus.ipynb @@ -0,0 +1,298 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Initial Script" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import handler\n", + "import filters\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Load Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img, _ = handler.getImageAsArray(handler.FOETUS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " meanie = filters.Mean(maskSize=maskSize)\n", + " imgFiltered = meanie.convolve(img)\n", + " handler.plotFigs([img, imgFiltered], meanie)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Gaussian" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for sig in [0.5,1,2,3,4,5,8]:\n", + " gaus = filters.Gaussian(sig=sig)\n", + " imgFiltered = gaus.convolve(img)\n", + " filteredTitle = gaus.name + \"_maskSize\" + str(gaus.maskSize) + \"_sig\" + str(sig)\n", + " handler.plotFigs([img, 
imgFiltered], gaus, filteredTitle)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgFiltered = gaus.convolve(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgFiltered])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Median" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " med = filters.Median(maskSize=maskSize)\n", + " imgFiltered = med.convolve(img)\n", + " handler.plotFigs([img, imgFiltered], med)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## High Pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "highP = filters.Sharpening(maskSize=21)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgFiltered = highP.convolve(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgFiltered], highP)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Low Pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "lowP = filters.LowPass(maskSize=7)\n", + "print(lowP)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgFiltered = lowP.convolve(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgFiltered], lowP)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "handler.saveAll(imgFiltered, lowP)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Adaptive Weighted Median" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,11,21,51]:\n", + " for constant in [10, 50,150]:\n", + " for centralWeight in [50, 100]:\n", + " awMed = filters.AdaptiveWeightedMedian(maskSize=maskSize, constant=constant, centralWeight=centralWeight)\n", + " imgFiltered = awMed.convolve(img)\n", + " print(\"maskSize = {}\\nconstant = {}\\ncentral weight = {}\\n\".format(maskSize, constant, centralWeight))\n", + " handler.plotFigs([img, imgFiltered], awMed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Trimmed Mean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " for trim in [maskSize, maskSize*2, maskSize*3]:\n", + " if trim > maskSize**2/2+1:\n", + " continue\n", + " else:\n", + " trimmedMean = filters.TrimmedMean(maskSize=maskSize, trimStart=trim, trimEnd=trim)\n", + " imgFiltered = trimmedMean.convolve(img)\n", + " filteredTitle = trimmedMean.name + \"_maskSize\" + str(trimmedMean.maskSize) + \"_trim\" + str(trim)\n", + " handler.plotFigs([img, imgFiltered], trimmedMean, filteredTitle)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{}, + "outputs": [], + "source": [ + "trimmedMean = filters.TrimmedMean(maskSize=9, trimStart=1, trimEnd=1)\n", + "imgFiltered = trimmedMean.convolve(img)\n", + "filteredTitle = trimmedMean.name + \"_maskSize\" + str(trimmedMean.maskSize) + \"_trim\" + str(1)\n", + "handler.plotFigs([img, imgFiltered], trimmedMean, filteredTitle)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/TF04-spatial-filters-results-generation-nzjers.ipynb b/TF04-spatial-filters-results-generation-nzjers.ipynb new file mode 100644 index 0000000..f018b7a --- /dev/null +++ b/TF04-spatial-filters-results-generation-nzjers.ipynb @@ -0,0 +1,189 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Initial Script" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import handler\n", + "import filters\n", + "import numpy as np" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Load Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img, _ = handler.getImageAsArray(handler.NZJERS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Mean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " meanie = filters.Mean(maskSize=maskSize)\n", + " imgFiltered = meanie.convolve(img)\n", + " handler.plotFigs([img, imgFiltered], meanie)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Gaussian" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for sig in [0.5,1,2,3,4,5,8]:\n", + " gaus = filters.Gaussian(sig=sig)\n", + " imgFiltered = gaus.convolve(img)\n", + " filteredTitle = gaus.name + \"_maskSize\" + str(gaus.maskSize) + \"_sig\" + str(sig)\n", + " handler.plotFigs([img, imgFiltered], gaus, filteredTitle)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Median" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " med = filters.Median(maskSize=maskSize)\n", + " imgFiltered = med.convolve(img)\n", + " handler.plotFigs([img, imgFiltered], med)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## High Pass" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " sharp = filters.Sharpening(maskSize=maskSize)\n", + " imgFiltered = sharp.convolve(img)\n", + " handler.plotFigs([img, imgFiltered], sharp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Adaptive Weighted Median" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,11,21,51]:\n", + " for constant in [10, 50,150]:\n", + " for centralWeight in [50, 100]:\n", + " awMed = filters.AdaptiveWeightedMedian(maskSize=maskSize, constant=constant, centralWeight=centralWeight)\n", + " imgFiltered = awMed.convolve(img)\n", + " print(\"maskSize = {}\\nconstant = {}\\ncentral weight = {}\\n\".format(maskSize, constant, centralWeight))\n", + " handler.plotFigs([img, imgFiltered], awMed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Trimmed Mean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [3,5,7,9,11,21,51]:\n", + " for trim in [maskSize, maskSize*2, maskSize*3]:\n", + " if trim > maskSize**2/2+1:\n", + " continue\n", + " else:\n", + " trimmedMean = filters.TrimmedMean(maskSize=maskSize, trimStart=trim, trimEnd=trim)\n", + " imgFiltered = trimmedMean.convolve(img)\n", + " filteredTitle = trimmedMean.name + \"_maskSize\" + str(trimmedMean.maskSize) + \"_trim\" + str(trim)\n", + " handler.plotFigs([img, imgFiltered], trimmedMean, filteredTitle)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/TF05-histogram-filters-results-generation.ipynb b/TF05-histogram-filters-results-generation.ipynb new file mode 100644 index 0000000..ef84a78 --- /dev/null +++ b/TF05-histogram-filters-results-generation.ipynb @@ -0,0 +1,212 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Histogram Results Generation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import filters\n", + "import handler" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np # for debugging purposes\n", + "import matplotlib.pyplot as plt\n", + "from sklearn.preprocessing import normalize\n", + "from math import ceil" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "img, _ = handler.getImageAsArray(handler.NZJERS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Adaptive Histogram Equalise Filter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ahe = filters.AHE(maskSize=53)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgNew = ahe.filter(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgNew], ahe)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### SWAHE Filter - FOETUS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "swahe = filters.SWAHE(maskSize = 201)" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "imgNew = swahe.filter(img)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "handler.plotFigs([img, imgNew], swahe)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### SWAHE Filter - NZJERS" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for maskSize in [9, 21, 51, 127]:\n", + " swahe = filters.SWAHE(maskSize=maskSize)\n", + " imgNew = swahe.filter(img)\n", + " handler.plotFigs([img, imgNew], swahe)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3rd Part Check" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use CV2 library built-in equalise and CLAHe functions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import third_party_filters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "third_party_filters.equalise(handler.FOETUS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "third_party_filters.CLAHE(handler.FOETUS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/TF06-edge-detection.ipynb b/TF06-edge-detection.ipynb new file mode 100644 index 0000000..d896bae --- /dev/null +++ b/TF06-edge-detection.ipynb @@ -0,0 +1,217 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Edge Detection" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Imports" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import filters\n", + "import handler\n", + "import third_party_filters" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Load Image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "FOETUS, _ = handler.getImageAsArray(handler.FOETUS_PATH_ORIGINAL)\n", + "NZJERS, _ = handler.getImageAsArray(handler.NZJERS_PATH_ORIGINAL)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### NZJERS Trimmed Mean" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "maskSize: 5; trimEnds: 10" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tMed = filters.TrimmedMean(maskSize=5, trimStart=10, trimEnd=10)\n", + "imgMedFiltered = tMed.convolve(NZJERS)\n", + "edge = third_party_filters.edgeDetect(imgMedFiltered)\n", + "handler.plotFigs([NZJERS, imgMedFiltered, edge], tMed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### NZJERS Gaussian" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "sig: 
1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "gaus = filters.Gaussian(sig=1)\n", + "imgGausFiltered = gaus.convolve(NZJERS)\n", + "edge = third_party_filters.edgeDetect(imgGausFiltered)\n", + "handler.plotFigs([NZJERS, imgGausFiltered, edge], gaus)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Foetus Adaptive Weighted Median" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "maskSize: 11; \n", + "constant: 10; \n", + "centralWeight: 50; " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "awmF = filters.AdaptiveWeightedMedian(maskSize=11, constant=10, centralWeight=50)\n", + "foetAWMFiltered = awmF.convolve(FOETUS)\n", + "edge = third_party_filters.edgeDetect(foetAWMFiltered)\n", + "handler.plotFigs([FOETUS, foetAWMFiltered, edge], awmF)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### NZJERS Adaptive Weighted Median" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "maskSize: 5; \n", + "constant: 50; \n", + "centralWeight: 50; " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "awmN = filters.AdaptiveWeightedMedian(maskSize=5, constant=50, centralWeight=50)\n", + "nzjAWMFiltered = awmN.convolve(NZJERS)\n", + "edge = third_party_filters.edgeDetect(nzjAWMFiltered)\n", + "handler.plotFigs([NZJERS, nzjAWMFiltered, edge], awmN)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Foetus SWAHE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "swahe = filters.SWAHE(maskSize=201)\n", + "imgSWAHEFiltered = swahe.filter(FOETUS)\n", + "edge = third_party_filters.edgeDetect(imgSWAHEFiltered)\n", + "handler.plotFigs([FOETUS, imgSWAHEFiltered, edge], swahe)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "highPF = filters.Sharpening(maskSize=27)\n", + "highPN = filters.Sharpening(maskSize=11)\n", + "foetHighPFiltered = highPF.convolve(FOETUS)\n", + "nzjersHighPFiltered = highPN.convolve(NZJERS)\n", + "edgeF = third_party_filters.edgeDetect(foetHighPFiltered)\n", + "edgeN = third_party_filters.edgeDetect(nzjersHighPFiltered)\n", + "handler.plotFigs([FOETUS, foetHighPFiltered, edgeF], highPF)\n", + "handler.plotFigs([NZJERS, nzjersHighPFiltered, edgeN], highPN)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/edge_detector.py b/edge_detector.py deleted file mode 100644 index e69de29..0000000 diff --git a/filters.py b/filters.py index 946eb6c..cc3f2a9 100644 --- a/filters.py +++ b/filters.py @@ -1,19 +1,27 @@ """ -Module to define filters and their computation algorithms. Used by handler.py. +Main module to define filters and their computation algorithms. 
+ +These design requirements have been achieved by implementing abstract base classes (ABC) for each ‘family’ of filter +(spatial, Fourier, histogram), and several associated child classes for the filters themselves. +The functionality common to each filter within a given family has been defined in the base class and then +inherited by each child class. Any method implemented as an abstract method, in which it is not defined within +the base class but only declared, must be defined in the child class as a requirement, as is the case +with the method ‘compute’. This framework-like approach both enforces and supports the level of granularity +required to uniquely define the computation method for a given filtering technique. Digital Image Processing 2020, Assignment 1/1, 20% """ +# Import relevant packages from abc import ABC, abstractmethod import numpy as np import matplotlib.pyplot as plt -from math import ceil, floor +from math import ceil import statistics -from scipy import fftpack -from matplotlib.colors import LogNorm from tqdm import tqdm import logging +# Initialise logging used to track info and warning messages logging.basicConfig() def padImage(img, maskSize): @@ -21,28 +29,29 @@ def padImage(img, maskSize): Function pads image in two dimensions. Pad size is dependant on mask shape and therefore both pads are currently always equal since we only use square mask sizes. Added pixels have intensity zero, 0. :param maskSize: used to calculate number of pixels to be added on image - :param img: img to be padded - :return: + :param img: image to be padded + :return: padded array of pixel intensities """ - # Create padding for edges + # Calculate number of pixels required for padding pad = ceil((maskSize - 1) / 2) assert isinstance(pad, int) - # Add padding of zeros to the input image + # Add pad number of rows and columns of zeros around the sides of the input image array imgPadded = np.zeros((img.shape[0] + 2 * pad, img.shape[1] + 2 * pad)).astype('uint8') # Insert image pixel values into padded array imgPadded[pad:-pad, pad:-pad] = img - print("Padding of {} pixels created.".format(pad)) + # Log success result to console + logging.info("Padding of {} pixels created.".format(pad)) return imgPadded def scale(x, ceiling=255): """ - Function scales array between 0 and maximo - :param x: array of values to be scaled + Function scales n-dimensional array of values between zero to max value, cieling + :param x: n-dimensional array of values to be scaled :param ceiling: max values/ top of scale :return: scaled array """ @@ -59,11 +68,22 @@ def scale(x, ceiling=255): raise Exception("Can't scale as min and max are the same and will cause div(0) error but not " "all values are the same in array. Printing array... ", x) + # Return array with values scaled between zero and max return ceiling * (x - x.min()) / (x.max() - x.min()) class SpatialFilter(ABC): - # TODO: implement standard mask shapes of square or cross and implement kernel creation based on this for each filter + """ + Base class for all spatial filters, inherited by any spatial filter and containing all methods and attributes + common to operation of all spatial filters. + """ def __init__(self, maskSize, kernel, name, linearity): + """ + Object initialisation override used to assign parameters passed on creation of new class instance to object attributes. 
+ :param maskSize: mask size used to scan over pixels during convolution to detect surrounding pixel intensities (aka window size) + :param kernel: kernel of weights used to multiply with pixel intensities to calculate pixel update value + :param name: meta information - name of filter + :param linearity: meta information - linearity of filter + """ self.assertTypes(maskSize, kernel) self.name = name self.linearity = linearity @@ -74,131 +94,88 @@ def __str__(self): """ Override built-in str function so a description of the filter is shown when you run print(filter), where filter is an instance of class Filter. - :return: string describing filter instance. + :return: generated string describing filter instance/ object state """ + # Combine various object attributes into descriptive string to be displayed descriptor = "Filter name: {},\nLinearity: {},\nMask size: {}\nKernel shown below where possible:".format( self.name, self.linearity, self.maskSize ) + # Generate plot of kernel weights, used to visualise kernel weight distribution plt.imshow(self.kernel, interpolation='none') return descriptor @staticmethod def assertTypes(maskSize, kernel): + """ + Static method used for basic type checking during filtering computation. + :param maskSize: filter window/ mask size + :param kernel: filter kernel of weights + :return: None + """ assert isinstance(maskSize, int) # Mask size must be integer assert maskSize % 2 == 1 # Mask size must be odd assert isinstance(kernel, np.ndarray) # kernel should be n-dimensional numpy array @abstractmethod - def computePixel(self, sub): + def compute(self, sub): + """ + Abstract method declared here in base class and later defined in child classes that must as a rule inherit this method. + This is the krux of the ABC design approach - each filter will and must uniquely implement its own computation method to + calculate the pixel update value based on its intended filtering function. + :param sub: the sub matrix/ window of pixel values generated from convolution of the window with image + :return: pixel update value + """ pass def convolve(self, img, padding=True): """ - This function which takes an image and a kernel and returns the convolution of them. - :param padding: bool defines if padding is used - :param img: numpy array of image to be filtered - :return: numpy array of filtered image (image convoluted with kernel) + Convolution of filter object's kernel over the image recieved as a parameter to this function. + :param padding: boolean used to configure the addition of zero-padding to image. + :param img: n-dimensional numpy array of original image pixel values that will each be updates during filtering i.e the original image data + :return: numpy array of dimension equal to original image array with updated pixel values i.e. the filtered image data """ + # If padding required, create padding, else original image stored as padded image if padding: imgPadded = padImage(img, self.maskSize) else: imgPadded = img - print("No padding added.") + logging.warning("No padding added. 
This may mean the first/ last pixels of each row may not be filtered.") # Flip the kernel up/down and left/right self.kernel = np.flipud(np.fliplr(self.kernel)) - # Create output array of zeros with same shape and type as img array + # Create output array of zeros with same shape and type as original image data output = np.zeros_like(img) - # Loop over every pixel of padded image - for col in range(img.shape[1]): + # Iterate over every column in that row + for col in tqdm(range(img.shape[1])): + + # Iterate over every row in the image for row in range(img.shape[0]): + # Create sub matrix of mask size surrounding pixel under consideration sub = imgPadded[row: row+self.maskSize, col: col+self.maskSize] - output[row, col] = self.computePixel(sub) - - return output -class FourierFilter: - def fft2D_scipy(self, img, plot=False): - """ - Function transforms image into Fourier domain - :param plot: bool to configure plotting of fourier spectum. default=False - :param img: image to be transformed - :return: image in fourier domain/ fourier spectrum of image - """ - imgFFT = fftpack.fft2(img) - if plot: self.plotFourierSpectrum(imgFFT) - return imgFFT - - @staticmethod - def dft(x): # not in use - """ - Function computes the discrete fourier transform of a 1D array - :param x: input array, 1 dimensional - :return: np.array of fourier transformed input array - """ - x = np.asarray(x, dtype=float) - N = x.shape[0] - n = np.arange(N) - k = n.reshape((N, 1)) - M = np.exp((-2j * np.pi * k * n) / N) - return np.dot(M, x) - - def fft(self, x): - """ - Function recursively implements 1D Cooley-Turkey fast fourier transform - :param x: input array, 1 dimensional - :return: np.array of fourier transformed input array - """ - x = np.array(x, dtype=float) - N = x.shape[0] - - if N % 2 > 0: - raise ValueError("size of x must be a power of 2") - elif N <= 32: - return self.dft(x) - else: - X_even = self.fft(x[::2]) - X_odd = self.fft(x[1::2]) - factor = np.exp((-2j * np.pi * np.arange(N)) /N) - return np.concatenate([X_even + factor[:N / 2] * X_odd, - X_even + factor[N / 2:] * X_odd ]) - - def fft2D(self, x): - """ - Function recursively implements 1D Cooley-Turkey fast fourier transform - :param x: input array, 1 dimensional - :return: np.array of fourier transformed input array - """ - x = np.array(x, dtype=float) - xRot = x.T - - self.fft(x) - - @staticmethod - def inverseFFT_scipy(img): - return fftpack.ifft2(img).real + # Store the updated pixel intensity (returned from the filter's own computation method) in the filtered image array + output[row, col] = self.compute(sub) - @staticmethod - def plotFourierSpectrum(imgFFT): - """ - Function displays fourier spectrum of image that has been fourier transformed - :param imgFFT: fourier spectrum of img - :return: None - """ - plt.figure() - plt.imshow(np.abs(imgFFT), norm=LogNorm(vmin=5)) - plt.colorbar() - plt.title('Fourier Spectrum') + return output class HistogramFilter(ABC): + """ + Base class for all histogram filters, inherited by any histogram filter and containing all methods and attributes + common to operation of all histogram filters. + """ def __init__(self, maskSize, name): + """ + Object initialisation override used to assign parameters passed on creation of new class instance to object attributes. 
+ :param maskSize: mask size used to scan over pixels during convolution to detect surrounding pixel intensities (aka window size) + :param name: meta information - name of filter + """ assert isinstance(maskSize, int) # Mask size must be integer try: assert maskSize % 2 == 1 # Mask size must be odd @@ -212,10 +189,11 @@ def __init__(self, maskSize, name): def getHistogramWithCS(self, img): """ - Function takes in image as array of pixel intensities and generates a histogram and scaled cumulative sum + Function takes in image as an n-dimensional array of pixel intensities and generates a histogram and scaled cumulative sum :param img: numpy array of pixel intensities :return: histogram array and scaled cumulative sum of histogram """ + # Catch errors for wrong data type, allowing for one exception by casting to integer on first exception try: assert img.dtype == 'uint8' except AssertionError: @@ -236,14 +214,27 @@ def getHistogramWithCS(self, img): return histogram.astype('uint8'), csScaled def filter(self, img, plotHistograms=True): + """ + Primary access point from external code for any histogram filter. Equivalent to convolve for Spatial filters. + Function computes and returns filtered image. + :param img: original image data + :param plotHistograms: boolean used to configure if a plot of original and updated histograms should be displayed to + Jupyter notebook or not. + :return: filtered image + """ + # Call computation method unique to each filter implementation. imgFiltered, histogram, cs = self.compute(img) + # Plot histograms if required if plotHistograms: + # Generate histogram and cumulative sum for filtered image histogramNew, csNew = self.getHistogramWithCS(imgFiltered) + # Plot histograms for display in notebook self.plotHistograms(histogram, histogramNew, cs, csNew) else: pass + # Return filtered image return imgFiltered @staticmethod @@ -267,13 +258,13 @@ def plotHistograms(histogram, histogramNew, cs, csNew): """ Function plots overlaying histograms with cumulative sum to show change between original and filtered histogram. If no filtered histogram present, second series will be skipped. 
- :param csNew: - :param cs: cumulative sum of histogram - :param histogramNew: histogram after filtering technique + :param csNew: cumulative sum of filtered image histogram values + :param cs: cumulative sum of original image histogram values + :param histogramNew: histogram after filter has been applied :param histogram: histogram of original image :return: None """ - # Set up figure + # Set dimensions of figure fig = plt.figure() fig.set_figheight(5) fig.set_figwidth(15) @@ -285,165 +276,229 @@ def plotHistograms(histogram, histogramNew, cs, csNew): plt.fill_between(np.arange(np.size(histogramNew)), scale(histogramNew), label='filtered_hist', alpha=0.4) plt.plot(csNew, label='filtered_cs') except ValueError: - print("Only one histogram to plot.") + logging.info("Only one histogram to plot.") pass + # Add legend and show plot of histograms plt.legend() plt.show() - @staticmethod - def interpolate(subBin, LU, RU, LB, RB, subX, subY): - """ - - :param subBin: - :param LU: - :param RU: - :param LB: - :param RB: - :param subX: - :param subY: - :return: - """ - subImage = np.zeros(subBin.shape) - num = subX * subY - for i in range(subX): - inverseI = subX - i - for j in range(subY): - inverseJ = subY - j - val = subBin[i, j].astype(int) - subImage[i, j] = np.floor( - (inverseI * (inverseJ * LU[val] + j * RU[val]) + i * (inverseJ * LB[val] + j * RB[val])) / float( - num)) - return subImage - @abstractmethod def compute(self, img): + """ + Abstract method declared here in base class and later defined in child classes that must as a rule inherit this method. + This is the krux of the ABC design approach - each filter will and must uniquely implement its own computation method to + calculate the pixel update value based on its intended filtering function. 
+ :param img: the n-dimensional array of pixel values that represent the original image data + :return: pixel update value + """ pass class Median(SpatialFilter): def __init__(self, maskSize): - kernel = np.zeros((maskSize,maskSize)) - middle = int((maskSize-1)/2) - kernel[middle, middle] = 1 - - super().__init__(maskSize, kernel, name='median', linearity='non-linear') + # Arbitrary kernel weights assigned since kernel is not used + super().__init__(maskSize, np.zeros((maskSize,maskSize)), name='median', linearity='non-linear') - def computePixel(self, sub): + def compute(self, sub): + # Python's statistics library is used to compute the statistical median of + # the flattened pixel array return statistics.median(sub.flatten()) +class AdaptiveWeightedMedian(SpatialFilter): + def __init__(self, maskSize, constant, centralWeight): + + # Create 1D array of linearly distributed values with given start/ stop values and a step size of maskSize + ax = np.linspace(-(maskSize - 1) / 2., (maskSize - 1) / 2., maskSize) + + # Create coordinate grid using 1D linspace array + xx, yy = np.meshgrid(ax, ax) + + # Finally, create kernel of weight corresponding to distance from centre using pythagoras theorem + kernel = np.sqrt(np.square(xx) + np.square(yy)) + + # set max weight, used for centre of kernel, and constant used in formula + self.constant = constant + self.centralWeight = centralWeight + + super().__init__(maskSize, kernel, name='adaptive-weighted-median', linearity='non-linear') + + def compute(self, sub): + # Calculate the standard deviation and mean of sub matrix + std = np.std(sub) + mean = np.mean(sub) + + if mean == 0: + mean = 1 + else: + pass + + # Create matrix of weights based on sub matrix, using formula for adaptive weighted median filter + weights = self.centralWeight - self.constant*std*np.divide(self.kernel, mean) + + # Identify any negative weights in boolean array + mask = weights < 0 + # Use as inverse mask truncate negative weights to zero to ensure low pass characteristics + weights = np.multiply(np.invert(mask), weights) + + # Use list comprehension to pair each element from sub matrix with respective weighting in tuple + # and sort based on sub matrix values/ pixel intensities + pairings = sorted((pixelIntensity, weight) for pixelIntensity, weight in zip(sub.flatten(), weights.flatten())) + + # Calculate where median position will be + medIndex = ceil((np.sum(weights) + 1)/ 2) + cs = np.cumsum([pair[1] for pair in pairings]) + medPairIndex = np.searchsorted(cs, medIndex) + + # Return median of list of weighted sub matrix values + return pairings[medPairIndex][0] + class Mean(SpatialFilter): """ - Effectively a low pass filter. Alternative kernel implemented in class LowPass(Filter). + Effectively a blurring filter. Alternative kernel implemented in class LowPass(Filter). """ def __init__(self, maskSize): + + # Kernel weights defined as one over the number of weights, thus summing to one kernel = np.ones((maskSize,maskSize))/(maskSize**2) - super().__init__(maskSize, kernel, name='mean', linearity='linear') - def computePixel(self, sub): + # Ensure sum of mean kernel weights is essentially 1 try: - assert self.kernel.sum() == 1 + assert kernel.sum() == 1 except AssertionError: - raise Exception("Sum of kernel weights for mean filter should equal 1. They equal {}!".format(self.kernel.sum())) + if abs(1 - kernel.sum()) < 0.01: + pass + else: + raise Exception("Sum of kernel weights for mean filter should equal 1. 
They equal {}!".format(kernel.sum())) + + super().__init__(maskSize, kernel, name='mean', linearity='linear') + + def compute(self, sub): # element-wise multiplication of the kernel and image pixel under consideration - return (self.kernel * sub).sum() + return np.sum(np.multiply(self.kernel, sub)) + +class TrimmedMean(SpatialFilter): + """ + Can be used to discard a number of outliers from the higher and lower ends of the retrieved sub matrix of pixel values. + """ + def __init__(self, maskSize, trimStart=1, trimEnd=1): + + # Same as the mean filter, kernel weights defined as one over the number of weights, thus summing to one + kernel = np.ones((maskSize,maskSize))/(maskSize**2) + + # Ensure sum of weights equals one + try: + assert kernel.sum() == 1 + except AssertionError: + if abs(1 - kernel.sum()) < 0.01: + pass + else: + raise Exception("Sum of kernel weights for mean filter should equal 1. They equal {}!".format(kernel.sum())) + + # Assign trim parameters as attributes specific to this class for use in computation + self.trimStart = trimStart + self.trimEnd = trimEnd + super().__init__(maskSize, kernel, name='trimmed-mean', linearity='linear') + + def compute(self, sub): + + # Flatten sub matrix + trimmedSub = list(sub.flatten()) + + # Index a specified number of elements from either end of the flattened array + # Return mean of this selection of elements + return np.mean(trimmedSub[self.trimStart:-self.trimStart]) class Gaussian(SpatialFilter): def __init__(self, sig): # Calculate mask size from sigma value. Ensures filter approaches zero at edges (always round up) maskSize = ceil((6 * sig) + 1) - # TODO: implement mask size override? or scaling down of kernel values + # Ensure mask size is odd if maskSize % 2 == 0: maskSize += 1 else: pass + # Create kernel with weights representing gaussian distribution with input standard deviation + # Create 1D array of linearly distributed values with given start/ stop values and a step size of maskSize ax = np.linspace(-(maskSize - 1) / 2., (maskSize - 1) / 2., maskSize) + + # Create coordinate grid using 1D linspace array xx, yy = np.meshgrid(ax, ax) + # Finally, create kernel using gaussian distribution formula kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig)) super().__init__(maskSize, kernel, name='gaussian', linearity='linear') - def computePixel(self, sub): + def compute(self, sub): """ Element-wise multiplication of the kernel and image pixel under consideration, accounting for normalisation to mitigate DC distortion effects. :param sub: sub matrix of image pixel under consideration and surrounding pixels within mask size. :return: product of sub matrix with kernel normalised by sum of kernel weights """ - return (self.kernel * sub).sum()/ self.kernel.sum() + return np.sum(np.multiply(self.kernel, sub))/ self.kernel.sum() -class HighPass(SpatialFilter): +class Sharpening(SpatialFilter): """ High pass filter to have sharpening effect on image. """ def __init__(self, maskSize): - # TODO: Make ratio of intensity reduction vs. 
increase configurable for both high and low pass - kernel = np.full((maskSize, maskSize), -1/(maskSize**2)) + # Create kernel of negative one over the square of mask size + kernel = np.full((maskSize, maskSize), -1) + + # Set centre pixel to positive fraction such that kernel weights sum to zero middle = int((maskSize-1)/2) - kernel[middle, middle] = 1 - 1/(maskSize**2) - #TODO: Check for high and low pass filter if they are non-linear or linear - super().__init__(maskSize, kernel, name='high-pass', linearity='non-linear') + kernel[middle, middle] = maskSize**2 - 1 + + # Divide all elements by the number of elements in the window + kernel = np.divide(kernel, maskSize**2) - def computePixel(self, sub): + super().__init__(maskSize, kernel, name='high-pass', linearity='linear') + + def compute(self, sub): + + # Ensure sum of kernel weights is effectively zero try: - assert -0.01 < self.kernel.sum() < 0.01 + assert -0.01 < np.sum(self.kernel) < 0.01 except AssertionError: - raise Exception("Sum of high pass filter weights should be effectively zero.") + raise Exception("Sum of high pass filter weights should be effectively 0.") - return (self.kernel * sub).sum() + # Perform element-wise multiplication of kernel and window contents, then sum + return np.sum(np.multiply(self.kernel, sub)) class LowPass(SpatialFilter): - def __init__(self, maskSize): + def __init__(self, maskSize, middleWeight=1/2, otherWeights=1/8): kernel = np.zeros((maskSize, maskSize)) middle = int((maskSize-1)/2) - kernel[middle, :] = 1/8 - kernel[:, middle] = 1/8 - kernel[middle, middle] = 1/2 + kernel[middle, :] = otherWeights + kernel[:, middle] = otherWeights + kernel[middle, middle] = middleWeight super().__init__(maskSize, kernel, name='low-pass', linearity='non-linear') - def computePixel(self, sub): + def compute(self, sub): return (self.kernel * sub).sum()/ self.kernel.sum() -class TruncateCoefficients(FourierFilter): - def __init__(self, keep=0.1): - self.keep = keep - - def compute(self, img, plot=False): - # Get fourier transform of image - imgFFT = self.fft2D_scipy(img, plot=plot) - - # Call ff a copy of original transform - imgFFT2 = imgFFT.copy() - - # Get shape of image: rows and columns - row, col = imgFFT2.shape - - # Set all rows and cols to zero not within the keep fraction - imgFFT2[ceil(row*self.keep):floor(row*(1-self.keep)), :] = 0 - imgFFT2[:, ceil(col*self.keep):floor(col*(1-self.keep))] = 0 - - if plot: self.plotFourierSpectrum(imgFFT2) - - return self.inverseFFT_scipy(imgFFT2) - class Equalise(HistogramFilter): """ This filter normalises the brightness whilst increasing the contrast of the image at the same time. 
""" def __init__(self): - super().__init__(None, name='histogram-equalise') + super().__init__(3, name='histogram-equalise') def compute(self, img): + # Generate histogram and cumulative sum of original image histogram, cs = self.getHistogramWithCS(img) + # Index pixel values from flattened original image at each value of the cumulative sum imgNew = cs[img.flatten()] + # Return the image with evenly distributed pixel intensities with the same dimensions as original image return np.reshape(imgNew, img.shape), histogram, cs class AHE(HistogramFilter): @@ -466,14 +521,15 @@ def compute(self, img, padding=True): imgPadded = padImage(img, self.maskSize) else: imgPadded = img - print("No padding added.") + logging.info("No padding added.") # Create output array of zeros with same shape and type as img array imgFiltered = np.zeros_like(img) # Loop over every pixel of padded image - for col in tqdm(range(img.shape[1])): - for row in range(img.shape[0]): + for row in tqdm(range(img.shape[0])): + for col in range(img.shape[1]): + # Create sub matrix of mask size surrounding pixel under consideration sub = imgPadded[row: row+self.maskSize, col: col+self.maskSize] @@ -496,12 +552,19 @@ def __init__(self, maskSize=32): super().__init__(maskSize, name='sliding-window-adaptive-histogram-equalise') def updateHistogramAndSub(self, histogram, sub, nextCol): + + # Pair pixels in the corresponding rows of the trailing and next columns for pixelSub, pixelAdd in zip(sub[:, 0], nextCol): + # Subtract 1 from the histogram at the occurrence of each pixel intensity in the trailing column histogram[pixelSub] -= 1 + + # Add one for each pixel intensity occurrence in the next kernel window column histogram[pixelAdd] += 1 + # Drop the trailing column of the sub matrix sub = np.delete(sub, 0, axis=1) + # Return the histogram and sub matrix with next column appended return histogram, np.append(sub, nextCol.reshape((self.maskSize, 1)), axis=1) def compute(self, img, padding=True): @@ -519,7 +582,7 @@ def compute(self, img, padding=True): imgPadded = padImage(img, self.maskSize) else: imgPadded = img - print("No padding added.") + logging.info("No padding added.") # Create output array of zeros with same shape and type as img array imgFiltered = np.zeros_like(img) @@ -543,11 +606,18 @@ def compute(self, img, padding=True): try: # Get next column of sub array in image nextCol = imgPadded[row: row+self.maskSize, col+self.maskSize] + except IndexError: - if col + self.maskSize == imgPadded.shape[1] + 1: + + # Allow index error due to it being the last row in the row. 
+ # Favoured computationally over running an if statement during each iteration + if col + self.maskSize <= imgPadded.shape[1] + 1: continue else: - raise IndexError("Index error triggered unexpectedly when at column {}, row {}.".format(col, row)) + raise IndexError("Index error triggered unexpectedly when at column {}, row {}.\n" + "mask size = {}\n" + "col+self.maskSize = {}\n" + "imgPadded.shape[1] = {}\n".format(col, row, self.maskSize, col+self.maskSize, imgPadded.shape[1])) # Create sub matrix of mask size surrounding pixel under consideration histogram, sub = self.updateHistogramAndSub(histogram, sub, nextCol) @@ -566,175 +636,3 @@ def compute(self, img, padding=True): # return pattern of filter function in parent class return imgFiltered, histogramOriginal, csOriginal -class CLAHE(HistogramFilter): - def __init__(self, maskSize): - super().__init__(maskSize, name='contrast-limited-adaptive-histogram-equalise') - - def compute(self, img): - raise NotImplementedError - - @staticmethod - def clahe(img, clipLimit, bins=128, maskSize=32): - """ - Function performs clipped adaptive histogram equalisation on input image - :param maskSize: size of kernel to scan over image - :param img: input image as array - :param clipLimit: normalised clip limit - :param bins: number of gray level bins for histogram - :return: return calhe image - """ - if clipLimit == 1: return - - # Get number of rows and columns of img array - row, col = img.shape - # Allow min 128 bins - bins = max(bins, 128) - - # Pad image to allow for integer number of kernels to fit in rows and columns - subRows = ceil(row / maskSize) - subCols = ceil(col / maskSize) - - # Get size of padding - padX = int(maskSize * (subRows - row / maskSize)) - padY = int(maskSize * (subCols - col / maskSize)) - - if padX != 0 or padY != 0: - imgPadded = padImage(img, padX, padY) - else: - imgPadded = img - print("No padding needed as the mask size of {} creates {} mini rows from the original image with {} rows." 
- "Likewise, {} mini columns from the original image with {} columns.".format(maskSize,subRows,row,subCols,col)) - - noPixels = maskSize**2 - # xsz2 = round(kernelX / 2) - # ysz2 = round(kernelY / 2) - claheImage = np.zeros(imgPadded.shape) - - if clipLimit > 0: - # Allow minimum clip limit of 1 - clipLimit = max(1, clipLimit * maskSize**2 / bins) - else: - # Convert any negative clip limit to 50 - clipLimit = 50 - - # makeLUT - print("...Make the LUT...") - minVal = 0 # np.min(img) - maxVal = 255 # np.max(img) - - # maxVal1 = maxVal + np.maximum(np.array([0]),minVal) - minVal - # minVal1 = np.maximum(np.array([0]),minVal) - - binSz = np.floor(1 + (maxVal - minVal) / float(bins)) - LUT = np.floor((np.arange(minVal, maxVal + 1) - minVal) / float(binSz)) - - # BACK TO CLAHE - bins = LUT[img] - print(bins.shape) - # makeHistogram - print("...Making the Histogram...") - hist = np.zeros((subRows, subCols, bins)) - print(subRows, subCols, hist.shape) - for i in range(subRows): - for j in range(subCols): - bin_ = bins[i * maskSize:(i + 1) * maskSize, j * maskSize:(j + 1) * maskSize].astype(int) - for i1 in range(maskSize): - for j1 in range(maskSize): - hist[i, j, bin_[i1, j1]] += 1 - - # clipHistogram - print("...Clipping the Histogram...") - if clipLimit > 0: - for i in range(subRows): - for j in range(subCols): - nrExcess = 0 - for nr in range(bins): - excess = hist[i, j, nr] - clipLimit - if excess > 0: - nrExcess += excess - - binIncr = nrExcess / bins - upper = clipLimit - binIncr - for nr in range(bins): - if hist[i, j, nr] > clipLimit: - hist[i, j, nr] = clipLimit - else: - if hist[i, j, nr] > upper: - nrExcess += upper - hist[i, j, nr] - hist[i, j, nr] = clipLimit - else: - nrExcess -= binIncr - hist[i, j, nr] += binIncr - - if nrExcess > 0: - stepSz = max(1, np.floor(1 + nrExcess / bins)) - for nr in range(bins): - nrExcess -= stepSz - hist[i, j, nr] += stepSz - if nrExcess < 1: - break - - # mapHistogram - print("...Mapping the Histogram...") - map_ = np.zeros((subRows, subCols, bins)) - # print(map_.shape) - scale = (maxVal - minVal) / float(noPixels) - for i in range(subRows): - for j in range(subCols): - sum_ = 0 - for nr in range(bins): - sum_ += hist[i, j, nr] - map_[i, j, nr] = np.floor(min(minVal + sum_ * scale, maxVal)) - - # BACK TO CLAHE - # INTERPOLATION - print("...interpolation...") - xI = 0 - for i in range(subRows + 1): - if i == 0: - subX = int(maskSize / 2) - xU = 0 - xB = 0 - elif i == subRows: - subX = int(maskSize / 2) - xU = subRows - 1 - xB = subRows - 1 - else: - subX = maskSize - xU = i - 1 - xB = i - - yI = 0 - for j in range(subCols + 1): - if j == 0: - subY = int(maskSize / 2) - yL = 0 - yR = 0 - elif j == subCols: - subY = int(maskSize / 2) - yL = subCols - 1 - yR = subCols - 1 - else: - subY = maskSize - yL = j - 1 - yR = j - UL = map_[xU, yL, :] - UR = map_[xU, yR, :] - BL = map_[xB, yL, :] - BR = map_[xB, yR, :] - # print("CLAHE vals...") - subBin = bins[xI:xI + subX, yI:yI + subY] - # print("clahe subBin shape: ",subBin.shape) - subImage = HistogramFilter.interpolate(subBin, UL, UR, BL, BR, subX, subY) - claheImage[xI:xI + subX, yI:yI + subY] = subImage - yI += subY - xI += subX - - if padX == 0 and padY != 0: - return claheImage[:, :-padY] - elif padX != 0 and padY == 0: - return claheImage[:-padX, :] - elif padX != 0 and padY != 0: - return claheImage[:-padX, :-padY] - else: - return claheImage \ No newline at end of file diff --git a/handler.py b/handler.py index 82a4536..ef486a7 100644 --- a/handler.py +++ b/handler.py @@ -5,17 +5,19 @@ Digital Image 
diff --git a/handler.py b/handler.py
index 82a4536..ef486a7 100644
--- a/handler.py
+++ b/handler.py
@@ -5,17 +5,19 @@ Digital Image Processing 2020, Assignment 1/1, 20%
 """
 
+# Import packages used in code
 import numpy as np
 from PIL import Image
 import logging
-from IPython.display import display
 import filters
 import matplotlib.pyplot as plt
 from pathlib import Path
 import os.path
 
+# Initialise logging used to track info and warning messages
 logging.basicConfig()
 
+# Global variables used for ease of access to test images
 FOETUS_PATH_ORIGINAL = ".\\images\\foetus.png"
 NZJERS_PATH_ORIGINAL = ".\\images\\NZjers1.png"
 
@@ -27,12 +29,14 @@ def getImageAsArray(path, convertToGrey=True):
     :return: both numpy array of image pixel values and original Pillow image data for optional use (likely will discard)
     """
     try:
-        # Use Pillow to load image data
+        # Use library Pillow to open image data
         imgData = Image.open(path)
     except FileNotFoundError:
         raise Exception("No file found at that file path. Check it's there and try again. If error persists, check for special characters in file path.")
 
+    # Convert to grey scale by default
     if convertToGrey:
+        # Convert image to grey scale
         imgData.convert("L")
     else:
         pass
@@ -40,10 +44,14 @@
     # Convert to 2D numpy array and return
     return np.asarray(imgData), imgData
 
-def plotFigs(images):
+def plotFigs(images, imageFilter=None, filteredTitle=None, edgeTitle=None):
     """
-    Simple function to display image(s) in notebook. Intended for use to see original vs filtered images.
-
+    Provides uniform plotting of figures to show filtering results alongside the original image. Image
+    edges can be displayed in an additional third column, determined automatically by the number of images passed in.
+    :param images: list of images to display - determines the number of subplot columns
+    :param imageFilter: filter object whose name and mask size are used in the default filtered-image title
+    :param filteredTitle: override for the filtered-image title, used to display more than the default information
+    :param edgeTitle: override for the edge-image title
     :return: None
     """
     # Check types and correct common error if necessary
@@ -56,32 +64,55 @@
     else:
         raise Exception("Make sure you pass in either a single image as np ndarray or list of images as np ndarray.")
 
-    # set up side-by-side image display
+    # Set dimensions of side-by-side image display
     fig = plt.figure()
     fig.set_figheight(15)
     fig.set_figwidth(15)
 
-    ax1 = fig.add_subplot(1,2,1)
+    # Create axis for first figure, set title and show grey-scale original image on plot
+    ax1 = fig.add_subplot(1,len(images),1)
     ax1.title.set_text('original')
     plt.imshow(images[0], cmap='gray')
 
-    # display the new image
-    ax2 = fig.add_subplot(1,2,2)
-    ax2.title.set_text('filtered')
+    # Create axis for second figure, set title and show grey-scale filtered image on plot
+    ax2 = fig.add_subplot(1,len(images),2)
+    if filteredTitle:
+        title = filteredTitle
+    elif imageFilter:
+        title = imageFilter.name + "_maskSize" + str(imageFilter.maskSize)
+    else:
+        title = 'filtered'
+    ax2.title.set_text(title)
     plt.imshow(images[1], cmap='gray')
 
+    # Create axis for optional third figure (edge image) when more than two images are passed in
+    if len(images) > 2:
+        # Display the edge image in the third column
+        ax3 = fig.add_subplot(1, len(images), 3)
+        if edgeTitle:
+            title = edgeTitle
+        else:
+            title = 'edge image'
+        ax3.title.set_text(title)
+        plt.imshow(images[2], cmap='gray')
+
+    # Show all plots in Jupyter display
     plt.show(block=True)
 
 
-def saveAll(img, filtr, saveFilter=True):
+def saveAll(img, imageFilter, saveFilter=True):
     """
-    Function to save all figures relevant to report. Currently filtered image and plot of kernel.
-    :return:
+    Saves all filter results to disk, including the kernel plot, the filter object state and the filtered image.
+    :param img: filtered image
+    :param imageFilter: filter object
+    :param saveFilter: boolean controlling whether the filter object's attributes are saved to a text file
+    :return: None
     """
-    assert isinstance(filtr, filters.SpatialFilter)
 
+    # Locate current directory, used for joining with relative paths
     currentDir = Path().absolute()
-    root = str(currentDir) + '\\..\outputs\{}\maskSize_{}\\'.format(filtr.name, filtr.maskSize)
+    root = str(currentDir) + '\\..\outputs\{}\maskSize_{}\\'.format(imageFilter.name, imageFilter.maskSize)
 
+    # Create root path if not present
     if not os.path.exists(root):
         os.makedirs(root)
 
@@ -92,19 +123,21 @@
     img_PIL.save(root+'filtered_image.png', 'PNG')
     print("Saved filtered image to... \n{}\n\n".format(root+'filtered_image.png'))
 
-    # TODO: Make saved image of plot larger. Currently will be tiny if mask size is eg 9x9.
     # Save figure of kernel plot to image
-    plt.imsave(root+'kernel_plot.png', filtr.kernel)
+    plt.imsave(root + 'kernel_plot.png', imageFilter.kernel)
     print("Saved filtered image to... \n{}\n\n".format(root+'kernel.png'))
 
     if saveFilter:
         # Save filter attributes (including kernel as array.tolist()) to text file for traceability
+        # Open text file with write permissions
         with open(root+'filter.txt', 'w') as f:
-            for k, v in filtr.__dict__.items():
+            # Retrieve attributes from the filter object's dictionary as key-value pairs
+            for k, v in imageFilter.__dict__.items():
                 if isinstance(v, np.ndarray):
                     v = v.tolist()
                 else:
                     pass
+                # Write attribute field and values to text file
                 f.write(''.join("filter.{} = {}\n".format(k, v)))
         print("Saved filter object attributes to... \n{}\n\n".format(root + 'filter.txt'))
     else:
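With the handler.py changes above, plotFigs() builds the default title from the filter object's name and maskSize and can render an optional third, edge-image column, while saveAll() writes the filtered image, the kernel plot and the filter attributes under the outputs directory. The snippet below is a minimal sketch of the intended call pattern, not part of the patch; filters.Mean is used purely as an example, and any filter object exposing name, maskSize, kernel and convolve() should fit.

```python
import filters
import handler

# Load a test image as a 2D numpy array (second return value is the raw Pillow image)
img, _ = handler.getImageAsArray(handler.FOETUS_PATH_ORIGINAL)

# Example filter object; its name, maskSize and kernel attributes are read by plotFigs/saveAll
meanFilter = filters.Mean(maskSize=5)
imgFiltered = meanFilter.convolve(img)

# Two images -> original and filtered columns; the filtered title defaults to "<name>_maskSize<n>"
handler.plotFigs([img, imgFiltered], meanFilter)

# Persist the filtered image, kernel plot and filter attributes for the report
handler.saveAll(imgFiltered, meanFilter)
```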
diff --git a/third_party_filters.py b/third_party_filters.py
index 81c2124..1b5ea52 100644
--- a/third_party_filters.py
+++ b/third_party_filters.py
@@ -1,5 +1,7 @@
 import cv2.cv2 as cv
 import handler
+import numpy as np
+import matplotlib.pyplot as plt
 
 def equalise(path):
     """
@@ -21,4 +23,9 @@ def CLAHE(path):
     # create a CLAHE object (Arguments are optional).
     clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
     cl1 = clahe.apply(img)
-    handler.plotFigs([img, cl1])
\ No newline at end of file
+    handler.plotFigs([img, cl1])
+
+def edgeDetect(img, minVal=100, maxVal=200):
+    """Return the Canny edge map of img; minVal and maxVal are the hysteresis thresholds."""
+    return cv.Canny(img, minVal, maxVal)
+
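The new edgeDetect() wrapper returns an OpenCV Canny edge map, which is what the optional third column added to plotFigs() is meant to display. Below is a short sketch of wiring the two together, not part of the patch; the thresholds are just the function's defaults, and the middle image is left unfiltered here purely to show the three-column layout.

```python
import handler
import third_party_filters as tpf

# Load the test image; cv.Canny requires an 8-bit single-channel image
img, _ = handler.getImageAsArray(handler.NZJERS_PATH_ORIGINAL)
img8 = img.astype('uint8')

# Canny edge map using the default hysteresis thresholds (minVal=100, maxVal=200)
edges = tpf.edgeDetect(img8, minVal=100, maxVal=200)

# Three images -> original, "filtered" and edge columns in a single figure
handler.plotFigs([img, img8, edges], filteredTitle='unfiltered (placeholder)', edgeTitle='Canny edges')
```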