diff --git a/data/body-v2.ply b/data/body-v2.ply
new file mode 100755
index 0000000..2815a81
Binary files /dev/null and b/data/body-v2.ply differ
diff --git a/etc/FOSSEE_Image_Processing_Toolbox.quit b/etc/FOSSEE_Image_Processing_Toolbox.quit
new file mode 100644
index 0000000..459eb07
--- /dev/null
+++ b/etc/FOSSEE_Image_Processing_Toolbox.quit
@@ -0,0 +1,34 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+
+function quitModule()
+
+ etc_tlbx = get_absolute_file_path("FOSSEE_Image_Processing_Toolbox.quit");
+ etc_tlbx = getshortpathname(etc_tlbx);
+ root_tlbx = strncpy( etc_tlbx, length(etc_tlbx)-length("\etc\") );
+
+ //unlink libraries
+ [bOK, ilib] = c_link('FOSSEE_Image_Processing_Toolbox');
+ if bOK then
+ ulink(ilib);
+ end
+
+ // Remove Preferences GUI
+ // =============================================================================
+ if getscilabmode() == "STD" then
+ removeModulePreferences(root_tlbx);
+ end
+ ulink();
+endfunction
+
+clear quitModule;
+
diff --git a/etc/FOSSEE_Image_Processing_Toolbox.quit~ b/etc/FOSSEE_Image_Processing_Toolbox.quit~
new file mode 100644
index 0000000..ca53a1c
--- /dev/null
+++ b/etc/FOSSEE_Image_Processing_Toolbox.quit~
@@ -0,0 +1,34 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+
+function quitModule()
+
+ etc_tlbx = get_absolute_file_path("FOSSEE_Image_Processing_Toolbox.quit");
+ etc_tlbx = getshortpathname(etc_tlbx);
+ root_tlbx = strncpy( etc_tlbx, length(etc_tlbx)-length("\etc\") );
+
+ //unlink libraries
+ [bOK, ilib] = c_link('FOSSEE_Optimization_Toolbox');
+ if bOK then
+ ulink(ilib);
+ end
+
+ // Remove Preferences GUI
+ // =============================================================================
+ if getscilabmode() == "STD" then
+ removeModulePreferences(root_tlbx);
+ end
+ ulink();
+endfunction
+
+clear quitModule;
+
diff --git a/etc/FOSSEE_Image_Processing_Toolbox.start b/etc/FOSSEE_Image_Processing_Toolbox.start
new file mode 100644
index 0000000..77c3e11
--- /dev/null
+++ b/etc/FOSSEE_Image_Processing_Toolbox.start
@@ -0,0 +1,83 @@
+// Copyright (C) 2017 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+mprintf("Start FOSSEE Image Processing Toolbox\n");
+
+[a, opt] = getversion();
+Version = opt(2);
+
+etc_tlbx = get_absolute_file_path("FOSSEE_Image_Processing_Toolbox.start");
+etc_tlbx = getshortpathname(etc_tlbx);
+root_tlbx = strncpy( etc_tlbx, length(etc_tlbx)-length("\etc\") );
+
+//Load functions library
+// =============================================================================
+mprintf("\tLoad macros\n");
+pathmacros = pathconvert( root_tlbx ) + "macros" + filesep();
+fipt_lib = lib(pathmacros);
+clear pathmacros;
+
+// load gateways
+// =============================================================================
+
+mprintf("\tLoad gateways\n");
+[a, opt] = getversion();
+Version = opt(2);
+ilib_verbose(0);
+if getos()=="Windows" then
+// lib_path = root_tlbx + "/thirdparty/windows/bin/" + Version;*/
+// link(lib_path+filesep()+"IpOptFSS.dll");*/
+// link(lib_path+filesep()+"IpOpt-vc10.dll");*/
+else
+ //lib_path = root_tlbx + "/thirdparty/linux/lib/" + Version;
+// lib_path="/usr/lib";*/
+ //link(lib_path + "/libopencv_core.so");
+ //link(lib_path + "/libopencv_highgui.so");
+ //link(lib_path + "/libopencv_imgproc.so");
+ lib_path="/usr/local/lib";
+ link(lib_path + "/libopencv_core.so");
+ link(lib_path + "/libopencv_highgui.so");
+ link(lib_path + "/libopencv_imgproc.so");
+
+
+end
+exec(pathconvert(root_tlbx + filesep() + "sci_gateway" + filesep() + "loader_gateway.sce",%f));
+
+// Load and add help chapter
+// =============================================================================
+//if ( %t ) then
+//if or(getscilabmode() == ["NW";"STD"]) then
+// mprintf("\tLoad help\n");
+// path_addchapter = pathconvert(root_tlbx+"/jar");
+// if ( isdir(path_addchapter) <> [] ) then
+// add_help_chapter("FOSSEE_Image_Processing_Toolbox", path_addchapter, %F);
+// clear add_help_chapter;
+// end
+// clear path_addchapter;
+//end
+//end
+
+
+// add demos
+// =============================================================================
+
+if ( %t ) then
+if or(getscilabmode() == ["NW";"STD"]) then
+ mprintf("\tLoad demos\n");
+ //pathdemos = pathconvert(root_tlbx+"/demos/sci_FOSSEE_Image_Processing_Toolbox.dem.gateway.sce",%f,%t);
+ //add_demo("FOSSEE_Image_Processing_Toolbox",pathdemos);
+ //clear pathdemos ;
+end
+end
+
+// =============================================================================
+
+clear etc_tlbx root_tlbx Version a opt lib_path;
diff --git a/etc/FOSSEE_Image_Processing_Toolbox.start~ b/etc/FOSSEE_Image_Processing_Toolbox.start~
new file mode 100644
index 0000000..bf6a60e
--- /dev/null
+++ b/etc/FOSSEE_Image_Processing_Toolbox.start~
@@ -0,0 +1,77 @@
+// Copyright (C) 2017 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+mprintf("Start FOSSEE Image Processing Toolbox\n");
+
+[a, opt] = getversion();
+Version = opt(2);
+
+etc_tlbx = get_absolute_file_path("FOSSEE_Image_Processing_Toolbox.start");
+etc_tlbx = getshortpathname(etc_tlbx);
+root_tlbx = strncpy( etc_tlbx, length(etc_tlbx)-length("\etc\") );
+
+//Load functions library
+// =============================================================================
+mprintf("\tLoad macros\n");
+pathmacros = pathconvert( root_tlbx ) + "macros" + filesep();
+fipt_lib = lib(pathmacros);
+clear pathmacros;
+
+// load gateways
+// =============================================================================
+
+mprintf("\tLoad gateways\n");
+[a, opt] = getversion();
+Version = opt(2);
+ilib_verbose(0);
+if getos()=="Windows" then
+// lib_path = root_tlbx + "/thirdparty/windows/bin/" + Version;*/
+// link(lib_path+filesep()+"IpOptFSS.dll");*/
+// link(lib_path+filesep()+"IpOpt-vc10.dll");*/
+else
+ lib_path = root_tlbx + "/thirdparty/linux/lib/" + Version;
+// lib_path="/usr/lib";*/
+// link(lib_path + "/libopencv_core.so");*/
+// link(lib_path + "/libopencv_highgui.so");*/
+// link(lib_path + "/libopencv_imgproc.so");*/
+
+end
+exec(pathconvert(root_tlbx + filesep() + "sci_gateway" + filesep() + "loader_gateway.sce",%f));
+
+// Load and add help chapter
+// =============================================================================
+if ( %t ) then
+if or(getscilabmode() == ["NW";"STD"]) then
+ mprintf("\tLoad help\n");
+ path_addchapter = pathconvert(root_tlbx+"/jar");
+ if ( isdir(path_addchapter) <> [] ) then
+ add_help_chapter("FOSSEE_Image_Processing_Toolbox", path_addchapter, %F);
+ clear add_help_chapter;
+ end
+ clear path_addchapter;
+end
+end
+
+// add demos
+// =============================================================================
+
+if ( %t ) then
+if or(getscilabmode() == ["NW";"STD"]) then
+ mprintf("\tLoad demos\n");
+ //pathdemos = pathconvert(root_tlbx+"/demos/sci_FOSSEE_Image_Processing_Toolbox.dem.gateway.sce",%f,%t);
+ //add_demo("FOSSEE_Image_Processing_Toolbox",pathdemos);
+ //clear pathdemos ;
+end
+end
+
+// =============================================================================
+
+clear etc_tlbx root_tlbx Version a opt lib_path;
diff --git a/etc/FOSSEE_Optimization_Toolbox.quit~ b/etc/FOSSEE_Optimization_Toolbox.quit~
new file mode 100644
index 0000000..86ed18a
--- /dev/null
+++ b/etc/FOSSEE_Optimization_Toolbox.quit~
@@ -0,0 +1,34 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+// Author: Harpreet Singh
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+
+function quitModule()
+
+ etc_tlbx = get_absolute_file_path("FOSSEE_Optimization_Toolbox.quit");
+ etc_tlbx = getshortpathname(etc_tlbx);
+ root_tlbx = strncpy( etc_tlbx, length(etc_tlbx)-length("\etc\") );
+
+ //unlink libraries
+ [bOK, ilib] = c_link('FOSSEE_Optimization_Toolbox');
+ if bOK then
+ ulink(ilib);
+ end
+
+ // Remove Preferences GUI
+ // =============================================================================
+ if getscilabmode() == "STD" then
+ removeModulePreferences(root_tlbx);
+ end
+ ulink();
+endfunction
+
+clear quitModule;
+
diff --git a/etc/FOSSEE_Optimization_Toolbox.start~ b/etc/FOSSEE_Optimization_Toolbox.start~
new file mode 100644
index 0000000..f2a5a06
--- /dev/null
+++ b/etc/FOSSEE_Optimization_Toolbox.start~
@@ -0,0 +1,85 @@
+// Copyright (C) 2017 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+
+mprintf("Start FOSSEE Image Processing Toolbox\n");
+
+[a, opt] = getversion();
+Version = opt(2);
+
+etc_tlbx = get_absolute_file_path("FOSSEE_Image_Processing_Toolbox.start");
+etc_tlbx = getshortpathname(etc_tlbx);
+root_tlbx = strncpy( etc_tlbx, length(etc_tlbx)-length("\etc\") );
+
+//Load functions library
+// =============================================================================
+mprintf("\tLoad macros\n");
+pathmacros = pathconvert( root_tlbx ) + "macros" + filesep();
+symphony_lib = lib(pathmacros);
+clear pathmacros;
+
+// load gateways
+// =============================================================================
+
+mprintf("\tLoad gateways\n");
+[a, opt] = getversion();
+Version = opt(2);
+ilib_verbose(0);
+if getos()=="Windows" then
+ lib_path = root_tlbx + "/thirdparty/windows/bin/" + Version;
+ link(lib_path+filesep()+"IpOptFSS.dll");
+ link(lib_path+filesep()+"IpOpt-vc10.dll");
+else
+ lib_path = root_tlbx + "/thirdparty/linux/lib/" + Version;
+ link(lib_path + "/libCoinUtils.so");
+ link(lib_path + "/libClp.so");
+ link(lib_path + "/libClpSolver.so");
+ link(lib_path + "/libOsi.so");
+ link(lib_path + "/libOsiCommonTests.so");
+ link(lib_path + "/libOsiClp.so");
+ link(lib_path + "/libCgl.so");
+ link(lib_path + "/libSym.so");
+ link(lib_path + "/libOsiSym.so");
+ link(lib_path + "/libcoinblas.so");
+ link(lib_path + "/libcoinlapack.so");
+ link(lib_path + "/libcoinmumps.so");
+ link(lib_path + "/libipopt.so");
+end
+exec(pathconvert(root_tlbx + filesep() + "sci_gateway" + filesep() + "loader_gateway.sce",%f));
+
+// Load and add help chapter
+// =============================================================================
+if ( %t ) then
+if or(getscilabmode() == ["NW";"STD"]) then
+ mprintf("\tLoad help\n");
+ path_addchapter = pathconvert(root_tlbx+"/jar");
+ if ( isdir(path_addchapter) <> [] ) then
+ add_help_chapter("FOSSEE_Optimization_Toolbox", path_addchapter, %F);
+ clear add_help_chapter;
+ end
+ clear path_addchapter;
+end
+end
+
+// add demos
+// =============================================================================
+
+if ( %t ) then
+if or(getscilabmode() == ["NW";"STD"]) then
+ mprintf("\tLoad demos\n");
+ pathdemos = pathconvert(root_tlbx+"/demos/sci_FOSSEE_Image_Processing_Toolbox.dem.gateway.sce",%f,%t);
+ add_demo("FOSSEE_Image_Processing_Toolbox",pathdemos);
+ clear pathdemos ;
+end
+end
+
+// =============================================================================
+
+clear etc_tlbx root_tlbx Version a opt lib_path;
diff --git a/help/builder_help.sce b/help/builder_help.sce
new file mode 100644
index 0000000..4d4e031
--- /dev/null
+++ b/help/builder_help.sce
@@ -0,0 +1,21 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+
+mode(-1)
+lines(0)
+
+toolbox_title = "FOSSEE_Image_Processing_Toolbox"
+
+help_dir = get_absolute_file_path('builder_help.sce');
+
+tbx_builder_help_lang("en_US", help_dir);
+
+clear toolbox_title;
diff --git a/help/builder_help.sce~ b/help/builder_help.sce~
new file mode 100644
index 0000000..da2f4f7
--- /dev/null
+++ b/help/builder_help.sce~
@@ -0,0 +1,21 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Harpreet Singh
+// Organization: FOSSEE, IIT Bombay
+// Email: harpreet.mertia@gmail.com
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+
+mode(-1)
+lines(0)
+
+toolbox_title = "FOSSEE_Optimization_Toolbox"
+
+help_dir = get_absolute_file_path('builder_help.sce');
+
+tbx_builder_help_lang("en_US", help_dir);
+
+clear toolbox_title;
diff --git a/help/en_US/align.xml b/help/en_US/align.xml
new file mode 100644
index 0000000..dd0501e
--- /dev/null
+++ b/help/en_US/align.xml
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+ align
+ This function aligns the set of input images for HDR image creation.
+
+
+
+
+ Calling Sequence
+
+ [out1, out2, out3] = align(maxBits, excludeRange, cut, num, srcImg_1, srcImg_2, srcImg_3)
+ [out1, out2, out3, out4] = align(maxBits, excludeRange, cut, num, srcImg_1, srcImg_2, srcImg_3, srcImg_4)
+ [out1, out2, out3, out4, out5] = align(maxBits, excludeRange, cut, num, srcImg_1, srcImg_2, srcImg_3, srcImg_4, srcImg_5)
+
+
+
+
+
+ Parameters
+
+ maxBits :
+ Logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are usually good enough (31 and 63 pixels shift respectively). Value should not exceed 6. It is of Double type.
+ excludeRange :
+ Range for exclusion bitmap that is constructed to suppress noise around the median value. It is of Double type.
+ cut :
+ If true, cuts images. Otherwise fills the new regions with zeros. It is of Boolean type.
+ num :
+ Number of images given as input source images(3 - 5). It is of double type.
+ srcImg_i :
+ Hypermat of image_i.
+
+
+
+
+ Description
+
+This function uses the AlignMTB algorithm, which converts images to median threshold bitmaps (1 for pixels brighter than the median luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/applyTransformer.xml b/help/en_US/applyTransformer.xml
new file mode 100644
index 0000000..28edb15
--- /dev/null
+++ b/help/en_US/applyTransformer.xml
@@ -0,0 +1,111 @@
+
+
+
+
+
+
+
+ applyTransformer
+ This function is used to apply affine or TPS transformation to image.
+
+
+
+
+ Calling Sequence
+
+ [ tImg] = applyTransformer(srcImg1, srcImg2, typeOfMethod, hessianThreshold, rpTPS, sfAffine)
+
+
+
+
+
+ Parameters
+
+ srcImg1 :
+ It is the first input image.
+ srcImg2 :
+ It is the second input image, which is also the target image.
+ typeOfMethod :
+ It is used as a flag to pick a certain type of transformation. Use value '1' for 'Affine Transformation' and '2' for 'Thin Plate Spline Shape Transformation'. It is of double type.
+ hessianThreshold :
+ It is the threshold value for Hessian keypoint detector in SURF(Speeded-Up Robust Features). It is of double type.
+ rpTPS :
+ It is used to set the regularization parameter for relaxing the exact interpolation requirements of the TPS algorithm. It is of double type.
+ sfAffine :
+ It is used to set the full-affine condition for Affine Transformation. If true, the function finds an optimal transformation with no additional restrictions(6 degrees of freedom). Otherwise, the class of transformations to choose from is limited to combinations of translation, rotation &amp; uniform scaling(5 degrees of freedom).
+ tImg :
+ The transformed image of the target(srcImg2). It is of hypermat type.
+
+
+
+
+ Description
+
+This function is used to perform shape transformation, the user gets to choose and apply the type of transformation she/he wishes to perform.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/build_help.sce b/help/en_US/build_help.sce
new file mode 100644
index 0000000..81154f3
--- /dev/null
+++ b/help/en_US/build_help.sce
@@ -0,0 +1,17 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+
+help_lang_dir = get_absolute_file_path('build_help.sce');
+
+tbx_build_help(TOOLBOX_TITLE, help_lang_dir);
+
+clear help_lang_dir;
+
diff --git a/help/en_US/build_help.sce~ b/help/en_US/build_help.sce~
new file mode 100644
index 0000000..0205c56
--- /dev/null
+++ b/help/en_US/build_help.sce~
@@ -0,0 +1,17 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// Author: Shamika Mohanan
+// Organization: FOSSEE, IIT Bombay
+// Email: harpreet.mertia@gmail.com
+// This file must be used under the terms of the BSD.
+// This source file is licensed as described in the file LICENSE, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// https://opensource.org/licenses/BSD-3-Clause
+
+help_lang_dir = get_absolute_file_path('build_help.sce');
+
+tbx_build_help(TOOLBOX_TITLE, help_lang_dir);
+
+clear help_lang_dir;
+
diff --git a/help/en_US/bwLookUp.xml b/help/en_US/bwLookUp.xml
new file mode 100644
index 0000000..689d176
--- /dev/null
+++ b/help/en_US/bwLookUp.xml
@@ -0,0 +1,70 @@
+
+
+
+
+
+
+
+ bwLookUp
+ This function performs 2*2 and 3*3 nonlinear filtering using a lookup table.
+
+
+
+
+ Calling Sequence
+
+ [out] = bwLookUp(image,lut)
+
+
+
+
+
+ Parameters
+
+ image :
+ The input is a grayscale image. If the image is not binary, it is converted to one.
+ lut :
+ The lut is a 1*16 double vector [2*2 filtering], or a [1*512] double vector [3*3 filtering].
+ out :
+ The output image is the same size as image, same data type as lut.
+
+
+
+
+ Description
+
+The function performs a 2-by-2 or 3-by-3 nonlinear neighborhood filtering operation on a grayscale image and returns the results in the output image. The neighborhood processing determines an integer index value used to access values in a lookup table 'lut'. The fetched lut value becomes the pixel value in the output image at the targeted position.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Samiran Roy
+
+
+
diff --git a/help/en_US/contourArea.xml b/help/en_US/contourArea.xml
new file mode 100644
index 0000000..24a004c
--- /dev/null
+++ b/help/en_US/contourArea.xml
@@ -0,0 +1,71 @@
+
+
+
+
+
+
+
+ contourArea
+ This function calculates the contour area.
+
+
+
+
+ Calling Sequence
+
+ [out] = contourArea(inputArrayContour, booloriented)
+
+
+
+
+
+ Parameters
+
+ inputArrayContour :
+ The input vector of 2D points.
+ booloriented :
+ The oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine the orientation of a contour by taking the sign of an area.
+ out :
+ The output is the calculated area.
+
+
+
+
+ Description
+
+It computes the contour area. Note that the function will most certainly give wrong results for contours with self-intersections.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Priyanka Hiranandani, NIT Surat
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/copyMakeBorder.xml b/help/en_US/copyMakeBorder.xml
new file mode 100644
index 0000000..2b7e315
--- /dev/null
+++ b/help/en_US/copyMakeBorder.xml
@@ -0,0 +1,84 @@
+
+
+
+
+
+
+
+ copyMakeBorder
+ This function forms a border around the input image.
+
+
+
+
+ Calling Sequence
+
+ [new_image] = copyMakeBorder(image, top, bottom, left, right, borderType, value)
+
+
+
+
+
+ Parameters
+
+ image :
+ The source image.
+ top :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ bottom :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ left :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ right :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ borderType :
+ Stating the border type.
+ value :
+ Border value if borderType==BORDER_CONSTANT.
+ new_image :
+ The output image with specified borders.
+
+
+
+
+ Description
+
+This function forms a border around the input image. The areas to the left, to the right, above and below the copied source image are filled with the extrapolated pixels.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik
+ Shubheksha Jalan
+
+
+
diff --git a/help/en_US/detectBRIEFDescriptors.xml b/help/en_US/detectBRIEFDescriptors.xml
new file mode 100644
index 0000000..da11473
--- /dev/null
+++ b/help/en_US/detectBRIEFDescriptors.xml
@@ -0,0 +1,103 @@
+
+
+
+
+
+
+
+ detectBRIEFDescriptors
+ This function is used for computing BRIEF descriptors using Star keypoints.
+
+
+
+
+ Calling Sequence
+
+ [ a ] = detectBRIEFDescriptors(srcImg)
+ [ a ] = detectBRIEFDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, bytes, use_orientation )
+
+
+
+
+
+ Parameters
+
+ srcImg :
+ Hyper of input image
+ maxSize :
+ Choose the number of filters to be applied, the parameter value set the maximum size.
+ responseThreshold :
+ To eliminate weak corners.
+ lineThresholdProjected :
+ Harris of responses.
+ lineThresholdBinarized :
+ Harris of sizes.
+ suppressNonmaxSize :
+ Window size (n-by-n) to apply the non-maximal suppression.
+ bytes :
+ length of the descriptor in bytes; valid values are: 16, 32 (default) or 64.
+ use_orientation :
+ sample patterns using keypoints orientation, disabled by default.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+
+
+
+ Description
+
+For extracting keypoints(StarDetector) and computing descriptors. BRIEF provides a shortcut to compute binary descriptors with less memory and faster matching, while still achieving a high recognition rate.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
diff --git a/help/en_US/detectDAISYDescriptors.xml b/help/en_US/detectDAISYDescriptors.xml
new file mode 100644
index 0000000..487bd15
--- /dev/null
+++ b/help/en_US/detectDAISYDescriptors.xml
@@ -0,0 +1,116 @@
+
+
+
+
+
+
+
+ detectDAISYDescriptors
+ This function is used for computing DAISY descriptors using Star keypoints.
+
+
+
+
+ Calling Sequence
+
+ [ a ] = detectDAISYDescriptors(srcImg)
+ [ a ] = detectDAISYDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, radius, q_radius, q_theta, q_hist, norm, interpolation, use_orientation)
+ [ a ] = detectDAISYDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, radius, q_radius, q_theta, q_hist, norm, homography, interpolation, use_orientation)
+
+
+
+
+
+ Parameters
+
+ srcImg :
+ Hyper of input image
+ maxSize :
+ Choose the number of filters to be applied, the parameter value set the maximum size.
+ responseThreshold :
+ To eliminate weak corners.
+ lineThresholdProjected :
+ Harris of responses.
+ lineThresholdBinarized :
+ Harris of sizes.
+ suppressNonmaxSize :
+ Window size (n-by-n) to apply the non-maximal suppression.
+ radius :
+ radius of the descriptor at the initial scale.
+ q_radius :
+ amount of radial range division quantity.
+ q_theta :
+ amount of angular range division quantity.
+ q_hist :
+ amount of gradient orientations range division quantity.
+ norm :
+ choose descriptors normalization type, where DAISY::NRM_NONE will not do any normalization (default), DAISY::NRM_PARTIAL mean that histograms are normalized independently for L2 norm equal to 1.0, DAISY::NRM_FULL mean that descriptors are normalized for L2 norm equal to 1.0, DAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT
+ homography :
+ optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image.
+ interpolation :
+ switch to disable interpolation for speed improvement at minor quality loss.
+ use_orientation :
+ sample patterns using keypoints orientation, disabled by default.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+
+
+
+ Description
+
+For extracting keypoints(using StarDetector) and computing descriptors(DAISY).
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
diff --git a/help/en_US/detectLATCHDescriptors.xml b/help/en_US/detectLATCHDescriptors.xml
new file mode 100644
index 0000000..3e34219
--- /dev/null
+++ b/help/en_US/detectLATCHDescriptors.xml
@@ -0,0 +1,105 @@
+
+
+
+
+
+
+
+ detectLATCHDescriptors
+ This function is used for computing the LATCH descriptors using Star keypoints.
+
+
+
+
+ Calling Sequence
+
+ [ a ] = detectLATCHDescriptors(srcImg)
+ [ a ] = detectLATCHDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, bytes, rotationInvariance, half_ssd_size)
+
+
+
+
+
+ Parameters
+
+ srcImg :
+ Hyper of input image
+ maxSize :
+ Choose the number of filters to be applied, the parameter value set the maximum size.
+ responseThreshold :
+ To eliminate weak corners.
+ lineThresholdProjected :
+ Harris of responses.
+ lineThresholdBinarized :
+ Harris of sizes.
+ suppressNonmaxSize :
+ Window size (n-by-n) to apply the non-maximal suppression.
+ bytes :
+ It is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1.
+ rotationInvariance :
+ whether or not the descriptor should compansate for orientation changes.
+ half_ssd_size :
+ the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7, then the half_ssd_size should be (7-1)/2 = 3.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+
+
+
+ Description
+
+For extracting keypoints(using StarDetectors) and computing descriptors(LATCH).
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
diff --git a/help/en_US/detectSIFTFeatures.xml b/help/en_US/detectSIFTFeatures.xml
new file mode 100644
index 0000000..93ab7cf
--- /dev/null
+++ b/help/en_US/detectSIFTFeatures.xml
@@ -0,0 +1,98 @@
+
+
+
+
+
+
+
+ detectSIFTFeatures
+ This function is used to find scale-invariant features.
+
+
+
+
+ Calling Sequence
+
+ [ a ] = detectSIFTFeatures(srcImg)
+ [ a ] = detectSIFTFeatures(srcImg, features, nOctaveLayers, contrastThreshold, edgeThreshold, sigma)
+
+
+
+
+
+ Parameters
+
+ srcImg :
+ Hyper of input image.
+ nfeatures :
+ The number of best features to retain. The features are ranked by their scores (measured in SIFT algorithm as the local contrast). If valued as 0, uses all detected keypoints.
+ nOctaveLayers :
+ The number of layers in each octave. 3 is the value used in D. Lowe paper. The number of octaves is computed automatically from the image resolution.
+ contrastThreshold :
+ The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
+ edgeThreshold :
+ The threshold used to filter out edge-like features. Note that its meaning is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are filtered out (more features are retained).
+ sigma :
+ The sigma of the Gaussian applied to the input image at the octave #0. If your image is captured with a weak camera with soft lenses, you might want to reduce the number.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+
+
+
+ Description
+
+For extracting keypoints and computing descriptors using the Scale Invariant Feature Transform. RGB images are converted to Grayscale images before processing.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/distanceExtractor.xml b/help/en_US/distanceExtractor.xml
new file mode 100644
index 0000000..96e0b39
--- /dev/null
+++ b/help/en_US/distanceExtractor.xml
@@ -0,0 +1,122 @@
+
+
+
+
+
+
+
+ distanceExtractor
+ This function computes the shape-distance between two images.
+
+
+
+
+ Calling Sequence
+
+ [ dist ] = distanceExtractor(srcImg1, srcImg2, typeOfMethod); // Hausdorff distance
+ [ dist ] = distanceExtractor(srcImg1, srcImg2, typeOfMethod, nAngularBins, innerRadius, nRadialBins, outerRadius, iterations); // Shape Context
+
+
+
+
+
+ Parameters
+
+ srcImg1 :
+ It is the first input image.
+ srcImg2 :
+ It is the second input image.
+ typeOfMethod :
+ It is used as a flag to pick a certain type of Shape Distance calculation technique. Use '1' for 'Shape Context' and '2' for 'Hausdorff'.
+ nAngularBins :
+ Establish the number of angular bins for the Shape Context Descriptor used in the shape matching pipeline.
+ nRadialBins :
+ Establish the number of radial bins for the Shape Context Descriptor used in the shape matching pipeline.
+ innerRadius :
+ Set the inner radius of the shape context descriptor.
+ outerRadius :
+ Set the outer radius of the shape context descriptor.
+ dist :
+ It is the calculated distance. It is of double type.
+
+
+
+
+ Description
+
+This function is used to compute the shape distance between two shapes defined by its contours.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/fillConvexPoly.xml b/help/en_US/fillConvexPoly.xml
new file mode 100644
index 0000000..eb8f6ce
--- /dev/null
+++ b/help/en_US/fillConvexPoly.xml
@@ -0,0 +1,81 @@
+
+
+
+
+
+
+
+ fillConvexPoly
+ This function fills a convex polygon.
+
+
+
+
+ Calling Sequence
+
+ [out] = fillConvexPoly(img, pstData, npts, r_value, g_value, b_value, linetype, shift)
+
+
+
+
+
+ Parameters
+
+ img :
+ The input source image.
+ pstData :
+ The vector of polygon vertices.
+ npts :
+ The number of polygon vertices.
+ r_value :
+ The red value of RGB color for the polygon.
+ g_value :
+ The green value of RGB color for the polygon.
+ b_value :
+ The blue value of RGB color for the polygon.
+ linetype :
+ This is the type of the polygon boundaries. It has only 3 valid types: 4, 8 and 16(CV_AA). Passing any other value as lineType is not legal.
+ shift :
+ This is the number of fractional bits in the vertex coordinates.
+
+
+
+
+ Description
+
+The function fillConvexPoly draws a filled convex polygon. It can fill not only convex polygons but any monotonic polygon without self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) twice at the most (though, its top-most and/or the bottom edge could be horizontal).
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Abhilasha Sancheti
+ Sukul Bagai
+
+
+
diff --git a/help/en_US/gabor.xml b/help/en_US/gabor.xml
new file mode 100644
index 0000000..1dfaa27
--- /dev/null
+++ b/help/en_US/gabor.xml
@@ -0,0 +1,70 @@
+
+
+
+
+
+
+
+ gabor
+ This function creates a Gabor filter.
+
+
+
+
+ Calling Sequence
+
+ [outputImg] = gabor(wavelength,orientation)
+
+
+
+
+
+ Parameters
+
+ wavelength :
+ It is the wavelength of sinusoid, specified as a numeric scalar or vector, in pixels/cycle.
+ orientation :
+ It is the orientation of filter in degrees, specified as a numeric scalar in the range [0 180], where the orientation is defined as the normal direction to the sinusoidal plane wave.
+ outputImg :
+ The Gabor filter.
+
+
+
+
+ Description
+
+It creates a Gabor filter with the specified wavelength (in pixels/cycle) and orientation (in degrees). If you specify wavelength or orientation as vectors, gabor returns an array of gabor objects, called a filter bank, that contain all the unique combinations of wavelength and orientation. For example, if wavelength is a vector of length 2 and orientation is a vector of length 3, then the output array is a vector of length 6.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Samiran Roy
+
+
+
diff --git a/help/en_US/gaussianBlur.xml b/help/en_US/gaussianBlur.xml
new file mode 100644
index 0000000..5270c36
--- /dev/null
+++ b/help/en_US/gaussianBlur.xml
@@ -0,0 +1,74 @@
+
+
+
+
+
+
+
+ gaussianBlur
+ This function blurs the input image using a Gaussian filter.
+
+
+
+
+ Calling Sequence
+
+ outputImg = gaussianBlur(inputImage,ksize_height,ksize_width,sigmaX,sigmaY)
+
+
+
+
+
+ Parameters
+
+ inputImage :
+ The input source image.
+ ksize_height :
+ It is the gaussian kernel height. It must be positive and odd.
+ ksize_width :
+ It is the gaussian kernel width. It must be positive and odd.
+ sigmaX :
+ It is the gaussian kernel standard deviation in X direction.
+ sigmaY :
+ It is the gaussian kernel standard deviation in Y direction.
+ outputImg :
+ The output filtered image is of the same size and type as the input image.
+
+
+
+
+ Description
+
+The function convolves the source image with the specified Gaussian kernel.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Sukul Bagai
+
+
+
diff --git a/help/en_US/histogramCostExtractor.xml b/help/en_US/histogramCostExtractor.xml
new file mode 100644
index 0000000..8bc86c0
--- /dev/null
+++ b/help/en_US/histogramCostExtractor.xml
@@ -0,0 +1,111 @@
+
+
+
+
+
+
+
+ histogramCostExtractor
+ This function computes the cost matrix.
+
+
+
+
+ Calling Sequence
+
+ [ costMatrix ] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod=3, hessianThreshold); // Norm based cost
+ [ costMatrix ] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod=1, hessianThreshold, nDummies, defaultCost); // Chi based cost extraction
+ [ costMatrix ] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod=2, hessianThreshold, nDummies, defaultCost); // EMDL1 based cost extraction
+
+
+
+
+
+ Parameters
+
+ srcImg1 :
+ It is the first input image.
+ srcImg2 :
+ It is the second input image.
+ typeOfMethod :
+ It is used as a flag to pick a certain type of transformation. Use value '1' for 'Chi based cost extraction', '2' for 'EMDL1 based cost extraction' and '3' for 'Norm based cost extraction'. It is of double type.
+ hessianThreshold :
+ It is the threshold value for Hessian keypoint detector in SURF(Speeded-Up Robust Features). It is of double type.
+ rpTPS :
+ It is used to set the regularization parameter for relaxing the exact interpolation requirements of the TPS algorithm. It is of double type.
+ costMatrix :
+ It is the cost matrix.
+
+
+
+
+ Description
+
+This function is used to calculate the histogram based cost matrix of two images, the user gets to choose and apply the type of transformation she/he wishes to perform.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
diff --git a/help/en_US/imGaborFilt.xml b/help/en_US/imGaborFilt.xml
new file mode 100644
index 0000000..380c46e
--- /dev/null
+++ b/help/en_US/imGaborFilt.xml
@@ -0,0 +1,71 @@
+
+
+
+
+
+
+
+ imGaborFilt
+ The function applies Gabor filter or set of filters to 2-D image.
+
+
+
+
+ Calling Sequence
+
+ [new_image] = imGaborFilt(image, wavelength, orientation)
+
+
+
+
+
+ Parameters
+
+ image :
+ The input grayscale image.
+ wavelength :
+ It is the wavelength of the sinusoidal carrier, specified as a numeric scalar in the range [2,Inf), in pixels/cycle.
+ orientation :
+ Orientation value of filter in degrees, specified as a numeric scalar in the range [0 360], where the orientation is defined as the normal direction to the sinusoidal plane wave.
+
+
+
+
+ Description
+
+It computes the magnitude and phase response of a Gabor filter for the input grayscale image.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Samiran Roy
+
+
+
diff --git a/help/en_US/lab2rgb.xml b/help/en_US/lab2rgb.xml
new file mode 100644
index 0000000..b1a6392
--- /dev/null
+++ b/help/en_US/lab2rgb.xml
@@ -0,0 +1,158 @@
+
+
+
+
+
+
+
+ lab2rgb
+ This function converts CIE 1976 L*a*b* to RGB.
+
+
+
+
+ Calling Sequence
+
+ [output] = lab2rgb(pstData)
+
+
+
+
+
+ Parameters
+
+ pstData :
+ The color values to convert, specified as a list of values.
+ output :
+ The converted color values, returned as an array of the same shape as the input.
+
+
+
+
+ Description
+
+Convert CIE 1976 L*a*b* to RGB.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Tess Zacharias
+ Ashish Manatosh Barik
+
+
+
diff --git a/help/en_US/lab2uint8.xml b/help/en_US/lab2uint8.xml
new file mode 100644
index 0000000..78fe0a8
--- /dev/null
+++ b/help/en_US/lab2uint8.xml
@@ -0,0 +1,157 @@
+
+
+
+
+
+
+
+ lab2uint8
+ This function converts L*a*b* data to uint8.
+
+
+
+
+ Calling Sequence
+
+ [output] = lab2uint8(pstData)
+
+
+
+
+
+ Parameters
+
+ pstData :
+ It is a list of color values.
+ output :
+ The converted uint8 value. lab8 has the same size as lab.
+
+
+
+
+ Description
+
+Converts L*a*b* data to uint8.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Tess Zacharias
+
+
+
diff --git a/help/en_US/makeHDR.xml b/help/en_US/makeHDR.xml
new file mode 100644
index 0000000..ee020ec
--- /dev/null
+++ b/help/en_US/makeHDR.xml
@@ -0,0 +1,251 @@
+
+
+
+
+
+
+
+ makeHDR
+ This function is used to create HDR image.
+
+
+
+
+ Calling Sequence
+
+ [out1, out2] = makeHDR(typeOfMethod=1, num=3, srcMat_1, srcMat_2, srcMat_3, ex_1, ex_2, ex_3, max_iter, threshold) // Robertson merging
+ [out1, out2] = makeHDR(typeOfMethod=2, num=3, srcMat_1, srcMat_2, srcMat_3, ex_1, ex_2, ex_3, samples, lambda, random) // Debevec merging
+ [out1, out2] = makeHDR(typeOfMethod=3, num=3, srcMat_1, srcMat_2, srcMat_3, ex_1, ex_2, ex_3, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+ [out1, out2] = makeHDR(typeOfMethod=1, num=4, srcMat_1, srcMat_2, srcMat_3, srcMat_4, ex_1, ex_2, ex_3, ex_4, max_iter, threshold) // Robertson merging
+ [out1, out2] = makeHDR(typeOfMethod=2, num=4, srcMat_1, srcMat_2, srcMat_3, srcMat_4, ex_1, ex_2, ex_3, ex_4, samples, lambda, random) // Debevec merging
+ [out1, out2] = makeHDR(typeOfMethod=3, num=4, srcMat_1, srcMat_2, srcMat_3, srcMat_4, ex_1, ex_2, ex_3, ex_4, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+ [out1, out2] = makeHDR(typeOfMethod=1, num=5, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, ex_1, ex_2, ex_3, ex_4, ex_5, max_iter, threshold) // Robertson merging
+ [out1, out2] = makeHDR(typeOfMethod=2, num=5, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, ex_1, ex_2, ex_3, ex_4, ex_5, samples, lambda, random) // Debevec merging
+ [out1, out2] = makeHDR(typeOfMethod=3, num=5, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, ex_1, ex_2, ex_3, ex_4, ex_5, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+ [out1, out2] = makeHDR(typeOfMethod=1, num=6, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, srcMat_6, ex_1, ex_2, ex_3, ex_4, ex_5, ex_6, max_iter, threshold) // Robertson merging
+ [out1, out2] = makeHDR(typeOfMethod=2, num=6, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, srcMat_6, ex_1, ex_2, ex_3, ex_4, ex_5, ex_6, samples, lambda, random) // Debevec merging
+ [out1, out2] = makeHDR(typeOfMethod=3, num=6, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, srcMat_6, ex_1, ex_2, ex_3, ex_4, ex_5, ex_6, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+
+
+
+
+
+ Parameters
+
+ typeOfMethod :
+ Use '1' for 'Robertson', '2' for 'Debevec', or '3' for 'Mertens'.
+ num :
+ It is the number of images being fed as input. It is of Double type.
+ srcMat_i :
+ It is the hypermat of input source image.
+ ex_i :
+ It is the exposure value of the corresponding image_i. It is of double type.
+ max_iter :
+ (Robertson) maximal number of Gauss-Seidel solver iterations. It is of Double type.
+ threshold :
+ (Robertson) target difference between results of two successive steps of the minimization. It is of Double type.
+ samples :
+ (Debevec) number of pixel locations to use. It is of Double type.
+ lambda :
+ (Debevec) smoothness term weight. Greater values produce smoother results, but can alter the response. It is of Double type.
+ random :
+ (Debevec) if true sample pixel locations are chosen at random, otherwise they form a rectangular grid. It is of Boolean type.
+ contrast_weight :
+ (Mertens) contrast measure weight. It is of Double type.
+ saturation_weight :
+ (Mertens) saturation measure weight. It is of Double type.
+ exposure_weight :
+ (Mertens) well-exposedness measure weight. It is of Double type.
+ out1 :
+ HDR image
+ out2 :
+ LDR image
+
+
+
+
+ Description
+
+This function takes a set of images of the same scene in different exposures which have been aligned accordingly and outputs the HDR image.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/master_help.xml b/help/en_US/master_help.xml
new file mode 100644
index 0000000..119b0b2
--- /dev/null
+++ b/help/en_US/master_help.xml
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+]>
+
+
+ FOSSEE Image Processing Toolbox
+
+
+
+FOSSEE Image Processing Toolbox
+&aec09a1d8cc19bb05a53d7b60d8ca8004;
+&aabce97dfa16507a3ad934a077cc63187;
+&a6a07ad97dcbef341722ac76091020341;
+&a942913ea089e7d518fc5279fb2c4eb78;
+&aa7bd30194244dde6a89253409d1fd4e4;
+&a594d04d99491a65dd9c7830521fd0391;
+&a5daa6a73fadebf9d150e8fcd94609356;
+&a36a6f3f67476d2c7f770b0f0e2546451;
+&a6bde14b3511ded5d246775a1467523cf;
+&a430cd41198f40c6111e958c2a18cf140;
+&af81275f66c3b3b4a5461f87a2848d06f;
+&a40be16c8195f79a9d4ea09eacbbbc87a;
+&a11d670111a8d1c2c7a25d17c25a4ddad;
+&ad10e78f2ee8a998906601242aafcae4a;
+&acc7c71cb7675456b4b4aa6f8cfae73ae;
+&ac45a87e0679d3007b05fe683f5623a14;
+&a1ed4e5f4e6cfcb3f027f455c8a305a2d;
+&a811a0ca601bbe82229ac5ae4a2e31cbd;
+&aec618785d5bc28aa3cad96f93ff47904;
+&a90b028d690ae13a22ae5e64de37a644d;
+&a0f7c44c67c3adaf79be74f5b0b4627b2;
+&a277aaf4c69882229945db9df7a91b457;
+&a582dab75ff2f89fdf954bbff1576ac6e;
+&a8506f8ce404f3e266aeea3863bd2336c;
+&a81864cacd0775ab7a1f74487dad7a196;
+&a6de7fd8ca7caa7f95ac8214249550d87;
+&ab2274fd613bd5c40fdf9e8842c648628;
+
+
diff --git a/help/en_US/ntsc2rgb.xml b/help/en_US/ntsc2rgb.xml
new file mode 100644
index 0000000..b869a4a
--- /dev/null
+++ b/help/en_US/ntsc2rgb.xml
@@ -0,0 +1,137 @@
+
+
+
+
+
+
+
+ ntsc2rgb
+ This function converts NTSC values to RGB color space.
+
+
+
+
+ Calling Sequence
+
+ [output] = ntsc2rgb(pstData)
+
+
+
+
+
+ Parameters
+
+ pstData :
+ It is a list of the NTSC luminance (Y) and chrominance (I and Q) color components.
+ output :
+ It is a list that contains the red, green, and blue values equivalent to those colors.
+
+
+
+
+ Description
+
+Converts NTSC values to RGB color space.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Tess Zacharias
+
+
+
diff --git a/help/en_US/pcwrite.xml b/help/en_US/pcwrite.xml
new file mode 100644
index 0000000..10c4170
--- /dev/null
+++ b/help/en_US/pcwrite.xml
@@ -0,0 +1,84 @@
+
+
+
+
+
+
+
+ pcwrite
+ This function is used to write 3-D point cloud to PLY or PCD file.
+
+
+
+
+ Calling Sequence
+
+ [] = pcwrite(pointCloud, filename)
+ [] = pcwrite(pointCloud, filename, fileFormat, fileType)
+
+
+
+
+
+ Parameters
+
+ pointCloud :
+ Object for storing point cloud, specified as a pointCloud object.
+ filename :
+ File name, specified as a character vector; specify the file name with an extension in case of two input arguments. (default encoding is ASCII)
+ fileFormat :
+ The input file type must be a PLY or PCD format file.(choose between ".ply" or ".pcd")
+ fileType :
+ Choose from the following encoding, PLY - 'ascii', 'binary' and PCD - 'ascii', 'binary', or 'compressed'.
+
+
+
+
+ Description
+
+Writes the point cloud object, ptCloud, to the PLY or PCD file specified by the input.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/rgb2lab.xml b/help/en_US/rgb2lab.xml
new file mode 100644
index 0000000..3c793c3
--- /dev/null
+++ b/help/en_US/rgb2lab.xml
@@ -0,0 +1,154 @@
+
+
+
+
+
+
+
+ rgb2lab
+ This function converts RGB to CIE 1976 L*a*b*.
+
+
+
+
+ Calling Sequence
+
+ [output] = rgb2lab(inputImage)
+
+
+
+
+
+ Parameters
+
+ inputImage :
+ It is a list of color values to convert.
+ output :
+ The converted color values, returned as a list.
+
+
+
+
+ Description
+
+Converts RGB to CIE 1976 L*a*b*.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Sridhar Reddy
+ Ashish Manatosh Barik
+
+
+
diff --git a/help/en_US/scilab_en_US_help/JavaHelpSearch/DOCS b/help/en_US/scilab_en_US_help/JavaHelpSearch/DOCS
new file mode 100644
index 0000000..a6ce6a4
Binary files /dev/null and b/help/en_US/scilab_en_US_help/JavaHelpSearch/DOCS differ
diff --git a/help/en_US/scilab_en_US_help/JavaHelpSearch/DOCS.TAB b/help/en_US/scilab_en_US_help/JavaHelpSearch/DOCS.TAB
new file mode 100644
index 0000000..928ba57
Binary files /dev/null and b/help/en_US/scilab_en_US_help/JavaHelpSearch/DOCS.TAB differ
diff --git a/help/en_US/scilab_en_US_help/JavaHelpSearch/OFFSETS b/help/en_US/scilab_en_US_help/JavaHelpSearch/OFFSETS
new file mode 100644
index 0000000..1a443d7
Binary files /dev/null and b/help/en_US/scilab_en_US_help/JavaHelpSearch/OFFSETS differ
diff --git a/help/en_US/scilab_en_US_help/JavaHelpSearch/POSITIONS b/help/en_US/scilab_en_US_help/JavaHelpSearch/POSITIONS
new file mode 100644
index 0000000..8911dba
Binary files /dev/null and b/help/en_US/scilab_en_US_help/JavaHelpSearch/POSITIONS differ
diff --git a/help/en_US/scilab_en_US_help/JavaHelpSearch/SCHEMA b/help/en_US/scilab_en_US_help/JavaHelpSearch/SCHEMA
new file mode 100644
index 0000000..ce41e57
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/JavaHelpSearch/SCHEMA
@@ -0,0 +1,2 @@
+JavaSearch 1.0
+TMAP bs=2048 rt=1 fl=-1 id1=1011 id2=1
diff --git a/help/en_US/scilab_en_US_help/JavaHelpSearch/TMAP b/help/en_US/scilab_en_US_help/JavaHelpSearch/TMAP
new file mode 100644
index 0000000..3452435
Binary files /dev/null and b/help/en_US/scilab_en_US_help/JavaHelpSearch/TMAP differ
diff --git a/help/en_US/scilab_en_US_help/ScilabCaution.png b/help/en_US/scilab_en_US_help/ScilabCaution.png
new file mode 100644
index 0000000..231159a
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabCaution.png differ
diff --git a/help/en_US/scilab_en_US_help/ScilabEdit.png b/help/en_US/scilab_en_US_help/ScilabEdit.png
new file mode 100644
index 0000000..ff0cca7
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabEdit.png differ
diff --git a/help/en_US/scilab_en_US_help/ScilabExecute.png b/help/en_US/scilab_en_US_help/ScilabExecute.png
new file mode 100644
index 0000000..4acd4b9
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabExecute.png differ
diff --git a/help/en_US/scilab_en_US_help/ScilabImportant.png b/help/en_US/scilab_en_US_help/ScilabImportant.png
new file mode 100644
index 0000000..73931b2
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabImportant.png differ
diff --git a/help/en_US/scilab_en_US_help/ScilabNote.png b/help/en_US/scilab_en_US_help/ScilabNote.png
new file mode 100644
index 0000000..6607ece
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabNote.png differ
diff --git a/help/en_US/scilab_en_US_help/ScilabTip.png b/help/en_US/scilab_en_US_help/ScilabTip.png
new file mode 100644
index 0000000..6607ece
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabTip.png differ
diff --git a/help/en_US/scilab_en_US_help/ScilabWarning.png b/help/en_US/scilab_en_US_help/ScilabWarning.png
new file mode 100644
index 0000000..231159a
Binary files /dev/null and b/help/en_US/scilab_en_US_help/ScilabWarning.png differ
diff --git a/help/en_US/scilab_en_US_help/align.html b/help/en_US/scilab_en_US_help/align.html
new file mode 100644
index 0000000..b507a62
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/align.html
@@ -0,0 +1,140 @@
+
+
+ align
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > align
+
+
+ align
+
This function aligns the set of input images for HDR image creation.
+
+
+Calling Sequence
+
[ out1 , out2 , out3 ] = align ( maxBits , excludeRange , cut , num , srcImg_1 , srcImg_2 , srcImg_3 )
+[ out1 , out2 , out3 , out4 ] = align ( maxBits , excludeRange , cut , num , srcImg_1 , srcImg_2 , srcImg_3 , srcImg_4 )
+[ out1 , out2 , out3 , out4 , out5 ] = align ( maxBits , excludeRange , cut , num , srcImg_1 , srcImg_2 , srcImg_3 , srcImg_4 , srcImg_5 )
+
+Parameters
+
maxBits :
+ Logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are usually good enough (31 and 63 pixels shift respectively). Value should not exceed 6. It is of Double type.
+ excludeRange :
+ Range for exclusion bitmap that is constructed to suppress noise around the median value. It is of Double type.
+ cut :
+ If true, cuts images. Otherwise fills the new regions with zeros. It is of Boolean type.
+ num :
+ Number of images given as input source images(3 - 5). It is of double type.
+ srcImg_i :
+ Hypermat of image_i.
+
+Description
+
This function uses AlignMTB algorithm which converts images to median threshold bitmaps (1 for pixels brighter than median luminance and 0 otherwise) and then aligns the resulting bitmaps using bit operations.
+
+
+Examples
+
a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = imread ( " m3.jpeg " ) ;
+num = 3 ;
+maxBits = 6 ;
+excludeRange = 4 ;
+cut = %t ;
+[ x , y , z ] = align ( maxBits , excludeRange , cut , num , a , b , c ) ;
+
+Examples
+
a = imread ( " t1.jpeg " ) ;
+b = imread ( " t2.jpeg " ) ;
+c = imread ( " t3.jpeg " ) ;
+d = imread ( " t4.jpeg " ) ;
+num = 4 ;
+maxBits = 6 ;
+excludeRange = 4 ;
+cut = %f ;
+[ x , y , z , p ] = align ( maxBits , excludeRange , cut , num , a , b , c , d ) ;
+
+Examples
+
+a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = imread ( " m3.jpeg " ) ;
+d = imread ( " m4.jpeg " ) ;
+num = 4 ;
+maxBits = 7 ;
+excludeRange = 4 ;
+cut = %t ;
+[ x , y , z , p ] = align ( maxBits , excludeRange , cut , num , a , b , c , d ) ;
+
+Examples
+
a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = imread ( " m3.jpeg " ) ;
+d = imread ( " m4.jpeg " ) ;
+e = imread ( " m5.jpeg " ) ;
+num = 5 ;
+maxBits = 6 ;
+excludeRange = 4 ;
+cut = %t ;
+[ x , y , z , p , q ] = align ( maxBits , excludeRange , cut , num , a , b , c , d , e ) ;
+
+Examples
+
+a = imread ( " t1.jpeg " ) ;
+b = imread ( " t2.jpeg " ) ;
+c = imread ( " t3.jpeg " ) ;
+num = 3 ;
+maxBits = 1 ;
+excludeRange = 4 ;
+cut = %t ;
+[ x , y , z ] = align ( maxBits , excludeRange , cut , num , a , b , c ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/applyTransformer.html b/help/en_US/scilab_en_US_help/applyTransformer.html
new file mode 100644
index 0000000..ff53983
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/applyTransformer.html
@@ -0,0 +1,115 @@
+
+
+ applyTransformer
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > applyTransformer
+
+
+ applyTransformer
+
This function is used to apply affine or TPS transformation to image.
+
+
+Calling Sequence
+
[ tImg ] = applyTransformer ( srcImg1 , srcImg2 , typeOfMethod , hessianThreshold , rpTPS , sfAffine )
+
+Parameters
+
srcImg1 :
+ It is the first input image.
+ srcImg2 :
+ It is the second input image, which is also the target image.
+ typeOfMethod :
+ It is used as a flag to pick a certain type of transformation. Use value '1' for 'Affine Transformation' and '2' for 'Thin Plate Spline Shape Transformation'. It is of double type.
+ hessianThreshold :
+ It is the threshold value for Hessian keypoint detector in SURF(Speeded-Up Robust Features). It is of double type.
+ rpTPS :
+ It is used to set the regularization parameter for relaxing the exact interpolation requirements of the TPS algorithm. It is of double type.
+ sfAffine :
+ It is used to set the full-affine condition for Affine Transformation. If true, the function finds as optimal transformation with no additional restrictions(6 degrees of freedom). Otherwise, the class of transformations to choose from is limited to combination of translation, rotation & uniform scaling(5 degrees of freedom).
+ tImg :
+ The transformed image of the target(srcImg2). It is of hypermat type.
+
+Description
+
This function is used to perform shape transformation, the user gets to choose and apply the type of transformation she/he wishes to perform.
+
+
+Examples
+
affine transformation
+a = imread ( " bryan.jpeg " ) ;
+b = imread ( " p1.jpg " ) ;
+typeOfMethod = 1
+hessianThreshold = 5000 ;
+rpTPS = 25000 ;
+sfAffine = %f ;
+img = applyTransformer ( a , b , typeOfMethod , hessianThreshold , rpTPS , sfAffine ) ;
+
+Examples
+
a = imread ( " lena.jpeg " ) ;
+b = imread ( " bryan.jpeg " ) ;
+typeOfMethod = 1
+hessianThreshold = 5000 ;
+rpTPS = 2000 ;
+sfAffine = %t ;
+img = applyTransformer ( a , b , typeOfMethod , hessianThreshold , rpTPS , sfAffine ) ;
+
+Examples
+
TPS shape transformation
+a = imread ( " photo.jpg " ) ;
+b = imread ( " photo1.jpg " ) ;
+typeOfMethod = 2
+hessianThreshold = 5000 ;
+rpTPS = 800 ;
+sfAffine = %t ;
+img = applyTransformer ( a , b , typeOfMethod , hessianThreshold , rpTPS , sfAffine ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/bwLookUp.html b/help/en_US/scilab_en_US_help/bwLookUp.html
new file mode 100644
index 0000000..66d80ad
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/bwLookUp.html
@@ -0,0 +1,84 @@
+
+
+ bwLookUp
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > bwLookUp
+
+
+ bwLookUp
+
This function performs 2*2 and 3*3 nonlinear filtering using a lookup table.
+
+
+Calling Sequence
+
[ out ] = bwLookUp ( image , lut )
+
+Parameters
+
image :
+ The input is a grayscale image. If the image is not binary, it is converted to one.
+ lut :
+ The lut is a 1*16 double vector [2*2 filtering], or a [1*512] double vector [3*3 filtering].
+ out :
+ The output image is the same size as image, same data type as lut.
+
+Description
+
The function performs a 2-by-2 or 3-by-3 nonlinear neighborhood filtering operation on a grayscale image and returns the results in the output image. The neighborhood processing determines an integer index value used to access values in a lookup table 'lut'. The fetched lut value becomes the pixel value in the output image at the targeted position.
+
+
+Examples
+
+a = imread ( " lena.jpeg " , 0 ) ;
+lut = [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ] ;
+b = bwLookUp ( a , lut ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/c_code.css b/help/en_US/scilab_en_US_help/c_code.css
new file mode 100644
index 0000000..948d2ee
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/c_code.css
@@ -0,0 +1,54 @@
+.ccomment {
+ font-style: italic;
+ color: #b22222
+}
+
+.cdefault {
+ font-style: normal;
+ color: #000000
+}
+
+.copenclose {
+ font-style: normal;
+ color: #000000
+}
+
+.coperator {
+ font-style: normal;
+ color: #000000
+}
+
+.cstring {
+ font-style: normal;
+ color: #a6557a
+}
+
+.ctype {
+ font-style: normal;
+ color: #55a655
+}
+
+.cpreprocessor {
+ font-style: normal;
+ color: #9965a6
+}
+
+.cid {
+ font-style: normal;
+ color: #000000
+}
+
+.ckeyword {
+ font-style: normal;
+ color: #ad3ff2
+}
+
+.cmodifier {
+ font-style: normal;
+ color: #ad3ff2
+}
+
+.cnumber {
+ font-style: normal;
+ color: #008b8b
+}
diff --git a/help/en_US/scilab_en_US_help/contourArea.html b/help/en_US/scilab_en_US_help/contourArea.html
new file mode 100644
index 0000000..08bce81
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/contourArea.html
@@ -0,0 +1,85 @@
+
+
+ contourArea
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > contourArea
+
+
+ contourArea
+
This function calculates the contour area.
+
+
+Calling Sequence
+
[ out ] = contourArea ( inputArrayContour , booloriented )
+
+Parameters
+
inputArrayContour :
+ The input vector of 2D points.
+ booloriented :
+ The oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine the orientation of a contour by taking the sign of an area.
+ out :
+ The output is the calculated area.
+
+Description
+
It computes the contour area. Note that the function will most certainly give wrong results for contours with self-intersections.
+
+
+Examples
+
+inputArrayContour = [ 0 0 ; 10 0 ; 10 10 ; 5 4 ] ;
+booloriented = %t ;
+b = contourArea ( inputArrayContour , booloriented ) ;
+
+Authors
+
Priyanka Hiranandani, NIT Surat
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/copyMakeBorder.html b/help/en_US/scilab_en_US_help/copyMakeBorder.html
new file mode 100644
index 0000000..ba545f1
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/copyMakeBorder.html
@@ -0,0 +1,98 @@
+
+
+ copyMakeBorder
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > copyMakeBorder
+
+
+ copyMakeBorder
+
This function forms a border around the input image.
+
+
+Calling Sequence
+
[ new_image ] = copyMakeBorder ( image , top , bottom , left , right , borderType , value )
+
+Parameters
+
image :
+ The source image.
+ top :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ bottom :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ left :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ right :
+ No. of pixels in this direction from the source image rectangle to extrapolate.
+ borderType :
+ Stating the border type.
+ value :
+ Border value if borderType==BORDER_CONSTANT.
+ new_image :
+ The output image with specified borders.
+
+Description
+
This function forms a border around the input image. The areas to the left, to the right, above and below the copied source image are filled with the extrapolated pixels.
+
+
+Examples
+
+a = imread ( " lena.jpeg " ) ;
+top = 1 ;
+bottom = 1 ;
+left = 1 ;
+right = 1 ;
+b = copyMakeBorder ( a , top , bottom , left , right , " BORDER_CONSTANT " , 1 ) ;
+
+Authors
+
Ashish Manatosh Barik
+ Shubheksha Jalan
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/detectBRIEFDescriptors.html b/help/en_US/scilab_en_US_help/detectBRIEFDescriptors.html
new file mode 100644
index 0000000..4ad8bab
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/detectBRIEFDescriptors.html
@@ -0,0 +1,112 @@
+
+
+ detectBRIEFDescriptors
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > detectBRIEFDescriptors
+
+
+ detectBRIEFDescriptors
+
This function is used for computing BRIEF descriptors using Star keypoints.
+
+
+Calling Sequence
+
[ a ] = detectBRIEFDescriptors ( srcImg )
+[ a ] = detectBRIEFDescriptors ( srcImg , maxSize , responseThreshold , lineThresholdProjected , lineThresholdBinarized , suppressNonmaxSize , bytes , use_orientation )
+
+Parameters
+
srcImg :
+ Hypermat of the input image.
+ maxSize :
+ Choose the number of filters to be applied, the parameter value set the maximum size.
+ responseThreshold :
+ To eliminate weak corners.
+ lineThresholdProjected :
+ Harris of responses.
+ lineThresholdBinarized :
+ Harris of sizes.
+ suppressNonmaxSize :
+ Window size (n-by-n) to apply the non-maximal suppression.
+ bytes :
+ length of the descriptor in bytes; valid values are: 16, 32 (default) or 64.
+ use_orientation :
+ sample patterns using keypoints orientation, disabled by default.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+Description
+
For extracting keypoints(StarDetector) and computing descriptors. BRIEF which gives the shortcut to find binary descriptors with less memory, faster matching, still higher recognition rate.
+
+
+Examples
+
+[ a ] = imread ( " b1.jpeg " ) ;
+[ b ] = imread ( " b2.jpeg " ) ;
+stacksize ( " max " ) ;
+[ c ] = detectBRIEFDescriptors ( a ) ;
+[ d ] = detectBRIEFDescriptors ( b ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Examples
+
+[ a ] = imread ( " b1.jpeg " ) ;
+[ b ] = imread ( " b2.jpeg " ) ;
+stacksize ( " max " ) ;
+[ c ] = detectBRIEFDescriptors ( a , 45 , 30 , 10 , 8 , 5 , 32 , %f ) ;
+[ d ] = detectBRIEFDescriptors ( b , 45 , 30 , 10 , 8 , 5 , 32 , %f ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/detectDAISYDescriptors.html b/help/en_US/scilab_en_US_help/detectDAISYDescriptors.html
new file mode 100644
index 0000000..89abd4d
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/detectDAISYDescriptors.html
@@ -0,0 +1,125 @@
+
+
+ detectDAISYDescriptors
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > detectDAISYDescriptors
+
+
+ detectDAISYDescriptors
+
This function is used for computing DAISY descriptors using Star keypoints.
+
+
+Calling Sequence
+
[ a ] = detectDAISYDescriptors ( srcImg )
+[ a ] = detectDAISYDescriptors ( srcImg , maxSize , responseThreshold , lineThresholdProjected , lineThresholdBinarized , suppressNonmaxSize , radius , q_radius , q_theta , q_hist , norm , interpolation , use_orientation )
+[ a ] = detectDAISYDescriptors ( srcImg , maxSize , responseThreshold , lineThresholdProjected , lineThresholdBinarized , suppressNonmaxSize , radius , q_radius , q_theta , q_hist , norm , homography , interpolation , use_orientation )
+
+Parameters
+
srcImg :
+ Hyper of input image
+ maxSize :
+ Choose the number of filters to be applied, the parameter value set the maximum size.
+ responseThreshold :
+ To eliminate weak corners.
+ lineThresholdProjected :
+ Harris of responses.
+ lineThresholdBinarized :
+ Harris of sizes.
+ suppressNonmaxSize :
+ Window size (n-by-n) to apply the non-maximal suppression.
+ radius :
+ radius of the descriptor at the initial scale.
+ q_radius :
+ amount of radial range division quantity.
+ q_theta :
+ amount of angular range division quantity.
+ q_hist :
+ amount of gradient orientations range division quantity.
+ norm :
+ choose descriptors normalization type, where DAISY::NRM_NONE will not do any normalization (default), DAISY::NRM_PARTIAL mean that histograms are normalized independently for L2 norm equal to 1.0, DAISY::NRM_FULL mean that descriptors are normalized for L2 norm equal to 1.0, DAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT
+ homography :
+ optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image.
+ interpolation :
+ switch to disable interpolation for speed improvement at minor quality loss.
+ use_orientation :
+ sample patterns using keypoints orientation, disabled by default.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+Description
+
For extracting keypoints(using StarDetector) and computing descriptors(DAISY).
+
+
+Examples
+
+[ a ] = imread ( " b1.jpeg " ) ;
+[ b ] = imread ( " b2.jpeg " ) ;
+stacksize ( ' max ' )
+[ c ] = detectDAISYDescriptors ( a ) ;
+[ d ] = detectDAISYDescriptors ( b ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Examples
+
+[ a ] = imread ( " b1.jpeg " ) ;
+[ b ] = imread ( " b2.jpeg " ) ;
+stacksize ( ' max ' )
+[ c ] = detectDAISYDescriptors ( a , 45 , 30 , 10 , 8 , 5 , 15 , 3 , 8 , 8 , 100 , %t , %f ) ;
+[ d ] = detectDAISYDescriptors ( b , 45 , 30 , 10 , 8 , 5 , 15 , 3 , 8 , 8 , 100 , %t , %f ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/detectLATCHDescriptors.html b/help/en_US/scilab_en_US_help/detectLATCHDescriptors.html
new file mode 100644
index 0000000..7c56cb2
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/detectLATCHDescriptors.html
@@ -0,0 +1,114 @@
+
+
+ detectLATCHDescriptors
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > detectLATCHDescriptors
+
+
+ detectLATCHDescriptors
+
This function is used for computing the LATCH descriptors using Star keypoints.
+
+
+Calling Sequence
+
[ a ] = detectLATCHDescriptors ( srcImg )
+[ a ] = detectLATCHDescriptors ( srcImg , maxSize , responseThreshold , lineThresholdProjected , lineThresholdBinarized , suppressNonmaxSize , bytes , rotationInvariance , half_ssd_size )
+
+Parameters
+
srcImg :
+ Hyper of input image
+ maxSize :
+ Choose the number of filters to be applied, the parameter value set the maximum size.
+ responseThreshold :
+ To eliminate weak corners.
+ lineThresholdProjected :
+ Harris of responses.
+ lineThresholdBinarized :
+ Harris of sizes.
+ suppressNonmaxSize :
+ Window size (n-by-n) to apply the non-maximal suppression.
+ bytes :
+ It is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1.
+ rotationInvariance :
+ whether or not the descriptor should compansate for orientation changes.
+ half_ssd_size :
+ the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7 then the half_ssd_size should be (7-1)/2 = 3.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+Description
+
For extracting keypoints(using StarDetectors) and computing descriptors(LATCH).
+
+
+Examples
+
+[ a ] = imread ( " b1.jpeg " ) ;
+[ b ] = imread ( " b2.jpeg " ) ;
+stacksize ( ' max ' )
+[ c ] = detectLATCHDescriptors ( a ) ;
+[ d ] = detectLATCHDescriptors ( b ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Examples
+
+[ a ] = imread ( " b1.jpeg " ) ;
+[ b ] = imread ( " b2.jpeg " ) ;
+stacksize ( ' max ' )
+[ c ] = detectLATCHDescriptors ( a , 45 , 30 , 10 , 8 , 5 , 32 , %t , 3 ) ;
+[ d ] = detectLATCHDescriptors ( b , 45 , 30 , 10 , 8 , 5 , 32 , %t , 3 ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/detectSIFTFeatures.html b/help/en_US/scilab_en_US_help/detectSIFTFeatures.html
new file mode 100644
index 0000000..d8b30d5
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/detectSIFTFeatures.html
@@ -0,0 +1,107 @@
+
+
+ detectSIFTFeatures
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > detectSIFTFeatures
+
+
+ detectSIFTFeatures
+
This function is used to find scale-invariant features.
+
+
+Calling Sequence
+
[ a ] = detectSIFTFeatures ( srcImg )
+[ a ] = detectSIFTFeatures ( srcImg , nfeatures , nOctaveLayers , contrastThreshold , edgeThreshold , sigma )
+
+Parameters
+
srcImg :
+ Hyper of input image.
+ nfeatures :
+ The number of best features to retain. The features are ranked by their scores (measured in SIFT algorithm as the local contrast). If valued as 0, uses all detected keypoints.
+ nOctaveLayers :
+ The number of layers in each octave. 3 is the value used in D. Lowe paper. The number of octaves is computed automatically from the image resolution.
+ contrastThreshold :
+ The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
+ edgeThreshold :
+ The threshold used to filter out edge-like features. Note that the its meaning is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are filtered out (more features are retained).
+ sigma :
+ The sigma of the Gaussian applied to the input image at the octave #0. If your image is captured with a weak camera with soft lenses, you might want to reduce the number.
+ a :
+ It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+
+Description
+
For extracting keypoints and computing descriptors using the Scale Invariant Feature Transform. RGB images are converted to Grayscale images before processing.
+
+
+Examples
+
+a = imread ( " photo1.jpeg " ) ;
+b = imread ( " photo2.jpeg " ) ;
+stacksize ( " max " ) ;
+c = detectSIFTFeatures ( a ) ;
+d = detectSIFTFeatures ( b ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Examples
+
+a = imread ( " photo1.jpeg " ) ;
+b = imread ( " photo2.jpeg " ) ;
+stacksize ( " max " ) ;
+c = detectSIFTFeatures ( a , 0 , 3 , 0.05 , 11 , 1.6 ) ;
+d = detectSIFTFeatures ( b , 0 , 3 , 0.05 , 11 , 1.6 ) ;
+[ e f ] = matchFeatures ( c . Features , d . Features ) ;
+out = drawMatch ( a , b , c . KeyPoints , d . KeyPoints , e , f ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/distanceExtractor.html b/help/en_US/scilab_en_US_help/distanceExtractor.html
new file mode 100644
index 0000000..1a5b645
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/distanceExtractor.html
@@ -0,0 +1,126 @@
+
+
+ distanceExtractor
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > distanceExtractor
+
+
+ distanceExtractor
+
This function computes the shape-distance between two images.
+
+
+Calling Sequence
+
[ dist ] = distanceExtractor ( srcImg1 , srcImg2 , typeOfMethod );
+[ dist ] = distanceExtractor ( srcImg1 , srcImg2 , typeOfMethod , nAngularBins , innerRadius , nRadialBins , outerRadius , iterations );
+
+Parameters
+
srcImg1 :
+ It is the first input image.
+ srcImg2 :
+ It is the second input image.
+ typeOfMethod :
+ It is used as a flag to pick a certain type of Shape Distance calculation technique. Use '1' for 'Shape Context' and '2' for 'Hausdorff'.
+ nAngularBins :
+ Establish the number of angular bins for the Shape Context Descriptor used in the shape matching pipeline.
+ nRadialBins :
+ Establish the number of radial bins for the Shape Context Descriptor used in the shape matching pipeline.
+ innerRadius :
+ Set the inner radius of the shape context descriptor.
+ outerRadius :
+ Set the outer radius of the shape context descriptor.
+ dist :
+ It is the calculated distance. It is of double type.
+
+Description
+
This function is used to compute the shape distance between two shapes defined by its contours.
+
+
+Examples
+
+a = imread ( " bnwhite.jpg " ) ;
+b = imread ( " bryan.jpeg " ) ;
+typeOfMethod = 2 ;
+c = distanceExtractor ( a , b , typeOfMethod ) ;
+
+Examples
+
+a = imread ( " photo.jpg " ) ;
+b = imread ( " photo1.jpg " ) ;
+typeOfMethod = 1 ;
+nAngularBins = 12 ;
+nRadialBins = 4 ;
+innerRadius = 0.2 ;
+outerRadius = 2 ;
+iterations = 3 ;
+ndummies = 25 ;
+defaultCost = 0.2 ;
+rpTps = 0 ;
+dist = distanceExtractor ( a , b , typeOfMethod , nAngularBins , nRadialBins , innerRadius , outerRadius , iterations , ndummies , defaultCost , rpTps ) ;
+
+Examples
+
Incorrect usage
+a = 4 ; ( not hypermat )
+b = 88 ; ( not hypermat )
+typeOfMethod = 1 ;
+nAngularBins = 12 ;
+nRadialBins = 4 ;
+innerRadius = 2 ;
+outerRadius = 0.2 ;
+iterations = 300 ;
+ndummies = 25 ;
+defaultCost = 0.2 ;
+rpTps = 0 ;
+dist = distanceExtractor ( a , b , typeOfMethod , nAngularBins , nRadialBins , innerRadius , outerRadius , iterations , ndummies , defaultCost , rpTps ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/fillConvexPoly.html b/help/en_US/scilab_en_US_help/fillConvexPoly.html
new file mode 100644
index 0000000..393542c
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/fillConvexPoly.html
@@ -0,0 +1,95 @@
+
+
+ fillConvexPoly
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > fillConvexPoly
+
+
+ fillConvexPoly
+
This function fills a convex polygon.
+
+
+Calling Sequence
+
[ out ] = fillConvexPoly ( img , pstData , npts , r_value , g_value , b_value , linetype , shift )
+
+Parameters
+
img :
+ The input source image.
+ pstData :
+ The vector of polygon vertices.
+ npts :
+ The number of polygon vertices.
+ r_value :
+ The red value of RGB color for the polygon.
+ g_value :
+ The green value of RGB color for the polygon.
+ b_value :
+ The blue value of RGB color for the polygon.
+ linetype :
+ This is the type of the polygon boundaries. It has only 3 valid types: 4, 8 and 16(CV_AA). Passing any other value as lineType is not legal.
+ shift :
+ This is the number of fractional bits in the vertex coordinates.
+
+Description
+
The function fillConvexPoly draws a filled convex polygon. It can fill not only convex polygons but any monotonic polygon without self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) twice at the most (though, its top-most and/or the bottom edge could be horizontal).
+
+
+Examples
+
+a = imread ( " lena.jpeg " ) ;
+b = [ 0 10 ; 10 0 ; - 10 0 ] ;
+c = fillConvexPoly ( a , b , 3 , 1 , 1 , 1 , 8 , 0 ) ;
+
+Authors
+
Abhilasha Sancheti
+ Sukul Bagai
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/gabor.html b/help/en_US/scilab_en_US_help/gabor.html
new file mode 100644
index 0000000..44d2aa0
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/gabor.html
@@ -0,0 +1,84 @@
+
+
+ gabor
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > gabor
+
+
+ gabor
+
This function creates a Gabor filter.
+
+
+Calling Sequence
+
[ outputImg ] = gabor ( wavelength , orientation )
+
+Parameters
+
wavelength :
+ It is the wavelength of sinusoid, specified as a numeric scalar or vector, in pixels/cycle.
+ orientation :
+ It is the orientation of filter in degrees, specified as a numeric scalar in the range [0 180], where the orientation is defined as the normal direction to the sinusoidal plane wave.
+ outputImg :
+ The Gabor filter.
+
+Description
+
It creates a Gabor filter with the specified wavelength (in pixels/cycle) and orientation (in degrees). If you specify wavelength or orientation as vectors, gabor returns an array of gabor objects, called a filter bank, that contain all the unique combinations of wavelength and orientation. For example, if wavelength is a vector of length 2 and orientation is a vector of length 3, then the output array is a vector of length 6.
+
+
+Examples
+
+wavelength = 20 ;
+orientation = 45 ;
+a = gabor ( wavelength , orientation ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/gaussianBlur.html b/help/en_US/scilab_en_US_help/gaussianBlur.html
new file mode 100644
index 0000000..8222e45
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/gaussianBlur.html
@@ -0,0 +1,88 @@
+
+
+ gaussianBlur
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > gaussianBlur
+
+
+ gaussianBlur
+
This function blurs the input image using a Gaussian filter.
+
+
+Calling Sequence
+
outputImg = gaussianBlur ( inputImage , ksize_height , ksize_width , sigmaX , sigmaY )
+
+Parameters
+
inputImage :
+ The input source image.
+ ksize_height :
+ It is the gaussian kernel height. It must be positive and odd.
+ ksize_width :
+ It is the gaussian kernel width. It must be positive and odd.
+ sigmaX :
+ It is the gaussian kernel standard deviation in X direction.
+ sigmaY :
+ It is the gaussian kernel standard deviation in Y direction.
+ outputImg :
+ The output filtered image is of the same size and type as the input image.
+
+Description
+
The function convolves the source image with the specified Gaussian kernel.
+
+
+Examples
+
inputImage = imread ( ' lena.jpg ' ) ;
+outputImg = gaussianBlur ( inputImage , 5 , 5 , 1 , 1 ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/histogramCostExtractor.html b/help/en_US/scilab_en_US_help/histogramCostExtractor.html
new file mode 100644
index 0000000..00b5fed
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/histogramCostExtractor.html
@@ -0,0 +1,115 @@
+
+
+ histogramCostExtractor
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > histogramCostExtractor
+
+
+ histogramCostExtractor
+
This function computes the cost matrix.
+
+
+Calling Sequence
+
[ costMatrix ] = histogramCostExtractor ( srcImg1 , srcImg2 , typeOfMethod =3, hessianThreshold );
+[ costMatrix ] = histogramCostExtractor ( srcImg1 , srcImg2 , typeOfMethod =1, hessianThreshold , nDummies , defaultCost );
+[ costMatrix ] = histogramCostExtractor ( srcImg1 , srcImg2 , typeOfMethod =2, hessianThreshold , nDummies , defaultCost );
+
+Parameters
+
srcImg1 :
+ It is the first input image.
+ srcImg2 :
+ It is the second input image.
+ typeOfMethod :
+ It is used as a flag to pick a certain type of transformation. Use value '1' for 'Chi based cost ectraction', '2' for 'EMDL1 based cost extraction' and '3' for 'Norm based cost extraction'. It is of double type.
+ hessianThreshold :
+ It is the threshold value for Hessian keypoint detector in SURF(Speeded-Up Robust Features). It is of double type.
+ rpTPS :
+ It is used to set the regularization parameter for relaxing the exact interpolation requirements of the TPS algorithm. It is of double type.
+ costMatrix :
+ It is the cost matrix.
+
+Description
+
This function is used to calculate the histogram based cost matrix of two images, the user gets to choose and apply the type of transformation she/he wishes to perform.
+
+
+Examples
+
+a = imread ( " n.jpeg " ) ;
+b = imread ( " n1.jpeg " ) ;
+typeOfMethod = 1 ;
+hessianThreshold = 5000 ;
+nDummies = 25 ;
+defaultCost = 0.2 ;
+c = histogramCostExtractor ( a , b , typeOfMethod , hessianThreshold , nDummies , defaultCost ) ;
+
+Examples
+
+a = imread ( " n.jpeg " ) ;
+b = imread ( " n1.jpeg " ) ;
+typeOfMethod = 2 ;
+hessianThreshold = 5000 ;
+nDummies = 25 ;
+defaultCost = 0.2 ;
+c = histogramCostExtractor ( a , b , typeOfMethod , hessianThreshold , nDummies , defaultCost ) ;
+
+Examples
+
Norm based cost extraction
+a = imread ( " n.jpeg " ) ;
+b = imread ( " n1.jpeg " ) ;
+typeOfMethod = 3 ;
+hessianThreshold = 5000 ;
+c = histogramCostExtractor ( a , b , typeOfMethod , hessianThreshold ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+ Shubham Lohakare, NITK Surathkal
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/imGaborFilt.html b/help/en_US/scilab_en_US_help/imGaborFilt.html
new file mode 100644
index 0000000..60754b5
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/imGaborFilt.html
@@ -0,0 +1,85 @@
+
+
+ imGaborFilt
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > imGaborFilt
+
+
+ imGaborFilt
+
The function applies Gabor filter or set of filters to 2-D image.
+
+
+Calling Sequence
+
[ new_image ] = imGaborFilt ( image , wavelength , orientation )
+
+Parameters
+
image :
+ The input grayscale image.
+ wavelength :
+ It is the wavelength of the sinusoidal carrier, specified as a numeric scalar in the range [2,Inf), in pixels/cycle.
+ orientation :
+ Orientation value of filter in degrees, specified as a numeric scalar in the range [0 360], where the orientation is defined as the normal direction to the sinusoidal plane wave.
+
+Description
+
It computes the magnitude and phase response of a Gabor filter for the input grayscale image.
+
+
+Examples
+
+a = imread ( " lena.jpeg " , 0 ) ;
+wavelength = 4 ;
+orientation = 90 ;
+b = imGaborFilt ( a , wavelength , orientation )
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/index.html b/help/en_US/scilab_en_US_help/index.html
new file mode 100644
index 0000000..a38276a
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/index.html
@@ -0,0 +1,211 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox
+
+
+FOSSEE Image Processing Toolbox
+align — This function aligns the set of input images for HDR image creation.
+
+
+
+
+
+applyTransformer — This function is used to apply affine or TPS transformation to image.
+
+
+
+
+
+bwLookUp — This function performs 2*2 and 3*3 nonlinear filtering using a lookup table.
+
+
+
+
+
+contourArea — This function calculates the contour area.
+
+
+
+
+
+copyMakeBorder — This function forms a border around the input image.
+
+
+
+
+
+detectBRIEFDescriptors — This function is used for computing BRIEF descriptors using Star keypoints.
+
+
+
+
+
+detectDAISYDescriptors — This function is used for computing DAISY descriptors using Star keypoints.
+
+
+
+
+
+detectLATCHDescriptors — This function is used for computing the LATCH descriptors using Star keypoints.
+
+
+
+
+
+detectSIFTFeatures — This function is used to find scale-invariant features.
+
+
+
+
+
+distanceExtractor — This function computes the shape-distance between two images.
+
+
+
+
+
+fillConvexPoly — This function fills a convex polygon.
+
+
+
+
+
+gabor — This function creates a Gabor filter.
+
+
+
+
+
+gaussianBlur — This function blurs the input image using a Gaussian filter.
+
+
+
+
+
+histogramCostExtractor — This function computes the cost matrix.
+
+
+
+
+
+imGaborFilt — The function applies Gabor filter or set of filters to 2-D image.
+
+
+
+
+
+lab2rgb — This function converts CIE 1976 L*a*b* to RGB.
+
+
+
+
+
+lab2uint8 — This function converts L*a*b* data to uint8.
+
+
+
+
+
+makeHDR — This function is used to create HDR image.
+
+
+
+
+
+ntsc2rgb — This function converts NTSC values to RGB color space.
+
+
+
+
+
+pcwrite — Write 3-D point cloud to PLY or PCD file.
+
+
+
+
+
+rgb2lab — This function converts RGB to CIE 1976 L*a*b*.
+
+
+
+
+
+ssim — This function is used to compute the Structural Similarity Index (SSIM) for measuring image quality.
+
+
+
+
+
+wiener2 — This function is used for 2-D adaptive noise-removal filtering.
+
+
+
+
+
+xyz2double — This function converts XYZ color values to double.
+
+
+
+
+
+xyz2lab — This function converts CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+
+
+
+xyz2rgb — This function converts CIE 1931 XYZ to RGB.
+
+
+
+
+
+xyz2uint16 — This function converts XYZ color values to uint16.
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/jhelpidx.xml b/help/en_US/scilab_en_US_help/jhelpidx.xml
new file mode 100644
index 0000000..aa5a66e
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/jhelpidx.xml
@@ -0,0 +1,3 @@
+
+
+
\ No newline at end of file
diff --git a/help/en_US/scilab_en_US_help/jhelpmap.jhm b/help/en_US/scilab_en_US_help/jhelpmap.jhm
new file mode 100644
index 0000000..d68a422
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/jhelpmap.jhm
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/help/en_US/scilab_en_US_help/jhelpset.hs b/help/en_US/scilab_en_US_help/jhelpset.hs
new file mode 100644
index 0000000..7c93217
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/jhelpset.hs
@@ -0,0 +1,28 @@
+
+
+
+FOSSEE Image Processing Toolbox
+
+top
+
+
+
+TOC
+Table Of Contents
+javax.help.TOCView
+jhelptoc.xml
+
+
+Index
+Index
+javax.help.IndexView
+jhelpidx.xml
+
+
+Search
+Search
+javax.help.SearchView
+JavaHelpSearch
+
+
\ No newline at end of file
diff --git a/help/en_US/scilab_en_US_help/jhelptoc.xml b/help/en_US/scilab_en_US_help/jhelptoc.xml
new file mode 100644
index 0000000..b100ac9
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/jhelptoc.xml
@@ -0,0 +1,35 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/help/en_US/scilab_en_US_help/lab2rgb.html b/help/en_US/scilab_en_US_help/lab2rgb.html
new file mode 100644
index 0000000..bc85673
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/lab2rgb.html
@@ -0,0 +1,127 @@
+
+
+ lab2rgb
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > lab2rgb
+
+
+ lab2rgb
+
This function converts CIE 1976 L*a*b* to RGB.
+
+
+Calling Sequence
+
[ output ] = lab2rgb ( pstData )
+
+Parameters
+
pstData :
+ The color values to convert, specified as a list of values.
+ output :
+ The converted color values, returned as an array of the same shape as the input.
+
+Description
+
Convert CIE 1976 L*a*b* to RGB.
+
+
+Examples
+
+a = list ( 70 , 5 , 10 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 71 , 50 , 10 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 7.3 , 5.53 , 10 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 70 , 5 , 10.6656 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 70 , 5.45 , 10.45 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 7.343 , 5.34 , 10 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 70 , 500 , 1012 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 701.2 , 5 , 10 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 70 , 5.545 , 1.0 ) ;
+b = lab2rgb ( a ) ;
+
+Examples
+
+a = list ( 23 , 51 , 18 ) ;
+b = lab2rgb ( a ) ;
+
+Authors
+
Tess Zacharias
+ Ashish Manatosh Barik
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/lab2uint8.html b/help/en_US/scilab_en_US_help/lab2uint8.html
new file mode 100644
index 0000000..50286f9
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/lab2uint8.html
@@ -0,0 +1,126 @@
+
+
+ lab2uint8
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > lab2uint8
+
+
+ lab2uint8
+
This function converts L*a*b* data to uint8.
+
+
+Calling Sequence
+
[ output ] = lab2uint8 ( pstData )
+
+Parameters
+
pstData :
+ It is a list of color values.
+ output :
+ The converted uint8 value. lab8 has the same size as lab.
+
+Description
+
Converts L*a*b* data to uint8.
+
+
+Examples
+
+a = list ( 70 , 5 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 71 , 5 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 0 , 5 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 89 , 50 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 70 , 5 , 10.78 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 7 , 5 , 89 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 70.344 , 5.34 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 0 , 0 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 70.89 , 5.11 , 10.33 ) ;
+b = lab2uint8 ( a ) ;
+
+Examples
+
+a = list ( 10 , 5 , 10 ) ;
+b = lab2uint8 ( a ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/makeHDR.html b/help/en_US/scilab_en_US_help/makeHDR.html
new file mode 100644
index 0000000..618247c
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/makeHDR.html
@@ -0,0 +1,235 @@
+
+
+ makeHDR
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > makeHDR
+
+
+ makeHDR
+
This function is used to create HDR image.
+
+
+Calling Sequence
+
[ out1 , out2 ] = makeHDR ( typeOfMethod =1, num =3, srcMat_1 , srcMat_2 , srcMat_3 , ex_1 , ex_2 , ex_3 , max_iter , threshold )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =2, num =3, srcMat_1 , srcMat_2 , srcMat_3 , ex_1 , ex_2 , ex_3 , samples , lambda , random )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =3, num =3, srcMat_1 , srcMat_2 , srcMat_3 , ex_1 , ex_2 , ex_3 , contrast_weight , saturation_weight , exposure_weight )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =1, num =4, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , ex_1 , ex_2 , ex_3 , ex_4 , max_iter , threshold )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =2, num =4, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , ex_1 , ex_2 , ex_3 , ex_4 , samples , lambda , random )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =3, num =4, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , ex_1 , ex_2 , ex_3 , ex_4 , contrast_weight , saturation_weight , exposure_weight )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =1, num =5, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , srcMat_5 , ex_1 , ex_2 , ex_3 , ex_4 , ex_5 , max_iter , threshold )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =2, num =5, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , srcMat_5 , ex_1 , ex_2 , ex_3 , ex_4 , ex_5 , samples , lambda , random )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =3, num =5, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , srcMat_5 , ex_1 , ex_2 , ex_3 , ex_4 , ex_5 , contrast_weight , saturation_weight , exposure_weight )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =1, num =6, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , srcMat_5 , srcMat_6 , ex_1 , ex_2 , ex_3 , ex_4 , ex_5 , ex_6 , max_iter , threshold )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =2, num =6, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , srcMat_5 , srcMat_6 , ex_1 , ex_2 , ex_3 , ex_4 , ex_5 , ex_6 , samples , lambda , random )
+[ out1 , out2 ] = makeHDR ( typeOfMethod =3, num =6, srcMat_1 , srcMat_2 , srcMat_3 , srcMat_4 , srcMat_5 , srcMat_6 , ex_1 , ex_2 , ex_3 , ex_4 , ex_5 , ex_6 , contrast_weight , saturation_weight , exposure_weight )
+
+Parameters
+
typeOfMethod :
+ Use '1' for 'Robertson', '2' for 'Debevec', or '3' for 'Mertens'.
+ num :
+ It is the number of images being fed as input. It is of Double type.
+ srcMat_i :
+ It is the hypermat of input source image.
+ ex_i :
+ It is the exposure value of the corresponding image_i. It is of double type.
+ max_iter :
+ (Robertson) maximal number of Gauss-Seidel solver iterations. It is of Double type.
+ threshold :
+ (Robertson) target difference between results of two successive steps of the minimization. It is of Double type.
+ samples :
+ (Debevec) number of pixel locations to use. It is of Double type.
+ lambda :
+ (Debevec) smoothness term weight. Greater values produce smoother results, but can alter the response. It is of Double type.
+ random :
+ (Debevec) if true sample pixel locations are chosen at random, otherwise they form a rectangular grid. It is of Boolean type.
+ contrast_weight :
+ (Mertens) contrast measure weight. It is of Double type.
+ saturation_weight :
+ (Mertens) saturation measure weight. It is of Double type.
+ exposure_weight :
+ (Mertens) well-exposedness measure weight. It is of Double type.
+ out1 :
+ HDR image
+ out2 :
+ LDR image
+
+Description
+
This function takes a set of images of the same scene in different exposures which have been aligned accordingly and outputs the HDR image.
+
+
+Examples
+
+a = imread ( " t1.jpeg " ) ;
+b = imread ( " t2.jpeg " ) ;
+c = imread ( " t3.jpeg " ) ;
+num = 3 ;
+typeOfMethod = 1 ;
+ex1 = 15 ;
+ex2 = 2.5 ;
+ex3 = 0.25 ;
+maxIter = 30 ;
+thres = 0.01 ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , ex1 , ex2 , ex3 , maxIter , thres ) ;
+
+Examples
+
+a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = imread ( " m3.jpeg " ) ;
+d = imread ( " m4.jpeg " ) ;
+e = imread ( " m5.jpeg " ) ;
+f = imread ( " m6.jpeg " ) ;
+num = 6 ;
+typeOfMethod = 2 ;
+ex1 = 0.0167 ;
+ex2 = 0.034 ;
+ex3 = 0.067 ;
+ex4 = 0.125 ;
+ex5 = 0.25 ;
+ex6 = 0.5 ;
+samples = 70 ;
+lambda = 10.0 ;
+random = %f ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , d , e , f , ex1 , ex2 , ex3 , ex4 , ex5 , ex6 , samples , lambda , random ) ;
+
+Examples
+
+a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = imread ( " m3.jpeg " ) ;
+d = imread ( " m4.jpeg " ) ;
+e = imread ( " m5.jpeg " ) ;
+f = imread ( " m6.jpeg " ) ;
+num = 6 ;
+typeOfMethod = 1 ;
+ex1 = 0.0167 ;
+ex2 = 0.034 ;
+ex3 = 0.067 ;
+ex4 = 0.125 ;
+ex5 = 0.25 ;
+ex6 = 0.5 ;
+maxIter = 30 ;
+thres = 0.01 ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , d , e , f , ex1 , ex2 , ex3 , ex4 , ex5 , ex6 , maxIter , thres ) ;
+
+Examples
+
+a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = imread ( " m3.jpeg " ) ;
+d = imread ( " m4.jpeg " ) ;
+e = imread ( " m5.jpeg " ) ;
+f = imread ( " m6.jpeg " ) ;
+num = 6 ;
+typeOfMethod = 3 ;
+ex1 = 0.0167 ;
+ex2 = 0.034 ;
+ex3 = 0.067 ;
+ex4 = 0.125 ;
+ex5 = 0.25 ;
+ex6 = 0.5 ;
+contrastWeight = 1.0 ;
+saturationWeight = 1.0 ;
+exposureWeight = 0.0 ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , d , e , f , ex1 , ex2 , ex3 , ex4 , ex5 , ex6 , contrastWeight , saturationWeight , exposureWeight ) ;
+
+Examples
+
a = imread ( " i1.jpeg " ) ;
+b = imread ( " i2.jpeg " ) ;
+c = imread ( " i3.jpeg " ) ;
+d = imread ( " i4.jpeg " ) ;
+num = 4 ;
+typeOfMethod = 2 ;
+ex1 = 0.034 ;
+ex2 = 0.008 ;
+ex3 = 0.0034 ;
+ex4 = 0.00073 ;
+samples = 70 ;
+lambda = 10.0 ;
+random = %f ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , d , ex1 , ex2 , ex3 , ex4 , samples , lambda , random ) ;
+
+Examples
+
a = imread ( " i1.jpeg " ) ;
+b = imread ( " i2.jpeg " ) ;
+c = imread ( " i3.jpeg " ) ;
+d = imread ( " i4.jpeg " ) ;
+num = 4 ;
+typeOfMethod = 1 ;
+ex1 = 0.034 ;
+ex2 = 0.008 ;
+ex3 = 0.0034 ;
+ex4 = 0.00073 ;
+maxIter = 30 ;
+thres = 0.01 ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , d , ex1 , ex2 , ex3 , ex4 , maxIter , thres ) ;
+
+Examples
+
a = imread ( " i1.jpeg " ) ;
+b = imread ( " i2.jpeg " ) ;
+c = imread ( " i3.jpeg " ) ;
+d = imread ( " i4.jpeg " ) ;
+num = 4 ;
+typeOfMethod = 3 ;
+ex1 = 0.034 ;
+ex2 = 0.008 ;
+ex3 = 0.0034 ;
+ex4 = 0.00073 ;
+maxIter = 30 ;
+contrastWeight = 1.0 ;
+saturationWeight = 1.0 ;
+exposureWeight = 0.0 ;
+[ hdr , ldr ] = makeHDR ( typeOfMethod , num , a , b , c , d , ex1 , ex2 , ex3 , ex4 , contrastWeight , saturationWeight , exposureWeight ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/ntsc2rgb.html b/help/en_US/scilab_en_US_help/ntsc2rgb.html
new file mode 100644
index 0000000..0d91790
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/ntsc2rgb.html
@@ -0,0 +1,116 @@
+
+
+ ntsc2rgb
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > ntsc2rgb
+
+
+ ntsc2rgb
+
This function converts NTSC values to RGB color space.
+
+
+Calling Sequence
+
[ output ] = ntsc2rgb ( pstData )
+
+Parameters
+
pstData :
+ It is a list of the NTSC luminance (Y) and chrominance (I and Q) color components.
+ output :
+ It is a list that contains the red, green, and blue values equivalent to those colors.
+
+Description
+
Converts NTSC values to RGB color space.
+
+
+Examples
+
+a = imread ( " b1.jpeg " , 0 )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " b2.jpeg " , 0 )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " graf1.jpeg " , 0 )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " graf2.jpeg " , 0 )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " b2.jpeg " )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " graf1.jpeg " )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " graf2.jpeg " )
+b = ntsc2rgb ( a ) ;
+
+Examples
+
+a = imread ( " lena.jpeg " , 0 )
+b = ntsc2rgb ( a ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/pcwrite.html b/help/en_US/scilab_en_US_help/pcwrite.html
new file mode 100644
index 0000000..168b670
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/pcwrite.html
@@ -0,0 +1,93 @@
+
+
+ pcwrite
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > pcwrite
+
+
+ pcwrite
+
This function is used to write 3-D point cloud to PLY or PCD file.
+
+
+Calling Sequence
+
[] = pcwrite ( pointCloud , filename )
+[] = pcwrite ( pointCloud , filename , fileFormat , fileType )
+
+Parameters
+
pointCloud :
+ Object for storing point cloud, specified as a pointCloud object.
+ filename :
+ File name, specified as a character vector, specify the file name with an extension in case of two input arguments. (default encoding is ASCII)
+ fileFormat :
+ The input file type must be a PLY or PCD format file.(choose between ".ply" or ".pcd")
+ fileType :
+ Choose from the following encoding, PLY - 'ascii', 'binary' and PCD - 'ascii', 'binary', or 'compressed'.
+
+Description
+
Writes the point cloud object, ptCloud, to the PLY or PCD file specified by the input.
+
+
+Examples
+
+ptCloud = pcread ( ' teapot.ply ' ) ;
+pcshow ( ptCloud ) ;
+pcwrite ( ptCloud , ' teapotOut ' , ' ply ' , ' binary ' ) ;
+
+Examples
+
+ptCloud = pcread ( ' teapot.ply ' ) ;
+pcshow ( ptCloud ) ;
+pcwrite ( ptCloud , ' teapotOut ' , ' pcd ' , ' binary ' ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/rgb2lab.html b/help/en_US/scilab_en_US_help/rgb2lab.html
new file mode 100644
index 0000000..deae45f
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/rgb2lab.html
@@ -0,0 +1,123 @@
+
+
+ rgb2lab
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > rgb2lab
+
+
+ rgb2lab
+
This function converts RGB to CIE 1976 L*a*b*.
+
+
+Calling Sequence
+
[ output ] = rgb2lab ( inputImage )
+
+Parameters
+
inputImage :
+ It is a list of color values to convert.
+ output :
+ The converted color values, returned as a list.
+
+Description
+
Converts RGB to CIE 1976 L*a*b*.
+
+
+Examples
+
+rgb2lab ( [ 1 1 1 ] )
+
+Examples
+
+rgb2lab ( [ .2 .3 .4 ] )
+
+Examples
+
+a = imread ( " b1.jpeg " ) ;
+b = rgb2lab ( a ) ;
+
+Examples
+
+a = imread ( " b2.jpeg " ) ;
+b = rgb2lab ( a ) ;
+
+Examples
+
+rgb2lab ( [ 23 23 22 ] )
+
+Examples
+
+a = imread ( " lena.jpeg " ) ;
+b = rgb2lab ( a ) ;
+
+Examples
+
+rgb2lab ( [ 34.2 43.3 343.4 ] )
+
+Examples
+
+a = imread ( " graf1.jpeg " ) ;
+b = rgb2lab ( a ) ;
+
+Examples
+
+a = imread ( " graf2.jpeg " ) ;
+b = rgb2lab ( a ) ;
+
+Examples
+
+a = imread ( " b.jpeg " ) ;
+b = rgb2lab ( a ) ;
+
+Authors
+
Sridhar Reddy
+ Ashish Manatosh Barik
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/scilab_code.css b/help/en_US/scilab_en_US_help/scilab_code.css
new file mode 100644
index 0000000..658f42e
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/scilab_code.css
@@ -0,0 +1,96 @@
+.scilabcomment {
+ font-style: italic;
+ color: #01a801
+}
+
+.scilabdefault {
+ font-style: normal;
+ color: #000000
+}
+
+.scilabspecial {
+ font-style: normal;
+ color: #ffaa00
+}
+
+.scilabconstants {
+ font-style: normal;
+ color: #da70d6
+}
+
+.scilaboperator {
+ font-style: normal;
+ color: #5c5c5c
+}
+
+.scilabnumber {
+ font-style: normal;
+ color: #bc8f8f
+}
+
+.scilabfkeyword {
+ font-style: normal;
+ color: #b01813
+}
+
+.scilabskeyword {
+ font-style: normal;
+ color: #a020f0
+}
+
+.scilabckeyword {
+ font-style: normal;
+ color: #5f9ea0
+}
+
+.scilabcommand {
+ font-style: normal;
+ color: #32b9b9
+}
+
+.scilabmacro {
+ font-style: normal;
+ color: #ae5cb0
+}
+
+a.scilabcommand {
+ font-style: normal;
+ text-decoration: underline;
+ color: #32b9b9
+}
+
+a.scilabmacro {
+ font-style: normal;
+ text-decoration: underline;
+ color: #ae5cb0
+}
+
+.scilabstring {
+ font-style: normal;
+ color: #bc8f8f
+}
+
+.scilabid {
+ font-style: normal;
+ color: #000000
+}
+
+.scilabinputoutputargs {
+ font-weight: bold;
+ color: #834310
+}
+
+.scilabfunctionid {
+ font-weight: bold;
+ color: #000000
+}
+
+.scilabfield {
+ font-style: normal;
+ color: #aaaaaa
+}
+
+.scilabopenclose {
+ font-style: normal;
+ color: #4a55db
+}
diff --git a/help/en_US/scilab_en_US_help/section_38a53415444fbf6feb416c6cf0aef456.html b/help/en_US/scilab_en_US_help/section_38a53415444fbf6feb416c6cf0aef456.html
new file mode 100644
index 0000000..806c996
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/section_38a53415444fbf6feb416c6cf0aef456.html
@@ -0,0 +1,211 @@
+
+
+
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox
+
+
+ FOSSEE Image Processing Toolbox
+align — This function aligns the set of input images for HDR image creation.
+
+
+
+
+
+applyTransformer — This function is used to apply affine or TPS transformation to image.
+
+
+
+
+
+bwLookUp — This function performs 2*2 and 3*3 nonlinear filtering using a lookup table.
+
+
+
+
+
+contourArea — This function calculates the contour area.
+
+
+
+
+
+copyMakeBorder — This function forms a border around the input image.
+
+
+
+
+
+detectBRIEFDescriptors — This function is used for computing BRIEF descriptors using Star keypoints.
+
+
+
+
+
+detectDAISYDescriptors — This function is used for computing DAISY descriptors using Star keypoints.
+
+
+
+
+
+detectLATCHDescriptors — This function is used for computing the LATCH descriptors using Star keypoints.
+
+
+
+
+
+detectSIFTFeatures — This function is used to find scale-invariant features.
+
+
+
+
+
+distanceExtractor — This function computes the shape-distance between two images.
+
+
+
+
+
+fillConvexPoly — This function fills a convex polygon.
+
+
+
+
+
+gabor — This function creates a Gabor filter.
+
+
+
+
+
+gaussianBlur — This function blurs the input image using a Gaussian filter.
+
+
+
+
+
+histogramCostExtractor — This function computes the cost matrix.
+
+
+
+
+
+imGaborFilt — The function applies Gabor filter or set of filters to 2-D image.
+
+
+
+
+
+lab2rgb — This function converts CIE 1976 L*a*b* to RGB.
+
+
+
+
+
+lab2uint8 — This function converts L*a*b* data to uint8.
+
+
+
+
+
+makeHDR — This function is used to create HDR image.
+
+
+
+
+
+ntsc2rgb — This function converts NTSC values to RGB color space.
+
+
+
+
+
+pcwrite — Write 3-D point cloud to PLY or PCD file.
+
+
+
+
+
+rgb2lab — This function converts RGB to CIE 1976 L*a*b*.
+
+
+
+
+
+ssim — This function is used to compute the Structural Similarity Index (SSIM) for measuring image quality.
+
+
+
+
+
+wiener2 — This function is used for 2-D adaptive noise-removal filtering.
+
+
+
+
+
+xyz2double — This function converts XYZ color values to double.
+
+
+
+
+
+xyz2lab — This function converts CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+
+
+
+xyz2rgb — This function converts CIE 1931 XYZ to RGB.
+
+
+
+
+
+xyz2uint16 — This function converts XYZ color values to uint16.
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/section_90534096b3a447538ddfbb9df9cf6c8b.html b/help/en_US/scilab_en_US_help/section_90534096b3a447538ddfbb9df9cf6c8b.html
new file mode 100644
index 0000000..d4c0a2e
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/section_90534096b3a447538ddfbb9df9cf6c8b.html
@@ -0,0 +1,109 @@
+
+
+
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox
+
+
+ FOSSEE Image Processing Toolbox
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/section_a2c9cca36f8f25d9e603fc42b80e6232.html b/help/en_US/scilab_en_US_help/section_a2c9cca36f8f25d9e603fc42b80e6232.html
new file mode 100644
index 0000000..628c1f8
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/section_a2c9cca36f8f25d9e603fc42b80e6232.html
@@ -0,0 +1,211 @@
+
+
+
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox
+
+
+ FOSSEE Image Processing Toolbox
+align — This function aligns the set of input images for HDR image creation.
+
+
+
+
+
+applyTransformer — This function is used to apply affine or TPS transformation to image.
+
+
+
+
+
+bwLookUp — This function performs 2*2 and 3*3 nonlinear filtering using a lookup table.
+
+
+
+
+
+contourArea — This function calculates the contour area.
+
+
+
+
+
+copyMakeBorder — This function forms a border around the input image.
+
+
+
+
+
+detectBRIEFDescriptors — This function is used for computing BRIEF descriptors using Star keypoints.
+
+
+
+
+
+detectDAISYDescriptors — This function is used for computing DAISY descriptors using Star keypoints.
+
+
+
+
+
+detectLATCHDescriptors — This function is used for computing the LATCH descriptors using Star keypoints.
+
+
+
+
+
+detectSIFTFeatures — This function is used to find scale-invariant features.
+
+
+
+
+
+distanceExtractor — This function computes the shape-distance between two images.
+
+
+
+
+
+fillConvexPoly — This function fills a convex polygon.
+
+
+
+
+
+gabor — This function creates a Gabor filter.
+
+
+
+
+
+gaussianBlur — This function blurs the input image using a Gaussian filter.
+
+
+
+
+
+histogramCostExtractor — This function computes the cost matrix.
+
+
+
+
+
+imGaborFilt — The function applies Gabor filter or set of filters to 2-D image.
+
+
+
+
+
+lab2rgb — This function converts CIE 1976 L*a*b* to RGB.
+
+
+
+
+
+lab2uint8 — This function converts L*a*b* data to uint8.
+
+
+
+
+
+makeHDR — This function is used to create HDR image.
+
+
+
+
+
+ntsc2rgb — This function converts NTSC values to RGB color space.
+
+
+
+
+
+pcwrite — This function is used to write 3-D point cloud to PLY or PCD file.
+
+
+
+
+
+rgb2lab — This function converts RGB to CIE 1976 L*a*b*.
+
+
+
+
+
+ssim — This function is used to compute the Structural Similarity Index (SSIM) for measuring image quality.
+
+
+
+
+
+wiener2 — This function is used for 2-D adaptive noise-removal filtering.
+
+
+
+
+
+xyz2double — This function converts XYZ color values to double.
+
+
+
+
+
+xyz2lab — This function converts CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+
+
+
+xyz2rgb — This function converts CIE 1931 XYZ to RGB.
+
+
+
+
+
+xyz2uint16 — This function converts XYZ color values to uint16.
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/ssim.html b/help/en_US/scilab_en_US_help/ssim.html
new file mode 100644
index 0000000..99a07c2
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/ssim.html
@@ -0,0 +1,84 @@
+
+
+ ssim
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > ssim
+
+
+ ssim
+
This function is used to compute the Structural Similarity Index (SSIM) for measuring image quality.
+
+
+Calling Sequence
+
[ ssim_val ] = ssim ( srcImg , reference )
+
+Parameters
+
srcImg :
+ The input image whose quality is to be measured. Must be the same size and class as reference.
+ reference :
+ Reference image against which quality is measured.
+ ssim_val :
+ Structural Similarity (SSIM) Index.
+
+Description
+
Computes the Structural Similarity Index (SSIM) value.
+
+
+Examples
+
+a = imread ( " m1.jpeg " ) ;
+b = imread ( " m2.jpeg " ) ;
+c = ssim ( a , b ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/style.css b/help/en_US/scilab_en_US_help/style.css
new file mode 100644
index 0000000..0fe8923
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/style.css
@@ -0,0 +1,350 @@
+body {
+ color:#000000;
+ background-color:#ffffff;
+ font-family:sans-serif;
+ font-size:100%;
+ margin:5px;
+ padding:0;
+ background : url("/img/body.png");
+ background-repeat : repeat-x;
+}
+
+.para {
+ padding-left: 10px;
+}
+
+.refname {
+ color: #ff6c0a;
+}
+.refpurpose {
+ font-size: 110%;
+}
+
+.synopsis {
+ border: 1px solid black;
+ width:80%;
+ padding: 0.5em;
+}
+
+.editbar {
+ text-align: right;
+}
+
+.term {
+ color:#800000;
+ font-size:100%;
+}
+
+h3 {
+ color: #000063;
+ font-weight: bold;
+ font-size:130%;
+ margin-bottom: 10px;
+}
+
+.programlisting {
+ font-family: monospace;
+ font-size: 100%;
+ background-color:#EEEEFF;
+ border-color:#CCCCCC;
+ border-style:solid;
+ border-width:2px medium;
+ width:80%;
+ color:#333333;
+ line-height:120%;
+ padding:10px;
+}
+
+.literal {
+ font-family: monospace;
+ font-size: 100%;
+}
+
+.option {
+ font-family: monospace;
+ font-style: italic;
+ font-size: 100%;
+}
+
+.command {
+ font-family: monospace;
+ font-size: 100%;
+ color: #32b9b9;
+}
+
+.function {
+ font-family: monospace;
+ font-size: 100%;
+ color: #32b9b9;
+}
+
+.varname {
+ font-family: monospace;
+ font-weight: bold;
+ font-size: 100%;
+}
+
+.constant {
+ font-family: monospace;
+ font-size: 100%;
+ color: #da70d6;
+}
+
+a {
+ color: blue;
+ text-decoration:none;
+}
+
+a:hover {
+ text-decoration:underline;
+}
+
+.itemizedlist {
+ list-style-type: disc;
+}
+
+.inline-list li {
+ display: inline;
+ list-style-type: disc;
+}
+
+.vert-list {
+ list-style-type: disc;
+}
+
+pre {
+ margin-bottom: 0px;
+ margin-top: 0px;
+}
+
+.leftpart {
+ position:absolute;
+ float:left;
+ width: 186px;
+ padding: 5px;
+ font-size: smaller;
+}
+
+.content {
+ margin-top: 100px;
+ margin-left: 196px
+}
+
+.container {
+ margin: 0 auto;
+ width:1024px;
+}
+
+ul.toc li.list-active {
+ list-style-type:disc;
+ font-weight:bold;
+}
+
+ul.toc li.parent {
+ font-size: 115%;
+ padding: 5px 0px 5px 11px;
+ border-bottom: 1px solid #cccccc;
+ margin-bottom: 5px;
+}
+
+ul.toc li.root {
+ font-size: 135%;
+ padding: 5px 0px 5px 11px;
+ border-bottom: 1px solid #cccccc;
+ margin-bottom: 5px;
+}
+
+ul.toc li {
+ font-size: 85%;
+ margin: 1px 0 1px 1px;
+ padding: 1px 0 1px 11px;
+ background-repeat: no-repeat;
+ background-position: center left;
+ list-style-type: circle;
+}
+
+.next {
+ float:right;
+ text-align: right;
+}
+
+.center {
+ text-align: center;
+}
+
+.screen {
+ font-family: monospace;
+ font-size: 100%;
+ background-color:#FFFFFF;
+ border-color:#CCCCCC;
+ border-style:solid;
+ border-width:2px medium;
+ width:80%;
+ color:#333333;
+ line-height:120%;
+ padding:10px;
+}
+
+/* Top and bottom navigation controls on manual pages --------------------- */
+div.manualnavbar {
+ background-color: #E0E0E0;
+ color: inherit;
+ padding: 4px;
+ margin-bottom: 10px;
+}
+div.manualnavbar .prev {
+ padding-right: 4px;
+}
+div.manualnavbar .next {
+ text-align: right;
+ padding-left: 4px;
+}
+
+div.manualnavbar .top {
+ text-align: center;
+ display: block;
+}
+
+div.manualnavbar hr {
+ color: #cccccc;
+ background-color: #cccccc;
+}
+
+/* Footer navigation area ------------------------------------------------- */
+
+#pagefooter {
+ position: relative;
+ font-size: 75%;
+ color: inherit;
+ background-color: #e5e5e5;
+ width: 100%;
+}
+
+#pagefooterleft {
+ top: 0px;
+ left: 0px;
+ padding: 6px;
+ margin-right: 30%;
+}
+
+#pagefooterright {
+ text-align: right;
+ margin-left: 50%;
+ padding: 6px;
+}
+#footnav {
+ color: inherit;
+ background-color: #9999cc;
+ border-width: 1px 0px;
+ border-color: #333366;
+ border-style: solid;
+ text-align: right;
+ padding-right: 6px;
+}
+
+
+
+
+ #global{
+/* width: 90%; */
+ max-width: 90em;
+/* min-width: 850px; */
+ margin-left: auto;
+ margin-right: auto;
+ }
+
+ #myHeader{
+ background-color:#000000;
+ color:white;
+ margin-bottom : 10px;
+ position : relative;
+ text-align: center;
+/* width : 1024px;*/
+ height : 100px;
+ padding-left : 20px;
+ background : url("http://atoms.scilab.org/images/homepage/cadre_head.png");
+ background-repeat : no-repeat;
+ background-position : 0px 0px;
+
+ }
+
+ #myFooter{
+ background-color:#E5E5E5;
+ font-color:black;
+/* width: 90%;*/
+ max-width: 90em;
+/* min-width: 850px;
+ margin-left: 5%;
+ margin-right: 5%;*/
+ margin-top:10px;
+ padding:10px;
+ }
+
+ #mainArea{
+ width: 100%;
+ overflow: hidden;
+ }
+
+ #myLeftArea{
+ color:white;
+ float: left;
+ width: 180px;
+ padding:5px;
+ }
+
+ #myMiddleArea{
+ color:black;
+ margin-left: 200px;
+ padding: 10px 20px;
+ }
+
+ #myRightArea{
+ color:white;
+ float: right;
+ width: 200px;
+ padding: 12px 20px;
+ }
+
+
+div#cadre_head
+{
+ position : relative;
+ text-align: center;
+/* width : 1024px;*/
+ height : 100px;
+ padding-left : 20px;
+ background : url("/img/cadre_head.png");
+ background-repeat : no-repeat;
+ background-position : 0px 0px;
+}
+
+
+div#slogan{
+ position: absolute;
+ top : 50px;
+ left:251px;
+ color:#0000AA;
+ font: 120%, Georgia,Serif;
+}
+div#cadre_help
+{
+ position: absolute;
+ top:45px;
+ right:0px;
+ font-size:0.8em;
+ color:#0000AA;
+}
+
+table.revhistory
+{
+ width:80%;
+ border-color:#CCCCCC;
+ border-style:solid;
+ border-width:2px medium;
+ margin-bottom: 10px;
+}
+
+table.revhistory tr.title td
+{
+ background-color: #9999CC;
+}
\ No newline at end of file
diff --git a/help/en_US/scilab_en_US_help/wiener2.html b/help/en_US/scilab_en_US_help/wiener2.html
new file mode 100644
index 0000000..c644169
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/wiener2.html
@@ -0,0 +1,87 @@
+
+
+ wiener2
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > wiener2
+
+
+ wiener2
+
This function is used for 2-D adaptive noise-removal filtering.
+
+
+Calling Sequence
+
[ outputImg ] = wiener2 ( inputImage , filtsize , sigma )
+
+Parameters
+
inputImage :
+ The input image, grayscale only.
+ filtsize :
+ The filter size.
+ sigma :
+ The additive noise (Gaussian white noise) power is assumed to be sigma. If sigma = 0 then the variance is estimated from the data.
+ outputImg :
+ The output image, is of the same size and class as the input image
+
+Description
+
It lowpass-filters a grayscale image that has been degraded by constant power additive noise.
+
+
+Examples
+
+a = imread ( " m1.jpeg " ) ;
+filtsize = 5 ;
+sigma = 0 ;
+b = wiener2 ( a , filtsize , sigma ) ;
+
+
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/xml_code.css b/help/en_US/scilab_en_US_help/xml_code.css
new file mode 100644
index 0000000..9e4c27f
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/xml_code.css
@@ -0,0 +1,94 @@
+.xmlcomment {
+ font-style: italic;
+ color: #01a801
+}
+
+.xmldefault {
+ font-style: normal;
+ color: #000000
+}
+
+.xmlentity {
+ font-style: normal;
+ color: #ffaa00
+}
+
+.xmlopeninstr {
+ font-style: normal;
+ color: #000000
+}
+
+.xmlcloseinstr {
+ font-style: normal;
+ color: #000000
+}
+
+.xmlinstrname {
+ font-style: normal;
+ color: #9965a6
+}
+
+.xmllowtag {
+ font-style: normal;
+ color: #000000
+}
+
+.xmltagname {
+ font-style: normal;
+ color: #0303ff
+}
+
+.xmllowclose {
+ font-style: normal;
+ color: #000000
+}
+
+.xmlopencomment {
+ font-style: italic;
+ color: #01a801
+}
+
+.xmlcommentend {
+ font-style: italic;
+ color: #01a801
+}
+
+.xmlcomment {
+ font-style: italic;
+ color: #01a801
+}
+
+.xmlopencdata {
+ font-style: normal;
+ color: #c45555
+}
+
+.xmlcdataend {
+ font-style: normal;
+ color: #c45555
+}
+
+.xmlcdata {
+ font-style: normal;
+ color: #000000
+}
+
+.xmlattributename {
+ font-style: normal;
+ color: #9965a6
+}
+
+.xmlequal {
+ font-style: normal;
+ color: #000000
+}
+
+.xmlattributevalue {
+ font-style: normal;
+ color: #973964
+}
+
+.xmlautoclose {
+ font-style: normal;
+ color: #000000
+}
diff --git a/help/en_US/scilab_en_US_help/xyz2double.html b/help/en_US/scilab_en_US_help/xyz2double.html
new file mode 100644
index 0000000..e0b28b6
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/xyz2double.html
@@ -0,0 +1,126 @@
+
+
+ xyz2double
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > xyz2double
+
+
+ xyz2double
+
This function converts XYZ color values to double.
+
+
+Calling Sequence
+
[ output ] = xyz2double ( pstData )
+
+Parameters
+
pstData :
+ list of uint16 or double array that must be real and nonsparse.
+ output :
+ list of converted values.
+
+Description
+
Converts an M-by-3 or M-by-N-by-3 array of pstData color values to double. output has the same size as XYZ.
+
+
+Examples
+
+a = uint16 ( [ 100 32768 65535 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 100 32768 65536 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 1 3 5 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 100 32768 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 100 32 67 56 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 0.0031 1 2 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 100 32 678 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 100 32768 3244 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = uint16 ( [ 0.0031 1.56 2.454 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = double ( [ 9 1 2 ] ) ;
+b = xyz2double ( a ) ;
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/xyz2lab.html b/help/en_US/scilab_en_US_help/xyz2lab.html
new file mode 100644
index 0000000..334dde1
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/xyz2lab.html
@@ -0,0 +1,127 @@
+
+
+ xyz2lab
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > xyz2lab
+
+
+ xyz2lab
+
This function converts CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+Calling Sequence
+
[ output ] = xyz2lab ( vartype )
+
+Parameters
+
varType :
+ list of color values to convert.
+ output :
+ list of converted color values.
+
+Description
+
Convert CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+Examples
+
+a = list ( 0.25 , 0.40 , 0.10 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 0.29 , 0.23 , 0.11 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 0.29 , 34 , 0.10 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 0.25 , 0.56 , 0.18 )
+xyz2lab ( a )
+
+Examples
+
+a = uint16 ( [ 100 32 67 56 ] ) ;
+b = xyz2double ( a ) ;
+
+Examples
+
+a = list ( 89.25 , 89.40 , 0.10 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 78 , 89 , 11 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 0.25 , 0.40 , 90.67 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 0.76 , 0.67 , 9.10 )
+xyz2lab ( a )
+
+Examples
+
+a = list ( 78.25 , 34.40 , 0.10 )
+xyz2lab ( a )
+
+Authors
+
Ashish Manatosh Barik, NIT Rourkela
+ Priyanka Hiranandani, NIT Surat
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/xyz2rgb.html b/help/en_US/scilab_en_US_help/xyz2rgb.html
new file mode 100644
index 0000000..a36d27a
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/xyz2rgb.html
@@ -0,0 +1,127 @@
+
+
+ xyz2rgb
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > xyz2rgb
+
+
+ xyz2rgb
+
This function converts CIE 1931 XYZ to RGB.
+
+
+
+
+Parameters
+
data :
+ list of color values to convert.
+ output :
+ list of converted color values.
+
+Description
+
Converts CIE 1931 XYZ to RGB.
+
+
+Examples
+
+a = list ( 0.25 , 0.40 , 0.10 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 3.25 , 5.40 , 12.10 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 2 , 5 , 4 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 0.65 , 0.43 , 0.19 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 89.25 , 23 , 0.6710 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 0.2534 , 0.4340 , 0.143 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 67.25 , 34.40 , 44.10 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 34.25 , 56.40 , 223.189 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 0.1 , 0.1 , 0.1 ) ;
+xyz2rgb ( a )
+
+Examples
+
+a = list ( 78.25 , 34.40 , 23.10 ) ;
+xyz2rgb ( a )
+
+Authors
+
Tess Zacharias
+ Ashish Manatosh Barik
+
+
+
+
+
diff --git a/help/en_US/scilab_en_US_help/xyz2uint16.html b/help/en_US/scilab_en_US_help/xyz2uint16.html
new file mode 100644
index 0000000..1cc36a4
--- /dev/null
+++ b/help/en_US/scilab_en_US_help/xyz2uint16.html
@@ -0,0 +1,125 @@
+
+
+ xyz2uint16
+
+
+
+
+
+
+
+ FOSSEE Image Processing Toolbox >> FOSSEE Image Processing Toolbox > xyz2uint16
+
+
+ xyz2uint16
+
This function converts XYZ color values to uint16.
+
+
+Calling Sequence
+
[ output ] = xyz2uint16 ( pstData )
+
+Parameters
+
pstData :
+ list of uint16 or double array that must be real and nonsparse
+ output :
+ list of puint8.
+
+Description
+
Converts an M-by-3 or M-by-N-by-3 array of XYZ color values to uint16. output has the same size as pstData.
+
+
+Examples
+
+a = list ( 0.1 , 0.5 , 1.0 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 0.14 , 0.35 , 1.20 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 45.1 , 22.5 , 45.0 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 200 , 334 , 2112 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 56.1 , 0.5 , 1.0 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 0.1 , 8378.5 , 1.0 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 878.1 , 32.5 , 1.0 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 0.12323 , 0.53434 , 1.878 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 44 , 55 , 1.0 )
+xyz2uint16 ( a )
+
+Examples
+
+a = list ( 0.134 , 55.5 , 1.121 )
+xyz2uint16 ( a )
+
+Authors
+
Tess Zacharias
+ Ashish Manatosh Barik
+
+
+
+
+
diff --git a/help/en_US/ssim.xml b/help/en_US/ssim.xml
new file mode 100644
index 0000000..a2ebb3c
--- /dev/null
+++ b/help/en_US/ssim.xml
@@ -0,0 +1,70 @@
+
+
+
+
+
+
+
+ ssim
+ This function is used to compute the Structural Similarity Index (SSIM) for measuring image quality.
+
+
+
+
+ Calling Sequence
+
+ [ssim_val] = ssim(srcImg, reference)
+
+
+
+
+
+ Parameters
+
+ srcImg :
+ The input image whose quality is to be measured. Must be the same size and class as reference.
+ reference :
+ Reference image against which quality is measured.
+ ssim_val :
+ Structural Similarity (SSIM) Index.
+
+
+
+
+ Description
+
+Computes the Structural Similarity Index (SSIM) value.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Dhruti Shah
+
+
+
diff --git a/help/en_US/wiener2.xml b/help/en_US/wiener2.xml
new file mode 100644
index 0000000..e1d994c
--- /dev/null
+++ b/help/en_US/wiener2.xml
@@ -0,0 +1,73 @@
+
+
+
+
+
+
+
+ wiener2
+ This function is used for 2-D adaptive noise-removal filtering.
+
+
+
+
+ Calling Sequence
+
+ [outputImg] = wiener2(inputImage,filtsize,sigma)
+
+
+
+
+
+ Parameters
+
+ inputImage :
+ The input image, grayscale only.
+ filtsize :
+ The filter size.
+ sigma :
+ The additive noise (Gaussian white noise) power is assumed to be noise. if sigma = 0 then the variance is estimated from data
+ outputImg :
+ The output image, is of the same size and class as the input image
+
+
+
+
+ Description
+
+It lowpass-filters a grayscale image that has been degraded by constant power additive noise.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Riddhish Bhalodia
+
+
+
diff --git a/help/en_US/xyz2double.xml b/help/en_US/xyz2double.xml
new file mode 100644
index 0000000..8d4b17b
--- /dev/null
+++ b/help/en_US/xyz2double.xml
@@ -0,0 +1,157 @@
+
+
+
+
+
+
+
+ xyz2double
+ This function converts XYZ color values to double.
+
+
+
+
+ Calling Sequence
+
+ [output] = xyz2double(pstData)
+
+
+
+
+
+ Parameters
+
+ pstData :
+ list of uint16 or double array that must be real and nonsparse.
+ output :
+ list of converted values.
+
+
+
+
+ Description
+
+Converts an M-by-3 or M-by-N-by-3 array of pstData color values to double. output has the same size as XYZ.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+
+
+
diff --git a/help/en_US/xyz2lab.xml b/help/en_US/xyz2lab.xml
new file mode 100644
index 0000000..4c28c67
--- /dev/null
+++ b/help/en_US/xyz2lab.xml
@@ -0,0 +1,158 @@
+
+
+
+
+
+
+
+ xyz2lab
+ This function converts CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+
+
+ Calling Sequence
+
+ [output] = xyz2lab(vartype)
+
+
+
+
+
+ Parameters
+
+ varType :
+ list of color values to convert.
+ output :
+ list of converted color values.
+
+
+
+
+ Description
+
+Convert CIE 1931 XYZ to CIE 1976 L*a*b*.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Ashish Manatosh Barik, NIT Rourkela
+ Priyanka Hiranandani, NIT Surat
+
+
+
diff --git a/help/en_US/xyz2rgb.xml b/help/en_US/xyz2rgb.xml
new file mode 100644
index 0000000..3f01b25
--- /dev/null
+++ b/help/en_US/xyz2rgb.xml
@@ -0,0 +1,158 @@
+
+
+
+
+
+
+
+ xyz2rgb
+ This function converts CIE 1931 XYZ to RGB.
+
+
+
+
+ Calling Sequence
+
+ [output] = xyz2rgb(data)
+
+
+
+
+
+ Parameters
+
+ data :
+ list of color values to convert.
+ output :
+ list of converted color values.
+
+
+
+
+ Description
+
+Converts CIE 1931 XYZ to RGB.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Tess Zacharias
+ Ashish Manatosh Barik
+
+
+
diff --git a/help/en_US/xyz2uint16.xml b/help/en_US/xyz2uint16.xml
new file mode 100644
index 0000000..e9a446b
--- /dev/null
+++ b/help/en_US/xyz2uint16.xml
@@ -0,0 +1,158 @@
+
+
+
+
+
+
+
+ xyz2uint16
+ This function converts XYZ color values to uint16.
+
+
+
+
+ Calling Sequence
+
+ [output] = xyz2uint16(pstData)
+
+
+
+
+
+ Parameters
+
+ pstData :
+ list of uint16 or double array that must be real and nonsparse
+ output :
+ list of puint8.
+
+
+
+
+ Description
+
+Converts an M-by-3 or M-by-N-by-3 array of XYZ color values to uint16. output has the same size as pstData.
+
+
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Examples
+
+
+
+
+ Authors
+
+ Tess Zacharias
+ Ashish Manatosh Barik
+
+
+
diff --git a/images/b1.jpeg b/images/b1.jpeg
new file mode 100644
index 0000000..50c363d
Binary files /dev/null and b/images/b1.jpeg differ
diff --git a/images/b2.jpeg b/images/b2.jpeg
new file mode 100644
index 0000000..9ce3594
Binary files /dev/null and b/images/b2.jpeg differ
diff --git a/images/bnwhite.jpg b/images/bnwhite.jpg
new file mode 100644
index 0000000..38faf4c
Binary files /dev/null and b/images/bnwhite.jpg differ
diff --git a/images/bryan.jpeg b/images/bryan.jpeg
new file mode 100644
index 0000000..d314b8f
Binary files /dev/null and b/images/bryan.jpeg differ
diff --git a/images/bw.jpg b/images/bw.jpg
new file mode 100644
index 0000000..93b6468
Binary files /dev/null and b/images/bw.jpg differ
diff --git a/images/cat.jpg b/images/cat.jpg
new file mode 100644
index 0000000..e05ffad
Binary files /dev/null and b/images/cat.jpg differ
diff --git a/images/graf1.jpeg b/images/graf1.jpeg
new file mode 100644
index 0000000..500b24d
Binary files /dev/null and b/images/graf1.jpeg differ
diff --git a/images/graf2.jpeg b/images/graf2.jpeg
new file mode 100644
index 0000000..ed57f8e
Binary files /dev/null and b/images/graf2.jpeg differ
diff --git a/images/i1.jpeg b/images/i1.jpeg
new file mode 100644
index 0000000..4cb1818
Binary files /dev/null and b/images/i1.jpeg differ
diff --git a/images/i2.jpeg b/images/i2.jpeg
new file mode 100644
index 0000000..2765e7c
Binary files /dev/null and b/images/i2.jpeg differ
diff --git a/images/i3.jpeg b/images/i3.jpeg
new file mode 100644
index 0000000..85ec008
Binary files /dev/null and b/images/i3.jpeg differ
diff --git a/images/i4.jpeg b/images/i4.jpeg
new file mode 100644
index 0000000..02f5bc5
Binary files /dev/null and b/images/i4.jpeg differ
diff --git a/images/l1.jpeg b/images/l1.jpeg
new file mode 100644
index 0000000..cfae4cf
Binary files /dev/null and b/images/l1.jpeg differ
diff --git a/images/l2.jpeg b/images/l2.jpeg
new file mode 100644
index 0000000..f53ac88
Binary files /dev/null and b/images/l2.jpeg differ
diff --git a/images/lena.jpeg b/images/lena.jpeg
new file mode 100644
index 0000000..21d5012
Binary files /dev/null and b/images/lena.jpeg differ
diff --git a/images/lena_noiseImg.jpg b/images/lena_noiseImg.jpg
new file mode 100644
index 0000000..200a3a7
Binary files /dev/null and b/images/lena_noiseImg.jpg differ
diff --git a/images/m1.jpeg b/images/m1.jpeg
new file mode 100644
index 0000000..cc3040f
Binary files /dev/null and b/images/m1.jpeg differ
diff --git a/images/m2.jpeg b/images/m2.jpeg
new file mode 100644
index 0000000..e69201f
Binary files /dev/null and b/images/m2.jpeg differ
diff --git a/images/m3.jpeg b/images/m3.jpeg
new file mode 100644
index 0000000..1f5d7cc
Binary files /dev/null and b/images/m3.jpeg differ
diff --git a/images/m4.jpeg b/images/m4.jpeg
new file mode 100644
index 0000000..7da25af
Binary files /dev/null and b/images/m4.jpeg differ
diff --git a/images/m5.jpeg b/images/m5.jpeg
new file mode 100644
index 0000000..ff3b2db
Binary files /dev/null and b/images/m5.jpeg differ
diff --git a/images/m6.jpeg b/images/m6.jpeg
new file mode 100644
index 0000000..5b062b4
Binary files /dev/null and b/images/m6.jpeg differ
diff --git a/images/man.tiff b/images/man.tiff
new file mode 100644
index 0000000..e1a2c4e
Binary files /dev/null and b/images/man.tiff differ
diff --git a/images/noise.jpg b/images/noise.jpg
new file mode 100644
index 0000000..24263da
Binary files /dev/null and b/images/noise.jpg differ
diff --git a/images/p1.jpeg b/images/p1.jpeg
new file mode 100644
index 0000000..264180d
Binary files /dev/null and b/images/p1.jpeg differ
diff --git a/images/p1.jpg b/images/p1.jpg
new file mode 100644
index 0000000..9d52c39
Binary files /dev/null and b/images/p1.jpg differ
diff --git a/images/p2.jpeg b/images/p2.jpeg
new file mode 100644
index 0000000..7f1ab65
Binary files /dev/null and b/images/p2.jpeg differ
diff --git a/images/p2.jpg b/images/p2.jpg
new file mode 100644
index 0000000..25678aa
Binary files /dev/null and b/images/p2.jpg differ
diff --git a/images/photo.jpg b/images/photo.jpg
new file mode 100644
index 0000000..fdb63a1
Binary files /dev/null and b/images/photo.jpg differ
diff --git a/images/photo1.jpg b/images/photo1.jpg
new file mode 100644
index 0000000..5b95da4
Binary files /dev/null and b/images/photo1.jpg differ
diff --git a/images/portrait2.jpg b/images/portrait2.jpg
new file mode 100644
index 0000000..ea1fb07
Binary files /dev/null and b/images/portrait2.jpg differ
diff --git a/images/t1.jpeg b/images/t1.jpeg
new file mode 100644
index 0000000..f166632
Binary files /dev/null and b/images/t1.jpeg differ
diff --git a/images/t2.jpeg b/images/t2.jpeg
new file mode 100644
index 0000000..7327ff4
Binary files /dev/null and b/images/t2.jpeg differ
diff --git a/images/t3.jpeg b/images/t3.jpeg
new file mode 100644
index 0000000..111df83
Binary files /dev/null and b/images/t3.jpeg differ
diff --git a/images/t4.jpeg b/images/t4.jpeg
new file mode 100644
index 0000000..583dcb4
Binary files /dev/null and b/images/t4.jpeg differ
diff --git a/images/taj.jpg b/images/taj.jpg
new file mode 100644
index 0000000..6624ba9
Binary files /dev/null and b/images/taj.jpg differ
diff --git a/images/wall.tiff b/images/wall.tiff
new file mode 100644
index 0000000..22b61eb
Binary files /dev/null and b/images/wall.tiff differ
diff --git a/images/wall1.jpeg b/images/wall1.jpeg
new file mode 100644
index 0000000..bcfa62c
Binary files /dev/null and b/images/wall1.jpeg differ
diff --git a/images/wall2.jpeg b/images/wall2.jpeg
new file mode 100644
index 0000000..1f76811
Binary files /dev/null and b/images/wall2.jpeg differ
diff --git a/images/wall3.jpg b/images/wall3.jpg
new file mode 100644
index 0000000..d5b37af
Binary files /dev/null and b/images/wall3.jpg differ
diff --git a/macros/align.sci b/macros/align.sci
new file mode 100644
index 0000000..3f18999
--- /dev/null
+++ b/macros/align.sci
@@ -0,0 +1,282 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [outImg1, outImg2, outImg3, varargout] = align(maxBits, excludeRange, cut, num, srcImg1, srcImg2, srcImg3, varargin)
+// This function aligns the set of input images for HDR image creation.
+//
+// Calling Sequence
+// [out1, out2, out3] = align(maxBits, excludeRange, cut, num, srcImg_1, srcImg_2, srcImg_3)
+// [out1, out2, out3, out4] = align(maxBits, excludeRange, cut, num, srcImg1, srcImg_2, srcImg_3, srcImg_4)
+// [out1, out2, out3, out4, out5] = align(maxBits, excludeRange, cut, num, srcImg_1, srcImg_2, srcImg_3, srcImg_4, srcImg_5)
+//
+// Parameters
+// maxBits : Logarithm to the base 2 of maximal shift in each dimension. Values of 5 and 6 are usually good enough (31 and 63 pixels shift respectively). Value should not exceed 6. It is of Double type.
+// excludeRange : Range for exclusion bitmap that is constructed to suppress noise around the median value. It is of Double type.
+// cut : If true, cuts images. Otherwise fills the new regions with zeros. It is of Boolean type.
+// num : Number of images given as input source images(3 - 5). It is of double type.
+// srcImg_i : Hypermat of image_i.
+//
+// Description
+// This function uses AlignMTB algorithm which converts images to median threshold bitmaps (1 for pixels brighter than median luminance and 0 otherwise) and than aligns the resulting bitmaps using bit operations.
+//
+// Examples
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// num = 3;
+// maxBits= 6;
+// excludeRange = 4;
+// cut = %t;
+// [x, y, z] = align(maxBits, excludeRange, cut, num, a, b, c);
+//
+// Examples
+// a = imread("/images/t1.jpeg");
+// b = imread("/images/t2.jpeg");
+// c = imread("/images/t3.jpeg");
+// d = imread("/images/t4.jpeg");
+// num = 4;
+// maxBits= 6;
+// excludeRange = 4;
+// cut = %f;
+// [x, y, z, p] = align(maxBits, excludeRange, cut, num, a, b, c, d);
+//
+// Examples
+// // error cause maxBits value is greater than 6
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// d = imread("/images/m4.jpeg");
+// num = 4;
+// maxBits= 7;
+// excludeRange = 4;
+// cut = %t;
+// [x, y, z, p] = align(maxBits, excludeRange, cut, num, a, b, c, d);
+//
+// Examples
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// d = imread("/images/m4.jpeg");
+// e = imread("/images/m5.jpeg");
+// num = 5;
+// maxBits= 6;
+// excludeRange = 4;
+// cut = %t;
+// [x, y, z, p, q] = align(maxBits, excludeRange, cut, num, a, b, c, d, e);
+//
+// Examples
+// // cut is set false here (if true cuts images, otherwise fills the new regions with zeros. )
+// a = imread("/images/t1.jpeg");
+// b = imread("/images/t2.jpeg");
+// c = imread("/images/t3.jpeg");
+// num = 3;
+// maxBits= 1;
+// excludeRange = 4;
+// cut = %t;
+// [x, y, z] = align(maxBits, excludeRange, cut, num, a, b, c);
+//
+// Examples
+// // aligns the images for the making of 1 HDR image
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// num = 3;
+// maxBits= 5;
+// excludeRange = 4;
+// cut = %t;
+//[x, y, z] = align(maxBits, excludeRange, cut, num, a, b, c);
+//
+// Examples
+// // aligns the images for the making of 1 HDR image
+// a = imread("/images/t1.jpeg");
+// b = imread("/images/t2.jpeg");
+// c = imread("/images/t3.jpeg");
+// num = 3;
+// maxBits= 5;
+// excludeRange = 4;
+// cut = %t;
+// [x, y, z] = align(maxBits, excludeRange, cut, num, a, b, c);
+//
+// Examples
+// // aligns the images for the making of 1 HDR image
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// num = 3;
+// maxBits= 5;
+// excludeRange = 3;
+// cut = %f;
+// [x, y, z] = align(maxBits, excludeRange, cut, num, a, b, c);
+//
+// Examples
+// // maxBits = 6, leads to noticeable pixel shift
+// a = imread("/images/t1.jpeg");
+// b = imread("/images/t2.jpeg");
+// c = imread("/images/t3.jpeg");
+// d = imread("/images/t4.jpeg");
+// num = 4;
+// maxBits= 5;
+// excludeRange = 5;
+// cut = %t;
+// [x, y, z, p] = align(maxBits, excludeRange, cut, num, a, b, c, d);
+//
+// Examples
+// // maxBits = 6, leads to noticeable pixel shift
+// a = imread("/images/t1.jpeg");
+// b = imread("/images/t2.jpeg");
+// c = imread("/images/t3.jpeg");
+// num = 3;
+// maxBits= 5;
+// excludeRange = 6;
+// cut = %f;
+// [x, y, z] = align(maxBits, excludeRange, cut, num, a, b, c);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+//
+ srcMat1 = mattolist(srcImg1)
+ srcMat2 = mattolist(srcImg2)
+ srcMat3 = mattolist(srcImg3)
+
+ [lhs, rhs] = argn(0)
+
+ select rhs
+ case 7 then
+ [out1, out2, out3] = raw_align(maxBits, excludeRange, cut, num, srcMat1, srcMat2, srcMat3)
+
+ channels1 = size(out1)
+ channels2 = size(out2)
+ channels3 = size(out3)
+
+ for i = 1:channels1
+ outImg1(:, :, i) = (out1(i))
+ end
+
+ for j = 1:channels2
+ outImg2(:, :, j) = (out2(j))
+ end
+
+ for k = 1:channels3
+ outImg3(:, :, k) = (out3(k))
+ end
+
+ case 8 then
+ srcMat4 = mattolist(varargin(1))
+
+ [out1, out2, out3, out4] = raw_align(maxBits, excludeRange, cut, num, srcMat1, srcMat2, srcMat3, srcMat4)
+
+ channels1 = size(out1)
+ channels2 = size(out2)
+ channels3 = size(out3)
+ channels4 = size(out4)
+
+ for i = 1:channels1
+ outImg1(:, :, i) = (out1(i))
+ end
+
+ for j = 1:channels2
+ outImg2(:, :, j) = (out2(j))
+ end
+
+ for k = 1:channels3
+ outImg3(:, :, k) = (out3(k))
+ end
+
+ for l = 1:channels4
+ outImg4(:, :, l) = (out4(l))
+ end
+
+ varargout(1) = outImg4
+
+
+
+ case 9 then
+ srcMat4 = mattolist(varargin(1))
+ srcMat5 = mattolist(varargin(2))
+
+ [out1, out2, out3, out4, out5] = raw_align(maxBits, excludeRange, cut, num, srcMat1, srcMat2, srcMat3, srcMat4, srcMat5)
+
+ channels1 = size(out1)
+ channels2 = size(out2)
+ channels3 = size(out3)
+ channels4 = size(out4)
+ channels5 = size(out5)
+
+ for i = 1:channels1
+ outImg1(:, :, i) = (out1(i))
+ end
+
+ for j = 1:channels2
+ outImg2(:, :, j) = (out2(j))
+ end
+
+ for k = 1:channels3
+ outImg3(:, :, k) = (out3(k))
+ end
+
+ for l = 1:channels4
+ outImg4(:, :, l) = (out4(l))
+ end
+
+ varargout(1) = outImg4
+
+ for m = 1:channels5
+ outImg5(:, :, m) = (out5(m))
+ end
+
+ varargout(2) = outImg5
+
+ case 10 then
+ srcMat4 = mattolist(varargin(1))
+ srcMat5 = mattolist(varargin(2))
+ srcMat6 = mattolist(varargin(3))
+
+ [out1, out2, out3, out4, out5, out6] = raw_align(maxBits, excludeRange, cut, num, srcMat1, srcMat2, srcMat3, srcMat4, srcMat5, srcMat6)
+
+ channels1 = size(out1)
+ channels2 = size(out2)
+ channels3 = size(out3)
+ channels4 = size(out4)
+ channels5 = size(out5)
+ channels6 = size(out6)
+
+ for i = 1:channels1
+ outImg1(:, :, i) = (out1(i))
+ end
+
+ for j = 1:channels2
+ outImg2(:, :, j) = (out2(j))
+ end
+
+ for k = 1:channels3
+ outImg3(:, :, k) = (out3(k))
+ end
+
+ for l = 1:channels4
+ outImg4(:, :, l) = (out4(l))
+ end
+
+ varargout(1) = outImg4
+
+ for m = 1:channels5
+ outImg5(:, :, m) = (out5(m))
+ end
+
+ varargout(2) = outImg5
+
+ for n = 1:channels6
+ outImg6(:, :, n) = (out6(n))
+ end
+
+ varargout(3) = outImg6
+ end
+
+endfunction
diff --git a/macros/applyTransformer.sci b/macros/applyTransformer.sci
new file mode 100644
index 0000000..42fc26d
--- /dev/null
+++ b/macros/applyTransformer.sci
@@ -0,0 +1,92 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [tImg] = applyTransformer(srcImg1, srcImg2, typeOfMethod, hessianThreshold, rpTPS, sfAffine)
+// This function is used to apply affine or TPS transformation to image.
+//
+// Calling Sequence
+// [ tImg] = applyTransformer(srcImg1, srcImg2, typeOfMethod, hessianThreshold, rpTPS, sfAffine)
+//
+// Parameters
+// srcImg1 : It is the first input image.
+// srcImg2 : It is the second input image, which is also the target image.
+// typeOfMethod : It is used as a flag to pick a certain type of transformation. Use value '1' for 'Affine Transformation' and '2' for 'Thin Plate Spline Shape Transformation'. It is of double type.
+// hessianThreshold : It is the threshold value for Hessian keypoint detector in SURF(Speeded-Up Robust Features). It is of double type.
+// rpTPS : It is used to set the regularization parameter for relaxing the exact interpolation requirements of the TPS algorithm. It is of double type.
+// sfAffine : It is used to set the full-affine condition for Affine Transformation. If true, the function finds as optimal transformation with no additional restrictions(6 degrees of freedom). Otherwise, the class of transformations to choose from is limited to combination of translation, rotation & uniform scaling(5 degrees of freedom).
+// tImg : The transformed image of the target(srcImg2). It is of hypermat type.
+//
+// Description
+// This function is used to perform shape transformation, the user gets to choose and apply the type of transformation she/he wishes to perform.
+//
+// Examples
+// affine transformation
+// a = imread("/images/bryan.jpeg");
+// b = imread("/images/p1.jpg");
+// typeOfMethod=1
+// hessianThreshold=5000;
+// rpTPS=25000;
+// sfAffine=%f;
+// img=applyTransformer(a,b,typeOfMethod,hessianThreshold, rpTPS, sfAffine);
+//
+// Examples
+// a= imread("/images/lena.jpeg");
+// b= imread("/images/bryan.jpeg");
+// typeOfMethod=1
+// hessianThreshold=5000;
+// rpTPS=2000;
+// sfAffine=%t;
+// img=applyTransformer(a,b,typeOfMethod,hessianThreshold, rpTPS,sfAffine);
+//
+// Examples
+// TPS shape transformation
+// a = imread("/images/photo.jpg");
+// b= imread("/images/photo1.jpg");
+// typeOfMethod=2
+// hessianThreshold=5000;
+// rpTPS=800;
+// sfAffine=%t;
+// img=applyTransformer(a,b,typeOfMethod,hessianThreshold, rpTPS,sfAffine);
+//
+// Examples
+// a = imread("/images/b1.jpeg");
+// b= imread("/images/b2.jpeg");
+// typeOfMethod=1
+// hessianThreshold=5000;
+// rpTPS=800;
+// sfAffine=%f;
+// img=applyTransformer(a,b,typeOfMethod,hessianThreshold, rpTPS,sfAffine);
+//
+// Examples
+// a = imread("/images/photo.jpg");
+// b= imread("/images/photo1.jpg");
+// typeOfMethod=2
+// hessianThreshold=5000;
+// rpTPS=800;
+// sfAffine=%t;
+// img=applyTransformer(a,b,typeOfMethod,hessianThreshold, rpTPS,sfAffine);
+//
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+
+ srcMat1 = mattolist(srcImg1);
+ srcMat2 = mattolist(srcImg2);
+
+ [out] = raw_applyTransformer(srcMat1, srcMat2, typeOfMethod, hessianThreshold, rpTPS, sfAffine)
+
+ channels = size(out)
+
+ for i = 1:channels
+ tImg(:, :, i) = (out(i))
+ end
+
+endfunction
diff --git a/macros/bwLookUp.sci b/macros/bwLookUp.sci
new file mode 100644
index 0000000..d4c7e52
--- /dev/null
+++ b/macros/bwLookUp.sci
@@ -0,0 +1,44 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Samiran Roy
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [out] = bwLookUp(image,lut)
+// This function performs 2*2 and 3*3 nonlinear filtering using a lookup table.
+//
+// Calling Sequence
+// [out] = bwLookUp(image,lut)
+//
+// Parameters
+// image : The input is a grayscale image. If the image is not binary, it is converted to one.
+// lut : The lut is a 1*16 double vector [2*2 filtering], or a [1*512] double vector [3*3 filtering].
+// out : The output image is the same size as image, same data type as lut.
+//
+// Description
+// The function performs a 2-by-2 or 3-by-3 nonlinear neighborhood filtering operation on a grayscale image and returns the results in the output image. The neighborhood processing determines an integer index value used to access values in a lookup table 'lut'. The fetched lut value becomes the pixel value in the output image at the targeted position.
+//
+// Examples
+// // a simple example
+// a = imread("/images/lena.jpeg", 0);
+// lut = [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ];
+// b = bwLookUp(a,lut);
+//
+// Authors
+// Samiran Roy
+ image1 = mattolist(image);
+
+ a = raw_bwLookUp(image1,lut);
+
+ dimension = size(a)
+
+ for i = 1:dimension
+ out(:,:,i)=a(i);
+ end
+
+endfunction;
diff --git a/macros/contourArea.sci b/macros/contourArea.sci
new file mode 100644
index 0000000..f378ba1
--- /dev/null
+++ b/macros/contourArea.sci
@@ -0,0 +1,38 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Priyanka Hiranandani, Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [out] = contourArea(inputArrayContour, booloriented)
+// This function calculates the contour area.
+//
+// Calling Sequence
+// [out] = contourArea(inputArrayContour, booloriented)
+//
+// Parameters
+// inputArrayContour : The input vector of 2D points.
+// booloriented : The oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine the orientation of a contour by taking the sign of an area.
+// out : The output is the calculated area.
+//
+// Description
+// It computes the contour area. Also, the function will most certainly give a wrong results for contours with self-intersections.
+//
+// Examples
+// // a simple example
+// inputArrayContour = [0 0; 10 0; 10 10; 5 4];
+// booloriented = %t;
+// b = contourArea(inputArrayContour, booloriented);
+//
+// Authors
+// Priyanka Hiranandani, NIT Surat
+// Ashish Manatosh Barik, NIT Rourkela
+ out = raw_contourArea(inputArrayContour, booloriented);
+
+endfunction
+
diff --git a/macros/copyMakeBorder.sci b/macros/copyMakeBorder.sci
new file mode 100644
index 0000000..89d8ee1
--- /dev/null
+++ b/macros/copyMakeBorder.sci
@@ -0,0 +1,53 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik, Shubheksha Jalan
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function new_image = copyMakeBorder(image, top, bottom, left, right, borderType, value)
+// This function forms a border around the input image.
+//
+// Calling Sequence
+// [new_image] = copyMakeBorder(image, top, bottom, left, right, borderType, value)
+//
+// Parameters
+// image : The source image.
+// top : No. of pixels in this direction from the source image rectangle to extrapolate.
+// bottom : No. of pixels in this direction from the source image rectangle to extrapolate.
+// left : No. of pixels in this direction from the source image rectangle to extrapolate.
+// right : No. of pixels in this direction from the source image rectangle to extrapolate.
+// borderType : Stating the border type.
+// value : Border value if borderType==BORDER_CONSTANT.
+// new_image : The output image with specified borders.
+//
+// Description
+// This function forms a border around the input image. The areas to the left, to the right, above and below the copied source image are filled with the extrapolated pixels.
+//
+// Examples
+// // a simple example
+// a = imread("/images/lena.jpeg");
+// top=1;
+// bottom=1;
+// left=1;
+// right=1;
+// b = copyMakeBorder(a, top, bottom, left, right, "BORDER_CONSTANT", 1);
+//
+// Authors
+// Ashish Manatosh Barik
+// Shubheksha Jalan
+ image_list = mattolist(image)
+
+ out = raw_copyMakeBorder(image_list, top, bottom, left, right, borderType, value)
+
+ sz = size(out)
+
+ for i = 1:sz
+ new_image(:, :, i) = (out(i))
+ end
+
+endfunction
diff --git a/macros/detectBRIEFDescriptors.sci b/macros/detectBRIEFDescriptors.sci
new file mode 100644
index 0000000..0ccad06
--- /dev/null
+++ b/macros/detectBRIEFDescriptors.sci
@@ -0,0 +1,77 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik & Shubham Lohakare
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [varargout] = detectBRIEFDescriptors(srcImg, varargin)
+// This function is used for computing BRIEF descriptors using Star keypoints.
+//
+// Calling Sequence
+// [ a ] = detectBRIEFDescriptors(srcImg)
+// [ a ] = detectBRIEFDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, bytes, use_orientation )
+//
+// Parameters
+// srcImg : Hyper of input image
+// maxSize : Choose the number of filters to be applied, the parameter value set the maximum size.
+// responseThreshold : To eliminate weak corners.
+// lineThresholdProjected : Harris of responses.
+// lineThresholdBinarized : Harris of sizes.
+// suppressNonmaxSize : Window size (n-by-n) to apply the non-maximal suppression.
+// bytes : length of the descriptor in bytes, valid values are: 16, 32 (default) or 64.
+// use_orientation : sample patterns using keypoints orientation, disabled by default.
+// a : It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+//
+// Description
+// For extracting keypoints(StarDetector) and computing descriptors. BRIEF which gives the shortcut to find binary descriptors with less memory, faster matching, still higher recognition rate.
+//
+// Examples
+// // with default values
+// [ a ] = imread("/images/b1.jpeg");
+// [ b ] = imread("/images/b2.jpeg");
+// stacksize("max");
+// [ c ] = detectBRIEFDescriptors(a);
+// [ d ] = detectBRIEFDescriptors(b);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Examples
+// // user assigned values
+// [ a ] = imread("/images/b1.jpeg");
+// [ b ] = imread("/images/b2.jpeg");
+// stacksize("max");
+// [ c ] = detectBRIEFDescriptors(a, 45, 30, 10, 8, 5, 32, %f);
+// [ d ] = detectBRIEFDescriptors(b, 45, 30, 10, 8, 5, 32, %f);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+// Shubham Lohakare, NITK Surathkal
+ srcMat = mattolist(srcImg)
+
+ [lhs, rhs] = argn(0)
+
+ if rhs > 8 then
+ error(msprintf("Too many input arguments"))
+ end
+ if lhs > 1 then
+ error(msprintf("Too many output arguments"))
+ end
+
+
+ select rhs
+ case 1 then
+ [a b c d e] = raw_detectBRIEFDescriptors(srcMat)
+ case 8 then
+ [a b c d e] = raw_detectBRIEFDescriptors(srcMat, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7))
+ end
+
+ varargout(1) = struct('Type','Brief features','Features',a,'NumBits',b,'NumFeatures',c,'KeyPoints',d,'keypointsCount',e);
+
+endfunction
diff --git a/macros/detectDAISYDescriptors.sci b/macros/detectDAISYDescriptors.sci
new file mode 100644
index 0000000..193e24c
--- /dev/null
+++ b/macros/detectDAISYDescriptors.sci
@@ -0,0 +1,86 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik & Shubham Lohakare
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [varargout] = detectDAISYDescriptors(srcImg, varargin)
+// This function is used for computing DAISY descriptors using Star keypoints.
+//
+// Calling Sequence
+// [ a ] = detectDAISYDescriptors(srcImg)
+// [ a ] = detectDAISYDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, radius, q_radius, q_theta, q_hist, norm, interpolation, use_orientation)
+// [ a ] = detectDAISYDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, radius, q_radius, q_theta, q_hist, norm, homography, interpolation, use_orientation)
+//
+// Parameters
+// srcImg : Hyper of input image
+// maxSize : Choose the number of filters to be applied, the parameter value set the maximum size.
+// responseThreshold : To eliminate weak corners.
+// lineThresholdProjected : Harris of responses.
+// lineThresholdBinarized : Harris of sizes.
+// suppressNonmaxSize : Window size (n-by-n) to apply the non-maximal suppression.
+// radius : radius of the descriptor at the initial scale.
+// q_radius : amount of radial range division quantity.
+// q_theta : amount of angular range division quantity.
+// q_hist : amount of gradient orientations range division quantity.
+// norm : choose descriptors normalization type, where DAISY::NRM_NONE will not do any normalization (default), DAISY::NRM_PARTIAL mean that histograms are normalized independently for L2 norm equal to 1.0, DAISY::NRM_FULL mean that descriptors are normalized for L2 norm equal to 1.0, DAISY::NRM_SIFT mean that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT
+// homography : optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image.
+// interpolation : switch to disable interpolation for speed improvement at minor quality loss.
+// use_orientation : sample patterns using keypoints orientation, disabled by default.
+// a : It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+//
+// Description
+// For extracting keypoints(using StarDetector) and computing descriptors(DAISY).
+//
+// Examples
+// // with default values
+// [ a ] = imread("/images/b1.jpeg");
+// [ b ] = imread("/images/b2.jpeg");
+// stacksize('max')
+// [ c ] = detectDAISYDescriptors(a);
+// [ d ] = detectDAISYDescriptors(b);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Examples
+// // user assigned values
+// [ a ] = imread("/images/b1.jpeg");
+// [ b ] = imread("/images/b2.jpeg");
+// stacksize('max')
+// [ c ] = detectDAISYDescriptors(a, 45, 30, 10, 8, 5, 15, 3, 8, 8, 100, %t, %f);
+// [ d ] = detectDAISYDescriptors(b, 45, 30, 10, 8, 5, 15, 3, 8, 8, 100, %t, %f);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+// Shubham Lohakare, NITK Surathkal
+ srcMat = mattolist(srcImg)
+
+ [lhs, rhs] = argn(0)
+
+ if rhs > 14 then
+ error(msprintf("Too many input arguments"))
+ end
+ if lhs > 1 then
+ error(msprintf("Too many output arguments"))
+ end
+
+
+ select rhs
+ case 1 then
+ [a b c d e] = raw_detectDAISYDescriptors(srcMat)
+ case 13 then
+ [a b c d e] = raw_detectDAISYDescriptors(srcMat, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11), varargin(12))
+ case 14 then
+ [a b c d e] = raw_detectDAISYDescriptors(srcMat, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11), varargin(12), varargin(13))
+ end
+
+ varargout(1) = struct('Type','DAISY features','Features',a,'NumBits',b,'NumFeatures',c,'KeyPoints',d,'keypointsCount',e);
+
+endfunction
diff --git a/macros/detectLATCHDescriptors.sci b/macros/detectLATCHDescriptors.sci
new file mode 100644
index 0000000..fcb22e2
--- /dev/null
+++ b/macros/detectLATCHDescriptors.sci
@@ -0,0 +1,78 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik & Shubham Lohakare
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [varargout] = detectLATCHDescriptors(srcImg, varargin)
+// This function is used for computing the LATCH descriptors using Star keypoints.
+//
+// Calling Sequence
+// [ a ] = detectLATCHDescriptors(srcImg)
+// [ a ] = detectLATCHDescriptors(srcImg, maxSize, responseThreshold, lineThresholdProjected, lineThresholdBinarized, suppressNonmaxSize, bytes, rotationInvariance, half_ssd_size)
+//
+// Parameters
+// srcImg : Hyper of input image
+// maxSize : Choose the number of filters to be applied, the parameter value set the maximum size.
+// responseThreshold : To eliminate weak corners.
+// lineThresholdProjected : Harris of responses.
+// lineThresholdBinarized : Harris of sizes.
+// suppressNonmaxSize : Window size (n-by-n) to apply the non-maximal suppression.
+// bytes : It is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1.
+// rotationInvariance : whether or not the descriptor should compansate for orientation changes.
+// half_ssd_size) : the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7x then the half_ssd_size should be (7-1)/2 = 3.
+// a : It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+//
+// Description
+// For extracting keypoints(using StarDetectors) and computing descriptors(LATCH).
+//
+// Examples
+// // with default values
+// [ a ] = imread("/images/b1.jpeg");
+// [ b ] = imread("/images/b2.jpeg");
+// stacksize('max')
+// [ c ] = detectLATCHDescriptors(a);
+// [ d ] = detectLATCHDescriptors(b);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Examples
+// // user assigned values
+// [ a ] = imread("/images/b1.jpeg");
+// [ b ] = imread("/images/b2.jpeg");
+// stacksize('max')
+// [ c ] = detectLATCHDescriptors(a, 45, 30, 10, 8, 5, 32, %t, 3);
+// [ d ] = detectLATCHDescriptors(b, 45, 30, 10, 8, 5, 32, %t, 3);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+// Shubham Lohakare, NITK Surathkal
+ srcMat = mattolist(srcImg)
+
+ [lhs, rhs] = argn(0)
+
+ if rhs > 9 then
+ error(msprintf("Too many input arguments"))
+ end
+ if lhs > 1 then
+ error(msprintf("Too many output arguments"))
+ end
+
+
+ select rhs
+ case 1 then
+ [a b c d e] = raw_detectLATCHDescriptors(srcMat)
+ case 9 then
+ [a b c d e] = raw_detectLATCHDescriptors(srcMat, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8))
+ end
+
+ varargout(1) = struct('Type','LATCH features','Features',a,'NumBits',b,'NumFeatures',c,'KeyPoints',d,'keypointsCount',e);
+
+endfunction
diff --git a/macros/detectSIFTFeatures.sci b/macros/detectSIFTFeatures.sci
new file mode 100644
index 0000000..0869102
--- /dev/null
+++ b/macros/detectSIFTFeatures.sci
@@ -0,0 +1,74 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik & Shubham Lohakare
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [varargout] = detectSIFTFeatures(srcImg, varargin)
+// This function is used to find scale-invariant features.
+//
+// Calling Sequence
+// [ a ] = detectSIFTFeatures(srcImg)
+// [ a ] = detectSIFTFeatures(srcImg, nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma)
+//
+// Parameters
+// srcImg : Hypermat of input image.
+// nfeatures : The number of best features to retain. The features are ranked by their scores (measured in SIFT algorithm as the local contrast). If valued as 0, uses all detected keypoints.
+// nOctaveLayers : The number of layers in each octave. 3 is the value used in D. Lowe paper. The number of octaves is computed automatically from the image resolution.
+// contrastThreshold : The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
+// edgeThreshold : The threshold used to filter out edge-like features. Note that the its meaning is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are filtered out (more features are retained).
+// sigma : The sigma of the Gaussian applied to the input image at the octave #0. If your image is captured with a weak camera with soft lenses, you might want to reduce the number.
+// a : It is a struct consisting of 'Type'(Type of Feature) , 'Features'(descriptors) , 'NumBits', 'NumFeatures', 'KeyPoints', 'keypointsCount'.
+//
+// Description
+// For extracting keypoints and computing descriptors using the Scale Invariant Feature Transform. RGB images are converted to Grayscale images before processing.
+//
+// Examples
+// // with default values
+// a = imread("/images/photo1.jpeg");
+// b = imread("/images/photo2.jpeg");
+// stacksize("max");
+// c = detectSIFTFeatures(a);
+// d = detectSIFTFeatures(b);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Examples
+// // user assigned values
+// a = imread("/images/photo1.jpeg");
+// b = imread("/images/photo2.jpeg");
+// stacksize("max");
+// c = detectSIFTFeatures(a, 0, 3, 0.05, 11, 1.6);
+// d = detectSIFTFeatures(b, 0, 3, 0.05, 11, 1.6);
+// [ e f ] = matchFeatures(c.Features, d.Features);
+// out = drawMatch(a, b, c.KeyPoints, d.KeyPoints, e, f);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+ srcMat = mattolist(srcImg) // convert the image hypermatrix to a list for the raw gateway
+
+ [lhs, rhs] = argn(0)
+
+ if rhs > 6 then
+ error(msprintf("Too many input arguments"))
+ end
+ if lhs > 1 then
+ error(msprintf("Too many output arguments"))
+ end
+
+
+ select rhs // NOTE(review): only rhs==1 or rhs==6 is handled; 2-5 input args fall through silently — confirm intended
+ case 1 then
+ [a b c d e] = raw_detectSIFTFeatures(srcMat)
+ case 6 then
+ [a b c d e] = raw_detectSIFTFeatures(srcMat, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5))
+ end
+
+ varargout(1) = struct('Type','Scale-Invariant-Features','Features',a,'NumBits',b,'NumFeatures',c,'KeyPoints',d,'keypointsCount',e); // pack gateway outputs into the documented struct
+
+endfunction
diff --git a/macros/distanceExtractor.sci b/macros/distanceExtractor.sci
new file mode 100644
index 0000000..3a6bdad
--- /dev/null
+++ b/macros/distanceExtractor.sci
@@ -0,0 +1,88 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function dist = distanceExtractor(srcImg1, srcImg2, typeOfMethod, varargin)
+// This function computes the shape-distance between two images.
+//
+// Calling Sequence
+// [ dist ] = distanceExtractor(srcImg1, srcImg2, typeOfMethod); // Hausdorff distance
+// [ dist ] = distanceExtractor(srcImg1, srcImg2, typeOfMethod, nAngularBins, innerRadius, nRadialBins, outerRadius, iterations); // Shape Context
+//
+// Parameters
+// srcImg1 : It is the first input image.
+// srcImg2 : It is the second input image.
+// typeOfMethod : It is used as a flag to pick a certain type of Shape Distance calculation technique. Use '1' for 'Shape Context' and '2' for 'Hausdorff'.
+// nAngularBins : Establish the number of angular bins for the Shape Context Descriptor used in the shape matching pipeline.
+// nRadialBins : Establish the number of radial bins for the Shape Context Descriptor used in the shape matching pipeline.
+// innerRadius : Set the inner radius of the shape context descriptor.
+// outerRadius : Set the outer radius of the shape context descriptor.
+// dist : It is the calculated distance. It is of double type.
+//
+// Description
+// This function is used to compute the shape distance between two shapes defined by its contours.
+//
+// Examples
+// // Hausdorff distance extractor
+// a = imread("/images/bnwhite.jpg");
+// b = imread("/images/bryan.jpeg");
+// typeOfMethod=2;//2 is for hausdorff
+// c=distanceExtractor(a,b,typeOfMethod);
+//
+// Examples
+// // Shape Context Distance extractor
+// a = imread("/images/photo.jpg");
+// b = imread("/images/photo1.jpg");
+// typeOfMethod=1; //1 for ShapeContext
+// nAngularBins=12;
+// nRadialBins=4;
+// innerRadius=0.2;
+// outerRadius=2;
+// iterations=3;
+// ndummies = 25; // (not used by the supported 8-argument calling form)
+// defaultCost = 0.2; // (not used by the supported 8-argument calling form)
+// rpTps =0 ; // (not used by the supported 8-argument calling form)
+// dist=distanceExtractor(a,b,typeOfMethod,nAngularBins,nRadialBins,innerRadius,outerRadius,iterations);
+//
+// Examples
+// Incorrect usage
+// a=4; (not hypermat)
+// b=88; (not hypermat)
+// typeOfMethod=1; //1 for ShapeContext
+// nAngularBins=12;
+// nRadialBins=4;
+// innerRadius=2;
+// outerRadius=0.2;
+// iterations=300;
+// ndummies = 25;
+// defaultCost = 0.2;
+// rpTps =0 ;
+// dist=distanceExtractor(a,b,typeOfMethod,nAngularBins,nRadialBins,innerRadius,outerRadius,iterations);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+
+
+
+ srcMat1 = mattolist(srcImg1); // convert the image hypermatrices to lists for the raw gateway
+ srcMat2 = mattolist(srcImg2);
+
+ [lhs, rhs] = argn(0)
+
+ select rhs
+ case 3 then // Hausdorff
+ dist = raw_distanceExtractor(srcMat1, srcMat2, typeOfMethod)
+
+ case 8 then // Shape Context
+ dist = raw_distanceExtractor(srcMat1, srcMat2, typeOfMethod, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8)) // NOTE(review): with rhs==8 only varargin(1..5) exist, so varargin(6..8) will fail at run time — confirm the intended argument count
+ end
+
+
+endfunction
diff --git a/macros/fillConvexPoly.sci b/macros/fillConvexPoly.sci
new file mode 100644
index 0000000..9a0f08e
--- /dev/null
+++ b/macros/fillConvexPoly.sci
@@ -0,0 +1,50 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Abhilasha Sancheti & Sukul Bagai
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [out] = fillConvexPoly(img, pstData, npts, r_value, g_value, b_value, linetype, shift)
+// This function fills a convex polygon.
+//
+// Calling Sequence
+// [out] = fillConvexPoly(img, pstData, npts, r_value, g_value, b_value, linetype, shift)
+//
+// Parameters
+// img : The input source image.
+// pstData : The vector of polygon vertices.
+// npts : The number of polygon vertices.
+// r_value : The red value of RGB color for the polygon.
+// g_value : The green value of RGB color for the polygon.
+// b_value : The blue value of RGB color for the polygon.
+// linetype : This is the type of the polygon boundaries. It has only 3 valid types: 4, 8 and 16(CV_AA). Passing any other value as lineType is not legal.
+// shift : This is the number of fractional bits in the vertex coordinates.
+//
+// Description
+// The function fillConvexPoly draws a filled convex polygon. It can fill not only convex polygons but any monotonic polygon without self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line) twice at the most (though, its top-most and/or the bottom edge could be horizontal).
+//
+// Examples
+// // a simple example
+// a = imread("/images/lena.jpeg");
+// b = [ 0 10; 10 0; -10 0 ];
+// c = fillConvexPoly(a, b, 3, 1, 1, 1, 8, 0);
+//
+// Authors
+// Abhilasha Sancheti
+// Sukul Bagai
+ image = mattolist(img); // convert the image hypermatrix to a list for the raw gateway
+
+ a = raw_fillConvexPoly(image, pstData, npts, r_value, g_value, b_value, linetype, shift)
+
+ d = size(a); // number of channel planes returned by the gateway
+
+ for i=1:d // reassemble the channel list into an output hypermatrix
+ out(:,:,i) = a(i);
+ end
+
+endfunction
diff --git a/macros/gabor.sci b/macros/gabor.sci
new file mode 100644
index 0000000..69e35c0
--- /dev/null
+++ b/macros/gabor.sci
@@ -0,0 +1,40 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Samiran Roy
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [outputImg] = gabor(wavelength,orientation)
+// This function creates a Gabor filter.
+//
+// Calling Sequence
+// [outputImg] = gabor(wavelength,orientation)
+//
+// Parameters
+// wavelength : It is the wavelength of sinusoid, specified as a numeric scalar or vector, in pixels/cycle.
+// orientation : It is the orientation of filter in degrees, specified as a numeric scalar in the range [0 180], where the orientation is defined as the normal direction to the sinusoidal plane wave.
+// outputImg : The Gabor filter.
+//
+// Description
+// It creates a Gabor filter with the specified wavelength (in pixels/cycle) and orientation (in degrees). If you specify wavelength or orientation as vectors, gabor returns an array of gabor objects, called a filter bank, that contain all the unique combinations of wavelength and orientation. For example, if wavelength is a vector of length 2 and orientation is a vector of length 3, then the output array is a vector of length 6.
+//
+// Examples
+// // Create an array of Gabor filters.
+// wavelength = 20;
+// orientation = 45;
+// a = gabor(wavelength, orientation);
+//
+// Authors
+// Samiran Roy
+ outputList = raw_gabor(wavelength,orientation); // gateway returns one list entry per filter plane
+
+ for i=1:size(outputList) // stack the list entries into a hypermatrix
+ outputImg(:,:,i)=outputList(i)
+ end
+
+endfunction
diff --git a/macros/gaussianBlur.sci b/macros/gaussianBlur.sci
new file mode 100644
index 0000000..3a6c0a2
--- /dev/null
+++ b/macros/gaussianBlur.sci
@@ -0,0 +1,43 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Sukul Bagai
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [outputImg]= gaussianBlur(inputImage,ksize_height,ksize_width,sigmaX,sigmaY)
+// This function blurs the input image using a Gaussian filter.
+//
+// Calling Sequence
+// outputImg = gaussianBlur(inputImage,ksize_height,ksize_width,sigmaX,sigmaY)
+//
+// Parameters
+// inputImage : The input source image.
+// ksize_height : It is the gaussian kernel height. It must be positive and odd.
+// ksize_width : It is the gaussian kernel width. It must be positive and odd.
+// sigmaX : It is the gaussian kernel standard deviation in X direction.
+// sigmaY : It is the gaussian kernel standard deviation in Y direction.
+// outputImg : The output filtered image is of the same size and type as the input image.
+//
+// Description
+// The function convolves the source image with the specified Gaussian kernel.
+//
+// Examples
+// inputImage = imread('/images/lena.jpg');
+// outputImg = gaussianBlur(inputImage,5,5,1,1);
+//
+// Authors
+// Sukul Bagai
+ inputList=mattolist(inputImage); // convert the image hypermatrix to a list for the raw gateway
+
+ outputList=raw_gaussianBlur(inputList,ksize_height,ksize_width,sigmaX,sigmaY);
+
+ for i=1:size(outputList) // stack the per-channel results back into a hypermatrix
+ outputImg(:,:,i)=outputList(i)
+ end
+
+endfunction
diff --git a/macros/histogramCostExtractor.sci b/macros/histogramCostExtractor.sci
new file mode 100644
index 0000000..37b81fa
--- /dev/null
+++ b/macros/histogramCostExtractor.sci
@@ -0,0 +1,75 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik & Shubham Lohakare
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [costMat] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod, hessianThreshold, varargin)
+// This function computes the cost matrix.
+//
+// Calling Sequence
+// [ costMatrix ] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod=3, hessianThreshold); // Norm based cost
+// [ costMatrix ] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod=1, hessianThreshold, nDummies, defaultCost); // Chi based cost extraction
+// [ costMatrix ] = histogramCostExtractor(srcImg1, srcImg2, typeOfMethod=2, hessianThreshold, nDummies, defaultCost); // EMDL1 based cost extraction
+//
+// Parameters
+// srcImg1 : It is the first input image.
+// srcImg2 : It is the second input image.
+// typeOfMethod : It is used as a flag to pick a certain type of transformation. Use value '1' for 'Chi based cost extraction', '2' for 'EMDL1 based cost extraction' and '3' for 'Norm based cost extraction'. It is of double type.
+// hessianThreshold : It is the threshold value for Hessian keypoint detector in SURF(Speeded-Up Robust Features). It is of double type.
+// rpTPS : It is used to set the regularization parameter for relaxing the exact interpolation requirements of the TPS algorithm. It is of double type.
+// costMatrix : It is the cost matrix.
+//
+// Description
+// This function is used to calculate the histogram based cost matrix of two images, the user gets to choose and apply the type of transformation she/he wishes to perform.
+//
+// Examples
+// // Chi based cost extraction
+// a= imread("/images/n.jpeg");
+// b= imread("/images/n1.jpeg");
+// typeOfMethod=1;
+// hessianThreshold=5000;
+// nDummies=25;
+// defaultCost=0.2;
+// c=histogramCostExtractor(a,b,typeOfMethod,hessianThreshold,nDummies,defaultCost);
+//
+// Examples
+// // EMDL1
+// a = imread("/images/n.jpeg");
+// b = imread("/images/n1.jpeg");
+// typeOfMethod=2;
+// hessianThreshold=5000;
+// nDummies=25;
+// defaultCost=0.2;
+// c=histogramCostExtractor(a,b,typeOfMethod,hessianThreshold,nDummies,defaultCost);
+//
+// Examples
+// Norm based cost extraction
+// a = imread("/images/n.jpeg");
+// b= imread("/images/n1.jpeg");
+// typeOfMethod=3;
+// hessianThreshold=5000;
+// c=histogramCostExtractor(a,b,typeOfMethod,hessianThreshold);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+// Shubham Lohakare, NITK Surathkal
+
+ srcMat1 = mattolist(srcImg1) // convert the image hypermatrices to lists for the raw gateway
+ srcMat2 = mattolist(srcImg2)
+
+ [lhs, rhs] = argn(0)
+
+ select rhs // NOTE(review): only rhs==4 or rhs==6 is handled; other counts fall through silently — confirm intended
+ case 4 then
+ costMat = raw_histogramCostExtractor(srcMat1, srcMat2, typeOfMethod, hessianThreshold)
+ case 6 then
+ costMat = raw_histogramCostExtractor(srcMat1, srcMat2, typeOfMethod, hessianThreshold, varargin(1), varargin(2))
+ end
+
+endfunction
diff --git a/macros/imGaborFilt.sci b/macros/imGaborFilt.sci
new file mode 100644
index 0000000..546d075
--- /dev/null
+++ b/macros/imGaborFilt.sci
@@ -0,0 +1,45 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Samiran Roy
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function new_image = imGaborFilt(image, wavelength, orientation)
+// The function applies Gabor filter or set of filters to 2-D image.
+//
+// Calling Sequence
+// [new_image] = imGaborFilt(image, wavelength, orientation)
+//
+// Parameters
+// image : The input grayscale image.
+// wavelength : It is the wavelength of the sinusoidal carrier, specified as a numeric scalar in the range [2,Inf), in pixels/cycle.
+// orientation : Orientation value of filter in degrees, specified as a numeric scalar in the range [0 360], where the orientation is defined as the normal direction to the sinusoidal plane wave.
+//
+// Description
+// It computes the magnitude and phase response of a Gabor filter for the input grayscale image.
+//
+// Examples
+// // apply Single Gabor Filter to Input Image
+// a = imread("/images/lena.jpeg", 0);
+// wavelength = 4;
+// orientation = 90;
+// b = imGaborFilt(a, wavelength, orientation)
+//
+// Authors
+// Samiran Roy
+ image_list = mattolist(image) // convert the image hypermatrix to a list for the raw gateway
+
+ out = raw_imGaborFilt(image_list, wavelength, orientation)
+
+ sz = size(out) // number of channel planes returned by the gateway
+
+ for i=1:sz // reassemble the channel list into an output hypermatrix
+ new_image(:, :, i) = out(i)
+ end
+
+endfunction
diff --git a/macros/lab2rgb.sci b/macros/lab2rgb.sci
new file mode 100644
index 0000000..c69e986
--- /dev/null
+++ b/macros/lab2rgb.sci
@@ -0,0 +1,88 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Tess Zacharias, Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = lab2rgb(pstData)
+// This function converts CIE 1976 L*a*b* to RGB.
+//
+// Calling Sequence
+// [output] = lab2rgb(pstData)
+//
+// Parameters
+// pstData : The color values to convert, specified as a list of values.
+// output : The converted color values, returned as an array of the same shape as the input.
+//
+// Description
+// Convert CIE 1976 L*a*b* to RGB.
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(70, 5, 10);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(71, 50, 10);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(7.3, 5.53, 10);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(70, 5, 10.6656);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(70, 5.45, 10.45);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(7.343, 5.34, 10);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(70, 500, 1012);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(701.2, 5, 10);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(70, 5.545, 1.0);
+// b = lab2rgb(a);
+//
+// Examples
+// // Convert a color value in L*a*b* color space to the Adobe RGB (1998) color space.
+// a = list(23, 51, 18);
+// b = lab2rgb(a);
+//
+// Authors
+// Tess Zacharias
+// Ashish Manatosh Barik
+ out = raw_lab2rgb(pstData) // gateway returns one list entry per output channel
+
+ channels = size(out)
+
+ for i = 1:channels // reassemble the channel list into an output hypermatrix
+ output(:, :, i) = (out(i))
+ end
+
+ output = double(output) // ensure the result is returned as doubles
+
+endfunction
diff --git a/macros/lab2uint8.sci b/macros/lab2uint8.sci
new file mode 100644
index 0000000..3f6c650
--- /dev/null
+++ b/macros/lab2uint8.sci
@@ -0,0 +1,86 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Tess Zacharias
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = lab2uint8(pstData)
+// This function converts L*a*b* data to uint8.
+//
+// Calling Sequence
+// [output] = lab2uint8(pstData)
+//
+// Parameters
+// pstData : It is a list of color values.
+// output : The converted uint8 value. lab8 has the same size as lab.
+//
+// Description
+// Converts L*a*b* data to uint8.
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(70, 5, 10);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(71, 5, 10);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(0, 5, 10);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(89, 50, 10);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(70, 5, 10.78);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(7, 5, 89);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(70.344, 5.34, 10);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(0, 0, 10);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(70.89, 5.11, 10.33);
+// b = lab2uint8(a);
+//
+// Examples
+// // to convert L*a*b* color values from double to uint8.
+// a = list(10, 5, 10);
+// b = lab2uint8(a);
+//
+// Authors
+// Tess Zacharias
+
+ out = raw_lab2uint8(pstData); // gateway returns one list entry per output channel
+
+ channels = size(out)
+
+ for i = 1:channels // reassemble the channel list into an output hypermatrix
+ output(:, :, i) = (out(i))
+ end
+
+endfunction
diff --git a/macros/makeHDR.sci b/macros/makeHDR.sci
new file mode 100644
index 0000000..0c1d2bc
--- /dev/null
+++ b/macros/makeHDR.sci
@@ -0,0 +1,248 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [outHDR, outLDR] = makeHDR(typeOfMethod, num, varargin)
+// This function is used to create HDR image.
+//
+// Calling Sequence
+// [out1, out2] = makeHDR(typeOfMethod=1, num=3, srcMat_1, srcMat_2, srcMat_3, ex_1, ex_2, ex_3, max_iter, threshold) // Robertson merging
+// [out1, out2] = makeHDR(typeOfMethod=2, num=3, srcMat_1, srcMat_2, srcMat_3, ex_1, ex_2, ex_3, samples, lambda, random) // Debevec merging
+// [out1, out2] = makeHDR(typeOfMethod=3, num=3, srcMat_1, srcMat_2, srcMat_3, ex_1, ex_2, ex_3, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+// [out1, out2] = makeHDR(typeOfMethod=1, num=4, srcMat_1, srcMat_2, srcMat_3, srcMat_4, ex_1, ex_2, ex_3, ex_4, max_iter, threshold) // Robertson merging
+// [out1, out2] = makeHDR(typeOfMethod=2, num=4, srcMat_1, srcMat_2, srcMat_3, srcMat_4, ex_1, ex_2, ex_3, ex_4, samples, lambda, random) // Debevec merging
+// [out1, out2] = makeHDR(typeOfMethod=3, num=4, srcMat_1, srcMat_2, srcMat_3, srcMat_4, ex_1, ex_2, ex_3, ex_4, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+// [out1, out2] = makeHDR(typeOfMethod=1, num=5, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, ex_1, ex_2, ex_3, ex_4, ex_5, max_iter, threshold) // Robertson merging
+// [out1, out2] = makeHDR(typeOfMethod=2, num=5, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, ex_1, ex_2, ex_3, ex_4, ex_5, samples, lambda, random) // Debevec merging
+// [out1, out2] = makeHDR(typeOfMethod=3, num=5, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, ex_1, ex_2, ex_3, ex_4, ex_5, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+// [out1, out2] = makeHDR(typeOfMethod=1, num=6, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, srcMat_6, ex_1, ex_2, ex_3, ex_4, ex_5, ex_6, max_iter, threshold) // Robertson merging
+// [out1, out2] = makeHDR(typeOfMethod=2, num=6, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, srcMat_6, ex_1, ex_2, ex_3, ex_4, ex_5, ex_6, samples, lambda, random) // Debevec merging
+// [out1, out2] = makeHDR(typeOfMethod=3, num=6, srcMat_1, srcMat_2, srcMat_3, srcMat_4, srcMat_5, srcMat_6, ex_1, ex_2, ex_3, ex_4, ex_5, ex_6, contrast_weight, saturation_weight, exposure_weight) // Mertens merging
+//
+// Parameters
+// typeOfMethod : Use '1' for 'Robertson', '2' for 'Debevec', or '3' for 'Mertens'.
+// num : It is the number of images being fed as input. It is of Double type.
+// srcMat_i : It is the hypermat of input source image.
+// ex_i : It is the exposure value of the corresponding image_i. It is of double type.
+// max_iter : (Robertson) maximal number of Gauss-Seidel solver iterations. It is of Double type.
+// threshold : (Robertson) target difference between results of two successive steps of the minimization. It is of Double type.
+// samples : (Debevec) number of pixel locations to use. It is of Double type.
+// lambda : (Debevec) smoothness term weight. Greater values produce smoother results, but can alter the response. It is of Double type.
+// random : (Debevec) if true sample pixel locations are chosen at random, otherwise they form a rectangular grid. It is of Boolean type.
+// contrast_weight : (Mertens) contrast measure weight. It is of Double type.
+// saturation_weight : (Mertens) saturation measure weight. It is of Double type.
+// exposure_weight : (Mertens) well-exposedness measure weight. It is of Double type.
+// out1 : HDR image
+// out2 : LDR image
+//
+// Description
+// This function takes a set of images of the same scene in different exposures which have been aligned accordingly and outputs the HDR image.
+//
+// Examples
+// // input of 3 images(min), using Robertson merging technique
+// a = imread("/images/t1.jpeg");
+// b = imread("/images/t2.jpeg");
+// c = imread("/images/t3.jpeg");
+// num = 3;
+// typeOfMethod = 1;
+// ex1 = 15;
+// ex2 = 2.5;
+// ex3 = 0.25;
+// maxIter = 30;
+// thres = 0.01;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, ex1, ex2, ex3, maxIter, thres);
+//
+// Examples
+// // Use of Debevec merging technique
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// d = imread("/images/m4.jpeg");
+// e = imread("/images/m5.jpeg");
+// f = imread("/images/m6.jpeg");
+// num = 6;
+// typeOfMethod = 2;
+// ex1 = 0.0167;
+// ex2 = 0.034;
+// ex3 = 0.067;
+// ex4 = 0.125;
+// ex5 = 0.25;
+// ex6 = 0.5;
+// samples = 70;
+// lambda = 10.0;
+// random = %f;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, d, e, f, ex1, ex2, ex3, ex4, ex5, ex6, samples, lambda, random);
+//
+// Examples
+// // use of Robertson merging technique
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// d = imread("/images/m4.jpeg");
+// e = imread("/images/m5.jpeg");
+// f = imread("/images/m6.jpeg");
+// num = 6;
+// typeOfMethod = 1;
+// ex1 = 0.0167;
+// ex2 = 0.034;
+// ex3 = 0.067;
+// ex4 = 0.125;
+// ex5 = 0.25;
+// ex6 = 0.5;
+// maxIter = 30;
+// thres = 0.01;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, d, e, f, ex1, ex2, ex3, ex4, ex5, ex6, maxIter, thres);
+//
+// Examples
+// // alternative to creating an HDR image, resulting image is of average exposure. Faster compared to rendering a HDR image.
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = imread("/images/m3.jpeg");
+// d = imread("/images/m4.jpeg");
+// e = imread("/images/m5.jpeg");
+// f = imread("/images/m6.jpeg");
+// num = 6;
+// typeOfMethod = 3;
+// ex1 = 0.0167;
+// ex2 = 0.034;
+// ex3 = 0.067;
+// ex4 = 0.125;
+// ex5 = 0.25;
+// ex6 = 0.5;
+// contrastWeight = 1.0;
+// saturationWeight = 1.0;
+// exposureWeight = 0.0;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, d, e, f, ex1, ex2, ex3, ex4, ex5, ex6, contrastWeight, saturationWeight, exposureWeight);
+//
+// Examples
+// a = imread("/images/i1.jpeg");
+// b = imread("/images/i2.jpeg");
+// c = imread("/images/i3.jpeg");
+// d = imread("/images/i4.jpeg");
+// num = 4;
+// typeOfMethod = 2;
+// ex1 = 0.034;
+// ex2 = 0.008;
+// ex3 = 0.0034;
+// ex4 = 0.00073;
+// samples = 70;
+// lambda = 10.0;
+// random = %f;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, d, ex1, ex2, ex3, ex4, samples, lambda, random);
+//
+// Examples
+// a = imread("/images/i1.jpeg");
+// b = imread("/images/i2.jpeg");
+// c = imread("/images/i3.jpeg");
+// d = imread("/images/i4.jpeg");
+// num = 4;
+// typeOfMethod = 1;
+// ex1 = 0.034;
+// ex2 = 0.008;
+// ex3 = 0.0034;
+// ex4 = 0.00073;
+// maxIter = 30;
+// thres = 0.01;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, d, ex1, ex2, ex3, ex4, maxIter, thres);
+//
+// Examples
+// a = imread("/images/i1.jpeg");
+// b = imread("/images/i2.jpeg");
+// c = imread("/images/i3.jpeg");
+// d = imread("/images/i4.jpeg");
+// num = 4;
+// typeOfMethod = 3;
+// ex1 = 0.034;
+// ex2 = 0.008;
+// ex3 = 0.0034;
+// ex4 = 0.00073;
+// maxIter = 30;
+// contrastWeight = 1.0;
+// saturationWeight = 1.0;
+// exposureWeight = 0.0;
+// [hdr, ldr] = makeHDR(typeOfMethod, num, a, b, c, d, ex1, ex2, ex3, ex4, contrastWeight, saturationWeight, exposureWeight);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+//
+ [lhs, rhs] = argn(0)
+
+ select rhs // dispatch on the total argument count: 2 fixed args + num images + num exposures + method parameters
+ case 10 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8))
+
+ case 11 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9))
+
+ case 12 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10))
+
+ case 13 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11))
+
+ case 14 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11), varargin(12))
+
+ case 15 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11), varargin(12), varargin(13))
+
+ case 16 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11), varargin(12), varargin(13), varargin(14))
+
+ case 17 then
+ for i = 1:num // convert each image hypermatrix to a list for the raw gateway
+ varargin(i) = mattolist(varargin(i))
+ end
+
+ [out1, out2] = raw_makeHDR(typeOfMethod, num, varargin(1), varargin(2), varargin(3), varargin(4), varargin(5), varargin(6), varargin(7), varargin(8), varargin(9), varargin(10), varargin(11), varargin(12), varargin(13), varargin(14), varargin(15))
+
+ end
+
+
+ channels1 = size(out1) // channel counts of the HDR and LDR results
+ channels2 = size(out2)
+
+ for i = 1:channels1 // reassemble the HDR channel list into a hypermatrix
+ outHDR(:, :, i) = out1(i)
+ end
+
+ for i = 1:channels2 // reassemble the LDR channel list into a hypermatrix
+ outLDR(:, :, i) = out2(i)
+ end
+
+endfunction
diff --git a/macros/ntsc2rgb.sci b/macros/ntsc2rgb.sci
new file mode 100644
index 0000000..2df465d
--- /dev/null
+++ b/macros/ntsc2rgb.sci
@@ -0,0 +1,76 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Tess Zacharias
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = ntsc2rgb(pstData)
+// This function converts NTSC values to RGB color space.
+//
+// Calling Sequence
+// [output] = ntsc2rgb(pstData)
+//
+// Parameters
+// pstData : It is a list of the NTSC luminance (Y) and chrominance (I and Q) color components.
+// output : It is a list that contains the red, green, and blue values equivalent to those colors.
+//
+// Description
+// Converts NTSC values to RGB color space.
+//
+// Examples
+// // Convert the grayscale image back to RGB color space.
+// a = imread("/images/b1.jpeg",0)
+// b = ntsc2rgb(a);
+//
+// Examples
+// // Convert the grayscale image back to RGB color space.
+// a = imread("/images/b2.jpeg",0)
+// b = ntsc2rgb(a);
+//
+// Examples
+// // Convert the grayscale image back to RGB color space.
+// a = imread("/images/graf1.jpeg",0)
+// b = ntsc2rgb(a);
+//
+// Examples
+// // Convert the grayscale image back to RGB color space.
+// a = imread("/images/graf2.jpeg",0)
+// b = ntsc2rgb(a);
+//
+// Examples
+// // input RGB image
+// a = imread("/images/b2.jpeg")
+// b = ntsc2rgb(a);
+//
+// Examples
+// // input RGB image
+// a = imread("/images/graf1.jpeg")
+// b = ntsc2rgb(a);
+//
+// Examples
+// // input RGB image
+// a = imread("/images/garf2.jpeg")
+// b = ntsc2rgb(a);
+//
+// Examples
+// // Convert the grayscale image back to RGB color space.
+// a = imread("/images/lena.jpeg",0)
+// b = ntsc2rgb(a);
+//
+// Authors
+// Tess Zacharias
+
+ out = raw_ntsc2rgb(pstData)
+
+ channels = size(out)
+
+ for i= 1:channels
+ output(:, :, i) = out(i)
+ end
+
+endfunction
diff --git a/macros/pcwrite.sci b/macros/pcwrite.sci
new file mode 100644
index 0000000..258eb35
--- /dev/null
+++ b/macros/pcwrite.sci
@@ -0,0 +1,57 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [] = pcwrite(pointCloud, filename, varargin)
+// This function is used to write 3-D point cloud to PLY or PCD file.
+//
+// Calling Sequence
+// [] = pcwrite(pointCloud, filename)
+// [] = pcwrite(pointCloud, filename, fileFormat, fileType)
+//
+// Parameters
+// pointCloud : Object for storing point cloud, specified as a pointCloud object.
+// filename : File name, specified as a character vector; specify the file name with an extension in case of two input arguments (default encoding is ASCII).
+// fileFormat : The input file type must be a PLY or PCD format file.(choose between ".ply" or ".pcd")
+// fileType : Choose from the following encoding, PLY - 'ascii', 'binary' and PCD - 'ascii', 'binary', or 'compressed'.
+//
+// Description
+// Writes the point cloud object, ptCloud, to the PLY or PCD file specified by the input.
+//
+// Examples
+// // Write 3-D Point Cloud to PLY File
+// ptCloud = pcread('teapot.ply');
+// pcshow(ptCloud);
+// pcwrite(ptCloud,'teapotOut','ply','binary');
+//
+// Examples
+// // Write 3-D Point Cloud to PCD File
+// ptCloud = pcread('teapot.ply');
+// pcshow(ptCloud);
+// pcwrite(ptCloud,'teapotOut','pcd','binary');
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+ if(strcmp(pointCloud.dataType,'PointCloud')~=0) then
+ error(msprintf("function expects a PointCloud"))
+ end
+typeof(pointCloud.Width)
+ [lhs, rhs] = argn(0)
+
+ select rhs
+
+ case 2 then
+ raw_pcwrite(pointCloud.Width, pointCloud.Height, pointCloud.is_dense, pointCloud.Location, pointCloud.Count, pointCloud.Color, pointCloud.XLimits, pointCloud.YLimits, pointCloud.ZLimits, filename)
+ case 4 then
+ raw_pcwrite(pointCloud.Width, pointCloud.Height, pointCloud.is_dense, pointCloud.Location, pointCloud.Count, pointCloud.Color, pointCloud.XLimits, pointCloud.YLimits, pointCloud.ZLimits, filename, varargin(1), varargin(2))
+
+ end
+
+endfunction
diff --git a/macros/rgb2lab.sci b/macros/rgb2lab.sci
new file mode 100644
index 0000000..7f331a1
--- /dev/null
+++ b/macros/rgb2lab.sci
@@ -0,0 +1,80 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Sridhar Reddy, Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = rgb2lab(inputImage)
+// This function converts RGB to CIE 1976 L*a*b*.
+//
+// Calling Sequence
+// [output] = rgb2lab(inputImage)
+//
+// Parameters
+// inputImage : It is a list of color values to convert.
+// output : The converted color values, returned as a list.
+//
+// Description
+// Converts RGB to CIE 1976 L*a*b*.
+//
+// Examples
+// // to convert the RGB white value to L*a*b.
+// rgb2lab([1 1 1])
+//
+// Examples
+// // to convert the RGB white value to L*a*b.
+// rgb2lab([.2 .3 .4])
+//
+// Examples
+// // Read RGB image to convert
+// a = imread("../images/b1.jpeg");
+// b = rgb2lab(a);
+//
+// Examples
+// // Read RGB image to convert
+// a = imread("../images/b2.jpeg");
+// b = rgb2lab(a);
+//
+// Examples
+// // to convert the RGB white value to L*a*b.
+// rgb2lab([23 23 22])
+//
+// Examples
+// // Read RGB image to convert
+// a = imread("../images/lena.jpeg");
+// b = rgb2lab(a);
+//
+// Examples
+// // to convert the RGB white value to L*a*b.
+// rgb2lab([34.2 43.3 343.4])
+//
+// Examples
+// // Read RGB image to convert
+// a = imread("../images/graf1.jpeg");
+// b = rgb2lab(a);
+//
+// Examples
+// // Read RGB image to convert
+// a = imread("../images/graf2.jpeg");
+// b = rgb2lab(a);
+//
+// Examples
+// // Read RGB image to convert which doesn't exist
+// a = imread("../images/b.jpeg");
+// b = rgb2lab(a);
+//
+// Authors
+// Sridhar Reddy
+// Ashish Manatosh Barik
+ inputList = mattolist(inputImage);
+
+ output = raw_rgb2lab(inputList);
+
+
+endfunction
+
diff --git a/macros/ssim.sci b/macros/ssim.sci
new file mode 100644
index 0000000..1e12738
--- /dev/null
+++ b/macros/ssim.sci
@@ -0,0 +1,39 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Dhruti Shah
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [ssim_val] = ssim(srcImg, reference)
+// This function is used to compute the Structural Similarity Index (SSIM) for measuring image quality.
+//
+// Calling Sequence
+// [ssim_val] = ssim(srcImg, reference)
+//
+// Parameters
+// srcImg : The input image whose quality is to be measured. Must be the same size and class as reference.
+// reference : Reference image against which quality is measured.
+// ssim_val : Structural Similarity (SSIM) Index.
+//
+// Description
+// Computes the Structural Similarity Index (SSIM) value.
+//
+// Examples
+// // a simple example
+// a = imread("/images/m1.jpeg");
+// b = imread("/images/m2.jpeg");
+// c = ssim(a,b);
+//
+// Authors
+// Dhruti Shah
+ srcMat1 = mattolist(srcImg)
+ srcMat2 = mattolist(reference)
+
+ ssim_val = raw_ssim(srcMat1, srcMat2)
+
+endfunction
diff --git a/macros/wiener2.sci b/macros/wiener2.sci
new file mode 100644
index 0000000..65c1c07
--- /dev/null
+++ b/macros/wiener2.sci
@@ -0,0 +1,46 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Riddhish Bhalodia
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [outputImg] = wiener2(inputImage,filtsize,sigma)
+// This function is used for 2-D adaptive noise-removal filtering.
+//
+// Calling Sequence
+// [outputImg] = wiener2(inputImage,filtsize,sigma)
+//
+// Parameters
+// inputImage : The input image, grayscale only.
+// filtsize : The filter size.
+// sigma : The additive noise (Gaussian white noise) power. If sigma = 0 then the noise variance is estimated from the data
+// outputImg : The output image, is of the same size and class as the input image
+//
+// Description
+// It lowpass-filters a grayscale image that has been degraded by constant power additive noise.
+//
+// Examples
+// // a simple example
+// a = imread("/images/m1.jpeg");
+// filtsize = 5;
+// sigma = 0;
+// c = wiener2(a, filtsize, sigma);
+//
+// Authors
+// Riddhish Bhalodia
+ inputList = mattolist(inputImage);
+
+ outputList = raw_wiener2(inputList,filtsize,sigma);
+
+ for i=1:size(outputList)
+
+ outputImg(:,:,i)=outputList(i)
+
+ end
+
+endfunction
diff --git a/macros/xyz2double.sci b/macros/xyz2double.sci
new file mode 100644
index 0000000..f05f949
--- /dev/null
+++ b/macros/xyz2double.sci
@@ -0,0 +1,89 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = xyz2double(pstData)
+// This function converts XYZ color values to double.
+//
+// Calling Sequence
+// [output] = xyz2double(pstData)
+//
+// Parameters
+// pstData : list of uint16 or double array that must be real and nonsparse.
+// output : list of converted values.
+//
+// Description
+// Converts an M-by-3 or M-by-N-by-3 array of pstData color values to double. output has the same size as XYZ.
+//
+// Examples
+// // check for boundary level values
+// a = uint16([100 32768 65535]);
+// b = xyz2double(a);
+//
+// Examples
+// // check for boundary level values
+// a = uint16([100 32768 65536]);
+// b = xyz2double(a);
+//
+// Examples
+// // check for lower values
+// a = uint16([1 3 5]);
+// b = xyz2double(a);
+//
+// Examples
+// // error - input should be M by 3 or M by N by 3
+// a = uint16([100 32768]);
+// b = xyz2double(a);
+//
+// Examples
+// // error - input should be M by 3 or M by N by 3
+// a = uint16([100 32 67 56]);
+// b = xyz2double(a);
+//
+// Examples
+// // float value input
+// a = uint16([0.0031 1 2]);
+// b = xyz2double(a);
+//
+// Examples
+// // error - input should be M by 3 or M by N by 3
+// a = uint16([100 32 678]);
+// b = xyz2double(a);
+//
+// Examples
+// // error - input should be M by 3 or M by N by 3
+// a = uint16([100 32768 3244]);
+// b = xyz2double(a);
+//
+// Examples
+// // float value input
+// a = uint16([0.0031 1.56 2.454]);
+// b = xyz2double(a);
+//
+// Examples
+// // error - input is double, no conversion takes place.
+// a = double([9 1 2]);
+// b = xyz2double(a);
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+ pstList = mattolist(pstData)
+
+ out = raw_xyz2double(pstList);
+
+ channels = size(out)
+
+ for i = 1:channels
+ output(:, :, i) = (out(i))
+ end
+
+ output = double(output)
+
+endfunction
diff --git a/macros/xyz2lab.sci b/macros/xyz2lab.sci
new file mode 100644
index 0000000..1a13072
--- /dev/null
+++ b/macros/xyz2lab.sci
@@ -0,0 +1,81 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Ashish Manatosh Barik, Priyanka Hiranandani
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = xyz2lab(vartype)
+// This function converts CIE 1931 XYZ to CIE 1976 L*a*b*.
+//
+// Calling Sequence
+// [output] = xyz2lab(vartype)
+//
+// Parameters
+// varType : list of color values to convert.
+// output : list of converted color values.
+//
+// Description
+// Convert CIE 1931 XYZ to CIE 1976 L*a*b*.
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(0.25, 0.40, 0.10)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(0.29, 0.23, 0.11)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(0.29, 34, 0.10)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(0.25, 0.56, 0.18)
+// xyz2lab(a)
+//
+// Examples
+// // error - input should be M by 3 or M by N by 3
+// a = uint16([100 32 67 56]);
+// b = xyz2double(a);
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(89.25, 89.40, 0.10)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(78, 89, 11)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(0.25, 0.40, 90.67)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(0.76, 0.67, 9.10)
+// xyz2lab(a)
+//
+// Examples
+// // Convert an XYZ color value to L*a*b*
+// a = list(78.25, 34.40, 0.10)
+// xyz2lab(a)
+//
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+// Priyanka Hiranandani, NIT Surat
+ output = raw_xyz2lab(vartype)
+
+endfunction
+
diff --git a/macros/xyz2rgb.sci b/macros/xyz2rgb.sci
new file mode 100644
index 0000000..1042f6e
--- /dev/null
+++ b/macros/xyz2rgb.sci
@@ -0,0 +1,81 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Tess Zacharias, Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = xyz2rgb(data)
+// This function converts CIE 1931 XYZ to RGB.
+//
+// Calling Sequence
+// [output] = xyz2rgb(data)
+//
+// Parameters
+// data : list of color values to convert.
+// output : list of converted color values.
+//
+// Description
+// Converts CIE 1931 XYZ to RGB.
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(0.25, 0.40, 0.10);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(3.25, 5.40, 12.10);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(2, 5, 4);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(0.65, 0.43, 0.19);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(89.25, 23, 0.6710);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(0.2534, 0.4340, 0.143);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(67.25, 34.40, 44.10);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(34.25, 56.40, 223.189);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(0.1, 0.1, 0.1);
+// xyz2rgb(a)
+//
+// Examples
+// // Convert a color value in the XYZ color space to the sRGB color space.
+// a = list(78.25, 34.40, 23.10);
+// xyz2rgb(a)
+//
+// Authors
+// Tess Zacharias
+// Ashish Manatosh Barik
+
+ output = raw_xyz2rgb(data);
+
+endfunction
diff --git a/macros/xyz2uint16.sci b/macros/xyz2uint16.sci
new file mode 100644
index 0000000..6fe7dca
--- /dev/null
+++ b/macros/xyz2uint16.sci
@@ -0,0 +1,80 @@
+// Copyright (C) 2015 - IIT Bombay - FOSSEE
+//
+// This file must be used under the terms of the CeCILL.
+// This source file is licensed as described in the file COPYING, which
+// you should have received as part of this distribution. The terms
+// are also available at
+// http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
+// Author: Tess Zacharias, Ashish Manatosh Barik
+// Organization: FOSSEE, IIT Bombay
+// Email: toolbox@scilab.in
+//
+function [output] = xyz2uint16(pstData)
+// This function converts XYZ color values to uint16.
+//
+// Calling Sequence
+// [output] = xyz2uint16(pstData)
+//
+// Parameters
+// pstData : list of uint16 or double array that must be real and nonsparse
+// output : list of uint16.
+//
+// Description
+// Converts an M-by-3 or M-by-N-by-3 array of XYZ color values to uint16. output has the same size as pstData.
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(0.1, 0.5, 1.0)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(0.14, 0.35, 1.20)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(45.1, 22.5, 45.0)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(200, 334, 2112)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(56.1, 0.5, 1.0)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(0.1, 8378.5, 1.0)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(878.1, 32.5, 1.0)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(0.12323, 0.53434, 1.878)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(44, 55, 1.0)
+// xyz2uint16(a)
+//
+// Examples
+// // Create a double vector specifying a color in XYZ colorspace.
+// a = list(0.134, 55.5, 1.121)
+// xyz2uint16(a)
+//
+// Authors
+// Tess Zacharias
+// Ashish Manatosh Barik
+ output = raw_xyz2uint16(pstData)
+
+endfunction
diff --git a/sci_gateway/PCL_pcwrite.cpp b/sci_gateway/PCL_pcwrite.cpp
new file mode 100644
index 0000000..60a9e54
--- /dev/null
+++ b/sci_gateway/PCL_pcwrite.cpp
@@ -0,0 +1,502 @@
+#include
+#include
+#include
+#include
+#include
+
+using namespace std;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+
+ int PCL_pcwrite(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+
+ // variables required to read argument #1
+ int *piAddr1 = NULL;
+ int intErr1 = 0;
+ double width = 0;
+
+ // variables required to read argument #2
+ int *piAddr2 = NULL;
+ int intErr2 = 0;
+ double height = 0;
+
+ // variables required to read argument #3
+ int *piAddr3 = NULL;
+ int intErr3 = 0;
+ int dense = false;
+
+ // variables required to read argument #4
+ int *piAddr4 = NULL;
+ int intErr4 = 0;
+ int rows1 = 0, cols1 = 0;
+ double* location = NULL;
+
+ // variables required to read argument #5
+ int *piAddr5 = NULL;
+ int intErr5 = 0;
+ double count = 0;
+
+ // variables required to read argument #6
+ int *piAddr6 = NULL;
+ int intErr6 = 0;
+ int rows2 = 0, cols2 = 0;
+ double* rgb = NULL;
+
+ // variables required to read argument #7
+ int *piAddr7 = NULL;
+ int intErr7 = 0;
+ int rows3 = 0, cols3 = 0;
+ double* xlimit = NULL;
+
+ // variables required to read argument #8
+ int *piAddr8 = NULL;
+ int intErr8 = 0;
+ int rows4 = 0, cols4 = 0;
+ double* ylimit = NULL;
+
+ // variables required to read argument #9
+ int *piAddr9 = NULL;
+ int intErr9 = 0;
+ int rows5 = 0, cols5 = 0;
+ double* zlimit = NULL;
+
+ // variables required to read argument #10
+ int *piAddr10 = NULL;
+ int intErr10 = 0;
+ int rows6 = 0, cols6 = 0;
+ int *pilen1 = NULL;
+ char **filename = NULL;
+
+ // variables required to read argument #11
+ int *piAddr11 = NULL;
+ int intErr11 = 0;
+ int rows7 = 0, cols7 = 0;
+ int *pilen2 = NULL;
+ char **fileFormat = NULL;
+
+ // variables required to read argument #12
+ int *piAddr12 = NULL;
+ int intErr12 = 0;
+ int rows8 = 0, cols8 = 0;
+ int *pilen3 = NULL;
+ char **fileType = NULL;
+
+ int s;
+
+ int n= *getNbInputArgument(pvApiCtx);
+
+ // Checking number of input and output arguments (environment variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 10, 12);
+// CheckOutputArgument(pvApiCtx, 0, 0);
+
+
+ // to get the argument #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 1, &piAddr1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr1 = getScalarDouble(pvApiCtx, piAddr1, &width);
+ if(intErr1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 1);
+ return -1;
+ }
+
+ // to get the argument #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr2 = getScalarDouble(pvApiCtx, piAddr2, &height);
+ if(intErr2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+
+ // to get the argument #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarBoolean(pvApiCtx, piAddr3, &dense);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ // to get the argument #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ /* Check that the first input argument is a real matrix (and not complex) */
+ if( !isDoubleType(pvApiCtx, piAddr4) || isVarComplex(pvApiCtx, piAddr4) )
+ {
+ Scierror(999, "point cloud should consist of real values.");
+ return 0;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr4, &rows1, &cols1, &location);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ if(!(rows1==(width*height) && cols1==3))
+ {
+ Scierror(999, "Size error in point-cloud points");
+ }
+
+ // to get the argument #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &count);
+ if(intErr5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 5);
+ return -1;
+ }
+\
+ // to get the argument #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr6, &rows2, &cols2, &rgb);
+ if(sciErr.iErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 6);
+ return -1;
+ }
+
+ // to get the argument #7
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddr7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr7, &rows3, &cols3, &xlimit);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // to get the argument #8
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddr8);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr8, &rows4, &cols4, &ylimit);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // to get the argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddr9);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr9, &rows5, &cols5, &zlimit);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // to get the argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddr10);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ // Check for Argument type
+ if( !isStringType(pvApiCtx, piAddr10))
+ {
+ Scierror(999, "%s: Wrong type of argument #%d. A string is expected.\n");
+ return 0;
+ }
+ // Matrix of Strings
+ sciErr = getMatrixOfString(pvApiCtx, piAddr10, &rows6, &cols6, NULL, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ pilen1 = (int*)malloc(sizeof(int) * rows6 * cols6);
+ // second call to retrieve the length of the string
+ sciErr = getMatrixOfString(pvApiCtx, piAddr10, &rows6, &cols6, pilen1, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ filename = (char**)malloc(sizeof(char*) * rows6 * cols6);
+ filename[0] = (char*)malloc(sizeof(char) * (*pilen1 + 1));
+ // third call to retrieve data
+ sciErr = getMatrixOfString(pvApiCtx, piAddr10, &rows6, &cols6, pilen1, filename);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ string name(filename[0]);
+
+ if(n == 10)
+ {
+
+ // creating PointCloud object
+ pcl::PointCloud cloud;
+
+ cloud.width = width;
+ cloud.height = height;
+ cloud.is_dense = dense;
+ cloud.points.resize (cloud.width * cloud.height);
+
+ // size of filename string
+ s=name.length();
+
+ // checking if the filename suggests .ply or .pcd format
+ if(name.substr(s-3,s-1)=="ply")
+ {
+ sciprint("%d\n",*getNbInputArgument(pvApiCtx));
+ for(size_t i = 0; i < cloud.points.size (); ++i)
+ {
+ cloud.points[i].x = location[i];
+ cloud.points[i].y = location[i + (int)width*(int)height ];
+ cloud.points[i].z = location[i + 2*(int)width*(int)height];
+
+
+ cloud.points[i].r = rgb[i];
+ cloud.points[i].g = rgb[i + (int)width*(int)height ];
+ cloud.points[i].b = rgb[i + 2*(int)width*(int)height];
+ }
+
+ // creates ASCII encoded .ply file as default
+ pcl::io::savePLYFileASCII(name, cloud);
+
+ }
+ else if(name.substr(s-3,s-1)=="pcd")
+ {
+
+ for(size_t i = 0; i < cloud.points.size (); ++i)
+ {
+ cloud.points[i].x = location[i];
+ cloud.points[i].y = location[i + (int)width*(int)height ];
+ cloud.points[i].z = location[i + 2*(int)width*(int)height];
+
+ cloud.points[i].r = rgb[i];
+ cloud.points[i].g = rgb[i + (int)width*(int)height ];
+ cloud.points[i].b = rgb[i + 2*(int)width*(int)height];
+ }
+
+ // creates ASCII encoded .pcd file as default
+ pcl::io::savePCDFileASCII(name, cloud);
+
+ }
+ else
+ {
+ Scierror(999, "wrong value argument #4 passed.");
+ return 0;
+ }
+
+ }
+ else if(n == 12)
+ {
+ // to get the argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr11);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ // Check for Argument type
+ if( !isStringType(pvApiCtx, piAddr11))
+ {
+ Scierror(999, "%s: Wrong type of argument #%d. A string is expected.\n");
+ return 0;
+ }
+ // Matrix of Strings
+ sciErr = getMatrixOfString(pvApiCtx, piAddr11, &rows7, &cols7, NULL, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ pilen2 = (int*)malloc(sizeof(int) * rows7 * cols7);
+ // second call to retrieve the length of the string
+ sciErr = getMatrixOfString(pvApiCtx, piAddr11, &rows7, &cols7, pilen2, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ fileFormat = (char**)malloc(sizeof(char*) * rows7 * cols7);
+ fileFormat[0] = (char*)malloc(sizeof(char) * (*pilen2 + 1));
+ // third call to retrieve data
+ sciErr = getMatrixOfString(pvApiCtx, piAddr11, &rows7, &cols7, pilen2, fileFormat);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ free(pilen2);
+ free(fileFormat);
+ return 0;
+ }
+
+ // to get the argument #12
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddr12);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ // Check for Argument type
+ if( !isStringType(pvApiCtx, piAddr12))
+ {
+ Scierror(999, "%s: Wrong type of argument #%d. A string is expected.\n");
+ return 0;
+ }
+ // Matrix of Strings
+ sciErr = getMatrixOfString(pvApiCtx, piAddr12, &rows8, &cols8, NULL, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ pilen3 = (int*)malloc(sizeof(int) * rows8 * cols8);
+ // second call to retrieve the length of the string
+ sciErr = getMatrixOfString(pvApiCtx, piAddr12, &rows8, &cols8, pilen3, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ fileType = (char**)malloc(sizeof(char*) * rows8 * cols8);
+ fileType[0] = (char*)malloc(sizeof(char) * (*pilen3 + 1));
+ // third call to retrieve data
+ sciErr = getMatrixOfString(pvApiCtx, piAddr12, &rows8, &cols8, pilen3, fileType);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ free(pilen3);
+ free(fileType);
+ return 0;
+ }
+
+ // creating PointCloud object
+ pcl::PointCloud cloud;
+
+ cloud.width = width;
+ cloud.height = height;
+ cloud.is_dense = dense;
+ cloud.points.resize (cloud.width * cloud.height);
+
+ // concatenating
+ strcat(filename[0], ".");
+ strcat(filename[0], fileFormat[0]);
+
+ if(strcmp(fileFormat[0],"ply") == 0)
+ {
+
+ for(size_t i = 0; i < cloud.points.size (); ++i)
+ {
+ cloud.points[i].x = location[i];
+ cloud.points[i].y = location[i + (int)width*(int)height ];
+ cloud.points[i].z = location[i + 2*(int)width*(int)height];
+
+ cloud.points[i].r = rgb[i];
+ cloud.points[i].g = rgb[i + (int)width*(int)height ];
+ cloud.points[i].b = rgb[i + 2*(int)width*(int)height];
+
+ }
+
+ if(strcmp(fileType[0],"ASCII") == 0)
+ {
+ pcl::io::savePLYFileASCII(filename[0], cloud);
+ }
+ else if(strcmp(fileType[0],"Binary") == 0)
+ {
+ pcl::io::savePLYFileBinary(filename[0], cloud);
+ }
+ else
+ {
+ Scierror(999, "wrong value argument #4 passed.");
+ return 0;
+ }
+ }
+ else if(strcmp(fileFormat[0],"pcd") == 0)
+ {
+
+ for(size_t i = 0; i < cloud.points.size (); ++i)
+ {
+ cloud.points[i].x = location[i];
+ cloud.points[i].y = location[i + (int)width*(int)height ];
+ cloud.points[i].z = location[i + 2*(int)width*(int)height];
+
+ cloud.points[i].r = rgb[i];
+ cloud.points[i].g = rgb[i + (int)width*(int)height ];
+ cloud.points[i].b = rgb[i + 2*(int)width*(int)height];
+ }
+
+ if(strcmp(fileType[0],"ASCII") == 0)
+ {
+ pcl::io::savePCDFileASCII(filename[0], cloud);
+ }
+ else if(strcmp(fileType[0],"Binary") == 0)
+ {
+ pcl::io::savePCDFileBinary(filename[0], cloud);
+ }
+ else if(strcmp(fileType[0],"Compressed") == 0)
+ {
+ pcl::io::savePCDFileBinaryCompressed(filename[0], cloud);
+ }
+ else
+ {
+ Scierror(999, "wrong value argument #4 passed.");
+ return 0;
+ }
+ }
+ else
+ {
+ Scierror(999, "wrong value argument #3 passed.");
+ return 0;
+ }
+
+ }
+
+
+ }
+}
diff --git a/sci_gateway/opencv_align.cpp b/sci_gateway/opencv_align.cpp
new file mode 100644
index 0000000..51f98a3
--- /dev/null
+++ b/sci_gateway/opencv_align.cpp
@@ -0,0 +1,392 @@
+/*
+This is the .cpp gateway file for the 'align' scilab function.
+
+OpenCV classes :
+1. Ptr cv::createAlignMTB (int max_bits=6, int exclude_range=4, bool cut=true)
+ // Creates AlignMTB object.
+
+It includes the following OpenCV functions, belonging to the Photo module of OpenCV 3.0.0 :
+1. void process (InputArrayOfArrays src, std::vector< Mat > &dst)
+ // Aligns images.
+
+*/
+
+#include
+#include"opencv2/core/core.hpp"
+#include"opencv2/highgui/highgui.hpp"
+#include"opencv2/opencv.hpp"
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/imgproc/imgproc.hpp"
+#include"opencv2/core/utility.hpp"
+#include
+#include
+#include
+#include "opencv2/photo.hpp"
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_align(char* fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+
+ Mat img1, img2, img3, img4, img5, img6;
+
+ // retrieving images
+ retrieveImage(img1, 5);
+ retrieveImage(img2, 6);
+ retrieveImage(img3, 7);
+
+ // variables required to read argument #1
+ int *piAddr1 = NULL;
+ int intErr1 = 0;
+ double maxBits = 0;
+
+ // variables required to read argument #2
+ int *piAddr2 = NULL;
+ int intErr2 = 0;
+ double excludeRange = 0;
+
+ // variables required to read argument #3
+ int *piAddr3 = NULL;
+ int intErr3 = 0;
+ int cut = true;
+
+ // variables required to read argument #4
+ int *piAddr4 = NULL;
+ int intErr4 = 0;
+ double num = 0;
+
+ // Checking number of input and output arguments (enviromnet variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 7, 10);
+ CheckOutputArgument(pvApiCtx, 3, 5);
+
+ // to get the argument #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 1, &piAddr1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr1 = getScalarDouble(pvApiCtx, piAddr1, &maxBits);
+ if(intErr1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 1);
+ return -1;
+ }
+
+ // to get the argument #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr2 = getScalarDouble(pvApiCtx, piAddr2, &excludeRange);
+ if(intErr2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+
+ // to get the argument #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarBoolean(pvApiCtx, piAddr3, &cut);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ // to get the argument #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &num);
+ if(intErr4)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 4);
+ return -1;
+ }
+
+ vector images;
+ // output vector of aligned images
+ vector dst;
+
+
+ try
+ {
+ // OpenCV functionalities
+
+ // converting the input images to 8-bit and 1 channel images
+ img1.convertTo(img1, CV_8U);
+ img2.convertTo(img2, CV_8U);
+ img3.convertTo(img3, CV_8U);
+
+ // creating the AlignMTB object
+ Ptr model = createAlignMTB (maxBits, excludeRange, cut);
+
+ if(num == 3)
+ {
+ // pushing the images into vector
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+
+ // aligning images
+ model->process(images, dst);
+
+ // to return the output1 image
+ string tempstring1 = type2str(dst[0].type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, dst[0], 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output2 image
+ string tempstring2 = type2str(dst[1].type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, dst[1], 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ // to return the output3 image
+ string tempstring3 = type2str(dst[2].type());
+ char *checker3;
+ checker3 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker3, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker3, dst[2], 3);
+ free(checker3);
+ AssignOutputVariable(pvApiCtx, 3) = nbInputArgument(pvApiCtx) + 3;
+
+ }
+ else if(num == 4)
+ {
+ retrieveImage(img4, 8);
+
+ img4.convertTo(img4, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+ images.push_back(img4);
+
+ model->process(images, dst);
+
+ // to return the output1 image
+ string tempstring1 = type2str(dst[0].type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, dst[0], 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output2 image
+ string tempstring2 = type2str(dst[1].type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, dst[1], 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ // to return the output3 image
+ string tempstring3 = type2str(dst[2].type());
+ char *checker3;
+ checker3 = (char *)malloc(tempstring3.size() + 1);
+ memcpy(checker3, tempstring3.c_str(), tempstring3.size() + 1);
+ returnImage(checker3, dst[2], 3);
+ free(checker3);
+ AssignOutputVariable(pvApiCtx, 3) = nbInputArgument(pvApiCtx) + 3;
+
+ // to return the output4 image
+ string tempstring4 = type2str(dst[3].type());
+ char *checker4;
+ checker4 = (char *)malloc(tempstring4.size() + 1);
+ memcpy(checker4, tempstring4.c_str(), tempstring4.size() + 1);
+ returnImage(checker4, dst[3], 4);
+ free(checker4);
+ AssignOutputVariable(pvApiCtx, 4) = nbInputArgument(pvApiCtx) + 4;
+
+
+
+ }
+ else if(num == 5)
+ {
+ retrieveImage(img4, 8);
+ retrieveImage(img5, 9);
+
+ img4.convertTo(img4, CV_8U);
+ img5.convertTo(img5, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+ images.push_back(img4);
+ images.push_back(img5);
+
+ model->process(images, dst);
+
+ // to return the output1 image
+ string tempstring1 = type2str(dst[0].type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, dst[0], 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output2 image
+ string tempstring2 = type2str(dst[1].type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, dst[1], 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ // to return the output3 image
+ string tempstring3 = type2str(dst[2].type());
+ char *checker3;
+ checker3 = (char *)malloc(tempstring3.size() + 1);
+ memcpy(checker3, tempstring3.c_str(), tempstring3.size() + 1);
+ returnImage(checker3, dst[2], 3);
+ free(checker3);
+ AssignOutputVariable(pvApiCtx, 3) = nbInputArgument(pvApiCtx) + 3;
+
+ // to return the output4 image
+ string tempstring4 = type2str(dst[3].type());
+ char *checker4;
+ checker4 = (char *)malloc(tempstring4.size() + 1);
+ memcpy(checker4, tempstring4.c_str(), tempstring4.size() + 1);
+ returnImage(checker4, dst[3], 4);
+ free(checker4);
+ AssignOutputVariable(pvApiCtx, 4) = nbInputArgument(pvApiCtx) + 4;
+
+ // to return the output5 image
+ string tempstring5 = type2str(dst[4].type());
+ char *checker5;
+ checker5 = (char *)malloc(tempstring5.size() + 1);
+ memcpy(checker5, tempstring5.c_str(), tempstring5.size() + 1);
+ returnImage(checker5, dst[4], 5);
+ free(checker5);
+ AssignOutputVariable(pvApiCtx, 5) = nbInputArgument(pvApiCtx) + 5;
+
+
+ }
+ else if(num == 6)
+ {
+ retrieveImage(img4, 8);
+ retrieveImage(img5, 9);
+ retrieveImage(img6, 10);
+
+ img4.convertTo(img4, CV_8U);
+ img5.convertTo(img5, CV_8U);
+ img6.convertTo(img6, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+ images.push_back(img4);
+ images.push_back(img5);
+ images.push_back(img6);
+
+ model->process(images, dst);
+
+ // to return the output1 image
+ string tempstring1 = type2str(dst[0].type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, dst[0], 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output2 image
+ string tempstring2 = type2str(dst[1].type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, dst[1], 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ // to return the output3 image
+ string tempstring3 = type2str(dst[2].type());
+ char *checker3;
+ checker3 = (char *)malloc(tempstring3.size() + 1);
+ memcpy(checker3, tempstring3.c_str(), tempstring3.size() + 1);
+ returnImage(checker3, dst[2], 3);
+ free(checker3);
+ AssignOutputVariable(pvApiCtx, 3) = nbInputArgument(pvApiCtx) + 3;
+
+ // to return the output4 image
+ string tempstring4 = type2str(dst[3].type());
+ char *checker4;
+ checker4 = (char *)malloc(tempstring4.size() + 1);
+ memcpy(checker4, tempstring4.c_str(), tempstring4.size() + 1);
+ returnImage(checker4, dst[3], 4);
+ free(checker4);
+ AssignOutputVariable(pvApiCtx, 4) = nbInputArgument(pvApiCtx) + 4;
+
+ // to return the output5 image
+ string tempstring5 = type2str(dst[4].type());
+ char *checker5;
+ checker5 = (char *)malloc(tempstring5.size() + 1);
+ memcpy(checker5, tempstring5.c_str(), tempstring5.size() + 1);
+ returnImage(checker5, dst[4], 5);
+ free(checker5);
+ AssignOutputVariable(pvApiCtx, 5) = nbInputArgument(pvApiCtx) + 5;
+
+ // to return the output5 image
+ string tempstring6 = type2str(dst[5].type());
+ char *checker6;
+ checker6 = (char *)malloc(tempstring6.size() + 1);
+ memcpy(checker6, tempstring6.c_str(), tempstring6.size() + 1);
+ returnImage(checker6, dst[5], 6);
+ free(checker6);
+ AssignOutputVariable(pvApiCtx, 6) = nbInputArgument(pvApiCtx) + 6;
+
+
+
+ }
+ else
+ {
+ Scierror(999, "Wrong argument #4, Number of input images. \n", 1);
+ }
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+}
diff --git a/sci_gateway/opencv_applyTransformer.cpp b/sci_gateway/opencv_applyTransformer.cpp
new file mode 100644
index 0000000..a12e032
--- /dev/null
+++ b/sci_gateway/opencv_applyTransformer.cpp
@@ -0,0 +1,233 @@
+/*
+This is the .cpp gateway file for the 'applyTransformer' scilab function.
+
+OpenCV classes :
+1. Ptr< AffineTransformer > cv::createAffineTransformer (bool fullAffine)
+2. Ptr< ThinPlateSplineShapeTransformer > cv::createThinPlateSplineShapeTransformer (double regularizationParameter=0)
+
+It includes the following OpenCV functions, belonging to the Shape Distance and Matching module of OpenCV 3.0.0 :
+1. estimateTransformation (InputArray transformingShape, InputArray targetShape, std::vector< DMatch > &matches)
+ Estimate the transformation parameters of the current transformer algorithm, based on point matches.
+2. warpImage (InputArray transformingImage, OutputArray output, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, const Scalar &borderValue=Scalar())
+ Apply a transformation, given a pre-estimated transformation parameters, to an Image.
+
+*/
+
+#include
+#include"opencv2/core/core.hpp"
+#include"opencv2/highgui/highgui.hpp"
+#include"opencv2/opencv.hpp"
+#include"opencv2/shape/shape_transformer.hpp"
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/imgproc/imgproc.hpp"
+#include"opencv2/features2d/features2d.hpp"
+#include"opencv2/xfeatures2d.hpp"
+#include"opencv2/core/utility.hpp"
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+using namespace cv::xfeatures2d;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_applyTransformer(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+ int i;
+
+
+ // variables required to read argument #3
+ int *piAddr3 = NULL;
+ int intErr3 = 0;
+ double typeOfMethod = 0;
+
+ // variables required to read argument #4
+ int *piAddr4 = NULL;
+ int intErr4 = 0;
+ double hessianThreshold = 0;
+
+ // variables required to read argument #5
+ int *piAddr5 = NULL;
+ int intErr5 = 0;
+ double rpTPS = 0;
+
+ // variables required to read argument #6
+ int *piAddr6 = NULL;
+ int intErr6 = 0;
+ int sfAffine = false;
+
+ // Checking number of input and output arguments (enviromnet variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 6, 6);
+ CheckOutputArgument(pvApiCtx, 1, 1);
+
+ Mat img1, img2;
+
+ // retrieving the input images
+ int a = retrieveImage(img1, 1);
+ if(a == 0)
+ {
+ sciprint("Error while retrieving the image1.");
+ return 0;
+ }
+ int b = retrieveImage(img2, 2);
+ if(b == 0)
+ {
+ sciprint("Error while retrieving the image2.");
+ }
+
+
+ // to get the argument #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &typeOfMethod);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ // to get the argument #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &hessianThreshold);
+ if(intErr4)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 4);
+ return -1;
+ }
+
+ // to get the argument #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &rpTPS);
+ if(intErr5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 5);
+ return -1;
+ }
+
+ // to get the argument #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarBoolean(pvApiCtx, piAddr6, &sfAffine);
+ if(intErr6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 6);
+ return -1;
+ }
+
+
+ Mat img_matches;
+ Mat image1, image2;
+
+ try{
+
+ // OpenCV functionalities
+
+ // converting the input images to 8-bit and 1 channel images
+ img1.convertTo(img1, CV_8U);
+ cvtColor(img1, image1, CV_BGR2GRAY);
+
+ img2.convertTo(img2, CV_8U);
+ cvtColor(img2, image2, CV_BGR2GRAY);
+
+ // detecting keypoints & computing descriptors
+ Ptr surf = SURF::create(hessianThreshold);
+
+ vector keypoints1, keypoints2;
+ Mat descriptors1, descriptors2;
+
+ surf->detectAndCompute(image1, Mat(), keypoints1, descriptors1);
+ surf->detectAndCompute(image2, Mat(), keypoints2, descriptors2);
+
+ // matching descriptors
+ BFMatcher matcher(surf->defaultNorm());
+ vector matches;
+ matcher.match(descriptors1, descriptors2, matches);
+
+ // extract points
+ vector pts1, pts2;
+ for (size_t ii=0; ii model = createAffineTransformer(sfAffine);
+
+ model->estimateTransformation(pts1, pts2, matches);
+ model->warpImage(image2, image2);
+ }
+ else if(typeOfMethod == 2)
+ {
+ // TPS shape transformation
+
+ Ptr model = createThinPlateSplineShapeTransformer(rpTPS);
+
+ model->estimateTransformation(pts1, pts2, matches);
+ model->warpImage(image2, image2);
+ }
+ else
+ {
+ // incorrect input parameter of type-of-method
+
+ Scierror(999, "Wrong input for Argument #3. Use '1' for 'Affine' and '2' for 'TPS' \n");
+ return 0;
+ }
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+
+ // to return the output transformed image
+ string tempstring1 = type2str(image2.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, image2, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+}
+
diff --git a/sci_gateway/opencv_bwLookUp.cpp b/sci_gateway/opencv_bwLookUp.cpp
new file mode 100644
index 0000000..d1ab524
--- /dev/null
+++ b/sci_gateway/opencv_bwLookUp.cpp
@@ -0,0 +1,187 @@
+/*
+ * bwlookup
+ *
+ * 2*2 and 3*3 non linear filtering of a binary image based on a lookup table -
+ *lut which is taken as input from the user
+ *
+ */
+
+// Created by Samiran Roy, mail: samiranroy@cse.iitb.ac.in
+// An implementation of bwlookup
+// Usage:
+// bwlookup(I,lut)
+
+// I is the input binary grayscale image. If the image is not binary, it is
+// converted to one.
+// lut is a 1*16 double vector [2*2 filtering], or a [1*512] double vector [3*3
+// filtering]
+// The indexing method used is the same as Matlab bwlookup:
+// http://in.mathworks.com/help/images/ref/bwlookup.html
+
+// Known Changes from Matlab:
+/*
+ * 1) None, as of now
+ */
+
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "sciprint.h"
+ #include "../common.h"
+
+ int opencv_bwLookUp(char* fname, unsigned long fname_len)
+ {
+ SciErr sciErr;
+ int intErr = 0;
+
+ int* piAddr = NULL;
+
+ // checking input argument
+ CheckInputArgument(pvApiCtx, 2, 2);
+ CheckOutputArgument(pvApiCtx, 1, 1);
+
+ Mat image;
+ retrieveImage(image, 1);
+
+ if (image.channels() > 1)
+ {
+ Scierror(999, "The image must be grayscale.");
+ return -1;
+ }
+
+ double* lut;
+ int iRows = 0, iCols = 0;
+
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return -1;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr, &iRows, &iCols, &lut);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return -1;
+ }
+ // Error Checking
+ if (!((iCols == 16) || (iCols == 512)))
+ {
+ Scierror(999, "Expected LUT (argument 2) to have 16 or 512 elements.\n");
+ return -1;
+ }
+ if (iRows != 1)
+ {
+ Scierror(999, "Expected input number 2, LUT, to be a vector.\n");
+ return -1;
+ }
+
+ if (image.channels() != 1)
+ {
+ Scierror(999, "Expected input number 1, A, to be two-dimensional.\n");
+ return -1;
+ }
+
+ // temporary copy of image to perform computation
+ // converting the image to a binary image
+ Mat tempimg = Mat::zeros(image.size(), CV_8U);
+
+ for (int i = 0; i < image.rows; i++)
+ {
+ for (int j = 0; j < image.cols; j++)
+ {
+ if (image.at(i, j) != 0)
+ tempimg.at(i, j) = 1;
+ }
+ }
+
+ // pad the temporary copy of the image with zeroes to handle border cases
+ copyMakeBorder(tempimg, tempimg, 1, 1, 1, 1, BORDER_CONSTANT, 0);
+
+ // output images
+ Mat new_image = Mat::zeros(image.size(), CV_32F);
+
+ // temporary variables
+ int ii, jj;
+ int index;
+
+ try
+ {
+
+ // 2*2 filtering
+ if (iCols == 16)
+ {
+ for (int i = 0; i < image.rows; i++)
+ {
+ for (int j = 0; j < image.cols; j++)
+ {
+ ii = i + 1;
+ jj = j + 1;
+
+ index = tempimg.at(ii, jj) * 1 + tempimg.at(ii, jj + 1) * 2 + tempimg.at(ii + 1, jj) * 4 + tempimg.at(ii + 1, jj + 1) * 8;
+
+ new_image.at(i, j) = lut[ 0, index ];
+ }
+ }
+ }
+
+ // 3*3 filtering
+ if (iCols == 512)
+ {
+ for (int i = 0; i < image.rows; i++)
+ {
+ for (int j = 0; j < image.cols; j++)
+ {
+ ii = i + 1;
+ jj = j + 1;
+
+ index = tempimg.at(ii - 1, jj - 1) * 1 +
+ tempimg.at(ii - 1, jj) * 2 +
+ tempimg.at(ii - 1, jj + 1) * 4 +
+ tempimg.at(ii, jj - 1) * 8 +
+ tempimg.at(ii, jj) * 16 +
+ tempimg.at(ii, jj + 1) * 32 +
+ tempimg.at(ii + 1, jj - 1) * 64 + tempimg.at(ii + 1, jj) * 128 +
+ tempimg.at(ii + 1, jj + 1) * 256;
+
+ new_image.at(i, j) = lut[ 0, index ];
+ }
+ }
+ }
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to return the output image
+ int temp = nbInputArgument(pvApiCtx) + 1;
+ string tempstring = type2str(new_image.type());
+ char* checker;
+ checker = (char*)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker, new_image, 1);
+ free(checker);
+
+ // Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+ // Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+ return 0;
+ }
+/* ==================================================================== */
+}
diff --git a/sci_gateway/opencv_contourArea.cpp b/sci_gateway/opencv_contourArea.cpp
new file mode 100644
index 0000000..90254ba
--- /dev/null
+++ b/sci_gateway/opencv_contourArea.cpp
@@ -0,0 +1,112 @@
+/* ==================================================================== */
+/* Author :Priyanka Hiranandani NIT Surat, Ashish Manatosh Barik NIT Rourkela */
+/* ==================================================================== */
+/* Syntax : return_area=contourarea(InputArray contour, bool oriented); */
+/* ==================================================================== */
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+//#include
+
+using namespace cv;
+using namespace std;
+ extern "C"
+ {
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+
+ int opencv_contourArea(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+
+ // variables required to read argument #1
+ int iRows = 0;
+ int iCols = 0;
+ int* piAddr1 = NULL;
+ double *pstData = NULL;
+ int error;
+
+ // variables required to read argument #2
+ int *piAddr2 = NULL;
+ int intErr2 = 0;
+ int orientation = false;
+
+ //checking input argument
+ CheckInputArgument(pvApiCtx,2,2);
+ //checking output argument
+ CheckOutputArgument(pvApiCtx, 1, 1);
+ //for first argument
+
+ // get Address of first input
+ sciErr =getVarAddressFromPosition(pvApiCtx,1,&piAddr1);
+ //check for any error
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrieve input array
+ //SciErr getMatrixOfDouble(void* _pvCtx, int* _piAddress, int* _piRows, int* _piCols, double** _pdblReal)
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr1, &iRows, &iCols,&pstData);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ int k=0;
+ vector contours;
+ for(int i=0;i
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include
+ #include "../common.h"
+
+ int opencv_copyMakeBorder(char *fname, unsigned long fname_len)
+ {
+
+ SciErr sciErr;
+ int intErr=0;
+ int iRows=0,iCols=0;
+ int *piLen = NULL;
+ int *piAddr = NULL;
+ int *piAddrNew = NULL;
+ int *piAddr2 = NULL;
+ int *piAddr3 = NULL;
+ int *piAddr4 = NULL;
+ int *piAddr5 = NULL;
+ int *piAddr6 = NULL;
+ int *piAddr7 = NULL;
+ int i,j,k;
+ char **borderType;
+ double top, bottom, right, left;
+ double *value = NULL;
+
+ //checking input argument
+ CheckInputArgument(pvApiCtx, 6, 7);
+ CheckOutputArgument(pvApiCtx, 1, 1) ;
+
+ Mat image;
+ retrieveImage(image,1);
+
+ //for top
+ sciErr = getVarAddressFromPosition(pvApiCtx,2,&piAddr2);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr2, &top);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+
+ //for bottom
+ sciErr = getVarAddressFromPosition(pvApiCtx,3,&piAddr3);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr3, &bottom);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ //for left
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddr4);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr4, &left);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 4);
+ return -1;
+ }
+
+ //for bottom
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddr5);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr5, &right);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 5);
+ return -1;
+ }
+
+
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddr6);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Now, we will retrieve the string from the input parameter. For this, we will require 3 calls
+ //first call to retrieve dimensions
+ sciErr = getMatrixOfString(pvApiCtx, piAddr6, &iRows, &iCols, NULL, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ piLen = (int*)malloc(sizeof(int) * iRows * iCols);
+ //second call to retrieve length of each string
+ sciErr = getMatrixOfString(pvApiCtx, piAddr6, &iRows, &iCols, piLen, NULL);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ borderType = (char**)malloc(sizeof(char*) * iRows * iCols);
+ for(i = 0 ; i < iRows * iCols ; i++)
+ borderType[i] = (char*)malloc(sizeof(char) * (piLen[i] + 1));//+ 1 for null termination
+ //third call to retrieve data
+ sciErr = getMatrixOfString(pvApiCtx, piAddr6, &iRows, &iCols, piLen, borderType);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ //for array of Scalar object
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddr7);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr7, &iRows, &iCols ,&value);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ Mat new_image(Size(image.cols+left+right, image.rows+top+bottom), image.type());
+
+ try
+ {
+
+ if(strcmp(borderType[0], "BORDER_CONSTANT") == 0 && iCols == 4 )
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_CONSTANT, Scalar(value[0], value[1], value[2], value[3]));
+ else if(strcmp(borderType[0], "BORDER_CONSTANT") == 0 && iCols == 3)
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_CONSTANT, Scalar(value[0], value[1], value[2]));
+ else if(strcmp(borderType[0], "BORDER_DEFAULT") == 0)
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_DEFAULT);
+ else if(strcmp(borderType[0], "BORDER_REPLICATE") == 0)
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_REPLICATE);
+ else if(strcmp(borderType[0], "BORDER_REFLECT") == 0)
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_REFLECT);
+ else if(strcmp(borderType[0], "BORDER_REFLECT_101") == 0)
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_REFLECT_101);
+ else if(strcmp(borderType[0], "BORDER_WRAP") == 0)
+ copyMakeBorder(image, new_image, top, bottom, left, right, BORDER_WRAP);
+
+ free(borderType);
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ string tempstring = type2str(new_image.type());
+ char *checker;
+ checker = (char *)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker,new_image,1);
+ free(checker);
+
+ //Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+ //Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+ return 0;
+
+ }
+/* ==================================================================== */
+}
diff --git a/sci_gateway/opencv_detectBRIEFDescriptors.cpp b/sci_gateway/opencv_detectBRIEFDescriptors.cpp
new file mode 100644
index 0000000..4fcd43d
--- /dev/null
+++ b/sci_gateway/opencv_detectBRIEFDescriptors.cpp
@@ -0,0 +1,312 @@
+// Authors
+// Ashish Manatosh Barik, Shubham Lohakare
+//
+#include
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/highgui.hpp"
+#include"opencv2/imgproc.hpp"
+#include"opencv2/features2d.hpp"
+#include"opencv2/xfeatures2d.hpp"
+#include"opencv2/xfeatures2d/nonfree.hpp"
+#include
+#include"opencv2/core/core.hpp"
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+using namespace cv::xfeatures2d;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include<localization.h>
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_detectBRIEFDescriptors(char* fname, unsigned long fname_len)
+ {
+
+ // error management variable
+ SciErr sciErr;
+
+ // Checking number of input and output arguments (enviromnet variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 1, 8);
+ CheckOutputArgument(pvApiCtx, 1, 5);
+
+ // input image
+ Mat img;
+
+ // to retrieve the input image from the hypermat that is passed in the scilab function
+ retrieveImage(img, 1);
+
+ // variables required to read varDouble #1
+ int *piAddrVar1 = NULL;
+ int intErrVar1 = 0;
+ double varDouble1 = 0;
+
+ // variables required to read varDouble #2
+ int *piAddrVar2 = NULL;
+ int intErrVar2 = 0;
+ double varDouble2 = 0;
+
+ // variables required to read varDouble #3
+ int *piAddrVar3 = NULL;
+ int intErrVar3 = 0;
+ double varDouble3 = 0;
+
+ // variables required to read varDouble #4
+ int *piAddrVar4 = NULL;
+ int intErrVar4 = 0;
+ double varDouble4 = 0;
+
+ // variables required to read varDouble #5
+ int *piAddrVar5 = NULL;
+ int intErrVar5 = 0;
+ double varDouble5 = 0;
+
+ // variables required to read varDouble #6
+ int *piAddrVar6 = NULL;
+ int intErrVar6 = 0;
+ double varDouble6 = 0;
+
+ // variables required to read varBool #1
+ int *piAddrVar7 = NULL;
+ int intErrVar7 = 0;
+ int varBool1 = true;
+
+ // to get the number of input argument passed in the scilab function
+ int n = *getNbInputArgument(pvApiCtx);
+
+ // keypoints of the input image
+ vector<KeyPoint> keypoints;
+
+ // image used after converting to grayscale image
+ Mat image;
+ // descriptors of the input image
+ Mat descriptors;
+
+ // matrix used to output the descriptors extracted from the input image
+ double *featureVector = NULL;
+ int feature_rows=0;
+ int feature_cols=0;
+ double numBits = 0;
+ double numFeatures = 0;
+
+ // matrix used to output the keypoints extracted from the input image
+ double *LocationData = NULL;
+
+ try
+ {
+ // OpenCV functionalities
+
+ // converting the input images to 8-bit and 1 channel images
+ img.convertTo(img, CV_8U);
+ cvtColor(img, image, CV_BGR2GRAY);
+
+ if(n == 1)
+ {
+ // using default OpenCV function values
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create();
+
+ star->detect(image, keypoints);
+
+ // object
+ Ptr<BriefDescriptorExtractor> model = cv::xfeatures2d::BriefDescriptorExtractor::create();
+
+ // computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+ }
+ else if(n == 8)
+ {
+ // to get varDouble #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddrVar1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar1 = getScalarDouble(pvApiCtx, piAddrVar1, &varDouble1);
+ if(intErrVar1)
+ {
+ return intErrVar1;
+ }
+
+ // to get varDouble #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddrVar2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar2 = getScalarDouble(pvApiCtx, piAddrVar2, &varDouble2);
+ if(intErrVar2)
+ {
+ return intErrVar2;
+ }
+
+ // to get varDouble #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddrVar3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar3 = getScalarDouble(pvApiCtx, piAddrVar3, &varDouble3);
+ if(intErrVar3)
+ {
+ return intErrVar3;
+ }
+
+ // to get varDouble #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddrVar4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar4 = getScalarDouble(pvApiCtx, piAddrVar4, &varDouble4);
+ if(intErrVar4)
+ {
+ return intErrVar4;
+ }
+
+ // to get varDouble #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddrVar5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar5 = getScalarDouble(pvApiCtx, piAddrVar5, &varDouble5);
+ if(intErrVar5)
+ {
+ return intErrVar5;
+ }
+
+ // to get varDouble #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddrVar6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar6 = getScalarDouble(pvApiCtx, piAddrVar6, &varDouble6);
+ if(intErrVar6)
+ {
+ return intErrVar6;
+ }
+
+ // to get varBool #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrVar7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar7 = getScalarBoolean(pvApiCtx, piAddrVar7, &varBool1);
+ if(intErrVar7)
+ {
+ return intErrVar7;
+ }
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create(varDouble1, varDouble2, varDouble3, varDouble4, varDouble5);
+
+ star->detect(image, keypoints);
+
+ // object
+ Ptr<BriefDescriptorExtractor> model = cv::xfeatures2d::BriefDescriptorExtractor::create(varDouble6, varBool1);
+
+ //computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+ }
+ else
+ {
+ Scierror(999, "wrong value argument #2 passed.");
+ return 0;
+ }
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to pass the computed desciptors as output in the form of a matrix
+ numBits = descriptors.size[1];
+ numFeatures = descriptors.size[0];
+ featureVector = (double*)malloc(sizeof(double)*descriptors.size[0]*descriptors.size[1]);
+ for( int i=0; i<descriptors.size[0]; i++)
+ {
+ for( int j=0; j<descriptors.size[1]; j++)
+ {
+ featureVector[i + j*descriptors.size[0]] = double(descriptors.at<uchar>(i,j));
+ }
+ }
+ feature_rows = descriptors.size[0];
+ feature_cols = descriptors.size[1];
+
+ // to pass the detected keypoints as output in the form of a matrix
+ int size = keypoints.size();
+ LocationData = (double *)malloc(sizeof(double) * size * 2);
+ for(int i = 0; i < size; i++)
+ {
+ LocationData[i] = keypoints[i].pt.x;
+ LocationData[i + size] = keypoints[i].pt.y;
+ }
+
+ // descriptor
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 1, feature_rows, feature_cols, featureVector);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // descriptor bits
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 2, 1, 1, &numBits);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of descriptors
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 3, 1, 1, &numFeatures);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // keypoints
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 4, size, 2, LocationData);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of keypoints
+ createScalarInteger32(pvApiCtx,nbInputArgument(pvApiCtx) + 5, size);
+
+ // to return output to scilab
+ for(int i=1;i<=5;i++)
+ {
+ AssignOutputVariable(pvApiCtx, i) = nbInputArgument(pvApiCtx) + i;
+ }
+
+ // to commit the new variables to the Scilab engine
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+
+}
diff --git a/sci_gateway/opencv_detectDAISYDescriptors.cpp b/sci_gateway/opencv_detectDAISYDescriptors.cpp
new file mode 100644
index 0000000..d8f49e1
--- /dev/null
+++ b/sci_gateway/opencv_detectDAISYDescriptors.cpp
@@ -0,0 +1,618 @@
+// Authors
+// Ashish Manatosh Barik, Shubham Lohakare
+//
+#include
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/highgui.hpp"
+#include"opencv2/imgproc.hpp"
+#include"opencv2/features2d.hpp"
+#include"opencv2/xfeatures2d.hpp"
+#include"opencv2/xfeatures2d/nonfree.hpp"
+#include
+#include"opencv2/core/core.hpp"
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+using namespace cv::xfeatures2d;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include<localization.h>
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_detectDAISYDescriptors(char* fname, unsigned long fname_len)
+ {
+
+ // error management variable
+ SciErr sciErr;
+
+ // Checking number of input and output arguments (enviromnet variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 1, 14);
+ CheckOutputArgument(pvApiCtx, 1, 5);
+
+ // input image
+ Mat img;
+
+ // to retrieve the input image from the hypermat that is passed in the scilab function
+ retrieveImage(img, 1);
+
+ // variables required to read varDouble #1
+ int *piAddrVar1 = NULL;
+ int intErrVar1 = 0;
+ double varDouble1 = 0;
+
+ // variables required to read varDouble #2
+ int *piAddrVar2 = NULL;
+ int intErrVar2 = 0;
+ double varDouble2 = 0;
+
+ // variables required to read varDouble #3
+ int *piAddrVar3 = NULL;
+ int intErrVar3 = 0;
+ double varDouble3 = 0;
+
+ // variables required to read varDouble #4
+ int *piAddrVar4 = NULL;
+ int intErrVar4 = 0;
+ double varDouble4 = 0;
+
+ // variables required to read varDouble #5
+ int *piAddrVar5 = NULL;
+ int intErrVar5 = 0;
+ double varDouble5 = 0;
+
+ // variables required to read varDouble #6
+ int *piAddrVar6 = NULL;
+ int intErrVar6 = 0;
+ double varDouble6 = 0;
+
+ // variables required to read varDouble #7
+ int *piAddrVar7 = NULL;
+ int intErrVar7 = 0;
+ double varDouble7 = 0;
+
+ // variables required to read varDouble #8
+ int *piAddrVar8 = NULL;
+ int intErrVar8 = 0;
+ double varDouble8 = 0;
+
+ // variables required to read varDouble #9
+ int *piAddrVar9 = NULL;
+ int intErrVar9 = 0;
+ double varDouble9 = 0;
+
+ // variables required to read varDouble #10
+ int *piAddrVar10 = NULL;
+ int intErrVar10 = 0;
+ double varDouble10 = 0;
+
+ // variable required to read varMatrix #1
+ int *piAddrVar11 = NULL;
+ int intErrVar11 = 0;
+ int r = 0, c = 0;
+ double *hMat;
+
+ // variables required to read varBool #1
+ int *piAddrVar12 = NULL;
+ int intErrVar12 = 0;
+ int varBool1 = true;
+
+ // variables required to read varBool #2
+ int *piAddrVar13 = NULL;
+ int intErrVar13 = 0;
+ int varBool2 = false;
+
+ // to get the number of input argument passed in the scilab function
+ int n = *getNbInputArgument(pvApiCtx);
+
+ // keypoints of the input image
+ vector<KeyPoint> keypoints;
+
+ // image used after converting to grayscale image
+ Mat image;
+ // descriptors of the input image
+ Mat descriptors;
+
+ // matrix used to output the descriptors extracted from the input image
+ double *featureVector = NULL;
+ int feature_rows=0;
+ int feature_cols=0;
+ double numBits = 0;
+ double numFeatures = 0;
+
+ // matrix used to output the keypoints extracted from the input image
+ double *LocationData = NULL;
+
+ try
+ {
+ // OpenCV functionalities
+
+ // converting the input images to 8-bit and 1 channel images
+ img.convertTo(img, CV_8U);
+ cvtColor(img, image, CV_BGR2GRAY);
+
+
+ if(n == 1)
+ {
+ // using default OpenCV function values
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create();
+
+ star->detect(image, keypoints);
+
+ // object
+ Ptr<DAISY> model = cv::xfeatures2d::DAISY::create();//using default value for H
+
+ // computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+ }
+ else if(n == 13)
+ {
+ // to get varDouble #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddrVar1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar1 = getScalarDouble(pvApiCtx, piAddrVar1, &varDouble1);
+ if(intErrVar1)
+ {
+ return intErrVar1;
+ }
+
+ // to get varDouble #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddrVar2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar2 = getScalarDouble(pvApiCtx, piAddrVar2, &varDouble2);
+ if(intErrVar2)
+ {
+ return intErrVar2;
+ }
+
+ // to get varDouble #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddrVar3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar3 = getScalarDouble(pvApiCtx, piAddrVar3, &varDouble3);
+ if(intErrVar3)
+ {
+ return intErrVar3;
+ }
+
+ // to get varDouble #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddrVar4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar4 = getScalarDouble(pvApiCtx, piAddrVar4, &varDouble4);
+ if(intErrVar4)
+ {
+ return intErrVar4;
+ }
+
+ // to get varDouble #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddrVar5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar5 = getScalarDouble(pvApiCtx, piAddrVar5, &varDouble5);
+ if(intErrVar5)
+ {
+ return intErrVar5;
+ }
+
+ // to get varDouble #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddrVar6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar6 = getScalarDouble(pvApiCtx, piAddrVar6, &varDouble6);
+ if(intErrVar6)
+ {
+ return intErrVar6;
+ }
+
+ // to get varDouble #7
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrVar7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar7 = getScalarDouble(pvApiCtx, piAddrVar7, &varDouble7);
+ if(intErrVar7)
+ {
+ return intErrVar7;
+ }
+
+
+ // to get varDouble #8
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddrVar8);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar8 = getScalarDouble(pvApiCtx, piAddrVar8, &varDouble8);
+ if(intErrVar8)
+ {
+ return intErrVar8;
+ }
+
+ // to get varDouble #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddrVar9);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar9 = getScalarDouble(pvApiCtx, piAddrVar9, &varDouble9);
+ if(intErrVar9)
+ {
+ return intErrVar9;
+ }
+
+
+ // to get varDouble #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddrVar10);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar10 = getScalarDouble(pvApiCtx, piAddrVar10, &varDouble10);
+ if(intErrVar10)
+ {
+ return intErrVar10;
+ }
+
+ // to get varBool #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddrVar12);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar12 = getScalarBoolean(pvApiCtx, piAddrVar12, &varBool1);
+ if(intErrVar12)
+ {
+ return intErrVar12;
+ }
+
+ // to get varBool #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddrVar13);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar13 = getScalarBoolean(pvApiCtx, piAddrVar13, &varBool2);
+ if(intErrVar13)
+ {
+ return intErrVar13;
+ }
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create(varDouble1, varDouble2, varDouble3, varDouble4, varDouble5);
+
+ star->detect(image, keypoints);
+
+
+ // object
+ Ptr<DAISY> model = cv::xfeatures2d::DAISY::create((float)varDouble6, varDouble7, varDouble8, varDouble9, varDouble10, Mat(), varBool1, varBool2); //using user given value for H
+
+ // computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+ }
+ else if(n == 14)
+ {
+ // to get varDouble #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddrVar1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar1 = getScalarDouble(pvApiCtx, piAddrVar1, &varDouble1);
+ if(intErrVar1)
+ {
+ return intErrVar1;
+ }
+
+ // to get varDouble #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddrVar2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar2 = getScalarDouble(pvApiCtx, piAddrVar2, &varDouble2);
+ if(intErrVar2)
+ {
+ return intErrVar2;
+ }
+
+ // to get varDouble #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddrVar3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar3 = getScalarDouble(pvApiCtx, piAddrVar3, &varDouble3);
+ if(intErrVar3)
+ {
+ return intErrVar3;
+ }
+
+ // to get varDouble #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddrVar4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar4 = getScalarDouble(pvApiCtx, piAddrVar4, &varDouble4);
+ if(intErrVar4)
+ {
+ return intErrVar4;
+ }
+
+ // to get varDouble #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddrVar5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar5 = getScalarDouble(pvApiCtx, piAddrVar5, &varDouble5);
+ if(intErrVar5)
+ {
+ return intErrVar5;
+ }
+
+ // to get varDouble #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddrVar6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar6 = getScalarDouble(pvApiCtx, piAddrVar6, &varDouble6);
+ if(intErrVar6)
+ {
+ return intErrVar6;
+ }
+
+ // to get varDouble #7
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrVar7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar7 = getScalarDouble(pvApiCtx, piAddrVar7, &varDouble7);
+ if(intErrVar7)
+ {
+ return intErrVar7;
+ }
+
+
+ // to get varDouble #8
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddrVar8);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar8 = getScalarDouble(pvApiCtx, piAddrVar8, &varDouble8);
+ if(intErrVar8)
+ {
+ return intErrVar8;
+ }
+
+ // to get varDouble #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddrVar9);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar9 = getScalarDouble(pvApiCtx, piAddrVar9, &varDouble9);
+ if(intErrVar9)
+ {
+ return intErrVar9;
+ }
+
+
+ // to get varDouble #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddrVar10);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar10 = getScalarDouble(pvApiCtx, piAddrVar10, &varDouble10);
+ if(intErrVar10)
+ {
+ return intErrVar10;
+ }
+
+ // reading homographic matrix
+ /* get Address of inputs */
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddrVar11);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ /* Check that the first input argument is a real matrix (and not complex) */
+ if( !isDoubleType(pvApiCtx, piAddrVar11) || isVarComplex(pvApiCtx, piAddrVar11) )
+ {
+ Scierror(999, " Wrong type for input argument #%d: A real matrix expected.\n", fname, 16);
+ return 0;
+ }
+ /* get matrix */
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddrVar11, &r, &c, &hMat);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ if(!(r==3 && c==3))
+ {
+ Scierror(999, " Wrong size for input argument #%d: A 3x3 real matrix expected.\n", fname, 16);
+ }
+ /*declare vector*/
+ vector<double> Homography (r*c);
+
+ for(int i = 0; i < r; i++)
+ {
+ for(int j = 0; j < c; j++)
+ {
+ Homography.at(i*c + j) = hMat[i + j*r];
+
+ }
+ }
+
+ // to get varBool #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddrVar12);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar12 = getScalarBoolean(pvApiCtx, piAddrVar12, &varBool1);
+ if(intErrVar12)
+ {
+ return intErrVar12;
+ }
+
+ // to get varBool #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 14, &piAddrVar13);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar13 = getScalarBoolean(pvApiCtx, piAddrVar13, &varBool2);
+ if(intErrVar13)
+ {
+ return intErrVar13;
+ }
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create(varDouble1, varDouble2, varDouble3, varDouble4, varDouble5);
+
+ star->detect(image, keypoints);
+
+
+ // object
+ Ptr<DAISY> model = cv::xfeatures2d::DAISY::create((float)varDouble6, varDouble7, varDouble8, varDouble9, varDouble10, Homography , varBool1, varBool2); //using user given value for H
+
+ // computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+
+ }
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to pass the computed desciptors as output in the form of a matrix
+ numBits = descriptors.size[1];
+ numFeatures = descriptors.size[0];
+ featureVector = (double*)malloc(sizeof(double)*descriptors.size[0]*descriptors.size[1]);
+ for( int i=0; i<descriptors.size[0]; i++)
+ {
+ for( int j=0; j<descriptors.size[1]; j++)
+ {
+ featureVector[i + j*descriptors.size[0]] = double(descriptors.at<float>(i,j));
+ }
+ }
+ feature_rows = descriptors.size[0];
+ feature_cols = descriptors.size[1];
+
+ // to pass the detected keypoints as output in the form of a matrix
+ int size = keypoints.size();
+ LocationData = (double *)malloc(sizeof(double) * size * 2);
+ for(int i = 0; i < size; i++)
+ {
+ LocationData[i] = keypoints[i].pt.x;
+ LocationData[i + size] = keypoints[i].pt.y;
+ }
+
+ // descriptor
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 1, feature_rows, feature_cols, featureVector);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // descriptor bits
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 2, 1, 1, &numBits);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of descriptors
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 3, 1, 1, &numFeatures);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // keypoints
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 4, size, 2, LocationData);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of keypoints
+ createScalarInteger32(pvApiCtx,nbInputArgument(pvApiCtx) + 5, size);
+
+ // to return output to scilab
+ for(int i=1;i<=5;i++)
+ {
+ AssignOutputVariable(pvApiCtx, i) = nbInputArgument(pvApiCtx) + i;
+ }
+
+ // to commit the new variables to the Scilab engine
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+
+}
diff --git a/sci_gateway/opencv_detectLATCHDescriptors.cpp b/sci_gateway/opencv_detectLATCHDescriptors.cpp
new file mode 100644
index 0000000..9f01c84
--- /dev/null
+++ b/sci_gateway/opencv_detectLATCHDescriptors.cpp
@@ -0,0 +1,333 @@
+// Authors
+// Ashish Manatosh Barik, Shubham Lohakare
+//
+#include
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/highgui.hpp"
+#include"opencv2/imgproc.hpp"
+#include"opencv2/features2d.hpp"
+#include"opencv2/xfeatures2d.hpp"
+#include"opencv2/xfeatures2d/nonfree.hpp"
+#include
+#include"opencv2/core/core.hpp"
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+using namespace cv::xfeatures2d;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include<localization.h>
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_detectLATCHDescriptors(char* fname, unsigned long fname_len)
+ {
+
+ // error management variable
+ SciErr sciErr;
+
+ // Checking number of input and output arguments (enviromnet variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 1, 9);
+ CheckOutputArgument(pvApiCtx, 1, 5);
+
+ // input image
+ Mat img;
+
+ // to retrieve the input image from the hypermat that is passed in the scilab function
+ retrieveImage(img, 1);
+
+ // variables required to read varDouble #1
+ int *piAddrVar1 = NULL;
+ int intErrVar1 = 0;
+ double varDouble1 = 0;
+
+ // variables required to read varDouble #2
+ int *piAddrVar2 = NULL;
+ int intErrVar2 = 0;
+ double varDouble2 = 0;
+
+ // variables required to read varDouble #3
+ int *piAddrVar3 = NULL;
+ int intErrVar3 = 0;
+ double varDouble3 = 0;
+
+ // variables required to read varDouble #4
+ int *piAddrVar4 = NULL;
+ int intErrVar4 = 0;
+ double varDouble4 = 0;
+
+ // variables required to read varDouble #5
+ int *piAddrVar5 = NULL;
+ int intErrVar5 = 0;
+ double varDouble5 = 0;
+
+ // variables required to read varDouble #6
+ int *piAddrVar6 = NULL;
+ int intErrVar6 = 0;
+ double varDouble6 = 0;
+
+ // variables required to read varDouble #7
+ int *piAddrVar7 = NULL;
+ int intErrVar7 = 0;
+ double varDouble7 = 0;
+
+
+ // variables required to read varBool #1
+ int *piAddrVar8 = NULL;
+ int intErrVar8 = 0;
+ int varBool1 = true;
+
+ // to get the number of input argument passed in the scilab function
+ int n = *getNbInputArgument(pvApiCtx);
+
+ // keypoints of the input image
+ vector<KeyPoint> keypoints;
+
+ // image used after converting to grayscale image
+ Mat image;
+ // descriptors of the input image
+ Mat descriptors;
+
+ // matrix used to output the descriptors extracted from the input image
+ double *featureVector = NULL;
+ int feature_rows=0;
+ int feature_cols=0;
+ double numBits = 0;
+ double numFeatures = 0;
+
+ // matrix used to output the keypoints extracted from the input image
+ double *LocationData = NULL;
+
+ try
+ {
+ // OpenCV functionalities
+
+ // converting the input images to 8-bit and 1 channel images
+ img.convertTo(img, CV_8U);
+ cvtColor(img, image, CV_BGR2GRAY);
+
+ if(n == 1)
+ {
+ // using default OpenCV function values
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create();
+
+ star->detect(image, keypoints);
+
+ // object
+ Ptr<LATCH> model = cv::xfeatures2d::LATCH::create();
+
+ // computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+ }
+ else if(n == 9)
+ {
+ // to get varDouble #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddrVar1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar1 = getScalarDouble(pvApiCtx, piAddrVar1, &varDouble1);
+ if(intErrVar1)
+ {
+ return intErrVar1;
+ }
+
+ // to get varDouble #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddrVar2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar2 = getScalarDouble(pvApiCtx, piAddrVar2, &varDouble2);
+ if(intErrVar2)
+ {
+ return intErrVar2;
+ }
+
+ // to get varDouble #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddrVar3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar3 = getScalarDouble(pvApiCtx, piAddrVar3, &varDouble3);
+ if(intErrVar3)
+ {
+ return intErrVar3;
+ }
+
+ // to get varDouble #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddrVar4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar4 = getScalarDouble(pvApiCtx, piAddrVar4, &varDouble4);
+ if(intErrVar4)
+ {
+ return intErrVar4;
+ }
+
+ // to get varDouble #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddrVar5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar5 = getScalarDouble(pvApiCtx, piAddrVar5, &varDouble5);
+ if(intErrVar5)
+ {
+ return intErrVar5;
+ }
+
+ // to get varDouble #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddrVar6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar6 = getScalarDouble(pvApiCtx, piAddrVar6, &varDouble6);
+ if(intErrVar6)
+ {
+ return intErrVar6;
+ }
+
+ // to get varBool #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrVar7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar7 = getScalarBoolean(pvApiCtx, piAddrVar7, &varBool1);
+ if(intErrVar7)
+ {
+ return intErrVar7;
+ }
+
+
+ // to get varDouble #7
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddrVar7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar7 = getScalarDouble(pvApiCtx, piAddrVar7, &varDouble7);
+ if(intErrVar7)
+ {
+ return intErrVar7;
+ }
+
+
+ Ptr<StarDetector> star = cv::xfeatures2d::StarDetector::create(varDouble1, varDouble2, varDouble3, varDouble4, varDouble5);
+
+ star->detect(image, keypoints);
+
+ // object
+ Ptr<LATCH> model = cv::xfeatures2d::LATCH::create(varDouble6, varBool1, varDouble7);
+
+ //computing descriptors
+ model->compute(image, keypoints, descriptors);
+
+ }
+ else
+ {
+ Scierror(999, "wrong value argument #2 passed.");
+ return 0;
+ }
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to pass the computed desciptors as output in the form of a matrix
+ numBits = descriptors.size[1];
+ numFeatures = descriptors.size[0];
+ featureVector = (double*)malloc(sizeof(double)*descriptors.size[0]*descriptors.size[1]);
+ for( int i=0; i<descriptors.size[0]; i++)
+ {
+ for( int j=0; j<descriptors.size[1]; j++)
+ {
+ featureVector[i + j*descriptors.size[0]] = double(descriptors.at<uchar>(i,j));
+ }
+ }
+ feature_rows = descriptors.size[0];
+ feature_cols = descriptors.size[1];
+
+ // to pass the detected keypoints as output in the form of a matrix
+ int size = keypoints.size();
+ LocationData = (double *)malloc(sizeof(double) * size * 2);
+ for(int i = 0; i < size; i++)
+ {
+ LocationData[i] = keypoints[i].pt.x;
+ LocationData[i + size] = keypoints[i].pt.y;
+ }
+
+ // descriptor
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 1, feature_rows, feature_cols, featureVector);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // descriptor bits
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 2, 1, 1, &numBits);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of descriptors
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 3, 1, 1, &numFeatures);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // keypoints
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 4, size, 2, LocationData);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of keypoints
+ createScalarInteger32(pvApiCtx,nbInputArgument(pvApiCtx) + 5, size);
+
+ // to return output to scilab
+ for(int i=1;i<=5;i++)
+ {
+ AssignOutputVariable(pvApiCtx, i) = nbInputArgument(pvApiCtx) + i;
+ }
+
+ // to commit the new variables to the Scilab engine
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+
+}
diff --git a/sci_gateway/opencv_detectSIFTFeatures.cpp b/sci_gateway/opencv_detectSIFTFeatures.cpp
new file mode 100644
index 0000000..ea7a39d
--- /dev/null
+++ b/sci_gateway/opencv_detectSIFTFeatures.cpp
@@ -0,0 +1,265 @@
+// Authors
+// Ashish Manatosh Barik, Shubham Lohakare
+//
+#include
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/highgui.hpp"
+#include"opencv2/imgproc.hpp"
+#include"opencv2/features2d.hpp"
+#include"opencv2/xfeatures2d.hpp"
+#include"opencv2/xfeatures2d/nonfree.hpp"
+#include
+#include"opencv2/core/core.hpp"
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+using namespace cv::xfeatures2d;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_detectSIFTFeatures(char* fname, unsigned long fname_len)
+ {
+ // Gateway for the Scilab 'detectSIFTFeatures' function: detects SIFT
+ // keypoints and computes their descriptors for one input image.
+ // Outputs (5): descriptor matrix, descriptor length, descriptor count,
+ // keypoint locations (x then y column), and the number of keypoints.
+ // NOTE(review): several lines below look garbled -- template arguments
+ // and loop bounds appear to have been stripped (e.g. 'vector keypoints;',
+ // 'Ptr model', and the descriptor copy loop). Restore them from the
+ // original sources before building.
+ // NOTE(review): featureVector and LocationData are malloc'd but never
+ // freed; createMatrixOfDouble copies the data, so this leaks on every
+ // call -- confirm and free after the output matrices are created.
+
+ // error management variable
+ SciErr sciErr;
+
+ // Checking number of input and output arguments (environment variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 1, 6);
+ CheckOutputArgument(pvApiCtx, 1, 5);
+
+ // input image
+ Mat img;
+
+ // to retrieve the input image from the hypermat that is passed in the scilab function
+ retrieveImage(img, 1);
+
+ // variables required to read varDouble #1
+ int *piAddrVar1 = NULL;
+ int intErrVar1 = 0;
+ double varDouble1 = 0;
+
+ // variables required to read varDouble #2
+ int *piAddrVar2 = NULL;
+ int intErrVar2 = 0;
+ double varDouble2 = 0;
+
+ // variables required to read varDouble #3
+ int *piAddrVar3 = NULL;
+ int intErrVar3 = 0;
+ double varDouble3 = 0;
+
+ // variables required to read varDouble #4
+ int *piAddrVar4 = NULL;
+ int intErrVar4 = 0;
+ double varDouble4 = 0;
+
+ // variables required to read varDouble #5
+ int *piAddrVar5 = NULL;
+ int intErrVar5 = 0;
+ double varDouble5 = 0;
+
+ // to get the number of input argument passed in the scilab function
+ int n = *getNbInputArgument(pvApiCtx);
+
+ // image used after converting to grayscale image
+ Mat image;
+ // keypoints of the input image
+ vector keypoints;
+ // descriptors of the input image
+ Mat descriptors;
+
+ // matrix used to output the keypoints extracted from the input image
+ double *LocationData = NULL;
+
+ // matrix used to output the descriptors extracted from the input image
+ double *featureVector = NULL;
+ int feature_rows=0;
+ int feature_cols=0;
+ double numBits = 0;
+ double numFeatures = 0;
+
+ try
+ {
+ // OpenCV functionalities
+
+ // converting the input images to 8-bit and 1 channel images
+ img.convertTo(img, CV_8U);
+ cvtColor(img, image, CV_BGR2GRAY);
+
+ if(n == 1)
+ {
+ // using default OpenCV function values
+
+ // SIFT object
+ Ptr model = cv::xfeatures2d::SIFT::create();
+
+ // detecting keypoints and computing descriptors for the respective images
+ model->detectAndCompute(image, Mat(), keypoints, descriptors, false);
+
+ }
+ else if(n == 6)
+ {
+ // user passed values for OpenCV functions
+
+ // to get varDouble #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddrVar1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar1 = getScalarDouble(pvApiCtx, piAddrVar1, &varDouble1);
+ if(intErrVar1)
+ {
+ return intErrVar1;
+ }
+
+ // to get varDouble #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddrVar2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar2 = getScalarDouble(pvApiCtx, piAddrVar2, &varDouble2);
+ if(intErrVar2)
+ {
+ return intErrVar2;
+ }
+
+ // to get varDouble #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddrVar3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar3 = getScalarDouble(pvApiCtx, piAddrVar3, &varDouble3);
+ if(intErrVar3)
+ {
+ return intErrVar3;
+ }
+
+ // to get varDouble #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddrVar4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar4 = getScalarDouble(pvApiCtx, piAddrVar4, &varDouble4);
+ if(intErrVar4)
+ {
+ return intErrVar4;
+ }
+
+ // to get varDouble #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddrVar5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrVar5 = getScalarDouble(pvApiCtx, piAddrVar5, &varDouble5);
+ if(intErrVar5)
+ {
+ return intErrVar5;
+ }
+
+ // SIFT object
+ Ptr model = cv::xfeatures2d::SIFT::create(varDouble1, varDouble2, varDouble3, varDouble4, varDouble5);
+
+ // detecting keypoints and computing descriptors for the respective images
+ model->detectAndCompute(image, Mat(), keypoints, descriptors, false);
+
+ }
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to pass the computed descriptors as output in the form of a matrix
+ // NOTE(review): the copy loop below is garbled; it presumably iterated
+ // over descriptors.size[0] x descriptors.size[1] filling featureVector
+ // from descriptors.at(i,j) -- confirm against the original source.
+ numBits = descriptors.size[1];
+ numFeatures = descriptors.size[0];
+ featureVector = (double*)malloc(sizeof(double)*descriptors.size[0]*descriptors.size[1]);
+ for( int i=0; i(i,j));
+ }
+ }
+ feature_rows = descriptors.size[0];
+ feature_cols = descriptors.size[1];
+
+ // to pass the detected keypoints as output in the form of a matrix
+ // (column-major: first 'size' entries are x, next 'size' entries are y)
+ int size = keypoints.size();
+ LocationData = (double *)malloc(sizeof(double) * size * 2);
+ for(int i = 0; i < size; i++)
+ {
+ LocationData[i] = keypoints[i].pt.x;
+ LocationData[i + size] = keypoints[i].pt.y;
+ }
+
+ // descriptor
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 1, feature_rows, feature_cols, featureVector);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // descriptor bits
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 2, 1, 1, &numBits);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of descriptors
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 3, 1, 1, &numFeatures);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // keypoints
+ sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 4, size, 2, LocationData);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ // number of keypoints
+ createScalarInteger32(pvApiCtx,nbInputArgument(pvApiCtx) + 5, size);
+
+ // to return output to scilab
+ for(int i=1;i<=5;i++)
+ {
+ AssignOutputVariable(pvApiCtx, i) = nbInputArgument(pvApiCtx) + i;
+ }
+
+ // to commit the new variables to the Scilab engine
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+
+}
+
diff --git a/sci_gateway/opencv_distanceExtractor.cpp b/sci_gateway/opencv_distanceExtractor.cpp
new file mode 100644
index 0000000..ca01afe
--- /dev/null
+++ b/sci_gateway/opencv_distanceExtractor.cpp
@@ -0,0 +1,337 @@
+/*
+This is the .cpp gateway file for the 'distanceExtractor' scilab function. Overloads two types of distance extractors :
+OpenCV classes:
+1. Ptr< ShapeContextDistanceExtractor > cv::createShapeContextDistanceExtractor (int nAngularBins=12, int nRadialBins=4, float innerRadius=0.2f, float outerRadius=2, int iterations=3, const Ptr< HistogramCostExtractor > &comparer=createChiHistogramCostExtractor(), const Ptr< ShapeTransformer > &transformer=createThinPlateSplineShapeTransformer())
+2. Ptr< ThinPlateSplineShapeTransformer > cv::createThinPlateSplineShapeTransformer (double regularizationParameter=0)
+
+It includes the following OpenCV functions, belonging to the Shape Distance and Matching module of OpenCV 3.0.0 :
+1. computeDistance (InputArray contour1, InputArray contour2)
+ Compute the shape distance between two shapes defined by its contours.
+*/
+
+#include
+#include
+#include
+#include
+#include
+#include"opencv2/opencv.hpp"
+#include"opencv2/shape/shape_distance.hpp"
+#include"opencv2/shape/shape_transformer.hpp"
+#include"opencv2/shape/shape_transformer.hpp"
+#include"opencv2/shape/hist_cost.hpp"
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+ #include"../common.h"
+
+ static vector sampleContour(const Mat& image, int n=300)
+ {
+ vector > _contours;
+ vector all_points;
+ findContours(image, _contours, RETR_LIST, CHAIN_APPROX_NONE);
+ for(size_t i=0; i< _contours.size(); i++)
+ {
+ for(size_t j=0;j<_contours[i].size();j++)
+ {
+ all_points.push_back(_contours[i][j]);
+ }
+ }
+
+ // if too little points, replicate them
+ int dummy = 0;
+ for(int add=(int)all_points.size();add sampled;
+ for(int i=0;i c1 = sampleContour(image1);
+ vector c2 = sampleContour(image2);
+
+ // OpenCV functionalitites
+
+ // apply Shape Distant Extractor
+ if(typeOfMethod == 1)
+ {
+ // Shape Context Distant Extractor
+
+
+ // to get the argument #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &nAngularBins);
+ if(intErr4)
+ {
+ return intErr4;
+ }
+
+ // to get the argument #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &innerRadius);
+ if(intErr5)
+ {
+ return intErr5;
+ }
+
+
+ // to get the argument #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarDouble(pvApiCtx, piAddr6, &nRadialBins);
+ if(intErr6)
+ {
+ return intErr6;
+ }
+
+ // to get the argument #7
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddr7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr7 = getScalarDouble(pvApiCtx, piAddr7, &outerRadius);
+ if(intErr7)
+ {
+ return intErr7;
+ }
+
+ // to get the argument #8
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddr8);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr8 = getScalarDouble(pvApiCtx, piAddr8, &iterations);
+ if(intErr8)
+ {
+ return intErr8;
+ }
+
+ // to get the argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddr9);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr9 = getScalarDouble(pvApiCtx, piAddr9, &nDummies);
+ if(intErr9)
+ {
+ return intErr9;
+ }
+
+ // to get the argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddr10);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr10 = getScalarDouble(pvApiCtx, piAddr10, &dC);
+ if(intErr10)
+ {
+ return intErr10;
+ }
+
+ // to get the argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr11);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr11 = getScalarDouble(pvApiCtx, piAddr11, &rpTps);
+ if(intErr11)
+ {
+ return intErr11;
+ }
+
+
+ const Ptr& comparer = createChiHistogramCostExtractor(nDummies, dC);
+ // Smart pointer to a ShapeTransformer, an algorithm that defines the aligning transformation.
+ const Ptr& transformer = createThinPlateSplineShapeTransformer(rpTps);
+
+
+ Ptr model = createShapeContextDistanceExtractor(nAngularBins, nRadialBins, innerRadius, outerRadius, iterations, comparer, transformer );
+
+
+ dist = model->computeDistance(c1, c2);
+ }
+ else if(typeOfMethod == 2)
+ {
+ // Hausdorff Distant Extractor
+
+ Ptr model = createHausdorffDistanceExtractor();
+
+ dist = model->computeDistance(c1, c2);
+
+ cout<
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include
+ #include "../common.h"
+
+ int opencv_fillConvexPoly(char *fname, unsigned long fname_len)
+ {
+ // Gateway for the Scilab 'fillConvexPoly' function.
+ // Draws a filled convex polygon on an image.
+ // Arguments: #1 image, #2 iRows-by-2 matrix of vertices (col 1 = x,
+ // col 2 = y), #3 number of points, #4-#6 R/G/B colour components,
+ // #7 line type, #8 fractional-bit shift. Returns the modified image.
+
+ SciErr sciErr;
+ int intErr = 0;
+ int iRows=0,iCols=0;
+ int *piAddr2 = NULL;
+ int *piAddr3 = NULL;
+ int *piAddr4 = NULL;
+ int *piAddr5 = NULL;
+ int *piAddr6 = NULL;
+ int *piAddr7 = NULL;
+ int *piAddr8 = NULL;
+ int i;
+ double *pstData = NULL;
+ double npts ,r_value,g_value ,b_value ,linetype=8 ,shift=0;
+
+ //checking input argument
+ CheckInputArgument(pvApiCtx, 8, 8);
+ CheckOutputArgument(pvApiCtx, 1, 1) ;
+
+ // input image
+ Mat image;
+ retrieveImage(image,1);
+
+ // to retrieve the matrix of polygon vertices
+ sciErr = getVarAddressFromPosition(pvApiCtx,2,&piAddr2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return -1;
+ }
+ sciErr = getMatrixOfDouble(pvApiCtx, piAddr2, &iRows, &iCols, &pstData);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return -1;
+ }
+
+ /// to retrieve the number of points
+ sciErr = getVarAddressFromPosition(pvApiCtx,3,&piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr3, &npts);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ //// to retrieve the value of r_value
+ sciErr = getVarAddressFromPosition(pvApiCtx,4,&piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr4, &r_value);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 4);
+ return -1;
+ }
+
+ /// to retrieve the value of g_value
+ sciErr = getVarAddressFromPosition(pvApiCtx,5,&piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr5, &g_value);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 5);
+ return -1;
+ }
+
+ /// to retrieve the value of b_value
+ sciErr = getVarAddressFromPosition(pvApiCtx,6,&piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr6, &b_value);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 6);
+ return -1;
+ }
+
+ /// to retrieve the value of linetype
+ sciErr = getVarAddressFromPosition(pvApiCtx,7,&piAddr7);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr7, &linetype);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 7);
+ return -1;
+ }
+
+ /// to retrieve the value of shift
+ sciErr = getVarAddressFromPosition(pvApiCtx,8,&piAddr8);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr8, &shift);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 8);
+ return -1;
+ }
+
+ // Build the contour only after every scalar argument has been read, so
+ // that none of the early error returns above can leak this allocation.
+ // BUGFIX: the original allocated iRows * 2 * sizeof(double) before the
+ // reads, never freed it, and leaked it on every error path.
+ Point *pts = (Point *)malloc(iRows * sizeof(Point));
+ if(pts == NULL)
+ {
+ Scierror(999, "fillConvexPoly: memory allocation failed.");
+ return -1;
+ }
+ for( i = 0 ; i < iRows ; i++)
+ {
+ pts[i].x = *(pstData + i); // column 1 -> x
+ pts[i].y = *(pstData + 1*iRows + i); // column 2 -> y
+ }
+
+ try
+ {
+ // OpenCV Functionalities
+
+ // void fillConvexPoly(InputOutputArray img, const Point* pts, int npts, const Scalar& color, int lineType, int shift)
+ fillConvexPoly(image,pts,(int)npts,Scalar(r_value,g_value,b_value),linetype,shift);
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ free(pts); // BUGFIX: the original leaked the vertex buffer
+
+ // return the modified image as the first output argument
+ string tempstring = type2str(image.type());
+ char *checker;
+ checker = (char *)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker,image,1);
+ free(checker);
+
+
+ //Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ //Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+
+ }
+
+}
diff --git a/sci_gateway/opencv_gabor.cpp b/sci_gateway/opencv_gabor.cpp
new file mode 100644
index 0000000..ae6df1c
--- /dev/null
+++ b/sci_gateway/opencv_gabor.cpp
@@ -0,0 +1,129 @@
+/*
+ * gabor
+ *
+ * gabor in scilab
+ *
+ */
+
+// Created by Samiran Roy, mail: samiranroy@cse.iitb.ac.in
+// An implementation of gabor method of matlab
+// Usage:
+// gabor(wavelength,orientation) - Generates a gabor kernel with the given wavelength and orientation
+// wavelength: pixels/cycle of the sinusoidal carrier, must be >=2
+// orientation: orientation of the filter in degrees, must be between [0,360]
+
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "sciprint.h"
+ #include "../common.h"
+
+ int opencv_gabor(char *fname, unsigned long fname_len)
+ {
+ // Gateway for the Scilab 'gabor' function.
+ // gabor(wavelength, orientation) returns a Gabor kernel built with the
+ // MATLAB conventions for sigma and kernel size.
+ // wavelength : pixels/cycle of the sinusoidal carrier, must be >= 2
+ // orientation : filter orientation in degrees, in [0, 360]
+ SciErr sciErr;
+ int intErr = 0;
+ int *piAddr1 = NULL;
+ int *piAddr2 = NULL;
+
+ double wavelength;
+ double orientation;
+
+ // Checking input argument
+ CheckInputArgument(pvApiCtx, 2, 2);
+ CheckOutputArgument(pvApiCtx, 1, 1);
+
+ // Getting the wavelength
+ sciErr = getVarAddressFromPosition(pvApiCtx, 1, &piAddr1);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr1, &wavelength);
+ // BUGFIX: the original tested sciErr.iErr here, but getScalarDouble
+ // reports failure through its return value (intErr), not sciErr
+ if (intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 1);
+ return -1;
+ }
+ if (wavelength < 2)
+ {
+ Scierror(999, "Wavelength must be >=2");
+ return -1;
+ }
+
+ // Getting the orientation
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr2);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr2, &orientation);
+ if (intErr) // BUGFIX: was sciErr.iErr (stale), same as above
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+ if ((orientation < 0) || (orientation > 360))
+ {
+ Scierror(999, "Orientation must be in the range [0,360]");
+ return -1;
+ }
+
+ // sigma and kernel size follow the MATLAB gabor() convention
+ double sigma = (1 / CV_PI) * sqrt(log(2) / 2) * 3 * wavelength;
+
+ orientation = (orientation / 360) * 2 * CV_PI; // degrees -> radians
+ int K_size = 33 + 16 * (wavelength - 2);
+
+ Mat new_image;
+
+ try
+ {
+ // spatial aspect ratio 0.5 and phase offset 0, as in the original
+ new_image = getGaborKernel(cv::Size(K_size, K_size), sigma, orientation, wavelength, 0.5, 0);
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // return the kernel as the first output argument
+ string tempstring = type2str(new_image.type());
+ char *checker;
+ checker = (char *)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker, new_image, 1);
+ free(checker);
+
+ // Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+ // Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+ return 0;
+ }
+/* ==================================================================== */
+}
diff --git a/sci_gateway/opencv_gaussianBlur.cpp b/sci_gateway/opencv_gaussianBlur.cpp
new file mode 100644
index 0000000..f5ae62c
--- /dev/null
+++ b/sci_gateway/opencv_gaussianBlur.cpp
@@ -0,0 +1,149 @@
+/***************************************************
+Author : Sukul Bagai
+****************************************************
+Usage : return_image = gaussianblur(input_image,ksize_height,ksize_width,sigmaX,sigmaY);
+***************************************************/
+
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+using namespace cv;
+using namespace std;
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include
+ #include "../common.h"
+
+ int opencv_gaussianBlur(char *fname, unsigned long fname_len)
+ {
+ // Gateway for the Scilab 'gaussianblur' function.
+ // Usage : return_image = gaussianblur(input_image, ksize_height, ksize_width, sigmaX, sigmaY)
+ // Applies a Gaussian blur with the given kernel size and sigmas and
+ // returns the blurred image. Even or negative kernel dimensions are
+ // coerced to the nearest valid value (with a warning).
+
+ SciErr sciErr;
+ int intErr = 0;
+ int *piAddr2 = NULL;
+ int *piAddr3 = NULL;
+ int *piAddr4 = NULL;
+ int *piAddr5 = NULL;
+ double ksize_width,ksize_height,sigmaX,sigmaY;
+
+ //checking input argument
+ CheckInputArgument(pvApiCtx, 5, 5);
+ CheckOutputArgument(pvApiCtx, 1, 1) ;
+
+ Mat image;
+ retrieveImage(image,1);
+
+ //for value of ksize_height
+ sciErr = getVarAddressFromPosition(pvApiCtx,2,&piAddr2);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr2, &ksize_height);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+ //for value of ksize_width
+ sciErr = getVarAddressFromPosition(pvApiCtx,3,&piAddr3);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr3, &ksize_width);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+ //for value of sigmaX
+ sciErr = getVarAddressFromPosition(pvApiCtx,4,&piAddr4);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr4, &sigmaX);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 4);
+ return -1;
+ }
+ //for value of sigmaY
+ sciErr = getVarAddressFromPosition(pvApiCtx,5,&piAddr5);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr5, &sigmaY);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 5);
+ return -1;
+ }
+ //applying checks to input parameters, and assigning default values in case of error
+ Mat new_image(image.rows,image.cols,CV_8UC3);
+ if(ksize_height<0)
+ {
+ Scierror(999, "Positive Value Required for Height. 1 value was used instead");
+ ksize_height=1;
+ }
+ if(ksize_width<0)
+ {
+ Scierror(999, "Positive Value Required for Width. 1 value was used instead");
+ ksize_width=1;
+ }
+ if((int)ksize_height%2==0)
+ {
+ ksize_height+=1;
+ // BUGFIX: the original passed &ksize_height (a pointer) to the %f
+ // conversion, which is undefined behaviour; pass the value itself
+ Scierror(999, "Odd Value Required for Height. %f value was used instead", ksize_height);
+ }
+ if((int)ksize_width%2==0)
+ {
+ ksize_width+=1;
+ // BUGFIX: was &ksize_width, same pointer-to-%f defect as above
+ Scierror(999, "Odd Value Required for Width. %f value was used instead", ksize_width);
+ }
+
+ try
+ {
+ // BUGFIX: cv::Size is (width, height); the original constructed
+ // Size(ksize_height, ksize_width), swapping the two dimensions for
+ // non-square kernels
+ Size sz(ksize_width,ksize_height);
+
+ //void GaussianBlur(InputArray src, OutputArray dst, Size ksize, double sigmaX, double sigmaY=0, int borderType=BORDER_DEFAULT )
+ GaussianBlur(image,new_image,sz,sigmaX,sigmaY);
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ //returning image
+ string tempstring = type2str(new_image.type());
+ char *checker;
+ checker = (char *)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker,new_image,1);
+ free(checker);
+
+ //Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+ //Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+
+ }
+/* ==================================================================== */
+}
diff --git a/sci_gateway/opencv_histogramCostExtractor.cpp b/sci_gateway/opencv_histogramCostExtractor.cpp
new file mode 100644
index 0000000..1ec3e56
--- /dev/null
+++ b/sci_gateway/opencv_histogramCostExtractor.cpp
@@ -0,0 +1,240 @@
+/*
+This is the .cpp gateway file for the 'histogramCostExtractor' scilab function.
+
+OpenCV classes :
+1. Ptr< HistogramCostExtractor > cv::createChiHistogramCostExtractor (int nDummies=25, float defaultCost=0.2f)
+2 Ptr< HistogramCostExtractor > cv::createEMDL1HistogramCostExtractor (int nDummies=25, float defaultCost=0.2f)
+3. Ptr< HistogramCostExtractor > cv::createNormHistogramCostExtractor (int flag=DIST_L2, int nDummies=25, float defaultCost=0.2f)
+
+It includes the following OpenCV functions, belonging to the Shape Distance and Matching module of OpenCV 3.0.0 :
+1. void buildCostMatrix (InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix)
+
+
+*/
+
+#include
+#include"opencv2/core/core.hpp"
+#include"opencv2/highgui/highgui.hpp"
+#include"opencv2/opencv.hpp"
+#include"opencv2/shape.hpp"
+#include"opencv2/imgcodecs.hpp"
+#include"opencv2/imgproc/imgproc.hpp"
+#include"opencv2/features2d/features2d.hpp"
+#include"opencv2/xfeatures2d.hpp"
+#include"opencv2/core/utility.hpp"
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+using namespace cv::xfeatures2d;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_histogramCostExtractor(char *fname, unsigned long fname_len)
+ {
+ // Gateway for the Scilab 'histogramCostExtractor' function.
+ // Computes SURF descriptors for two images and builds a histogram cost
+ // matrix between them, using the extractor selected by argument #3:
+ // 1 = Chi, 2 = EMDL1, 3 = Norm. Returns the cost matrix as an image.
+ // NOTE(review): several lines below look garbled -- template arguments
+ // appear to have been stripped (e.g. 'Ptr surf', 'vector keypoints1',
+ // 'Ptr model'). Restore them from the original sources before building.
+
+ // Error management variable
+ SciErr sciErr;
+ int i;
+
+ // variables required to read argument #3
+ int *piAddr3 = NULL;
+ int intErr3 = 0;
+ double typeOfMethod = 0;
+
+ // variables required to read argument #4
+ int *piAddr4 = NULL;
+ int intErr4 = 0;
+ double nDummies = 0;
+
+ // variables required to read argument #5
+ int *piAddr5 = NULL;
+ int intErr5 = 0;
+ double defaultCost = 0;
+
+ // variables required to read argument #6
+ int *piAddr6 = NULL;
+ int intErr6 = 0;
+ double hessianThreshold = 0;
+
+
+ // Checking number of input and output arguments (environment variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 4, 6);
+ CheckOutputArgument(pvApiCtx, 1, 1);
+
+ Mat img1, img2;
+
+ int a = retrieveImage(img1, 1);
+ if(a == 0)
+ {
+ sciprint("Error while retrieving the image1.");
+ return 0;
+ }
+ int b = retrieveImage(img2, 2);
+ // NOTE(review): unlike the image1 branch, this one does not return on
+ // failure -- confirm whether a 'return 0;' is missing here.
+ if(b == 0)
+ {
+ sciprint("Error while retrieving the image2.");
+ }
+
+
+ // to get argument #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &typeOfMethod);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ // to get argument #4 (hessianThreshold)
+ sciErr = getVarAddressFromPosition(pvApiCtx, 4, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarDouble(pvApiCtx, piAddr6, &hessianThreshold);
+ if(intErr6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 4);
+ return -1;
+ }
+
+
+ // output costMatrix
+ Mat costMatrix;
+ Mat image1, image2;
+
+ try
+ {
+ // OpenCV functionalitites
+
+ img1.convertTo(img1, CV_8U);
+ cvtColor(img1, image1, CV_BGR2GRAY);
+
+ img2.convertTo(img2, CV_8U);
+ cvtColor(img2, image2, CV_BGR2GRAY);
+
+ // detecting keypoints & computing descriptors
+ Ptr surf = SURF::create(hessianThreshold);
+ vector keypoints1, keypoints2;
+ Mat descriptors1, descriptors2;
+ surf->detectAndCompute(image1, Mat(), keypoints1, descriptors1);
+ surf->detectAndCompute(image2, Mat(), keypoints2, descriptors2);
+
+ if(typeOfMethod == 1)
+ {
+
+ // to get argument #5 (nDummies)
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &nDummies);
+ if(intErr4)
+ {
+ return intErr4;
+ }
+
+ // to get argument #6 (defaultCost)
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &defaultCost);
+ if(intErr5)
+ {
+ return intErr5;
+ }
+
+ // Chi Histogram Cost Extractor
+ Ptr model = createChiHistogramCostExtractor(nDummies, defaultCost);
+
+ model->buildCostMatrix(descriptors1, descriptors2, costMatrix);
+ }
+ else if(typeOfMethod == 2)
+ {
+
+ // to get argument #5 (nDummies)
+ sciErr = getVarAddressFromPosition(pvApiCtx, 5, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &nDummies);
+ if(intErr4)
+ {
+ return intErr4;
+ }
+
+ // to get argument #6 (defaultCost)
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &defaultCost);
+ if(intErr5)
+ {
+ return intErr5;
+ }
+
+ // EMDL1 Histogram Cost Extractor
+ Ptr model = createEMDL1HistogramCostExtractor(nDummies, defaultCost);
+
+ model->buildCostMatrix(descriptors1, descriptors2, costMatrix);
+ }
+ else if(typeOfMethod == 3)
+ {
+ // Norm Histogram Cost Extractor (default parameters)
+ Ptr model = createNormHistogramCostExtractor();
+
+ model->buildCostMatrix(descriptors1, descriptors2, costMatrix);
+ }
+ else
+ {
+ Scierror(999,"Wrong input for Argument #3. Use '1' for 'Chi', '2' for 'EMDL1', and '3' for 'Norm'.\n", 1);
+ }
+
+ }catch(Exception& e)
+ {
+ const char* err = e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to return the output cost matrix as an image
+ string tempstring1 = type2str(costMatrix.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, costMatrix, 1);
+ free(checker1);
+
+// sciErr = createMatrixOfDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 1, costMatrix.rows, costMatrix.cols, costMatrix);
+
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+}
diff --git a/sci_gateway/opencv_imGaborFilt.cpp b/sci_gateway/opencv_imGaborFilt.cpp
new file mode 100644
index 0000000..77d2193
--- /dev/null
+++ b/sci_gateway/opencv_imGaborFilt.cpp
@@ -0,0 +1,171 @@
+/*
+ * imgaborfilt
+ *
+ * imgaborfilt in scilab
+ *
+ */
+
+// Created by Samiran Roy, mail: samiranroy@cse.iitb.ac.in
+// An implementation of imgaborfilt in scilab
+// Usage:
+// imgaborfilt(I,wavelength,orientation) : Perform gabor filtering on a grayscale image with given wavelength and orientation
+// imboxfilt(I,method)
+// method : 'upright' (default)
+// method : 'rotated' The area sums are calculated over a rectangle, which is
+// rotated 45 degrees
+// Known Changes from Matlab:
+/*
+ * 1) None, as of now
+ */
+
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+using namespace cv;
+using namespace std;
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "sciprint.h"
+ #include "../common.h"
+
+ int opencv_imGaborFilt(char *fname, unsigned long fname_len)
+ {
+ // Gateway for the Scilab 'imgaborfilt' function.
+ // imgaborfilt(I, wavelength, orientation): filters a grayscale image
+ // with a Gabor kernel built using the MATLAB conventions for sigma and
+ // kernel size, and returns the filtered (float) image.
+ // wavelength : pixels/cycle of the sinusoidal carrier, must be >= 2
+ // orientation : filter orientation in degrees, in [0, 360]
+
+ SciErr sciErr;
+ int intErr = 0;
+ int *piAddr2 = NULL;
+ int *piAddr3 = NULL;
+
+ double wavelength;
+ double orientation;
+
+ int borderType = BORDER_REPLICATE;
+
+ // Parameters for convolution - Leave these alone
+ // (kept for documentation; filter2D below relies on these defaults)
+ Point anchor;
+ double delta;
+ int ddepth;
+
+ anchor = Point(-1, -1); // The center is unchanged
+ delta = 0; // No value is added to output
+ ddepth = -1; // The dimentions of input and output images are the same
+
+ // Checking input argument
+ CheckInputArgument(pvApiCtx, 3, 3);
+ CheckOutputArgument(pvApiCtx, 1, 1);
+
+ // Get input image
+ Mat image;
+ retrieveImage(image, 1);
+
+ if (image.channels() > 1)
+ {
+ sciprint("The image must be grayscale.");
+ return 0;
+ }
+
+ // Getting the wavelength
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr2);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr2, &wavelength);
+ // BUGFIX: the original tested sciErr.iErr here, but getScalarDouble
+ // reports failure through its return value (intErr), not sciErr
+ if (intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return intErr;
+ }
+ if (wavelength < 2)
+ {
+ sciprint("Wavelength must be >=2");
+ return 0;
+ }
+
+ // Getting the orientation
+ sciErr = getVarAddressFromPosition(pvApiCtx, 3, &piAddr3);
+ if (sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr3, &orientation);
+ if (intErr) // BUGFIX: was sciErr.iErr (stale), same as above
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return intErr;
+ }
+ if ((orientation < 0) || (orientation > 360))
+ {
+ sciprint("Orientation must be in the range [0,360]");
+ return 0;
+ }
+
+ // sigma and kernel size follow the MATLAB imgaborfilt convention
+ double sigma = (1 / CV_PI) * sqrt(log(2) / 2) * 3 * wavelength;
+
+ orientation = (orientation / 360) * 2 * CV_PI; // degrees -> radians
+ int K_size = 33 + 16 * (wavelength - 2);
+
+ Mat kernel = getGaborKernel(cv::Size(K_size, K_size), sigma, orientation, wavelength, 0.5, 0);
+
+ Mat floatimage;
+ Mat new_image;
+
+ try
+ {
+ image.convertTo(floatimage, CV_32F); // Converting image to float type
+
+ filter2D(floatimage, new_image, CV_32F, kernel); // Performing convolution
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // return the filtered image as the first output argument
+ string tempstring = type2str(new_image.type());
+ char *checker;
+ checker = (char *)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker, new_image, 1);
+ free(checker);
+
+ // Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+ // Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+/* ==================================================================== */
+}
diff --git a/sci_gateway/opencv_lab2rgb.cpp b/sci_gateway/opencv_lab2rgb.cpp
new file mode 100644
index 0000000..0857cc9
--- /dev/null
+++ b/sci_gateway/opencv_lab2rgb.cpp
@@ -0,0 +1,341 @@
+/* ==================================================================== */
+/* Author :Tess Zacharias, Ashish Manatosh Barik */
+/* ==================================================================== */
+/* overloaded function */
+/* Syntax : return_matrix_3_rgb_values=lab2rgb(matrix_of_3_labvalues a) */
+/* Syntax : return_matrix_3_rgb_values=lab2rgb(matrix_of_3_labvalues a,String1 Name,String2 value) */
+/* ==================================================================== */
+/* ==================================================================== */
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "../common.h"
+ #define REF_X .95047; // Observer= 2°, Illuminant= D65
+ #define REF_Y 1.00000;
+ #define REF_Z 1.08883;
+
+ struct Color
+ {
+ float R,G,B,X,Y,Z,L,a,b;
+ };
+
+ Color lab2rgb1(float L, float a, float b)
+ {
+
+ float y = (L + 16) / 116;
+ float x = a / 500 + y;
+ float z = y - b / 200;
+
+ if( pow( y , 3 ) > 0.008856 )
+ {
+ y = pow( y , 3 );
+ }
+ else
+ {
+ y = ( y - 16 / 116 ) / 7.787;
+ }
+
+ if( pow( x , 3 ) > 0.008856 )
+ {
+ x = pow( x , 3 );
+ }
+ else
+ {
+ x = ( x - 16 / 116 ) / 7.787;
+ }
+
+ if( pow( z , 3 ) > 0.008856 )
+ {
+ z = pow( z , 3 );
+ }
+ else
+ {
+ z = ( z - 16 / 116 ) / 7.787;
+ }
+
+ float X= x *REF_X;
+ float Y= y *REF_Y;
+ float Z = z *REF_Z;
+
+ float R1=3.240479*X-1.537150*Y -0.498535 *Z;
+ float G1=-0.969256*X+1.875992 *Y+0.041556* Z;
+ float B1=0.055648*X-0.204043*Y +1.057311*Z;
+
+ if( R1 > 0.0031308 )
+ R1 = 1.055 * ( pow( R1, 1./2.4 ) ) - 0.055;
+ else
+ R1 = 12.92 * R1;
+ if( G1 > 0.0031308 )
+ G1 = 1.055 * ( pow( G1, 1./2.4 ) ) - 0.055;
+ else
+ G1 = 12.92 * G1;
+ if( B1 > 0.0031308 )
+ B1 = 1.055 * ( pow( B1, 1./2.4 ) ) - 0.055;
+ else
+ B1 = 12.92 * B1;
+
+
+ Color rgb;
+ rgb.R= R1;
+ rgb.G= G1;
+ rgb.B= B1;
+
+ return rgb;
+
+ }
+
+ Color lab2rgb2(float L, float a, float b,char *s)
+ {
+ double REF_X1;
+ double REF_Y1;
+ double REF_Z1;
+
+ if(!strcmp(s,"a"))
+ {
+ REF_X1=1.0985;
+ REF_Y1= 1.0000;
+ REF_Z1=0.3558;
+ }
+ else if(!strcmp(s,"c"))
+ {
+ REF_X1=0.9807;
+ REF_Y1= 1.0000;
+ REF_Z1=1.1822;
+ }
+ else if(!strcmp(s,"d50"))
+ {
+ REF_X1=0.9642;
+ REF_Y1= 1.0000;
+ REF_Z1=0.8251;
+ }
+ else if(!strcmp(s,"d65"))
+ {
+ REF_X1=0.9504;
+ REF_Y1= 1.0000;
+ REF_Z1= 1.0888;
+ }
+ else if(!strcmp(s,"icc"))
+ {
+ REF_X1=0.962;
+ REF_Y1=1.000;
+ REF_Z1= 0.8249;
+ }
+ else if(!strcmp(s,"e"))
+ {
+ REF_X1=1.000;
+ REF_Y1=1.000;
+ REF_Z1=1.000;
+ }
+
+ float y = (L + 16) / 116;
+ float x = a / 500 + y;
+ float z = y - b / 200;
+
+ if( pow( y , 3 ) > 0.008856 )
+ {
+ y = pow( y , 3 );
+ }
+ else
+ {
+ y = ( y - 16 / 116 ) / 7.787;
+ }
+
+ if( pow( x , 3 ) > 0.008856 )
+ {
+ x = pow( x , 3 );
+ }
+ else
+ {
+ x = ( x - 16 / 116 ) / 7.787;
+ }
+ if( pow( z , 3 ) > 0.008856 )
+ {
+ z = pow( z , 3 );
+ }
+ else
+ {
+ z = ( z - 16 / 116 ) / 7.787;
+ }
+
+ float X= x *REF_X;
+ float Y= y *REF_Y;
+ float Z = z *REF_Z;
+
+ X = X/100.f; //R 0..1
+ Y= Y/100.f; //G 0..1
+ Z= Z/100.f; //B 0..1
+
+ float R1=3.240479*X-1.537150*Y -0.498535 *Z;
+ float G1=-0.969256*X+1.875992 *Y+0.041556* Z;
+ float B1=0.055648*X-0.204043*Y +1.057311*Z;
+
+ if( R1 > 0.0031308 )
+ R1 = 1.055 * ( pow( R1, 1./2.4 ) ) - 0.055;
+ else
+ R1 = 12.92 * R1;
+ if( G1 > 0.0031308 )
+ G1 = 1.055 * ( pow( G1, 1./2.4 ) ) - 0.055;
+ else
+ G1 = 12.92 * G1;
+ if( B1 > 0.0031308 )
+ B1 = 1.055 * ( pow( B1, 1./2.4 ) ) - 0.055;
+ else
+ B1 = 12.92 * B1;
+
+
+ Color rgb;
+ rgb.R= R1*100;
+ rgb.G= G1*100;
+ rgb.B= B1*100;
+
+ return rgb;
+
+ }
+
+ int opencv_lab2rgb(char *fname, unsigned long fname_len)
+ {
+
+ // Error management variable
+ SciErr sciErr;
+ //variable info
+ int iRows = 0;
+ int iCols = 0;
+ int piRows = 0;
+ int piCols = 0;
+ int* piAddr=NULL;
+ int* piAddr1=NULL;
+ int* piAddr3=NULL;
+ double *value=NULL;
+ int* piLen = NULL;
+ char **val;
+ int i,j;
+ int error;
+ Color out;
+ double check;
+ Mat img,dst;
+ float L,a,b;
+ int *piAddrNew = NULL;
+ //checking output argument
+ CheckOutputArgument(pvApiCtx,1,1);
+ //checking if number of input arguments are 3
+
+ int k=0;
+ // SciErr getMatrixOfDoubleInList(void* _pvCtx, int* _piParent, int _iItemPos, int* _piRows, int* _piCols, double** _pdblReal)
+ double *pstDataR = NULL;
+ double *pstDataG = NULL;
+ double *pstDataB = NULL;
+
+ sciErr = getVarAddressFromPosition(pvApiCtx,1,&piAddr);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the R values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 1, &iRows, &iCols, &pstDataR);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the G values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 2, &iRows, &iCols, &pstDataG);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the B values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 3, &iRows, &iCols, &pstDataB);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ int m=0;
+ double *r1,*g1,*b1;
+
+ r1=(double *)malloc(sizeof(double)*iRows*iCols);
+ g1=(double *)malloc(sizeof(double)*iRows*iCols);
+ b1=(double *)malloc(sizeof(double)*iRows*iCols);
+
+ try
+ {
+ for(int i=0;i
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+
+ struct Color
+ {
+ float R,G,B,X,Y,Z,L,a,b;
+ };
+
+ Color lab2uint8(float L, float a, float b)
+ {
+ float R1=round(((255*L)/100));
+ float G1=round(a+128);
+ float B1=round(b+128);
+
+ Color rgb;
+ rgb.R= R1;
+ rgb.G = G1;
+ rgb.B = B1;
+
+ return rgb;
+
+ }
+
+ int opencv_lab2uint8(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+ //variable info
+ int iRows = 0;
+ int iCols = 0;
+ int piRows = 0;
+ int piCols = 0;
+ int* piAddr=NULL;
+ int* piAddr1=NULL;
+ int* piAddr3=NULL;
+ double *value=NULL;
+ int* piLen = NULL;
+ char **val;
+ int i,j;
+ int error;
+ Color out;
+ double check;
+ Mat img,dst;
+ float L,a,b;
+ int *piAddrNew = NULL;
+
+ //checking output argument
+ CheckOutputArgument(pvApiCtx,1,1);
+ //checking if number of input arguments are 3
+
+ int k=0;
+
+ double *pstDataR = NULL;
+ double *pstDataG = NULL;
+ double *pstDataB = NULL;
+
+ sciErr = getVarAddressFromPosition(pvApiCtx,1,&piAddr);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the R values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 1, &iRows, &iCols, &pstDataR);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the G values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 2, &iRows, &iCols, &pstDataG);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the B values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 3, &iRows, &iCols, &pstDataB);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ int m=0;
+ double *r1,*g1,*b1;
+
+ r1=(double *)malloc(sizeof(double)*iRows*iCols);
+ g1=(double *)malloc(sizeof(double)*iRows*iCols);
+ b1=(double *)malloc(sizeof(double)*iRows*iCols);
+
+ try
+ {
+ for(int i=0;i cv::createCalibrateDebevec (int samples=70, float lambda=10.0f, bool random=false)
+2. Ptr< CalibrateRobertson > cv::createCalibrateRobertson (int max_iter=30, float threshold=0.01f)
+3. Ptr< MergeDebevec > cv::createMergeDebevec ()
+4. Ptr< MergeMertens > cv::createMergeMertens (float contrast_weight=1.0f, float saturation_weight=1.0f, float exposure_weight=0.0f)
+5. Ptr< MergeRobertson > cv::createMergeRobertson ()
+6. Ptr< Tonemap > cv::createTonemap (float gamma=1.0f)
+
+It includes the following OpenCV functions, belonging to the Photo module of OpenCV 3.0.0 :
+1. void process (InputArrayOfArrays src, OutputArray dst, InputArray times)
+2. void process (InputArrayOfArrays src, OutputArray dst, InputArray times, InputArray response)
+3. void process (InputArrayOfArrays src, OutputArray dst)
+
+*/
+
+#include
+#include
+#include
+#include
+#include"opencv2/opencv.hpp"
+#include"opencv2/core.hpp"
+#include"opencv2/highgui.hpp"
+#include"opencv2/imgproc.hpp"
+#include"opencv2/photo.hpp"
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include"api_scilab.h"
+ #include"Scierror.h"
+ #include"BOOL.h"
+ #include
+ #include"sciprint.h"
+ #include"../common.h"
+
+ int opencv_makeHDR(char* fname, unsigned long fname_len)
+ {
+
+ // error management variable
+ SciErr sciErr;
+
+ // Checking number of input and output arguments (enviromnet variable, min arguments, max arguments)
+ CheckInputArgument(pvApiCtx, 10, 17);
+ CheckOutputArgument(pvApiCtx, 2, 2);
+
+ // variables required to read argument #1
+ int *piAddr1 = NULL;
+ int intErr1 = 0;
+ double typeOfMethod = 0;
+
+ // variables required to read argument #2
+ int *piAddr2 = NULL;
+ int intErr2 = 0;
+ double num = 0;
+
+ // variables required to read exposure #1
+ int *piAddrExp1 = NULL;
+ int intErrExp1 = 0;
+ double ex1 = 0;
+
+ // variables required to read exposure #2
+ int *piAddrExp2 = NULL;
+ int intErrExp2 = 0;
+ double ex2 = 0;
+
+ // variables required to read exposure #3
+ int *piAddrExp3 = NULL;
+ int intErrExp3 = 0;
+ double ex3 = 0;
+
+ // variables required to read exposure #4
+ int *piAddrExp4 = NULL;
+ int intErrExp4 = 0;
+ double ex4 = 0;
+
+ // variables required to read exposure #5
+ int *piAddrExp5 = NULL;
+ int intErrExp5 = 0;
+ double ex5 = 0;
+
+ // variables required to read exposure #6
+ int *piAddrExp6 = NULL;
+ int intErrExp6 = 0;
+ double ex6 = 0;
+
+ // variables required to read argument #3
+ int *piAddr3 = NULL;
+ int intErr3 = 0;
+ double var1 = 0;
+
+ // variables required to read argument #4
+ int *piAddr4 = NULL;
+ int intErr4 = 0;
+ double var2 = 0;
+
+ // variables required to read argument #5
+ int *piAddr5 = NULL;
+ int intErr5 = 0;
+ double var3 = 0;
+
+ // variables required to read argument #6
+ int *piAddr6 = NULL;
+ int intErr6 = 0;
+ int var4 = false;
+
+ // to get argument #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 1, &piAddr1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr1 = getScalarDouble(pvApiCtx, piAddr1, &typeOfMethod);
+ if(intErr1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 1);
+ return -1;
+ }
+
+ // to get argument #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr2 = getScalarDouble(pvApiCtx, piAddr2, &num);
+ if(intErr2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+
+ Mat img1, img2, img3, img4, img5, img6;
+
+ vector images;
+ vector times;
+ Mat response;
+ Mat hdr, hdr8bit;
+ Mat ldr, ldr8bit;
+ Mat fusion;
+
+ try
+ {
+ // OpenCV functionalities
+
+ if(num == 3)
+ {
+ retrieveImage(img1, 3);
+ retrieveImage(img2, 4);
+ retrieveImage(img3, 5);
+
+ img1.convertTo(img1, CV_8U);
+ img2.convertTo(img2, CV_8U);
+ img3.convertTo(img3, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+
+ // to get exposure #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 6, &piAddrExp1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp1 = getScalarDouble(pvApiCtx, piAddrExp1, &ex1);
+ if(intErrExp1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 6);
+ return -1;
+ }
+
+ // to get exposure #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddrExp2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp2 = getScalarDouble(pvApiCtx, piAddrExp2, &ex2);
+ if(intErrExp2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 7);
+ return -1;
+ }
+
+ // to get exposure #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrExp3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp3 = getScalarDouble(pvApiCtx, piAddrExp3, &ex3);
+ if(intErrExp3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 8);
+ return -1;
+ }
+
+ times.push_back((float)ex1);
+ times.push_back((float)ex2);
+ times.push_back((float)ex3);
+
+ if(typeOfMethod == 1)
+ {
+ // Merge Robertson object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 9);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+ Ptr calibrate = createCalibrateRobertson(var1, var2);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else if(typeOfMethod == 2)
+ {
+ // Merge Devevec object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 9);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarBoolean(pvApiCtx, piAddr6, &var4);
+ if(intErr6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+
+ Ptr calibrate = createCalibrateDebevec(var1, var2, var4);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else if(typeOfMethod == 3)
+ {
+ // Merge Mertens object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 9);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &var3);
+ if(intErr5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+
+ Ptr model = createMergeMertens(var1, var2, var3);
+ model->process(images, ldr);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(ldr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, ldr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else
+ {
+ Scierror(999, "Invalid input for typeOfMethod. \n", 1);
+ }
+ }
+ else if(num == 4)
+ {
+ retrieveImage(img1, 3);
+ retrieveImage(img2, 4);
+ retrieveImage(img3, 5);
+ retrieveImage(img4, 6);
+
+ img1.convertTo(img1, CV_8U);
+ img2.convertTo(img2, CV_8U);
+ img3.convertTo(img3, CV_8U);
+ img4.convertTo(img4, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+ images.push_back(img4);
+
+ // to get exposure #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 7, &piAddrExp1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp1 = getScalarDouble(pvApiCtx, piAddrExp1, &ex1);
+ if(intErrExp1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 7);
+ return -1;
+ }
+
+ // to get exposure #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrExp2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp2 = getScalarDouble(pvApiCtx, piAddrExp2, &ex2);
+ if(intErrExp2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 8);
+ return -1;
+ }
+
+ // to get exposure #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddrExp3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp3 = getScalarDouble(pvApiCtx, piAddrExp3, &ex3);
+ if(intErrExp3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 9);
+ return -1;
+ }
+
+ // to get exposure #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddrExp4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp4 = getScalarDouble(pvApiCtx, piAddrExp4, &ex4);
+ if(intErrExp4)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+
+ times.push_back((float)ex1);
+ times.push_back((float)ex2);
+ times.push_back((float)ex3);
+ times.push_back((float)ex4);
+
+ if(typeOfMethod == 1)
+ {
+ // Merge Robertson object
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+ // to get argument #12
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 12);
+ return -1;
+ }
+
+ Ptr calibrate = createCalibrateRobertson(var1, var2);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else if(typeOfMethod == 2)
+ {
+ // Merge Devevec object
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+ // to get argument #12
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 12);
+ return -1;
+ }
+
+ // to get argument #13
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarBoolean(pvApiCtx, piAddr6, &var4);
+ if(intErr6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+
+ Ptr calibrate = createCalibrateDebevec(var1, var2, var4);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+
+ }
+ else if(typeOfMethod == 3)
+ {
+ // Merge Mertens object
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+ // to get argument #12
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 12);
+ return -1;
+ }
+
+ // to get argument #13
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &var3);
+ if(intErr5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 13);
+ return -1;
+ }
+
+
+ Ptr model = createMergeMertens(var1, var2, var3);
+ model->process(images, ldr);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(ldr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, ldr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else
+ {
+ Scierror(999, "Invalid input for typeOfMethod. \n", 1);
+ }
+ }
+ else if(num == 5)
+ {
+ retrieveImage(img1, 3);
+ retrieveImage(img2, 4);
+ retrieveImage(img3, 5);
+ retrieveImage(img4, 6);
+ retrieveImage(img5, 7);
+
+ img1.convertTo(img1, CV_8U);
+ img2.convertTo(img2, CV_8U);
+ img3.convertTo(img3, CV_8U);
+ img4.convertTo(img4, CV_8U);
+ img5.convertTo(img5, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+ images.push_back(img4);
+ images.push_back(img5);
+
+ // to get exposure #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 8, &piAddrExp1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp1 = getScalarDouble(pvApiCtx, piAddrExp1, &ex1);
+ if(intErrExp1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 8);
+ return -1;
+ }
+
+ // to get exposure #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddrExp2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp2 = getScalarDouble(pvApiCtx, piAddrExp2, &ex2);
+ if(intErrExp2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 9);
+ return -1;
+ }
+
+ // to get exposure #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddrExp3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp3 = getScalarDouble(pvApiCtx, piAddrExp3, &ex3);
+ if(intErrExp3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+ // to get exposure #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddrExp4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp4 = getScalarDouble(pvApiCtx, piAddrExp4, &ex4);
+ if(intErrExp4)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+ // to get exposure #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddrExp5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp5 = getScalarDouble(pvApiCtx, piAddrExp5, &ex5);
+ if(intErrExp5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 12);
+ return -1;
+ }
+
+
+ times.push_back((float)ex1);
+ times.push_back((float)ex2);
+ times.push_back((float)ex3);
+ times.push_back((float)ex4);
+ times.push_back((float)ex5);
+
+ if(typeOfMethod == 1)
+ {
+ // Merge Robertson object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 13);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 14, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 14);
+ return -1;
+ }
+
+ Ptr calibrate = createCalibrateRobertson(var1, var2);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+
+ }
+ else if(typeOfMethod == 2)
+ {
+ // Merge Devevec object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 13);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 14, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 14);
+ return -1;
+ }
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 15, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarBoolean(pvApiCtx, piAddr6, &var4);
+ if(intErr6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 15);
+ return -1;
+ }
+
+
+ Ptr calibrate = createCalibrateDebevec(var1, var2, var4);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else if(typeOfMethod == 3)
+ {
+ // Merge Mertens object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 13);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 14, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 14);
+ return -1;
+ }
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 15, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &var3);
+ if(intErr5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 15);
+ return -1;
+ }
+
+
+ Ptr model = createMergeMertens(var1, var2, var3);
+ model->process(images, ldr);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(ldr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, ldr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else
+ {
+ Scierror(999, "Invalid input for typeOfMethod. \n", 1);
+ }
+ }
+ else if(num == 6)
+ {
+ retrieveImage(img1, 3);
+ retrieveImage(img2, 4);
+ retrieveImage(img3, 5);
+ retrieveImage(img4, 6);
+ retrieveImage(img5, 7);
+ retrieveImage(img6, 8);
+
+ img1.convertTo(img1, CV_8U);
+ img2.convertTo(img2, CV_8U);
+ img3.convertTo(img3, CV_8U);
+ img4.convertTo(img4, CV_8U);
+ img5.convertTo(img5, CV_8U);
+ img6.convertTo(img6, CV_8U);
+
+ images.push_back(img1);
+ images.push_back(img2);
+ images.push_back(img3);
+ images.push_back(img4);
+ images.push_back(img5);
+ images.push_back(img6);
+
+ // to get exposure #1
+ sciErr = getVarAddressFromPosition(pvApiCtx, 9, &piAddrExp1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp1 = getScalarDouble(pvApiCtx, piAddrExp1, &ex1);
+ if(intErrExp1)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 9);
+ return -1;
+ }
+
+ // to get exposure #2
+ sciErr = getVarAddressFromPosition(pvApiCtx, 10, &piAddrExp2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp2 = getScalarDouble(pvApiCtx, piAddrExp2, &ex2);
+ if(intErrExp2)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 10);
+ return -1;
+ }
+
+ // to get exposure #3
+ sciErr = getVarAddressFromPosition(pvApiCtx, 11, &piAddrExp3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp3 = getScalarDouble(pvApiCtx, piAddrExp3, &ex3);
+ if(intErrExp3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 11);
+ return -1;
+ }
+
+ // to get exposure #4
+ sciErr = getVarAddressFromPosition(pvApiCtx, 12, &piAddrExp4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp4 = getScalarDouble(pvApiCtx, piAddrExp4, &ex4);
+ if(intErrExp4)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 12);
+ return -1;
+ }
+
+ // to get exposure #5
+ sciErr = getVarAddressFromPosition(pvApiCtx, 13, &piAddrExp5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp5 = getScalarDouble(pvApiCtx, piAddrExp5, &ex5);
+ if(intErrExp5)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 13);
+ return -1;
+ }
+
+ // to get exposure #6
+ sciErr = getVarAddressFromPosition(pvApiCtx, 14, &piAddrExp6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErrExp6 = getScalarDouble(pvApiCtx, piAddrExp6, &ex6);
+ if(intErrExp6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 14);
+ return -1;
+ }
+
+
+
+ times.push_back((float)ex1);
+ times.push_back((float)ex2);
+ times.push_back((float)ex3);
+ times.push_back((float)ex4);
+ times.push_back((float)ex5);
+ times.push_back((float)ex6);
+
+ if(typeOfMethod == 1)
+ {
+ // Merge Robertson object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 15, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 15);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 16, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 16);
+ return -1;
+ }
+
+ Ptr calibrate = createCalibrateRobertson(var1, var2);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+
+
+ }
+ else if(typeOfMethod == 2)
+ {
+ // Merge Devevec object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 15, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 15);
+ return -1;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 16, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 16);
+ return -1;
+ }
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 17, &piAddr6);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr6 = getScalarBoolean(pvApiCtx, piAddr6, &var4);
+ if(intErr6)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 17);
+ return -1;
+ }
+
+
+ Ptr calibrate = createCalibrateDebevec(var1, var2, var4);
+ calibrate->process(images, response, times);
+
+ Ptr model = createMergeRobertson();
+ model->process(images, hdr, times, response);
+
+ Ptr tonemap = createTonemap();
+ tonemap->process(hdr, ldr);
+
+ hdr = hdr * 255;
+ hdr.convertTo(hdr8bit, CV_8U);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(hdr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, hdr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+ }
+ else if(typeOfMethod == 3)
+ {
+ // Merge Mertens object
+
+ // to get argument #9
+ sciErr = getVarAddressFromPosition(pvApiCtx, 15, &piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr3 = getScalarDouble(pvApiCtx, piAddr3, &var1);
+ if(intErr3)
+ {
+ return intErr3;
+ }
+
+ // to get argument #10
+ sciErr = getVarAddressFromPosition(pvApiCtx, 16, &piAddr4);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr4 = getScalarDouble(pvApiCtx, piAddr4, &var2);
+ if(intErr3)
+ {
+ return intErr3;
+ }
+
+ // to get argument #11
+ sciErr = getVarAddressFromPosition(pvApiCtx, 17, &piAddr5);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr5 = getScalarDouble(pvApiCtx, piAddr5, &var3);
+ if(intErr5)
+ {
+ return intErr5;
+ }
+
+
+ Ptr model = createMergeMertens(var1, var2, var3);
+ model->process(images, ldr);
+
+ ldr = ldr*255;
+ ldr.convertTo(ldr8bit, CV_8U);
+
+ // to return the output HDR image
+ string tempstring1 = type2str(ldr8bit.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, ldr8bit, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ // to return the output LDR image
+ string tempstring2 = type2str(ldr8bit.type());
+ char *checker2;
+ checker2 = (char *)malloc(tempstring2.size() + 1);
+ memcpy(checker2, tempstring2.c_str(), tempstring2.size() + 1);
+ returnImage(checker2, ldr8bit, 2);
+ free(checker2);
+ AssignOutputVariable(pvApiCtx, 2) = nbInputArgument(pvApiCtx) + 2;
+
+
+ }
+ else
+ {
+ Scierror(999, "Invalid input for typeOfMethod. \n", 1);
+ }
+ }
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+}
+
+
+
diff --git a/sci_gateway/opencv_ntsc2rgb.cpp b/sci_gateway/opencv_ntsc2rgb.cpp
new file mode 100644
index 0000000..d427858
--- /dev/null
+++ b/sci_gateway/opencv_ntsc2rgb.cpp
@@ -0,0 +1,190 @@
+/* ==================================================================== */
+/* Author :Tess Zacharias, Ashish Manatosh Barik */
+/* ==================================================================== */
+/* overloaded function */
+/* Syntax : return_matrix_of_3_rgbvalues=ntsc2rgb(matrix_of_3_yiqvalues a) */
+/* ==================================================================== */
+/* ==================================================================== */
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "../common.h"
+ #define REF_X .95047; // Observer= 2°, Illuminant= D65
+ #define REF_Y 1.00000;
+ #define REF_Z 1.08883;
+
+ struct Color
+ {
+ float R,G,B,Y,I,Q;
+ };
+
+
+ Color ntsc2rgb1(float Y, float I, float Q)
+ {
+
+ // float y=Y/255.0;
+ // float i=I/255.0;
+ // float q=Q/255.0;
+ float r= 1.000*Y+0.956*I+0.621*Q;
+ float g =1.000*Y-0.272*I-0.647*Q;
+ float b =1.000*Y-1.106*I+1.703*Q;
+
+
+ Color rgb;
+ rgb.R= r;
+ rgb.G= g;
+ rgb.B= b;
+
+ return rgb;
+
+ }
+
+ int opencv_ntsc2rgb(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+ //variable info
+ int iRows = 0;
+ int iCols = 0;
+ int piRows = 0;
+ int piCols = 0;
+ int* piAddr=NULL;
+ int* piAddr1=NULL;
+ int* piAddr3=NULL;
+ double *value=NULL;
+ int* piLen = NULL;
+ char **val;
+ int i,j;
+ int error;
+ Color out;
+ double check;
+ Mat img,dst;
+ float Y,I,Q;
+ int *piAddrNew = NULL;
+ //checking output argument
+ CheckOutputArgument(pvApiCtx,1,1);
+ //checking if number of input arguments are 3
+
+ int k=0;
+ double *pstDataR = NULL;
+ double *pstDataG = NULL;
+ double *pstDataB = NULL;
+
+ sciErr = getVarAddressFromPosition(pvApiCtx,1,&piAddr);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the R values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 1, &iRows, &iCols, &pstDataR);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the G values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 2, &iRows, &iCols, &pstDataG);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the B values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 3, &iRows, &iCols, &pstDataB);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ int m=0;
+ double *r,*g,*b;
+
+ r=(double *)malloc(sizeof(double)*iRows*iCols);
+ g=(double *)malloc(sizeof(double)*iRows*iCols);
+ b=(double *)malloc(sizeof(double)*iRows*iCols);
+
+ try
+ {
+
+ for(int i=0;i
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "../common.h"
+ #define REF_X .95047; // Observer= 2°, Illuminant= D65
+ #define REF_Y 1.00000;
+ #define REF_Z 1.08883;
+
+ struct Color
+ {
+ float R,G,B,X,Y,Z,Ls,as,bs,r,g,b;
+ };
+
+ Color rgb2lab1(float R, float G, float B)
+ {
+
+ float r = R/255.f; //R 0..1
+ float g = G/255.f; //G 0..1
+ float b = B/255.f; //B 0..1
+ float eps = 216.f/24389.f;
+ float k = 24389.f/27.f;
+ float fx,fy,fz;
+
+ if (r <= 0.04045)
+ r = r/12.92;
+ else
+ r = (float)pow((r+0.055)/1.055,2.4);
+ if (g <= 0.04045)
+ g = g/12.92;
+ else
+ g = (float)pow((g+0.055)/1.055,2.4);
+ if (b <= 0.04045)
+ b = b/12.92;
+ else
+ b = (float)pow((b+0.055)/1.055,2.4);
+
+ float X = 0.4124*r + 0.3576*g + 0.1805*b;
+ float Y = 0.2126*r + 0.7152 *g + 0.0722 *b;
+ float Z = 0.0193*r + 0.1192*g + 0.9505 *b;
+ float xr = X/REF_X;
+ float yr = Y/REF_Y;
+ float zr = Z/REF_Z;
+
+ if ( xr > eps )
+ fx = (float)pow(xr, 1/3.);
+ else
+ fx = (float) ((k * xr + 16.) / 116.);
+ if ( yr > eps )
+ fy = (float)pow(yr, 1/3.);
+ else
+ fy = (float) ((k * yr + 16.) / 116.);
+ if ( zr > eps )
+ fz = (float)pow(zr, 1/3.);
+ else
+ fz = (float) ((k * zr + 16.) / 116);
+
+ Color lab;
+ lab.Ls= ( 116 * fy ) - 16;
+ lab.as= 500*(fx-fy);
+ lab.bs= 200*(fy-fz);
+
+ return lab;
+
+ }
+
+ Color rgb2lab2(float R, float G, float B,char *s)
+ {
+ double REF_X1;
+ double REF_Y1;
+ double REF_Z1;
+
+ if(!strcmp(s,"a"))
+ {
+ REF_X1=1.0985;
+ REF_Y1= 1.0000;
+ REF_Z1=0.3558;
+ }
+ else if(!strcmp(s,"c"))
+ {
+ REF_X1=0.9807;
+ REF_Y1= 1.0000;
+ REF_Z1=1.1822;
+ }
+ else if(!strcmp(s,"d50"))
+ {
+ REF_X1=0.9642;
+ REF_Y1= 1.0000;
+ REF_Z1=0.8251;
+ }
+ else if(!strcmp(s,"d65"))
+ {
+ REF_X1=0.9504;
+ REF_Y1= 1.0000;
+ REF_Z1= 1.0888;
+ }
+ else if(!strcmp(s,"icc"))
+ {
+ REF_X1=0.962;
+ REF_Y1=1.000;
+ REF_Z1= 0.8249;
+ }
+ else if(!strcmp(s,"e"))
+ {
+ REF_X1=1.000;
+ REF_Y1=1.000;
+ REF_Z1=1.000;
+ }
+
+ float eps = 216.f/24389.f;
+ float k = 24389.f/27.f;
+ float fx,fy,fz,X,Y,Z,xr,yr,zr;
+ float r = R/255.f; //R 0..1
+ float g = G/255.f; //G 0..1
+ float b = B/255.f; //B 0..1
+
+ if (r <= 0.04045)
+ r = r/12.92;
+ else
+ r = (float)pow((r+0.055)/1.055,2.4);
+ if (g <= 0.04045)
+ g = g/12.92;
+ else
+ g = (float)pow((g+0.055)/1.055,2.4);
+ if (b <= 0.04045)
+ b = b/12.92;
+ else
+ b = (float)pow((b+0.055)/1.055,2.4);
+
+ X = 0.4124*r + 0.3576*g + 0.1805*b;
+ Y = 0.2126*r + 0.7152 *g + 0.0722 *b;
+ Z = 0.0193*r + 0.1192*g + 0.9505 *b;
+ xr = X/REF_X1;
+ yr = Y/REF_Y1;
+ zr = Z/REF_Z1;
+
+ if ( xr > eps )
+ fx = (float)pow(xr, 1/3.);
+ else
+ fx = (float) ((k * xr + 16.) / 116.);
+ if ( yr > eps )
+ fy = (float)pow(yr, 1/3.);
+ else
+ fy = (float) ((k * yr + 16.) / 116.);
+ if ( zr > eps )
+ fz = (float)pow(zr, 1/3.);
+ else
+ fz = (float) ((k * zr + 16.) / 116);
+
+ Color lab;
+ lab.Ls= ( 116 * fy ) - 16;
+ lab.as= 500*(fx-fy);
+ lab.bs= 200*(fy-fz);
+
+ return lab;
+
+ }
+
+ int opencv_rgb2lab(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+ //variable info
+ int iRows = 0;
+ int iCols = 0;
+ int piRows = 0;
+ int piCols = 0;
+ int* piAddr2=NULL;
+ int* piAddr1=NULL;
+ int* piAddr3=NULL;
+ int* piaddr=NULL;
+ char *value=NULL;
+ int* piLen = NULL;
+ char **val;
+ int i,j;
+ int error;
+ Color out;
+ double check;
+ Mat img;
+ float R,G,B;
+ int *piAddrNew = NULL;
+
+ CheckOutputArgument(pvApiCtx,1,1);
+
+ int m=0;
+ retrieveImage(img,1);
+ double *r1,*g1,*b1;
+
+ r1=(double *)malloc(sizeof(double)*img.rows*img.cols);
+ g1=(double *)malloc(sizeof(double)*img.rows*img.cols);
+ b1=(double *)malloc(sizeof(double)*img.rows*img.cols);
+
+ try
+ {
+
+ for(int i=0;i(i,j);
+ R= intensity.val[2];
+ G= intensity.val[1];
+ B= intensity.val[0];
+ out=rgb2lab1(R,G,B);
+ r1[m]=out.Ls;
+ g1[m]=out.as;
+ b1[m++]=out.bs;
+ }
+ }
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ //creating the list that will be passed to the Scilab enviroment
+ sciErr = createList(pvApiCtx, nbInputArgument(pvApiCtx) + 1, 3, &piAddrNew);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Adding the R value matrix to the list
+ sciErr = createMatrixOfDoubleInList(pvApiCtx, nbInputArgument(pvApiCtx)+1 , piAddrNew, 1, img.rows,img.cols, r1);
+ free(r1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Adding the G value matrix to the list
+ sciErr = createMatrixOfDoubleInList(pvApiCtx, nbInputArgument(pvApiCtx)+1 , piAddrNew, 2, img.rows, img.cols, g1);
+ free(g1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Adding the B value matrix to the list
+ sciErr = createMatrixOfDoubleInList(pvApiCtx, nbInputArgument(pvApiCtx)+1 , piAddrNew, 3, img.rows, img.cols, b1);
+ free(b1);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+ return 0;
+ }
+}
diff --git a/sci_gateway/opencv_ssim.cpp b/sci_gateway/opencv_ssim.cpp
new file mode 100644
index 0000000..d6f5a89
--- /dev/null
+++ b/sci_gateway/opencv_ssim.cpp
@@ -0,0 +1,106 @@
+/********************************************************
+Author: Dhruti Shah
+********************************************************
+Usage: ssim_value = ssim (image, reference)
+Example:
+ val = ssim(image, ref)
+********************************************************/
+
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+using namespace cv;
+using namespace std;
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "sciprint.h"
+ #include "../common.h"
+
+ int opencv_ssim(char *fname, unsigned long fname_len)
+ {
+
+ SciErr sciErr;
+ int intErr = 0;
+
+ //checking input argument
+ CheckInputArgument(pvApiCtx, 2, 2);
+ CheckOutputArgument(pvApiCtx, 1, 1) ;
+
+ // Get the input image from the Scilab environment
+ Mat image;
+ retrieveImage(image, 1);
+
+ Mat ref;
+ retrieveImage(ref, 2);
+
+ double mean_img=0, mean_ref=0, std_img=0, std_ref=0, covar=0, mean_xy=0;
+ int no_of_pixels = image.rows * image.cols;
+ double value;
+
+ try
+ {
+ for (int i = 0; i < image.rows; ++i)
+ {
+ for (int j = 0; j < image.cols; ++j)
+ {
+ mean_img += image.at(i,j);
+ mean_ref += ref.at(i,j);
+ mean_xy += (image.at(i,j)*ref.at(i,j));
+ }
+ }
+
+
+ mean_img = mean_img/no_of_pixels;
+ mean_ref = mean_ref/no_of_pixels;
+ mean_xy = mean_xy/no_of_pixels;
+
+ covar = mean_xy - (mean_img * mean_ref);
+
+ for(int m = 0; m < image.rows; ++m)
+ {
+ for(int n = 0; n < image.cols; ++n)
+ {
+ std_img += ( (image.at(m,n) - mean_img) * (image.at(m,n) - mean_img) );
+ std_ref += ( (ref.at(m,n) - mean_ref) * (ref.at(m,n) - mean_ref) );
+ }
+ }
+
+ std_ref = std_ref/no_of_pixels;
+ std_ref = sqrt(std_ref);
+
+ std_img = std_img/no_of_pixels;
+ std_img = sqrt(std_img);
+
+
+ value = ( (2*mean_img*mean_ref) + 0.0001 ) * ( (2*covar) + 0.0009 );
+ value = value / ( ((mean_img*mean_img) + (mean_ref*mean_ref) + 0.0001) * ((std_img*std_img) + (std_ref*std_ref) + 0.0009) );
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+
+ intErr = createScalarDouble(pvApiCtx, nbInputArgument(pvApiCtx) + 1,value);
+ if(intErr)
+ {
+ return intErr;
+ }
+
+ //Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+ //Returning the Output Variables as arguments to the Scilab environment
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+
+ }
+/* ==================================================================== */
+}
diff --git a/sci_gateway/opencv_wiener2.cpp b/sci_gateway/opencv_wiener2.cpp
new file mode 100644
index 0000000..f7de4fb
--- /dev/null
+++ b/sci_gateway/opencv_wiener2.cpp
@@ -0,0 +1,214 @@
+/*////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+Created By: Riddhish Bhalodia
+Date: 14th October 2015
+
+Usage:
+
+void wiener2(Mat img,int n, double sigma)
+
+1) img : Input image, grayscale only
+2) n : filt size
+3) sigma : noise var, if sigma = 0 then the variance is estimated from data
+
+*/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+using namespace cv;
+using namespace std;
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "sciprint.h"
+ #include "../common.h"
+
+ int opencv_wiener2(char *fname, unsigned long fname_len)
+ {
+
+ SciErr sciErr;
+ int intErr = 0;
+ int iRows=0,iCols=0;
+ int *piAddr = NULL;
+ int *piAddrNew = NULL;
+ int *piAddr2 = NULL;
+ int *piAddr3 = NULL;
+ double n;
+ double sigma;
+
+
+ Mat img;
+
+ //checking input argument
+ CheckInputArgument(pvApiCtx, 3, 3);
+ CheckOutputArgument(pvApiCtx, 1, 1) ;
+
+ retrieveImage(img, 1);
+ //for value of the scale factor
+
+ sciErr = getVarAddressFromPosition(pvApiCtx,2,&piAddr2);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr2, &n);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 2);
+ return -1;
+ }
+
+ sciErr = getVarAddressFromPosition(pvApiCtx,3,&piAddr3);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ intErr = getScalarDouble(pvApiCtx, piAddr3, &sigma);
+ if(intErr)
+ {
+ Scierror(999, "gateway crashed abruptly while reading input argument #%d.", 3);
+ return -1;
+ }
+
+ // The error checks for the function
+ if(n != round(n) || n<=0 || int(n)%2==0)
+ {
+ Scierror(999, "The value of n must be an odd integer \n");
+ return 0;
+ }
+
+ Mat out = Mat::zeros(img.size(),img.type());
+ sigma=255*255*sigma;
+
+ if(img.channels() !=1)
+ {
+ Scierror(999, "Grayscale images only!");
+ return 0;
+ }
+
+ try
+ {
+
+ int i_min,i_max,j_min,j_max;
+ int w = (n-1)/2;
+
+ if(sigma==0)
+ {
+ double noise_est = 0;
+
+ for(int i=0;i(ii,jj));
+
+ }
+ }
+
+ mean = mean/(n*n);
+
+ double var=0;
+
+ for(int ii=i_min;ii<(i_max+1);ii++)
+ {
+ for(int jj=j_min;jj<(j_max+1);jj++)
+ {
+ var = var + ((double)img.at(ii,jj))*((double)img.at(ii,jj));
+ }
+ }
+
+ var = var/(n*n);
+ var = var - mean*mean;
+ noise_est = noise_est + var;
+ }
+ }
+
+ noise_est = noise_est/(img.rows*img.cols);
+ sigma = noise_est;
+ }
+
+ for(int i=0;i(ii,jj));
+
+ }
+ }
+
+ mean = mean/(n*n);
+
+ // find variance of the patch
+ double var=0;
+ for(int ii=i_min;ii<(i_max+1);ii++)
+ {
+ for(int jj=j_min;jj<(j_max+1);jj++)
+ {
+ var = var + ((double)img.at(ii,jj))*((double)img.at(ii,jj));
+ }
+ }
+ var = var/(n*n);
+ var = var - mean*mean;
+
+ double temp;
+ double sum=0;
+
+ temp = mean + (fmax(0,(var - sigma))/fmax(var,sigma))*(((double)img.at(i,j)) - mean);
+ out.at(i,j) = temp;
+ }
+ }
+
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+
+ // out is the return image
+ string tempstring = type2str(out.type());
+ char *checker;
+ checker = (char *)malloc(tempstring.size() + 1);
+ memcpy(checker, tempstring.c_str(), tempstring.size() + 1);
+ returnImage(checker,out,1);
+ free(checker);
+ //Assigning the list as the Output Variable
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+
+ }
+}
diff --git a/sci_gateway/opencv_xyz2double.cpp b/sci_gateway/opencv_xyz2double.cpp
new file mode 100644
index 0000000..9403d1e
--- /dev/null
+++ b/sci_gateway/opencv_xyz2double.cpp
@@ -0,0 +1,90 @@
+// Authors
+// Ashish Manatosh Barik, NIT Rourkela
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "../common.h"
+
+ int opencv_xyz2double(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+
+ // to read input data as a color space
+ Mat img;
+
+ // to get the input array as a list of rgb values
+ retrieveImage(img, 1);
+
+ if(img.cols > 3 || img.cols < 3)
+ {
+ Scierror(999, "inpput should be a M by 3 or M by N by 3 \n", 1);
+ return -1;
+ }
+
+ int type = img.type();
+ if(type != 2)
+ {
+ // if input is not uint16, no conversion takes places.
+
+ // to return the output
+ string tempstring1 = type2str(img.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, img, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+
+ // output
+ Mat image;
+
+ try
+ {
+ // OpenCV functionalities
+
+ // conversion to double precision with proper scaling
+ img.convertTo(image, CV_64FC3, 1.0/32768);
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // to return the output
+ string tempstring1 = type2str(image.type());
+ char *checker1;
+ checker1 = (char *)malloc(tempstring1.size() + 1);
+ memcpy(checker1, tempstring1.c_str(), tempstring1.size() + 1);
+ returnImage(checker1, image, 1);
+ free(checker1);
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+
+ return 0;
+ }
+}
+
diff --git a/sci_gateway/opencv_xyz2lab.cpp b/sci_gateway/opencv_xyz2lab.cpp
new file mode 100644
index 0000000..ab9ea9d
--- /dev/null
+++ b/sci_gateway/opencv_xyz2lab.cpp
@@ -0,0 +1,213 @@
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+
+using namespace cv;
+using namespace std;
+
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "../common.h"
+ #define REF_X .9504; // Observer= 2°, Illuminant= D65
+ #define REF_Y 1.00000;
+ #define REF_Z 1.08883;
+
+ struct Color
+ {
+ float R,G,B,X,Y,Z,L,a,b;
+ };
+
+ // Convert one CIE XYZ triplet to CIE L*a*b* (Observer 2°, Illuminant D65,
+ // whitepoint taken from the REF_X/REF_Y/REF_Z macros above).
+ // Only the L, a, b fields of the returned Color are populated.
+ // NOTE(review): the REF_* #defines end with ';' — the expressions below
+ // compile only because each expansion terminates a simple initializer;
+ // confirm before reusing those macros in larger expressions.
+ Color xyz2lab3(float X,float Y, float Z)
+ {
+ float eps = 216.f/24389.f; // CIE threshold (6/29)^3
+ float k = 24389.f/27.f; // CIE slope constant (29/3)^3
+ float fx,fy,fz,Ls,as,bs;
+ // normalise by the reference white
+ float xr=X/REF_X;
+ float yr=Y/REF_Y;
+ float zr=Z/REF_Z;
+
+ // f(t): cube root above the CIE threshold, linear segment below
+ if (xr > eps)
+ fx = (float)pow(xr, 1/3.);
+ else
+ fx = (float) ((k * xr + 16.) / 116.);
+ if (yr > eps)
+ fy = (float)pow(yr, 1/3.);
+ else
+ fy = (float) ((k * yr + 16.) / 116.);
+ if (zr > eps)
+ fz = (float)pow(zr, 1/3.);
+ else
+ fz = (float) ((k * zr + 16.) / 116);
+
+ // standard CIE L*a*b* from the companded values
+ Ls = ( 116 * fy ) - 16;
+ as = 500*(fx-fy);
+ bs = 200*(fy-fz);
+
+ Color lab;
+ lab.L =Ls;
+ lab.a =as;
+ lab.b =bs;
+ return lab;
+ }
+
+
+ int opencv_xyz2lab(char *fname, unsigned long fname_len)
+ {
+ // Error management variable
+ SciErr sciErr;
+ //variable info
+ int iRows=0;
+ int iCols=0;
+ int iRows1=0;
+ int iCols1=0;
+ int iRows2=0;
+ int iCols2=0;
+ int iRows3=0;
+ int iCols3=0;
+ int piRows=0;
+ int piCols=0;
+ int* piAddr=NULL;
+ int* piAddr1=NULL;
+ int* piAddr3=NULL;
+ double *value=NULL;
+ double *pstDataR = NULL;
+ double *pstDataG = NULL;
+ double *pstDataB = NULL;
+ int* piLen= NULL;
+ char **val;
+ int i,j;
+ int error;
+ Color out;
+ double check;
+ Mat img,dst;
+ float R,G,B;
+ int *piAddrNew = NULL;
+ double *r,*g,*b;
+ int k=0;
+ //checking output argument
+ CheckOutputArgument(pvApiCtx,1,1);
+
+ //retrive address of the list
+ sciErr = getVarAddressFromPosition(pvApiCtx,1,&piAddr);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the R values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 1, &iRows, &iCols, &pstDataR);
+ if(iRows*iCols != 1 && iRows*iCols %3 != 0)
+ {
+ Scierror(999, " Color values to convert, specified as a P-by-3 matrix of color values (one color per row), an M-by-N-by-3 image array.");
+ return -1;
+ }
+ if(sciErr.iErr)
+ {
+ Scierror(999, " Color values to convert, specified as a P-by-3 matrix of color values (one color per row), an M-by-N-by-3 image array.");
+ return -1;
+ }
+ //retrive the matrix of the G values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 2, &iRows, &iCols, &pstDataG);
+ if(iRows*iCols != 1 && iRows*iCols %3 != 0)
+ {
+ Scierror(999, " Color values to convert, specified as a P-by-3 matrix of color values (one color per row), an M-by-N-by-3 image array.");
+ return -1;
+ }
+ if(sciErr.iErr)
+ {
+ Scierror(999, " Color values to convert, specified as a P-by-3 matrix of color values (one color per row), an M-by-N-by-3 image array.");
+ return -1;
+ }
+ //retrive the matrix of the B values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 3, &iRows, &iCols, &pstDataB);
+ if(iRows*iCols != 1 && iRows*iCols %3 != 0)
+ {
+ Scierror(999, " Color values to convert, specified as a P-by-3 matrix of color values (one color per row), an M-by-N-by-3 image array.");
+ return -1;
+ }
+ if(sciErr.iErr)
+ {
+ Scierror(999, " Color values to convert, specified as a P-by-3 matrix of color values (one color per row), an M-by-N-by-3 image array.");
+ return -1;
+ }
+
+ //dynamically allocating memory to varibles
+ r=(double *)malloc(sizeof(double)*iRows*iCols);
+ g=(double *)malloc(sizeof(double)*iRows*iCols);
+ b=(double *)malloc(sizeof(double)*iRows*iCols);
+
+ try
+ {
+ // Conversion
+
+ for(int i=0;i
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include
+#include
+#include
+using namespace cv;
+using namespace std;
+extern "C"
+{
+ #include "api_scilab.h"
+ #include "Scierror.h"
+ #include "BOOL.h"
+ #include
+ #include "../common.h"
+ #define REF_X .95047; // Observer= 2°, Illuminant= D65
+ #define REF_Y 1.00000;
+ #define REF_Z 1.08883;
+
+ struct Color
+ {
+ float X,Y,Z,R,G,B;
+ };
+
+ // Convert one CIE XYZ triplet to sRGB: the standard D65 linear
+ // XYZ->RGB matrix followed by sRGB gamma companding.
+ // Only the R, G, B fields of the returned Color are populated.
+ Color xyz2rgb1(float X, float Y, float Z)
+ {
+
+ float x = X;
+ float y = Y;
+ float z = Z;
+ // linear transform (sRGB / D65 matrix coefficients)
+ float R1 = 3.2404542*x-1.5371385*y-0.4985314*z;
+ float G1 =-0.9692660*x+1.8760108*y+0.0415560 *z;
+ float B1 = 0.0556434*x-0.2040259*y+1.0572252*z;
+
+ // sRGB companding: gamma curve above 0.0031308, linear segment below
+ if( R1 > 0.0031308 )
+ R1 = 1.055 * ( pow( R1, 1./2.4 ) ) - 0.055;
+ else
+ R1 = 12.92 * R1;
+ if( G1 > 0.0031308 )
+ G1 = 1.055 * ( pow( G1, 1./2.4 ) ) - 0.055;
+ else
+ G1 = 12.92 * G1;
+ if( B1 > 0.0031308 )
+ B1 = 1.055 * ( pow( B1, 1./2.4 ) ) - 0.055;
+ else
+ B1= 12.92 * B1;
+
+ Color rgb;
+ rgb.R= R1;
+ rgb.G= G1;
+ rgb.B= B1;
+ return rgb;
+
+ }
+
+ // Convert one CIE XYZ triplet to sRGB with a whitepoint selected by
+ // name: "a", "c", "d50", "d65", "icc" or "e".
+ // NOTE(review): REF_X1/REF_Y1/REF_Z1 are chosen from the table but never
+ // used afterwards — the transform below is the fixed D65 matrix; confirm
+ // whether chromatic adaptation to the selected whitepoint was intended.
+ // NOTE(review): if s matches none of the names, the REF_*1 locals stay
+ // uninitialized (currently harmless only because they are unused).
+ Color xyz2rgb2(float X, float Y, float Z,char *s)
+ {
+ double REF_X1;
+ double REF_Y1;
+ double REF_Z1;
+
+ // whitepoint lookup table (Observer 2°)
+ if(!strcmp(s,"a"))
+ {
+ REF_X1=1.0985;
+ REF_Y1= 1.0000;
+ REF_Z1=0.3558;
+ }
+ else if(!strcmp(s,"c"))
+ {
+ REF_X1=0.9807;
+ REF_Y1= 1.0000;
+ REF_Z1=1.1822;
+ }
+ else if(!strcmp(s,"d50"))
+ {
+ REF_X1=0.9642;
+ REF_Y1= 1.0000;
+ REF_Z1=0.8251;
+ }
+ else if(!strcmp(s,"d65"))
+ {
+ REF_X1=0.9504;
+ REF_Y1= 1.0000;
+ REF_Z1= 1.0888;
+ }
+ else if(!strcmp(s,"icc"))
+ {
+ REF_X1=0.962;
+ REF_Y1=1.000;
+ REF_Z1= 0.8249;
+ }
+ else if(!strcmp(s,"e"))
+ {
+ REF_X1=1.000;
+ REF_Y1=1.000;
+ REF_Z1=1.000;
+ }
+ float x = X;
+ float y = Y;
+ float z = Z;
+ // linear XYZ -> RGB (rounded sRGB / D65 matrix coefficients)
+ float R1 = 3.2406*x-1.5372*y-0.4986*z;
+ float G1 =-0.9689*x+1.8758*y+0.0415 *z;
+ float B1 = 0.0557*x-0.2040*y+1.0570*z;
+ // sRGB companding: gamma curve above 0.0031308, linear segment below
+ if( R1 > 0.0031308 )
+ R1 = 1.055 * ( pow( R1, 1./2.4 ) ) - 0.055;
+ else
+ R1 = 12.92 * R1;
+ if( G1 > 0.0031308 )
+ G1 = 1.055 * ( pow( G1, 1./2.4 ) ) - 0.055;
+ else
+ G1 = 12.92 * G1;
+ if( B1 > 0.0031308 )
+ B1 = 1.055 * ( pow( B1, 1./2.4 ) ) - 0.055;
+ else
+ B1= 12.92 * B1;
+
+
+ Color rgb;
+ rgb.R= R1;
+ rgb.G= G1;
+ rgb.B= B1;
+ return rgb;
+
+ }
+
+ // Scilab gateway: xyz2rgb — convert a Scilab list of three matrices
+ // (entries 1..3 = X, Y, Z planes) to RGB via xyz2rgb1, returning a
+ // 3-entry list of R, G, B matrices of the same iRows x iCols shape.
+ int opencv_xyz2rgb(char *fname, unsigned long fname_len)
+ {
+
+ // Error management variable
+ SciErr sciErr;
+ //variable info
+
+ int iRows = 0;
+ int iCols = 0;
+ int piRows = 0;
+ int piCols = 0;
+ int* piAddr=NULL;
+ int* piAddr1=NULL;
+ int* piAddr3=NULL;
+ double *value=NULL;
+ int* piLen = NULL;
+ char **val;
+ int i,j;
+ int error;
+ Color out;
+ double check;
+ Mat img,dst;
+ float X,Y,Z;
+ int *piAddrNew = NULL;
+
+ //checking output argument
+ CheckOutputArgument(pvApiCtx,1,1);
+
+ int k=0;
+
+ // raw channel data borrowed from the input list (not owned)
+ double *pstDataR = NULL;
+ double *pstDataG = NULL;
+ double *pstDataB = NULL;
+
+ //retrive address of the list
+ sciErr = getVarAddressFromPosition(pvApiCtx,1,&piAddr);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the R values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 1, &iRows, &iCols, &pstDataR);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the G values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 2, &iRows, &iCols, &pstDataG);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive address of the list
+ // NOTE(review): this second address lookup is redundant — piAddr is
+ // unchanged since the lookup above
+ sciErr = getVarAddressFromPosition(pvApiCtx,1,&piAddr);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //retrive the matrix of the B values
+ sciErr = getMatrixOfDoubleInList(pvApiCtx, piAddr, 3, &iRows, &iCols, &pstDataB);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+ int m=0;
+ double *r,*g,*b;
+
+ // per-channel output buffers; ownership passes to free() right after
+ // each buffer is copied into the returned list below
+ // NOTE(review): malloc results are not checked for NULL
+ r=(double *)malloc(sizeof(double)*iRows*iCols);
+ g=(double *)malloc(sizeof(double)*iRows*iCols);
+ b=(double *)malloc(sizeof(double)*iRows*iCols);
+
+ try
+ {
+ // conversion
+
+ // NOTE(review): the loop header below is garbled in this patch
+ // (text truncated mid-line during extraction); the surviving body
+ // converts each (X,Y,Z) element via xyz2rgb1 into r/g/b — recover
+ // the original loop bounds from the upstream source before merging
+ for(int i=0;i(i,j);
+ X= (pstDataR[k]);
+ Y= (pstDataG[k]);
+ Z= (pstDataB[k++]);
+
+ out=xyz2rgb1(X,Y,Z);
+
+ r[m]=(out.R);
+ g[m]=(out.G);
+ b[m++]=(out.B);
+
+ }
+ }
+ }
+ catch(Exception& e)
+ {
+ const char* err=e.what();
+ Scierror(999, "%s", err);
+ }
+
+ // creating a list
+ sciErr = createList(pvApiCtx, nbInputArgument(pvApiCtx) + 1, 3, &piAddrNew);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Adding the R value matrix to the list
+ sciErr = createMatrixOfDoubleInList(pvApiCtx, nbInputArgument(pvApiCtx)+1 , piAddrNew, 1, iRows,iCols, r);
+ free(r);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Adding the G value matrix to the list
+ sciErr = createMatrixOfDoubleInList(pvApiCtx, nbInputArgument(pvApiCtx)+1 , piAddrNew, 2, iRows, iCols, g);
+ free(g);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+ //Adding the B value matrix to the list
+ sciErr = createMatrixOfDoubleInList(pvApiCtx, nbInputArgument(pvApiCtx)+1 , piAddrNew, 3, iRows, iCols, b);
+ free(b);
+ if(sciErr.iErr)
+ {
+ printError(&sciErr, 0);
+ return 0;
+ }
+
+
+ AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1;
+
+ ReturnArguments(pvApiCtx);
+ return 0;
+ }
+}
diff --git a/sci_gateway/opencv_xyz2uint16.cpp b/sci_gateway/opencv_xyz2uint16.cpp
new file mode 100644
index 0000000..dcd9fa1
--- /dev/null
+++ b/sci_gateway/opencv_xyz2uint16.cpp
@@ -0,0 +1,170 @@
+/*--------------------------------------
+ function-xyz2uint16
+ Author :Tess Zacharias, Ashish Manatosh Barik
+------------------------------------- */
+#include
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/opencv.hpp"
+#include
+#include