From e6fce1b239c0f7fccc366ad878fee52bdb3204a3 Mon Sep 17 00:00:00 2001 From: papyrussolution Date: Sun, 26 May 2024 19:07:10 +0300 Subject: [PATCH] Version 12.0.3 --- Src/BuildVC2017/slib.vcxproj | 1 - Src/BuildVC2017/slib.vcxproj.filters | 1 - Src/BuildVC2019/slib.vcxproj | 1 - Src/BuildVC2019/slib.vcxproj.filters | 1 - Src/BuildVC2022/slib.vcxproj | 1 - Src/BuildVC2022/slib.vcxproj.filters | 1 - Src/Include/Db.h | 22 +- Src/Include/Pp.h | 8 +- Src/Include/SLIB.H | 1 + Src/Include/wsctl.h | 3 + Src/OSF/BDB/hash_func.c | 4 +- Src/OSF/SDL/.clang-format | 90 - Src/OSF/SDL/.clang-tidy | 58 - Src/OSF/SDL/.editorconfig | 69 - Src/OSF/SDL/.gitignore | 155 - Src/OSF/gSOAP/stdsoap2.cpp | 5 +- Src/OSF/lcms2/src/cmstypes.c | 15 +- .../libarchive/archive_cryptor.c | 3 +- Src/OSF/oniguruma/src/regint.h | 3 - .../zstd/lib/include/zstd_compress_internal.h | 51 +- Src/PPEquip/EquipDll/Pirit.cpp | 25 +- Src/PPEquip/Shtrihfr.cpp | 387 +- Src/PPEquip/atoldrv.cpp | 1 + Src/PPEquip/frontol.cpp | 1 + Src/PPLib/Cshses.cpp | 5 +- Src/PPLib/chzn.cpp | 1 + Src/PPLib/ie_bill.cpp | 48 +- Src/PPLib/ppedi.cpp | 84 +- Src/PPLib/ppsupplix.cpp | 10 +- Src/PPLib/wsctl.cpp | 2 +- Src/PPMain/wsctl-main.cpp | 50 +- Src/PPTEST/pptest.cpp | 4 +- Src/Rsrc/Str/ppstr2.symb | 3 +- Src/Rsrc/Str/ppstr2.txt | 3 +- Src/Rsrc/Version/genver-open.dat | 4 +- Src/SLib/Tdialog.cpp | 2 - Src/SLib/hashtab.cpp | 8 +- Src/SLib/lz4/lz4.c | 24 +- Src/SLib/scrypt.cpp | 21 - Src/SLib/sfann.cpp | 6901 ----------------- Src/SLib/slport.c | 3 - Src/SLib/slrecmgr.cpp | 41 +- Src/SLib/tcontrol.cpp | 4 +- Src/SLib/test-openssl.cpp | 8 - Src/SLib/uri.cpp | 108 +- 45 files changed, 482 insertions(+), 7759 deletions(-) delete mode 100644 Src/OSF/SDL/.clang-format delete mode 100644 Src/OSF/SDL/.clang-tidy delete mode 100644 Src/OSF/SDL/.editorconfig delete mode 100644 Src/OSF/SDL/.gitignore delete mode 100644 Src/SLib/scrypt.cpp delete mode 100644 Src/SLib/sfann.cpp delete mode 100644 Src/SLib/slport.c delete mode 100644 Src/SLib/test-openssl.cpp diff --git a/Src/BuildVC2017/slib.vcxproj b/Src/BuildVC2017/slib.vcxproj index 3f91082cea..02115f7aa8 100644 --- a/Src/BuildVC2017/slib.vcxproj +++ b/Src/BuildVC2017/slib.vcxproj @@ -2574,7 +2574,6 @@ - diff --git a/Src/BuildVC2017/slib.vcxproj.filters b/Src/BuildVC2017/slib.vcxproj.filters index 941067269f..a64a0de66f 100644 --- a/Src/BuildVC2017/slib.vcxproj.filters +++ b/Src/BuildVC2017/slib.vcxproj.filters @@ -109,7 +109,6 @@ - diff --git a/Src/BuildVC2019/slib.vcxproj b/Src/BuildVC2019/slib.vcxproj index 38181cd96f..ad96586d02 100644 --- a/Src/BuildVC2019/slib.vcxproj +++ b/Src/BuildVC2019/slib.vcxproj @@ -2619,7 +2619,6 @@ - diff --git a/Src/BuildVC2019/slib.vcxproj.filters b/Src/BuildVC2019/slib.vcxproj.filters index 5508be969f..a08bd90a3e 100644 --- a/Src/BuildVC2019/slib.vcxproj.filters +++ b/Src/BuildVC2019/slib.vcxproj.filters @@ -109,7 +109,6 @@ - diff --git a/Src/BuildVC2022/slib.vcxproj b/Src/BuildVC2022/slib.vcxproj index 7bd8e0bb84..8eda3d2365 100644 --- a/Src/BuildVC2022/slib.vcxproj +++ b/Src/BuildVC2022/slib.vcxproj @@ -2685,7 +2685,6 @@ - diff --git a/Src/BuildVC2022/slib.vcxproj.filters b/Src/BuildVC2022/slib.vcxproj.filters index 2146542cd5..ab611ed165 100644 --- a/Src/BuildVC2022/slib.vcxproj.filters +++ b/Src/BuildVC2022/slib.vcxproj.filters @@ -109,7 +109,6 @@ - diff --git a/Src/Include/Db.h b/Src/Include/Db.h index 22f142332d..7749033b52 100644 --- a/Src/Include/Db.h +++ b/Src/Include/Db.h @@ -4645,6 +4645,7 @@ class SRecPageFreeList { int Put(uint32 type, uint64 rowId, uint32 freeSize); 
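SRecPageFreeList's Put() doubles as the removal primitive: Remove() just below forwards to Put(type, rowId, 0), so a zero freeSize means "forget this row". A hypothetical minimal model of that convention (names and container invented here, not from Db.h):

```cpp
#include <cstdint>
#include <map>

class FreeList {
public:
	void Put(uint64_t rowId, uint32_t freeSize)
	{
		if(freeSize == 0)
			L.erase(rowId); // zero free space acts as the removal sentinel
		else
			L[rowId] = freeSize;
	}
	void Remove(uint64_t rowId) { Put(rowId, 0); } // same convention as SRecPageFreeList::Remove
private:
	std::map<uint64_t, uint32_t> L;
};
```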
int Remove(uint32 type, uint64 rowId) { return Put(type, rowId, 0U); } const Entry * Get(uint32 type, uint32 reqSize) const; + int GetListForPage(uint pageSeq, TSVector & rList) const; // @debug }; struct SDataPageHeader { @@ -4723,7 +4724,7 @@ struct SDataPageHeader { uint UsableBlockCount; // Количество свободных блоков, чей размер позволяет их использовать для записи данных uint UsableBlockSize; // Общий размер данных, который может быть записан в пригодные к использованию свободные блоки }; - bool GetStat(Stat & rStat) const; + bool GetStat(Stat & rStat, TSVector * pUsableBlockList) const; bool IsValid() const; uint32 ReadRecPrefix(uint pos, RecPrefix & rPfx) const; // @@ -4810,6 +4811,24 @@ struct SDataPageHeader { class SRecPageManager { public: + // + // Структура RowId: + // | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | + // 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + // 6 5 4 3 2 1 0 + // | | | + // reserve offs bits + // + // Общий размер типа: 64 бита + // Старшие 16 бит - зарезервированы + // Следующие старшие 4 бита - индикатор битовой ширины поля смещения внутри страницы. + // Битовая ширина смещения может быть от 9 до 24 бит (0: 9, 1: 10, 15: 24) + // Битовую ширину поля смещения назовем pobw (page offset bit-width) + // Само смещение внутри страницы задается со сдвигом в 32 поскольку заголовок + // страницы длиной 32 байта не участвует в распределении рабочих блоков. + // Таким образом максимальная длина страницы = 2^(24+5) + // Следующие старшие (64-4-pobw) бит - номер страницы. + // static constexpr uint RowIdBitWidth = 48; static uint64 MakeRowId(uint pageSize, uint pageSeq, uint offset); static int SplitRowId(uint64 rowId, uint pageSize, uint * pPageSeq, uint * pOffset); @@ -4822,6 +4841,7 @@ class SRecPageManager { // !0 - actual size of the read data // uint Read(uint64 rowId, void * pBuf, size_t bufSize); + static bool TestSinglePage(uint pageSize); private: SDataPageHeader * GetPage(uint32 seq); SDataPageHeader * AllocatePage(uint32 type); diff --git a/Src/Include/Pp.h b/Src/Include/Pp.h index 73447e5537..14aafdea8f 100644 --- a/Src/Include/Pp.h +++ b/Src/Include/Pp.h @@ -22118,7 +22118,7 @@ class PPSyncCashSession { PPID NodeID; char Port[128]; // char Name[48]; // - int PortType; // 0 - file, 1 - lpt, 2 - com + int PortType; // 0 - file, 1 - lpt, 2 - com, 3 - server /*@v12.0.3*/ int Handle; // long State; PPSlipFormatter * P_SlipFmt; @@ -23977,6 +23977,7 @@ class PPObjDBDiv : public PPObjReference { #define GTCHZNPT_ALTTOBACCO 11 // @v11.9.0 Альтернативная табачная продукция. Марки очень похожи на табак, но есть нюансы в обработке. 
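The RowId layout documented in the Db.h hunk above splits the 48 significant bits into a 4-bit width indicator, a page number, and an in-page offset whose bit width pobw runs from 9 to 24 (indicator 0 encodes 9, 15 encodes 24). A toy pack/unpack under one consistent reading of that comment; it leaves the 16 reserved bits at zero, omits the 32-byte header shift, and is not the real MakeRowId/SplitRowId:

```cpp
#include <cassert>
#include <cstdint>

static uint64_t make_rowid(unsigned pobw /*9..24*/, uint64_t pageSeq, uint64_t offs)
{
	assert(pobw >= 9 && pobw <= 24);
	assert(offs < (1ULL << pobw));            // offset must fit its declared width
	assert(pageSeq < (1ULL << (44 - pobw)));  // page number gets the remaining bits
	const uint64_t indicator = pobw - 9;      // 4-bit field: 0 -> width 9, 15 -> width 24
	return (indicator << 44) | (pageSeq << pobw) | offs;
}

static void split_rowid(uint64_t rowId, unsigned * pPobw, uint64_t * pPageSeq, uint64_t * pOffs)
{
	const unsigned pobw = static_cast<unsigned>((rowId >> 44) & 0xF) + 9;
	*pPobw = pobw;
	*pPageSeq = (rowId >> pobw) & ((1ULL << (44 - pobw)) - 1);
	*pOffs = rowId & ((1ULL << pobw) - 1);
}

int main()
{
	unsigned w; uint64_t seq, offs;
	split_rowid(make_rowid(12, 1000, 777), &w, &seq, &offs);
	assert(w == 12 && seq == 1000 && offs == 777); // round-trips losslessly
	return 0;
}
```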
#define GTCHZNPT_DRAFTBEER 12 // @v11.9.2 Пиво разливное #define GTCHZNPT_DIETARYSUPPLEMENT 13 // @v11.9.6 БАДы +#define GTCHZNPT_BEER 14 // @v12.0.3 Пиво фасованное struct PPGoodsType2 { // @persistent @store(Reference2Tbl+) PPGoodsType2(); @@ -56365,7 +56366,7 @@ class PPEdiProcessor { enum { ctrfTestMode = 0x0001 }; - ProviderImplementation(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags); + ProviderImplementation(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags, PPLogger * pLogger); virtual ~ProviderImplementation(); virtual int GetDocumentList(const PPBillIterchangeFilt & rP, DocumentInfoList & rList) { return -1; } virtual int ReceiveDocument(const PPEdiProcessor::DocumentInfo * pIdent, TSCollection & rList) { return -1; } @@ -56449,13 +56450,14 @@ class PPEdiProcessor { int ResolveDlvrLoc(const OwnFormatContractor & rC, PPBillPacket * pPack); int ResolveOwnFormatContractor(const OwnFormatContractor & rC, int partyQ, PPBillPacket * pPack); SString EncBuf; + PPLogger * P_Logger; // @notowned private: int GetIntermediatePath(const char * pSub, int docType, SString & rBuf); int Helper_GetPersonGLN(PPID psnID, SString & rGLN); }; static int FASTCALL GetEdiMsgTypeByText(const char * pSymb); - static ProviderImplementation * CreateProviderImplementation(PPID ediPrvID, PPID mainOrgID, long flags); + static ProviderImplementation * CreateProviderImplementation(PPID ediPrvID, PPID mainOrgID, long flags, PPLogger * pLogger); explicit PPEdiProcessor(ProviderImplementation * pImp, PPLogger * pLogger); ~PPEdiProcessor(); int SendOrders(const PPBillIterchangeFilt & rP, const PPIDArray & rArList); diff --git a/Src/Include/SLIB.H b/Src/Include/SLIB.H index 94d5b82990..dbd74c9303 100644 --- a/Src/Include/SLIB.H +++ b/Src/Include/SLIB.H @@ -1345,6 +1345,7 @@ public: static constexpr uint64 FnvHash1Init64 = 0xcbf29ce484222325ULL; // @v11.9.12 Константа инициализации хэш-функции FNV1_64 и FNVa_64 static constexpr uint32 FnvHashPrime32 = 0x01000193U/*16777619*/; // @v11.9.12 Константа для итерации хэш-функции FNV_32 (32 bit magic FNV-0 and FNV-1 prime) static constexpr uint64 FnvHashPrime64 = 0x100000001b3ULL; // @v11.9.12 Константа для итерации хэш-функции FNV_4 (64 bit magic FNV-0 and FNV-1 prime) + static constexpr uint32 OzanYigitHashPrime = 65599U; // @v12.0.3 Константа для итерации хэш-функции Ozan Yigit (используется в BDB и gSOAP) static constexpr long OneBillion = 1000000000L; // @v11.7.6 static constexpr const char * P_HxDigL = "0123456789abcdef"; static constexpr const char * P_HxDigU = "0123456789ABCDEF"; diff --git a/Src/Include/wsctl.h b/Src/Include/wsctl.h index 739ca596c7..d1259fe85d 100644 --- a/Src/Include/wsctl.h +++ b/Src/Include/wsctl.h @@ -13,7 +13,10 @@ class WsCtl_SelfIdentityBlock { MACAddrArray MacAdrList; // @v12.0.1 S_IPAddr IpAdr; // @v12.0.1 PPID PrcID; // . . + PPID ComputerID; // @v12.0.3 . + PPID CompCatID; // @v12.0.3 . SString PrcName; // . . + SString CompCatName; // @v12.0.3 }; // // diff --git a/Src/OSF/BDB/hash_func.c b/Src/OSF/BDB/hash_func.c index dd42fc4b13..61bad82666 100644 --- a/Src/OSF/BDB/hash_func.c +++ b/Src/OSF/BDB/hash_func.c @@ -65,7 +65,7 @@ uint32 __ham_func2(DB * dbp, const void * key, uint32 len) * iteration, perform 8 HASHC's so we handle all 8 bytes. Essentially, this * saves us 7 cmp & branch instructions. 
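The constant being named in SLIB.H above is the sdbm multiplier: both BDB's HASHC macro (just below) and gSOAP's soap_hash (further down in this patch) iterate h = byte + 65599*h, which now reads as SlConst::OzanYigitHashPrime. A standalone form of the recurrence:

```cpp
#include <cstdint>
#include <cstddef>

// n = byte + 65599 * n, accumulated mod 2^32.
static uint32_t hash65599(const void * pData, size_t len)
{
	const uint8_t * k = static_cast<const uint8_t *>(pData);
	uint32_t n = 0;
	while(len--)
		n = *k++ + 65599U * n; // exactly one HASHC step
	return n;
}
```

__ham_func3 computes the same value; its loop merely unrolls eight HASHC steps per iteration (hence the "saves us 7 cmp & branch instructions" remark), and soap_hash is the same recurrence over a size_t, reduced modulo SOAP_IDHASH at the end.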
* - * PUBLIC: uint32 __ham_func3 __P((DB *, const void *, uint32)); + * PUBLIC: uint32 __ham_func3(DB *, const void *, uint32); */ uint32 __ham_func3(DB * dbp, const void * key, uint32 len) { @@ -75,7 +75,7 @@ uint32 __ham_func3(DB * dbp, const void * key, uint32 len) COMPQUIET(dbp, 0); if(!len) return 0; -#define HASHC n = *k+++65599*n +#define HASHC n = *k++ + SlConst::OzanYigitHashPrime/*65599*/ * n n = 0; k = static_cast(key); loop = (len+8-1)>>3; diff --git a/Src/OSF/SDL/.clang-format b/Src/OSF/SDL/.clang-format deleted file mode 100644 index e98e5cd70a..0000000000 --- a/Src/OSF/SDL/.clang-format +++ /dev/null @@ -1,90 +0,0 @@ ---- -AlignConsecutiveMacros: Consecutive -AlignConsecutiveAssignments: None -AlignConsecutiveBitFields: None -AlignConsecutiveDeclarations: None -AlignEscapedNewlines: Right -AlignOperands: Align -AlignTrailingComments: true - -AllowAllArgumentsOnNextLine: true -AllowAllParametersOfDeclarationOnNextLine: true -AllowShortEnumsOnASingleLine: true -AllowShortBlocksOnASingleLine: Never -AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: All -AllowShortIfStatementsOnASingleLine: Never -AllowShortLoopsOnASingleLine: false - -AlwaysBreakAfterDefinitionReturnType: None -AlwaysBreakAfterReturnType: None -AlwaysBreakBeforeMultilineStrings: false -AlwaysBreakTemplateDeclarations: MultiLine - -# Custom brace breaking -BreakBeforeBraces: Custom -BraceWrapping: - AfterCaseLabel: true - AfterClass: true - AfterControlStatement: Never - AfterEnum: true - AfterFunction: true - AfterNamespace: true - AfterObjCDeclaration: true - AfterStruct: true - AfterUnion: true - AfterExternBlock: false - BeforeElse: false - BeforeWhile: false - IndentBraces: false - SplitEmptyFunction: true - SplitEmptyRecord: true - -# Make the closing brace of container literals go to a new line -Cpp11BracedListStyle: false - -# Never format includes -IncludeBlocks: Preserve -# clang-format version 4.0 through 12.0: -#SortIncludes: false -# clang-format version 13.0+: -#SortIncludes: Never - -# No length limit, in case it breaks macros, you can -# disable it with /* clang-format off/on */ comments -ColumnLimit: 0 - -IndentWidth: 4 -ContinuationIndentWidth: 4 -IndentCaseLabels: false -IndentCaseBlocks: false -IndentGotoLabels: true -IndentPPDirectives: None -IndentExternBlock: NoIndent - -PointerAlignment: Right -SpaceAfterCStyleCast: false -SpacesInCStyleCastParentheses: false -SpacesInConditionalStatement: false -SpacesInContainerLiterals: true -SpaceBeforeAssignmentOperators: true -SpaceBeforeCaseColon: false -SpaceBeforeParens: ControlStatements -SpaceAroundPointerQualifiers: Default -SpaceInEmptyBlock: false -SpaceInEmptyParentheses: false - -UseCRLF: false -UseTab: Never - -ForEachMacros: - [ - "spa_list_for_each", - "spa_list_for_each_safe", - "wl_list_for_each", - "wl_array_for_each", - "udev_list_entry_foreach", - ] - ---- - diff --git a/Src/OSF/SDL/.clang-tidy b/Src/OSF/SDL/.clang-tidy deleted file mode 100644 index 739a8eb041..0000000000 --- a/Src/OSF/SDL/.clang-tidy +++ /dev/null @@ -1,58 +0,0 @@ ---- -Checks: > - -*, - bugprone-assert-side-effect, - bugprone-assignment-in-if-condition, - bugprone-bool-pointer-implicit-conversion, - bugprone-dangling-handle, - bugprone-dynamic-static-initializers, - bugprone-infinite-loop, - bugprone-integer-division, - bugprone-macro-repeated-side-effects, - bugprone-misplaced-operator-in-strlen-in-alloc, - bugprone-misplaced-pointer-arithmetic-in-alloc, - bugprone-misplaced-widening-cast, - bugprone-not-null-terminated-result, - 
bugprone-posix-return, - bugprone-redundant-branch-condition, - bugprone-string-literal-with-embedded-nul, - bugprone-suspicious-memset-usage, - bugprone-suspicious-semicolon, - bugprone-suspicious-string-compare, - bugprone-too-small-loop-variable, - bugprone-unused-return-value, - cert-err33-c, - clang-analyzer-core.*, - clang-analyzer-valist.*, - clang-analyzer-unix.Malloc, - google-readability-casting, - misc-misleading-bidirectional, - misc-misleading-identifier, - misc-misplaced-const, - misc-redundant-expression, - objc-*, - performance-type-promotion-in-math-fn, - readability-avoid-const-params-in-decls, - readability-braces-around-statements, - readability-const-return-type, - readability-duplicate-include, - readability-inconsistent-declaration-parameter-name, - readability-misplaced-array-index, - readability-non-const-parameter, - readability-redundant-control-flow, - readability-redundant-declaration, - readability-redundant-function-ptr-dereference, - readability-redundant-preprocessor, - readability-simplify-boolean-expr - -CheckOptions: - - key: bugprone-assert-side-effect.AssertMacros - value: "SDL_assert, SDL_assert_release, SDL_assert_paranoid, SDL_assert_always, SDL_COMPILE_TIME_ASSERT" - - key: bugprone-misplaced-widening-cast.CheckImplicitCasts - value: true - - key: bugprone-not-null-terminated-result.WantToUseSafeFunctions - value: false # Do not recommend _s functions - -FormatStyle: "file" -HeaderFilterRegex: "*.h$" -WarningsAsErrors: "" diff --git a/Src/OSF/SDL/.editorconfig b/Src/OSF/SDL/.editorconfig deleted file mode 100644 index de2d6de969..0000000000 --- a/Src/OSF/SDL/.editorconfig +++ /dev/null @@ -1,69 +0,0 @@ -# For format see editorconfig.org -# Copyright 2022 Collabora Ltd. -# SPDX-License-Identifier: Zlib - -root = true - -[*.{c,cc,cg,cpp,gradle,h,java,m,metal,pl,py,S,sh,txt}] -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.{html,js,json,m4,yml,yaml,vcxproj,vcxproj.filters}] -indent_size = 2 -indent_style = space -trim_tailing_whitespace = true - -[*.xml] -indent_size = 4 -indent_style = space - -[{CMakeLists.txt,cmake/*.cmake}] -indent_size = 2 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[{cmake/cmake_uninstall.cmake.in,test/CMakeLists.txt,cmake/SDL3Config.cmake.in}] -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[{Makefile.*,*.mk,*.sln,*.pbxproj,*.plist}] -indent_size = 8 -indent_style = tab -tab_width = 8 - -[src/joystick/controller_type.*] -indent_style = tab - -[src/joystick/hidapi/steam/*.h] -indent_style = tab - -[src/libm/*.c] -indent_style = tab - -[src/test/SDL_test_{crc32,md5,random}.c] -indent_size = 2 -indent_style = space - -[src/video/yuv2rgb/*.{c,h}] -indent_style = tab - -[wayland-protocols/*.xml] -indent_size = 2 -indent_style = space - -[*.{markdown,md}] -indent_size = 4 -indent_style = space -# Markdown syntax treats tabs as 4 spaces -tab_width = 4 - -[{*.bat,*.rc}] -end_of_line = crlf - -[*.cocci]' -insert_final_newline = true diff --git a/Src/OSF/SDL/.gitignore b/Src/OSF/SDL/.gitignore deleted file mode 100644 index e8f90c51b6..0000000000 --- a/Src/OSF/SDL/.gitignore +++ /dev/null @@ -1,155 +0,0 @@ -build/ -buildbot/ -/VERSION.txt -__pycache__ - -*.so -*.so.* -*.dll -*.exe -*.o -*.obj -*.res -*.lib -*.a -*.la -*.dSYM -*,e1f -*,ff8 -*.lnk -*.err -*.exp -*.map -*.orig -*~ -*.swp -*.tmp -*.rej - -# for CMake -CMakeFiles/ -CMakeCache.txt -cmake_install.cmake 
-cmake_uninstall.cmake -SDL3ConfigVersion.cmake -.ninja_* -*.ninja - -# for CLion -.idea -cmake-build-* - -# for Xcode -*.mode1* -*.perspective* -*.pbxuser -(^|/)build($|/) -.DS_Store -xcuserdata -*.xcworkspace - -# for Visual C++ -.vs -Debug -Release -*.user -*.ncb -*.suo -*.sdf -VisualC/tests/gamepadmap/axis.bmp -VisualC/tests/gamepadmap/button.bmp -VisualC/tests/gamepadmap/gamepadmap.bmp -VisualC/tests/gamepadmap/gamepadmap_back.bmp -VisualC/tests/loopwave/sample.wav -VisualC/tests/testautomation/CompareSurfaces0001_Reference.bmp -VisualC/tests/testautomation/CompareSurfaces0001_TestOutput.bmp -VisualC/tests/testgamepad/axis.bmp -VisualC/tests/testgamepad/button.bmp -VisualC/tests/testgamepad/gamepadmap.bmp -VisualC/tests/testgamepad/gamepadmap_back.bmp -VisualC/tests/testoverlay/moose.dat -VisualC/tests/testrendertarget/icon.bmp -VisualC/tests/testrendertarget/sample.bmp -VisualC/tests/testscale/icon.bmp -VisualC/tests/testscale/sample.bmp -VisualC/tests/testsprite/icon.bmp -VisualC/tests/testyuv/testyuv.bmp -VisualC-GDK/**/Layout - -# for Android -android-project/local.properties -android-project/.gradle/ - -test/checkkeys -test/checkkeysthreads -test/gamepadmap -test/loopwave -test/loopwavequeue -test/testatomic -test/testaudiocapture -test/testaudiohotplug -test/testaudioinfo -test/testautomation -test/testbounds -test/testcustomcursor -test/testdisplayinfo -test/testdraw -test/testdrawchessboard -test/testdropfile -test/testerror -test/testevdev -test/testfile -test/testfilesystem -test/testgamepad -test/testgeometry -test/testgesture -test/testgl -test/testgles -test/testgles2 -test/testhaptic -test/testhittesting -test/testhotplug -test/testiconv -test/testime -test/testintersections -test/testjoystick -test/testkeys -test/testloadso -test/testlocale -test/testlock -test/testmessage -test/testmouse -test/testmultiaudio -test/testnative -test/testoverlay -test/testplatform -test/testpower -test/testqsort -test/testrelative -test/testrendercopyex -test/testrendertarget -test/testresample -test/testrumble -test/testscale -test/testsem -test/testsensor -test/testshader -test/testshape -test/testsprite -test/testspriteminimal -test/teststreaming -test/testsurround -test/testthread -test/testtimer -test/testurl -test/testver -test/testviewport -test/testvulkan -test/testwm -test/testyuv -test/torturethread - -# for Doxygen -docs/output -SDL.tag -doxygen_warn.txt diff --git a/Src/OSF/gSOAP/stdsoap2.cpp b/Src/OSF/gSOAP/stdsoap2.cpp index b062df0569..a6031ac169 100644 --- a/Src/OSF/gSOAP/stdsoap2.cpp +++ b/Src/OSF/gSOAP/stdsoap2.cpp @@ -5849,7 +5849,7 @@ SOAP_FMAC1 size_t /*SOAP_FMAC2*/FASTCALL soap_hash(const char * s) { size_t h = 0; while(*s) - h = 65599*h+*s++; + h = SlConst::OzanYigitHashPrime/*65599*/ * h + *s++; return h%SOAP_IDHASH; } @@ -5953,8 +5953,7 @@ SOAP_FMAC1 int SOAP_FMAC2 soap_pointer_enter(struct soap * soap, const void * p, h = soap_hash_ptr(a->__ptr); else h = soap_hash_ptr(p); - DBGLOG(TEST, - SOAP_MESSAGE(fdebug, "Pointer enter location=%p array=%p size=%d dim=%d type=%d id=%d\n", p, + DBGLOG(TEST, SOAP_MESSAGE(fdebug, "Pointer enter location=%p array=%p size=%d dim=%d type=%d id=%d\n", p, a ? a->__ptr : NULL, a ? 
a->__size : 0, n, type, soap->idnum+1)); pp->next = soap->pht[h]; pp->type = type; diff --git a/Src/OSF/lcms2/src/cmstypes.c b/Src/OSF/lcms2/src/cmstypes.c index d15576bb68..ab9ed602ad 100644 --- a/Src/OSF/lcms2/src/cmstypes.c +++ b/Src/OSF/lcms2/src/cmstypes.c @@ -1378,16 +1378,21 @@ static boolint Write8bitTables(cmsContext ContextID, cmsIOHANDLER* io, uint32 n, // Check overflow static uint32 uipow(uint32 n, uint32 a, uint32 b) { - uint32 rv = 1, rc; - if(!a) return 0; - if(!n) return 0; + uint32 rv = 1; + uint32 rc; + if(!a) + return 0; + if(!n) + return 0; for(; b > 0; b--) { rv *= a; // Check for overflow - if(rv > UINT_MAX / a) return (uint32) -1; + if(rv > UINT_MAX / a) + return (uint32)-1; } rc = rv * n; - if(rv != rc / n) return (uint32) -1; + if(rv != rc / n) + return (uint32)-1; return rc; } diff --git a/Src/OSF/libarchive-350/libarchive/archive_cryptor.c b/Src/OSF/libarchive-350/libarchive/archive_cryptor.c index b143a46023..727d4b6982 100644 --- a/Src/OSF/libarchive-350/libarchive/archive_cryptor.c +++ b/Src/OSF/libarchive-350/libarchive/archive_cryptor.c @@ -34,9 +34,8 @@ static int pbkdf2_sha1(const char * pw, size_t pw_len, const uint8 * salt, size_ static int pbkdf2_sha1(const char * pw, size_t pw_len, const uint8 * salt, size_t salt_len, uint rounds, uint8 * derived_key, size_t derived_key_len) { - NTSTATUS status; BCRYPT_ALG_HANDLE hAlg; - status = BCryptOpenAlgorithmProvider(&hAlg, BCRYPT_SHA1_ALGORITHM, MS_PRIMITIVE_PROVIDER, BCRYPT_ALG_HANDLE_HMAC_FLAG); + NTSTATUS status = BCryptOpenAlgorithmProvider(&hAlg, BCRYPT_SHA1_ALGORITHM, MS_PRIMITIVE_PROVIDER, BCRYPT_ALG_HANDLE_HMAC_FLAG); if(!BCRYPT_SUCCESS(status)) return -1; status = BCryptDeriveKeyPBKDF2(hAlg, (PUCHAR)(uintptr_t)pw, (ULONG)pw_len, (PUCHAR)(uintptr_t)salt, (ULONG)salt_len, rounds, (PUCHAR)derived_key, (ULONG)derived_key_len, 0); diff --git a/Src/OSF/oniguruma/src/regint.h b/Src/OSF/oniguruma/src/regint.h index 6737a40487..0f39844543 100644 --- a/Src/OSF/oniguruma/src/regint.h +++ b/Src/OSF/oniguruma/src/regint.h @@ -79,9 +79,6 @@ #ifdef HAVE_INTTYPES_H #include #endif - // @v11.7.1 #if defined(_WIN32) || defined(__BORLANDC__) - // @v11.7.1 #include - // @v11.7.1 #endif #ifdef ONIG_DEBUG_STATISTICS #ifdef USE_TIMEOFDAY #ifdef HAVE_SYS_TIME_H diff --git a/Src/OSF/zstd/lib/include/zstd_compress_internal.h b/Src/OSF/zstd/lib/include/zstd_compress_internal.h index 036fbb96c1..ffbc2cd2be 100644 --- a/Src/OSF/zstd/lib/include/zstd_compress_internal.h +++ b/Src/OSF/zstd/lib/include/zstd_compress_internal.h @@ -764,19 +764,13 @@ static uint64 ZSTD_rollingHash_append(uint64 hash, const void * buf, size_t size /** ZSTD_rollingHash_compute() : * Compute the rolling hash value of the buffer. */ -/*MEM_STATIC*/inline uint64 ZSTD_rollingHash_compute(void const* buf, size_t size) -{ - return ZSTD_rollingHash_append(0, buf, size); -} +/*MEM_STATIC*/inline uint64 ZSTD_rollingHash_compute(void const* buf, size_t size) { return ZSTD_rollingHash_append(0, buf, size); } /** ZSTD_rollingHash_primePower() : * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash * over a window of length bytes. */ -/*MEM_STATIC*/inline uint64 ZSTD_rollingHash_primePower(uint32 length) -{ - return ZSTD_ipow(prime8bytes, length - 1); -} +/*MEM_STATIC*/inline uint64 ZSTD_rollingHash_primePower(uint32 length) { return ZSTD_ipow(prime8bytes, length - 1); } /** ZSTD_rollingHash_rotate() : * Rotate the rolling hash by one byte. 
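ZSTD_rollingHash_primePower() above is what makes the one-byte rotate work: with primePower = prime^(length-1), the oldest byte's term can be subtracted out before the hash is shifted one position. A toy model of the append/rotate identity; the multiplier and per-byte bias here are illustrative stand-ins, not zstd's actual prime8bytes and character offset:

```cpp
#include <cassert>
#include <cstdint>
#include <cstddef>

static const uint64_t P = 0x9E3779B185EBCA87ULL; // any odd 64-bit multiplier works for the identity
static const uint64_t BIAS = 10;                 // per-byte bias so zero bytes still contribute

static uint64_t append(uint64_t h, const unsigned char * p, size_t n)
{
	while(n--)
		h = h * P + (*p++ + BIAS); // same shape as ZSTD_rollingHash_append
	return h;
}

static uint64_t ipow(uint64_t b, unsigned e) // primePower = P^(window - 1)
{
	uint64_t r = 1;
	for(; e; e >>= 1, b *= b)
		if(e & 1) r *= b;
	return r;
}

static uint64_t rotate(uint64_t h, unsigned char out, unsigned char in, uint64_t primePower)
{
	h -= (out + BIAS) * primePower; // remove the oldest byte's contribution
	return h * P + (in + BIAS);     // shift the window and add the new byte
}

int main()
{
	const unsigned char buf[] = "abcdefgh";
	const size_t W = 4;                  // window length
	const uint64_t pp = ipow(P, W - 1);
	uint64_t h = append(0, buf, W);      // hash of "abcd"
	h = rotate(h, buf[0], buf[W], pp);   // slide to "bcde"
	assert(h == append(0, buf + 1, W));  // identical to hashing "bcde" from scratch
	return 0;
}
```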
@@ -819,32 +813,28 @@ static uint64 ZSTD_rollingHash_append(uint64 hash, const void * buf, size_t size * ZSTD_window_hasExtDict(): * Returns non-zero if the window has a non-empty extDict. */ -/*MEM_STATIC*/inline uint32 ZSTD_window_hasExtDict(ZSTD_window_t const window) -{ - return window.lowLimit < window.dictLimit; -} -/** - * ZSTD_matchState_dictMode(): - * Inspects the provided matchState and figures out what dictMode should be - * passed to the compressor. - */ +/*MEM_STATIC*/inline uint32 ZSTD_window_hasExtDict(ZSTD_window_t const window) { return window.lowLimit < window.dictLimit; } +// +// ZSTD_matchState_dictMode(): +// Inspects the provided matchState and figures out what dictMode should be +// passed to the compressor. +// /*MEM_STATIC*/inline ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t * ms) { return ZSTD_window_hasExtDict(ms->window) ? ZSTD_extDict : (ms->dictMatchState ? (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) : ZSTD_noDict); } - -/* Defining this macro to non-zero tells zstd to run the overflow correction - * code much more frequently. This is very inefficient, and should only be - * used for tests and fuzzers. - */ +// +// Defining this macro to non-zero tells zstd to run the overflow correction +// code much more frequently. This is very inefficient, and should only be +// used for tests and fuzzers. +// #ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -#define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1 -#else -#define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0 -#endif + #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + #define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1 + #else + #define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0 + #endif #endif - /** * ZSTD_window_canOverflowCorrect(): * Returns non-zero if the indices are large enough for overflow correction @@ -1026,8 +1016,7 @@ static uint64 ZSTD_rollingHash_append(uint64 hash, const void * buf, size_t size { const uint32 blockEndIdx = (uint32)((BYTE const*)blockEnd - window->base); const uint32 loadedDictEnd = *loadedDictEndPtr; - DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", - (uint)blockEndIdx, (uint)maxDist, (uint)loadedDictEnd); + DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", (uint)blockEndIdx, (uint)maxDist, (uint)loadedDictEnd); assert(blockEndIdx >= loadedDictEnd); if(blockEndIdx > loadedDictEnd + maxDist) { /* On reaching window size, dictionaries are invalidated. 
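The comment reflow around ZSTD_matchState_dictMode() above leaves the selection itself as one dense nested ternary. Flattened for reading, with the internal types stubbed so the snippet stands alone (the real definitions live in this header):

```cpp
#include <cstdint>

enum ZSTD_dictMode_e { ZSTD_noDict, ZSTD_extDict, ZSTD_dictMatchState, ZSTD_dedicatedDictSearch };
struct ZSTD_window_t { uint32_t lowLimit, dictLimit; };
struct ZSTD_matchState_t {
	ZSTD_window_t window;
	const ZSTD_matchState_t * dictMatchState;
	int dedicatedDictSearch;
};

static ZSTD_dictMode_e dictModeFor(const ZSTD_matchState_t * ms)
{
	if(ms->window.lowLimit < ms->window.dictLimit) // ZSTD_window_hasExtDict()
		return ZSTD_extDict;                       // a non-empty extDict wins outright
	if(!ms->dictMatchState)
		return ZSTD_noDict;                        // nothing attached
	return ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState;
}
```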
@@ -1084,7 +1073,7 @@ static uint64 ZSTD_rollingHash_append(uint64 hash, const void * buf, size_t size window->dictBase = window->base; window->base = ip - distanceFromBase; /* ms->nextToUpdate = window->dictLimit; */ - if(window->dictLimit - window->lowLimit < HASH_READ_SIZE) + if((window->dictLimit - window->lowLimit) < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ contiguous = 0; } diff --git a/Src/PPEquip/EquipDll/Pirit.cpp b/Src/PPEquip/EquipDll/Pirit.cpp index a6d854d079..99c2049f7b 100644 --- a/Src/PPEquip/EquipDll/Pirit.cpp +++ b/Src/PPEquip/EquipDll/Pirit.cpp @@ -2587,6 +2587,7 @@ int PiritEquip::RunCheck(int opertype) case 4: product_type_bytes = 0x444D; break; // GTCHZNPT_MEDICINE @v10.8.7 0x0003-->0x450D // @v10.8.9 0x450D-->0x444D case 5: product_type_bytes = 0x444D; break; // @v10.9.7 GTCHZNPT_CARTIRE @v10.8.7 0x0003-->0x450D // @v10.8.9 0x450D-->0x444D case 12: product_type_bytes = 0x444D; break; // @v11.9.4 GTCHZNPT_DRAFTBEER + case 14: product_type_bytes = 0x444D; break; // @v12.0.3 GTCHZNPT_BEER default: product_type_bytes = 0x444D; break; // @v11.0.5 } const char * p_serial = Check.ChZnSerial.NotEmpty() ? Check.ChZnSerial.cptr() : Check.ChZnPartN.cptr(); // @v10.7.8 @@ -2627,6 +2628,28 @@ int PiritEquip::RunCheck(int opertype) //set_chzn_mark = false; // { + /* + + (Строка[1..256]) Код товара (Тег 1163) + (Строка[1..64]) Дополнительный реквизит предмета расчёта (Тег 1191) + (Строка) Зарезервировано + (Целое число) Признак агента по предмету расчёта (Тег 1222) + (Строка)[0..12] ИНН поставщика (Тег 1226) + (Строка)[0..40] Телефон(ы) поставщика (Тег 1171) + (Строка)[0..256] Наименование поставщика (Тег 1225) + (Строка)[0..256] Адрес оператора перевода (для банк.пл.агента/субагента, иначе пустой) (Тег 1005) + (Строка)[0..12] ИНН оператора перевода (для банк.пл.агента/субагента, иначе пустой) (Тег 1016) + (Строка)[0..64] Наименование оператора перевода (для банк.пл.агента/субагента, иначе пустой) (Тег 1026) + (Строка)[0..40] Телефон(ы) оператора перевода (для банк.пл.агента/субагента, иначе пустой) (Тег 1075) + (Строка)[0..24] Операция платежного агента (для банк.пл.агента/субагента, иначе пустой) (Тег 1044) + (Строка)[0..60] Телефон(ы) платежного агента (для пл.агента/субагента, иначе пустой) (Тег 1073) + (Строка)[0..60] Телефон(ы) оператора по приему платежей (для пл.агента/субагента, иначе пустой) (Тег 1074) + (Строка)[0..3] (fiovId = 030) Идентификатор ФОИВ (тег 1262). Значение определяется ФНС РФ. Параметр используется только при регистрации ККТ в режиме ФФД 1.2. + (Дата8) (documentDate = 26.03.2022) Дата документа основания (тег 1263) в формате ddmmyyyy. Должен содержать сведения об НПА отраслевого регулирования. Параметр используется только при регистрации ККТ в режиме ФФД 1.2. + (Строка)[0..32] (documentNumber = 477) Номер документа основания (тег 1264). Должен содержать сведения об НПА отраслевого регулирования. Параметр используется только при регистрации ККТ в режиме ФФД 1.2. + (Строка)[0..256] Значение отраслевого реквизита (тег 1265). Значение определяется отраслевым НПА. Параметр используется только при регистрации ККТ в режиме ФФД 1.2. 
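As the note that closes this parameter list just below states, the industry tags 1262, 1263, 1264 and 1265 travel as a single string separated by "|" (0x7C). A sketch using the sample values the comment itself gives (fiovId 030, date 26.03.2022, number 477); the helper name is invented:

```cpp
#include <string>

// Hypothetical helper: join tag 1262 (fiovId), 1263 (document date),
// 1264 (document number) and 1265 (industry attribute value) with 0x7C.
static std::string MakeIndustryAttrString(const std::string & fiovId, const std::string & docDate,
	const std::string & docNumber, const std::string & attrValue)
{
	return fiovId + "|" + docDate + "|" + docNumber + "|" + attrValue;
}
// MakeIndustryAttrString("030", "26.03.2022", "477", "...") -> "030|26.03.2022|477|..."
```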
При наличии нескольких реквизитов (теги 1262,1263,1264 и 1265) они должны разделяться символом "|" (0x7C) + + */ in_data.Z(); // @v11.2.3 @fix CreateStr(str.Z(), in_data); // #1 (tag 1162) Код товарной номенклатуры (для офд 1.2 - пустая строка) if(Check.ChZnProdType == 4) { // #2 (tag 1191) GTCHZNPT_MEDICINE @@ -2664,7 +2687,7 @@ int PiritEquip::RunCheck(int opertype) CreateStr(str, in_data); // #15 (tag 1262) Идентификатор ФОИВ. Значение определяется ФНС РФ. Параметр используется только при регистрации ККТ в режиме ФФД 1.2. } // @v11.9.3 str.Z().Cat(checkdate(Check.Timestamp.d) ? Check.Timestamp.d : getcurdate_(), DATF_DMY|DATF_NODIV|DATF_CENTURY); // @v11.2.3 // @v11.2.7 - str.Z().Cat("26032022"); // @v11.9.3 + str.Z().Cat("26.03.2022"); // @v11.9.3 // @v12.0.3 @fix "26032022"-->"26.03.2022" CreateStr(str, in_data); // #16 (tag 1263) Дата документа основания. Допускается дата после 1999 года. // Должен содержать сведения об НПА отраслевого регулирования. Параметр используется только при регистрации ККТ в режиме ФФД 1.2. /* @v11.9.3 if(Check.CheckNum > 0) diff --git a/Src/PPEquip/Shtrihfr.cpp b/Src/PPEquip/Shtrihfr.cpp index 320a19ce7c..59b6903499 100644 --- a/Src/PPEquip/Shtrihfr.cpp +++ b/Src/PPEquip/Shtrihfr.cpp @@ -210,7 +210,7 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { SCS_SHTRIHFRF(PPID n, char * name, char * port) : PPSyncCashSession(n, name, port), CashierPassword(0), AdmPassword(0), ResCode(RESCODE_NO_ERROR), ErrCode(SYNCPRN_NO_ERROR), DeviceType(devtypeUndef), CheckStrLen(DEF_STRLEN), Flags(0), RibbonParam(0), SCardPaymEntryN(0), - IsDebugMode(LOGIC(CConfig.Flags & CCFLG_DEBUG)) + IsDebugMode(LOGIC(CConfig.Flags & CCFLG_DEBUG)), ConnectionMode(connmodeUndef) { if(SCn.Flags & CASHF_NOTUSECHECKCUTTER) Flags |= sfDontUseCutter; @@ -220,8 +220,21 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { ~SCS_SHTRIHFRF() { if(Flags & sfConnected) { - if(!ExecFR(Disconnect)) - LogLastError(); + switch(ConnectionMode) { + case connmodeCom: + case connmodeComV2: + if(!ExecFR(Disconnect)) + LogLastError(); + break; + case connmodeServer: + if(!ExecFR(ServerDisconnect)) + LogLastError(); + break; + default: + constexpr int ShtrihFR_ConnectionModeUndefined = 0; + assert(ShtrihFR_ConnectionModeUndefined); + break; + } } if(--RefToIntrf == 0) ZDELETE(P_DrvFRIntrf); @@ -242,7 +255,7 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { virtual int Diagnostics(StringSet * pSs); // @v12.0.3 private: // @v10.3.9 virtual int InitChannel(); - FR_INTRF * InitDriver(); + FR_INTRF * InitDriver(); int ConnectFR(); int SetupTables(); int AnnulateCheck(); @@ -398,11 +411,11 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { ReceiptRibbonIsPresent, // #82 OutputReceipt, // #83 ReceiptOutputType, // #84 - PrintBarcodeGraph, // @v9.1.4 - BarcodeType, // @v9.1.4 - BarCode, // @v9.1.4 - FirstLineNumber, // @v9.1.5 - LineNumber, // @v9.1.5 + PrintBarcodeGraph, // + BarcodeType, // + BarCode, // + FirstLineNumber, // + LineNumber, // Summ4, // @v10.6.1 Summ5, // @v10.6.1 Summ6, // @v10.6.1 @@ -466,11 +479,7 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { // Descr: Методы вывода штрихкодов // enum { - // @v9.1.7 bcmPrintBarcode = SCS_SHTRIHFRF::PrintBarCode, - // @v9.1.7 bcmPrint2DBarcode = SCS_SHTRIHFRF::Print2DBarcode, bcmPrintBarcodeGraph = SCS_SHTRIHFRF::PrintBarcodeGraph, // ! - // @v9.1.7 bcmPrintBarcodeLine = SCS_SHTRIHFRF::PrintBarcodeLine, - // @v9.1.7 bcmPrintBarcodeUsingPrinter = SCS_SHTRIHFRF::PrintBarcodeUsingPrinter // ! 
}; enum DeviceTypes { devtypeUndef, @@ -489,6 +498,15 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { sfUseWghtSensor = 0x0040, // использовать весовой датчик sfUseFnMethods = 0x0080 // @v10.7.2 Разрешение на использование fn-методов (параметр pp.ini [config] ShtrihFRUseFnMethods) }; + // + // Descr: Варианты соединения с кассовым регистратором + // + enum { + connmodeUndef = 0, // Соединение не установлено + connmodeCom, // Соединение через com-port + connmodeComV2, // Соединение через com-port (вызов Connect2) + connmodeServer // Соединение с сервером (возможно local) (вызов ServerConnect) + }; static FR_INTRF * P_DrvFRIntrf; static int RefToIntrf; static uint PayTypeRegFlags; // @v10.6.1 Флаги успешности получения интерфейсов для Summ1..Summ16 @@ -522,6 +540,7 @@ class SCS_SHTRIHFRF : public PPSyncCashSession { int SCardPaymEntryN; // @v10.6.2 PPINIPARAM_SHTRIHFRSCARDPAYMENTRY Регистр аппарата, в который заносится оплата по корпоративной карте [1..16] long CheckStrLen; // long Flags; // + int ConnectionMode; // @v12.0.5 connmodeXXX SVerT ProtocolVer; // @v11.7.11 @!ConnectFR() uint RibbonParam; // SString AdmName; // Имя сист.администратора @@ -805,7 +824,7 @@ static int GetShtrihVatRateIdent(double vatRate, bool isVatFree) // @v11.2.12 // int SCS_SHTRIHFRF::PrintCheck(CCheckPacket * pPack, uint flags) { - const int use_fn_op = BIN(Flags & sfUseFnMethods); // @v10.7.2 + const bool use_fn_op = LOGIC(Flags & sfUseFnMethods); // @v10.7.2 int ok = 1; int chk_no = 0; bool is_format = false; @@ -1023,23 +1042,14 @@ int SCS_SHTRIHFRF::PrintCheck(CCheckPacket * pPack, uint flags) if(sl_param.ChZnProductType && sl_param.ChZnGTIN.NotEmpty() && sl_param.ChZnSerial.NotEmpty()) { int marking_type = 0; switch(sl_param.ChZnProductType) { - /* @v10.8.11 - case GTCHZNPT_FUR: marking_type = 2; break; - case GTCHZNPT_TOBACCO: marking_type = 5; break; - case GTCHZNPT_SHOE: marking_type = 5408; break; - case GTCHZNPT_MEDICINE: marking_type = 3; break; - */ - // @v10.8.11 { case GTCHZNPT_FUR: marking_type = 0x5246; break; - case GTCHZNPT_TOBACCO: - case GTCHZNPT_ALTTOBACCO: // @v11.9.0 - marking_type = 0x444D; break; + case GTCHZNPT_TOBACCO: marking_type = 0x444D; break; + case GTCHZNPT_ALTTOBACCO: marking_type = 0x444D; break; // @v11.9.0 case GTCHZNPT_SHOE: marking_type = 0x444D; break; case GTCHZNPT_MEDICINE: marking_type = 0x444D; break; case GTCHZNPT_CARTIRE: marking_type = 0x444D; break; // @v10.9.7 case GTCHZNPT_MILK: marking_type = 0x444D; break; // @v11.5.7 case GTCHZNPT_WATER: marking_type = 0x444D; break; // @v11.5.7 - // } @v10.8.11 } if(marking_type) { THROW(SetFR(MarkingType, marking_type)); @@ -1439,9 +1449,8 @@ int SCS_SHTRIHFRF::InitTaxTbl(BillTaxArray * pBTaxAry, PPIDArray * pVatAry, int THROW(SetFR(FieldNumber, FRTAX_FIELD_TAXNAME)); THROW(ExecFR(GetFieldStruct)); { - // @v9.7.1 temp_buf = "НАЛОГ С ПРОДАЖ"; // @cstr #5 - PPLoadText(PPTXT_CCFMT_STAX, temp_buf); // @v9.7.1 - temp_buf.ToUpper().Transf(CTRANSF_INNER_TO_OUTER); // @v9.7.1 + PPLoadText(PPTXT_CCFMT_STAX, temp_buf); // "НАЛОГ С ПРОДАЖ" + temp_buf.ToUpper().Transf(CTRANSF_INNER_TO_OUTER); THROW(SetFR(ValueOfFieldString, temp_buf)); } THROW(ExecFR(WriteTable)); @@ -1456,11 +1465,8 @@ int SCS_SHTRIHFRF::InitTaxTbl(BillTaxArray * pBTaxAry, PPIDArray * pVatAry, int THROW(SetFR(FieldNumber, FRTAX_FIELD_TAXNAME)); THROW(ExecFR(GetFieldStruct)); { - // @v9.0.2 { PPLoadString("vat", temp_buf); temp_buf.Transf(CTRANSF_INNER_TO_OUTER).Space(); - // } @v9.0.2 - // @v9.0.2 PPGetWord(PPWORD_VAT, 1, temp_buf).Space(); (vat_str = 
temp_buf).Cat(fdiv100i(pVatAry->at(pos)), MKSFMTD(0, 2, NMBF_NOTRAILZ)).CatChar('%'); THROW(SetFR(ValueOfFieldString, vat_str)); } @@ -1915,22 +1921,9 @@ FR_INTRF * SCS_SHTRIHFRF::InitDriver() IFC_ENTRY(ReceiptRibbonIsPresent), IFC_ENTRY(OutputReceipt), IFC_ENTRY(ReceiptOutputType), - // @v9.1.7 IFC_ENTRY(PrintBarCode), // @v9.1.4 - // @v9.1.7 IFC_ENTRY(Print2DBarcode), // @v9.1.4 - IFC_ENTRY(PrintBarcodeGraph), // @v9.1.4 - // @v9.1.7 IFC_ENTRY(PrintBarcodeUsingPrinter), // @v9.1.4 - // @v9.1.7 IFC_ENTRY(PrintBarcodeLine), // @v9.1.4 - IFC_ENTRY(BarcodeType), // @v9.1.4 - IFC_ENTRY(BarCode), // @v9.1.4 - // @v9.1.7 IFC_ENTRY(BarcodeDataLength), // @v9.1.4 - // @v9.1.7 IFC_ENTRY(BarWidth), // @v9.1.4 - // @v9.1.7 IFC_ENTRY(BarcodeStartBlockNumber), - // @v9.1.7 IFC_ENTRY(BarcodeParameter1), - // @v9.1.7 IFC_ENTRY(BarcodeParameter2), - // @v9.1.7 IFC_ENTRY(BarcodeParameter3), - // @v9.1.7 IFC_ENTRY(BarcodeParameter4), - // @v9.1.7 IFC_ENTRY(BarcodeParameter5), - // @v9.1.7 IFC_ENTRY(BarcodeAlignment), + IFC_ENTRY(PrintBarcodeGraph), + IFC_ENTRY(BarcodeType), + IFC_ENTRY(BarCode), IFC_ENTRY(FirstLineNumber), IFC_ENTRY(LineNumber), IFC_ENTRY(FNOperation), // @v10.7.2 @@ -2006,165 +1999,6 @@ FR_INTRF * SCS_SHTRIHFRF::InitDriver() PPLogMessage(PPFILNAM_SHTRIH_LOG, 0, LOGMSGF_LASTERR_TIME_USER); ZDELETE(p_drv); } -#if 0 // @v11.6.7 { - THROW(ASSIGN_ID_BY_NAME(p_drv, ResultCode) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ResultCodeDescription) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Password) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Beep) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ComNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, BaudRate) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Timeout) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, GetExchangeParam) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, SetExchangeParam) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Connect) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Disconnect) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Quantity) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Price) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Summ1) > 0); - PayTypeRegFlags |= (1U << 1); // @v10.6.1 - THROW(ASSIGN_ID_BY_NAME(p_drv, Summ2) > 0); - PayTypeRegFlags |= (1U << 2); // @v10.6.1 - THROW(ASSIGN_ID_BY_NAME(p_drv, Summ3) > 0); - PayTypeRegFlags |= (1U << 3); // @v10.6.1 - - if(ASSIGN_ID_BY_NAME(p_drv, Summ4)) PayTypeRegFlags |= (1U << 4); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ5)) PayTypeRegFlags |= (1U << 5); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ6)) PayTypeRegFlags |= (1U << 6); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ7)) PayTypeRegFlags |= (1U << 7); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ8)) PayTypeRegFlags |= (1U << 8); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ9)) PayTypeRegFlags |= (1U << 9); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ10)) PayTypeRegFlags |= (1U << 10); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ11)) PayTypeRegFlags |= (1U << 11); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ12)) PayTypeRegFlags |= (1U << 12); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ13)) PayTypeRegFlags |= (1U << 13); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ14)) PayTypeRegFlags |= (1U << 14); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ15)) PayTypeRegFlags |= (1U << 15); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, Summ16)) PayTypeRegFlags |= (1U << 16); // @v10.6.1 - if(ASSIGN_ID_BY_NAME(p_drv, CloseCheckEx)) ExtMethodsFlags |= extmethfCloseCheckEx; // @v10.6.3 - // @v11.2.11 { - if(ExtMethodsFlags & extmethfCloseCheckEx) - THROW(ASSIGN_ID_BY_NAME(p_drv, TaxType) > 0); - // } 
@v11.2.11 - THROW(ASSIGN_ID_BY_NAME(p_drv, Tax1) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Tax2) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Tax3) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Tax4) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, StringForPrinting) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, UseReceiptRibbon) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, UseJournalRibbon) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintString) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintWideString) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, StringQuantity) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, FeedDocument) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, DocumentName) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, DocumentNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintDocumentTitle) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, CheckType) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, OpenCheck) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Sale) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ReturnSale) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, CloseCheck) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, CutCheck) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, DrawerNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, OpenDrawer) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, TableNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, RowNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, FieldNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, GetFieldStruct) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ReadTable) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, WriteTable) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ValueOfFieldInteger) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ValueOfFieldString) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, RegisterNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, GetOperationReg) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ContentsOfOperationRegister) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, GetCashReg) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ContentsOfCashRegister) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, GetECRStatus) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ECRMode) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ECRModeDescription) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ECRAdvancedMode) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ReceiptRibbonOpticalSensor) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, JournalRibbonOpticalSensor) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ContinuePrint) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, CancelCheck) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintReportWithCleaning) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintReportWithoutCleaning) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, UModel) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, UMajorProtocolVersion) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, UMinorProtocolVersion) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, GetDeviceMetrics) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, CashIncome) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, CashOutcome) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ClearSlipDocumentBuffer) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, FillSlipDocumentWithUnfiscalInfo) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, StringNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintSlipDocument) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, IsClearUnfiscalInfo) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, InfoType) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, EKLZIsPresent) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, IsEKLZOverflow) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, FMOverflow) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, FreeRecordInFM) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, IsFM24HoursOver) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, IsDrawerOpen) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, Department) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, 
ECRModeStatus) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, JournalRibbonIsPresent) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ReceiptRibbonIsPresent) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, OutputReceipt) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, ReceiptOutputType) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, PrintBarCode) > 0); // @v9.1.4 - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, Print2DBarcode) > 0); // @v9.1.4 - THROW(ASSIGN_ID_BY_NAME(p_drv, PrintBarcodeGraph) > 0); // @v9.1.4 - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, PrintBarcodeUsingPrinter) > 0); // @v9.1.4 - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, PrintBarcodeLine) > 0); // @v9.1.4 - THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeType) > 0); // @v9.1.4 - THROW(ASSIGN_ID_BY_NAME(p_drv, BarCode) > 0); // @v9.1.4 - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeDataLength) > 0); // @v9.1.4 - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarWidth) > 0); // @v9.1.4 - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeStartBlockNumber) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeParameter1) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeParameter2) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeParameter3) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeParameter4) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeParameter5) > 0); - // @v9.1.7 THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeAlignment) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, FirstLineNumber) > 0); - THROW(ASSIGN_ID_BY_NAME(p_drv, LineNumber) > 0); - - THROW(ASSIGN_ID_BY_NAME(p_drv, FNOperation) > 0); // @v10.7.2 - THROW(ASSIGN_ID_BY_NAME(p_drv, PaymentTypeSign) > 0); // @v10.7.2 Признак способа расчета - THROW(ASSIGN_ID_BY_NAME(p_drv, PaymentItemSign) > 0); // @v10.7.2 Признак предмета расчета - THROW(ASSIGN_ID_BY_NAME(p_drv, FNSendItemCodeData) > 0); // @v10.7.2 - THROW(ASSIGN_ID_BY_NAME(p_drv, MarkingType) > 0); // @v10.7.2 - THROW(ASSIGN_ID_BY_NAME(p_drv, GTIN) > 0); // @v10.7.2 - THROW(ASSIGN_ID_BY_NAME(p_drv, SerialNumber) > 0); // @v10.7.2 - THROW(ASSIGN_ID_BY_NAME(p_drv, FNBeginSTLVTag) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, TagID) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, TagNumber) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, TagType) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, TagValueStr) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, FNAddTag) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, FNSendSTLVTag) > 0); // @v10.9.0 - THROW(ASSIGN_ID_BY_NAME(p_drv, FNCheckItemBarcode) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, FNCheckItemBarcode2) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, BarcodeHex) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, ItemStatus) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, CheckItemMode) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, TLVDataHex) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, CheckItemLocalResult) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, CheckItemLocalError) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, MarkingType2) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, KMServerErrorCode) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, KMServerCheckingStatus) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, DivisionalQuantity) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, Numerator) > 0); // @v11.6.6 - THROW(ASSIGN_ID_BY_NAME(p_drv, Denominator) > 0); // @v11.6.6 - CATCH - ZDELETE(p_drv); - ENDCATCH -#endif // } 0 @v11.6.7 return p_drv; } @@ -2351,9 +2185,8 @@ int SCS_SHTRIHFRF::SetupTables() // 
Наименования типов оплат // { - // @v9.7.1 temp_buf = "БЕЗНАЛИЧНАЯ ОПЛАТА"; // @cstr #13 - PPLoadText(PPTXT_CCFMT_CASHLESSPAYM, temp_buf); // @v9.7.1 - temp_buf.ToUpper().Transf(CTRANSF_INNER_TO_OUTER); // @v9.7.1 + PPLoadText(PPTXT_CCFMT_CASHLESSPAYM, temp_buf); // БЕЗНАЛИЧНАЯ ОПЛАТА + temp_buf.ToUpper().Transf(CTRANSF_INNER_TO_OUTER); THROW(WriteStringToTbl(FRPAYMTYPE_TBL, 2, FRPAYMTYPE_NAME, temp_buf)); } THROW(WriteStringToTbl(FRPAYMTYPE_TBL, 3, FRPAYMTYPE_NAME, "")); @@ -2370,69 +2203,95 @@ int SCS_SHTRIHFRF::SetupTables() int SCS_SHTRIHFRF::ConnectFR() { int ok = -1; + const char * p_server_name = "localhost"; // @v12.0.3 if(Flags & sfConnected) { if(RefToIntrf > 1) { - THROW(ExecFR(Disconnect)); - THROW(SetFR(ComNumber, Handle)); - THROW(ExecFR(Connect)); + if(sstreqi_ascii(Port, "server")) { + THROW(ExecFR(ServerDisconnect)); + THROW(SetFR(ComputerName, p_server_name)); + THROW(ExecFR_WithoutPassword(ServerConnect)); + } + else { + THROW(ExecFR(Disconnect)); + THROW(SetFR(ComNumber, Handle)); + THROW(ExecFR(Connect)); + } THROW(AnnulateCheck()); } } else { - //#define DEF_BAUD_RATE 2 - //#define MAX_BAUD_RATE 6 // Для Штрих-ФР max скорость обмена 115200 бод - const int __def_baud_rate = 2; // Для Штрих-ФР скорость обмена по умолчанию 9600 бод - const int __max_baud_rate = 6; // Для Штрих-ФР max скорость обмена 115200 бод - - int baud_rate; int model_type = 0; int major_prot_ver = 0; int minor_prot_ver = 0; int not_use_wght_sensor = 0; - long def_baud_rate = __def_baud_rate; - int def_timeout = -1; + int temp_int = 0; SString buf, buf1; PPIniFile ini_file; - int temp_int = 0; - Flags &= ~sfUseFnMethods; // @v10.7.2 - SCardPaymEntryN = ini_file.GetInt(PPINISECT_CONFIG, PPINIPARAM_SHTRIHFRSCARDPAYMENTRY, &temp_int) ? inrangeordefault(temp_int, 1, 16, 0) : 0; // @v10.6.2 + Flags &= ~sfUseFnMethods; + SCardPaymEntryN = ini_file.GetInt(PPINISECT_CONFIG, PPINIPARAM_SHTRIHFRSCARDPAYMENTRY, &temp_int) ? inrangeordefault(temp_int, 1, 16, 0) : 0; THROW_PP(ini_file.Get(PPINISECT_SYSTEM, PPINIPARAM_SHTRIHFRPASSWORD, buf) > 0, PPERR_SHTRIHFRADMPASSW); buf.Divide(',', buf1, AdmName); CashierPassword = AdmPassword = buf1.ToLong(); AdmName.Strip().Transf(CTRANSF_INNER_TO_OUTER); - /* - Тайм-аут приема байта. Тайм-аут приема байта нелинейный. Диапазон допустимых значений - [0…255] распадается на три диапазона: - -- в диапазоне [0…150] каждая единица соответствует 1 мс, т.е. данным диапазоном задаются значения тайм-аута от 0 до 150 мс; - -- в диапазоне [151…249] каждая единица соответствует 150 мс, т.е. данным диапазоном задаются значения тайм-аута от 300 мс до 15 сек; - -- в диапазоне [250…255] каждая единица соответствует 15 сек, т.е. данным диапазоном задаются значения тайм-аута от 30 сек до 105 сек. 
- */ - if(ini_file.Get(PPINISECT_CONFIG, PPINIPARAM_SHTRIHFRCONNECTPARAM, buf) > 0) { - SString buf2; - if(buf.Divide(',', buf1, buf2) > 0) - def_timeout = buf2.ToLong(); - def_baud_rate = buf1.ToLong(); - if(def_baud_rate > __max_baud_rate) - def_baud_rate = __def_baud_rate; - } - THROW_PP(PortType == COM_PORT, PPERR_SYNCCASH_INVPORT); - THROW(SetFR(ComNumber, Handle)); - if(def_timeout >= 0 && def_timeout < MAX_TIMEOUT) { - // @v11.6.12 (@development) THROW(SetFR(Timeout, def_timeout)); - } - THROW((ok = ExecFR(Connect)) > 0 || ResCode == RESCODE_NO_CONNECTION); - for(baud_rate = 0; !ok && baud_rate <= __max_baud_rate; baud_rate++) { - THROW(SetFR(BaudRate, baud_rate)); + // @v12.0.3 { + if(sstreqi_ascii(Port, "server")) { + // ServerConnect + int server_connected = 0; + char server_version[256]; + server_version[0] = 0; + P_DrvFRIntrf->GetProperty(ServerConnected, &server_connected); + THROW(SetFR(ComputerName, p_server_name)); + THROW(ExecFR_WithoutPassword(ServerConnect)); + P_DrvFRIntrf->GetProperty(ServerConnected, &server_connected); + P_DrvFRIntrf->GetProperty(ServerVersion, server_version, sizeof(server_version)); + //ExecFR(Beep); + Flags |= sfConnected; + ConnectionMode = connmodeServer; + } // } @v12.0.3 + else { + //#define DEF_BAUD_RATE 2 + //#define MAX_BAUD_RATE 6 // Для Штрих-ФР max скорость обмена 115200 бод + const int __def_baud_rate = 2; // Для Штрих-ФР скорость обмена по умолчанию 9600 бод + const int __max_baud_rate = 6; // Для Штрих-ФР max скорость обмена 115200 бод + + int baud_rate; + long def_baud_rate = __def_baud_rate; + int def_timeout = -1; + /* + Тайм-аут приема байта. Тайм-аут приема байта нелинейный. Диапазон допустимых значений + [0…255] распадается на три диапазона: + -- в диапазоне [0…150] каждая единица соответствует 1 мс, т.е. данным диапазоном задаются значения тайм-аута от 0 до 150 мс; + -- в диапазоне [151…249] каждая единица соответствует 150 мс, т.е. данным диапазоном задаются значения тайм-аута от 300 мс до 15 сек; + -- в диапазоне [250…255] каждая единица соответствует 15 сек, т.е. данным диапазоном задаются значения тайм-аута от 30 сек до 105 сек. 
+ */ + if(ini_file.Get(PPINISECT_CONFIG, PPINIPARAM_SHTRIHFRCONNECTPARAM, buf) > 0) { + SString buf2; + if(buf.Divide(',', buf1, buf2) > 0) + def_timeout = buf2.ToLong(); + def_baud_rate = buf1.ToLong(); + if(def_baud_rate > __max_baud_rate) + def_baud_rate = __def_baud_rate; + } + THROW_PP(PortType == COM_PORT, PPERR_SYNCCASH_INVPORT); + THROW(SetFR(ComNumber, Handle)); + if(def_timeout >= 0 && def_timeout < MAX_TIMEOUT) { + // @v11.6.12 (@development) THROW(SetFR(Timeout, def_timeout)); + } THROW((ok = ExecFR(Connect)) > 0 || ResCode == RESCODE_NO_CONNECTION); - } - THROW(ok > 0); - Flags |= sfConnected; - THROW(GetFR(BaudRate, &baud_rate)); - if(baud_rate != def_baud_rate) { - THROW(SetFR(BaudRate, def_baud_rate)); - THROW(ExecFR(SetExchangeParam)); - THROW(ExecFR(Disconnect)); - THROW(ExecFR(Connect)); + for(baud_rate = 0; !ok && baud_rate <= __max_baud_rate; baud_rate++) { + THROW(SetFR(BaudRate, baud_rate)); + THROW((ok = ExecFR(Connect)) > 0 || ResCode == RESCODE_NO_CONNECTION); + } + THROW(ok > 0); + Flags |= sfConnected; + ConnectionMode = connmodeCom; // @v12.0.3 + THROW(GetFR(BaudRate, &baud_rate)); + if(baud_rate != def_baud_rate) { + THROW(SetFR(BaudRate, def_baud_rate)); + THROW(ExecFR(SetExchangeParam)); + THROW(ExecFR(Disconnect)); + THROW(ExecFR(Connect)); + } } THROW(ExecFR(GetDeviceMetrics) > 0); THROW(GetFR(UModel, &model_type)); @@ -2570,7 +2429,7 @@ bool SCS_SHTRIHFRF::GetFR(int id, char * pBuf, size_t bufLen) int SCS_SHTRIHFRF::GetResultCode(int methID) { const int func_without_retcode_checking[] = { GetECRStatus, Beep, Connect, OpenCheck, FNOperation, CloseCheckEx, CloseCheck, - FeedDocument }; // @v11.7.12 FeedDocument + FeedDocument, FNCheckItemBarcode, FNCheckItemBarcode2 }; // @v11.7.12 FeedDocument // @v12.0.3 FNCheckItemBarcode, FNCheckItemBarcode2 int ok = 1; { if(methID == ServerConnect) { @@ -2883,6 +2742,7 @@ void SCS_SHTRIHFRF::SetErrorMessage() } else*/ if(ExtMethodsFlags & extmethfFNCheckItemBarcode) { + const bool use_FNCheckItemBarcode2 = true; // Если false, то используем FNCheckItemBarcode THROW(ConnectFR()); // //THROW(SetFR(BarCode, pCode)); @@ -2896,9 +2756,18 @@ void SCS_SHTRIHFRF::SetErrorMessage() } THROW(SetFR(ItemStatus, 1L)); THROW(SetFR(CheckItemMode, 0L)); - THROW(SetFR(TLVDataHex, "")); - tlv_data_hex[0] = 0; - THROW(ExecFR(FNCheckItemBarcode)); + if(use_FNCheckItemBarcode2) { + //IFC_ENTRY_SS(DivisionalQuantity, ExtMethodsFlags, extmethfDivisionalQuantity), // @v11.6.6 / ! + //IFC_ENTRY_SS(Numerator, ExtMethodsFlags, extmethfNumerator), // @v11.6.6 / ! + //IFC_ENTRY_SS(Denominator, ExtMethodsFlags, extmethfDenominator), // @v11.6.6 / ! + THROW(SetFR(DivisionalQuantity, false)); + tlv_data_hex[0] = 0; + THROW(ExecFR(FNCheckItemBarcode2)); + } + else { + THROW(SetFR(TLVDataHex, "")); + THROW(ExecFR(FNCheckItemBarcode)); + } { msg_buf.Z().Cat("FNCheckItemBarcode req").CatDiv(':', 2).CatEq("BarCode", pCode).CatDiv(',', 2). CatEq("ItemStatus", 1).CatDiv(',', 2).CatEq("CheckItemMode", 0).CatDiv(',', 2). 
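The nonlinear byte-timeout encoding quoted inside ConnectFR() above decodes piecewise. The linear formulas below are inferred from the stated endpoints (150 ms steps spanning 300 ms to 15 s, 15 s steps spanning 30 s to 105 s), so treat this as one consistent reading rather than the driver's definition:

```cpp
#include <cassert>

static long ShtrihTimeoutCodeToMs(int code)
{
	if(code <= 150)
		return code;                  // [0..150]: 1 ms units -> 0..150 ms
	else if(code <= 249)
		return (code - 149) * 150L;   // [151..249]: 150 ms units -> 300 ms..15 s
	else
		return (code - 248) * 15000L; // [250..255]: 15 s units -> 30 s..105 s
}

int main()
{
	assert(ShtrihTimeoutCodeToMs(150) == 150);
	assert(ShtrihTimeoutCodeToMs(151) == 300);
	assert(ShtrihTimeoutCodeToMs(249) == 15000);
	assert(ShtrihTimeoutCodeToMs(250) == 30000);
	assert(ShtrihTimeoutCodeToMs(255) == 105000);
	return 0;
}
```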
diff --git a/Src/PPEquip/atoldrv.cpp b/Src/PPEquip/atoldrv.cpp index 6dc90e1b3b..e5cfab8ae5 100644 --- a/Src/PPEquip/atoldrv.cpp +++ b/Src/PPEquip/atoldrv.cpp @@ -2140,6 +2140,7 @@ int SCS_ATOLDRV::PrintCheck(CCheckPacket * pPack, uint flags) case GTCHZNPT_PERFUMERY: marking_type = 0x444D; break; // @v11.3.12 Парфюмерия case GTCHZNPT_MILK: marking_type = 0x444D; break; // @v11.3.12 Молоко case GTCHZNPT_WATER: marking_type = 0x444D; break; // @v11.3.12 Вода + case GTCHZNPT_BEER: marking_type = 0x444D; break; // @v12.0.3 Пиво } if(marking_type >= 0) { P_Fptr10->SetParamIntProc(fph, LIBFPTR_PARAM_NOMENCLATURE_TYPE, marking_type); diff --git a/Src/PPEquip/frontol.cpp b/Src/PPEquip/frontol.cpp index f96b5e242c..d9ace3daf1 100644 --- a/Src/PPEquip/frontol.cpp +++ b/Src/PPEquip/frontol.cpp @@ -656,6 +656,7 @@ int ACS_FRONTOL::ExportData(int updOnly) case GTCHZNPT_MILK: mark_type = 13; break; // @v11.3.5 case GTCHZNPT_WATER: mark_type = 15; break; // @v11.5.6 case GTCHZNPT_DRAFTBEER: mark_type = 18; break; // @v11.9.2 + case GTCHZNPT_BEER: mark_type = 17; break; // @v12.0.3 default: if(gds_info.ChZnProdType) mark_type = 7; // 7–иная маркированная продукция diff --git a/Src/PPLib/Cshses.cpp b/Src/PPLib/Cshses.cpp index 972b421cf6..e7313d63b4 100644 --- a/Src/PPLib/Cshses.cpp +++ b/Src/PPLib/Cshses.cpp @@ -50,7 +50,7 @@ int PPSyncCashSession::Init(const char * pName, const char * pPort) if(pPort) { STRNSCPY(Port, pPort); int c = 0; - const int comdvcs = IsComDvcSymb(pPort, &c); + const int comdvcs = IsComDvcSymb(Port, &c); if(comdvcs == comdvcsCom && (c >= 1 && c <= 32)) { PortType = 2; Handle = c; @@ -63,6 +63,9 @@ int PPSyncCashSession::Init(const char * pName, const char * pPort) PortType = 1; Handle = 1; } + else if(sstreqi_ascii(Port, "server")) { // @v12.0.3 + PortType = 3; + } if(PortType == 0) Handle = open(pPort, O_CREAT|/*O_TRUNC*/O_APPEND|O_TEXT|O_WRONLY, S_IWRITE); else { diff --git a/Src/PPLib/chzn.cpp b/Src/PPLib/chzn.cpp index 375450501a..12bb552422 100644 --- a/Src/PPLib/chzn.cpp +++ b/Src/PPLib/chzn.cpp @@ -2522,6 +2522,7 @@ int ChZnInterface::TransmitDocument2(const InitBlock & rIb, const ChZnInterface: case GTCHZNPT_TEXTILE: p_chzn_prodtype_symb = "lp"; break; case GTCHZNPT_PERFUMERY: p_chzn_prodtype_symb = "perfumery"; break; case GTCHZNPT_MILK: p_chzn_prodtype_symb = "milk"; break; + case GTCHZNPT_BEER: p_chzn_prodtype_symb = "beer"; break; // @v12.0.3 @? 
I am not sure the "beer" symbol is correct here - it was entered intuitively (not from the documentation) } if(!isempty(p_chzn_prodtype_symb)) { temp_buf.Z().CatEq("pg", p_chzn_prodtype_symb); diff --git a/Src/PPLib/ie_bill.cpp b/Src/PPLib/ie_bill.cpp index 0b8e6d4dd9..0a6f3407f5 100644 --- a/Src/PPLib/ie_bill.cpp +++ b/Src/PPLib/ie_bill.cpp @@ -383,29 +383,6 @@ int DocNalogRu_Reader::ReadFile(const char * pFileName, FileInfo & rHeader, TSCo } } } - // @v12.0.3 { - /* - < ="" ="01.01.2006"/> - < =""/> - < ="" =" "/> - < ="" =" "/> - < ="" ="00000001"/> - */ - else if(SXml::IsName(p_n3, GetToken_Utf8(PPHSC_RU_PARAMETER))) { - if(SXml::GetAttrib(p_n3, GetToken_Utf8(PPHSC_RU_NAME), temp_buf)) { - extra_key = temp_buf; - } - if(SXml::GetAttrib(p_n3, GetToken_Utf8(PPHSC_RU_VALUE), temp_buf)) { - extra_val = temp_buf; - } - if(extra_key == GetToken_Utf8(PPHSC_RU_PARAM_DIVCODE)) { - (p_new_doc->ConsigneeDivCode = extra_val).Transf(CTRANSF_UTF8_TO_INNER); - } - else if(extra_key == GetToken_Utf8(PPHSC_RU_PARAM_DIVNAME)) { - (p_new_doc->ConsigneeDivName = extra_val).Transf(CTRANSF_UTF8_TO_INNER); - } - } - // } @v12.0.3 } } else if(SXml::IsName(p_n2, GetToken_Utf8(PPHSC_RU_INVOICETAB))) { @@ -536,6 +513,29 @@ int DocNalogRu_Reader::ReadFile(const char * pFileName, FileInfo & rHeader, TSCo if(p_part) ReadParticipant(p_n2, *p_part); } + // @v12.0.3 { + /* + < ="" ="01.01.2006"/> + < =""/> + < ="" =" "/> + < ="" =" "/> + < ="" ="00000001"/> + */ + else if(SXml::IsName(p_n2, GetToken_Utf8(PPHSC_RU_PARAMETER))) { + if(SXml::GetAttrib(p_n2, GetToken_Utf8(PPHSC_RU_NAME), temp_buf)) { + extra_key = temp_buf; + } + if(SXml::GetAttrib(p_n2, GetToken_Utf8(PPHSC_RU_VALUE), temp_buf)) { + extra_val = temp_buf; + } + if(extra_key == GetToken_Utf8(PPHSC_RU_PARAM_DIVCODE)) { + (p_new_doc->ConsigneeDivCode = extra_val).Transf(CTRANSF_UTF8_TO_INNER); + } + else if(extra_key == GetToken_Utf8(PPHSC_RU_PARAM_DIVNAME)) { + (p_new_doc->ConsigneeDivName = extra_val).Transf(CTRANSF_UTF8_TO_INNER); + } + } + // } @v12.0.3 else if(SXml::IsName(p_n2, GetToken_Utf8(PPHSC_RU_TABOFDOC))) { // @v11.9.5 SBIS for(const xmlNode * p_n3 = p_n2->children; p_n3; p_n3 = p_n3->next) { if(SXml::IsName(p_n3, GetToken_Utf8(PPHSC_RU_TABOFDOCLINE))) { @@ -4386,7 +4386,7 @@ int PPBillImporter::DoFullEdiProcess() if(Flags & fTestMode) ediprvimp_ctr_flags |= PPEdiProcessor::ProviderImplementation::ctrfTestMode; for(SEnum en = ediprv_obj.Enum(0); en.Next(&ediprv_rec) > 0;) { - p_prvimp = (ediprv_rec.Flags & PPEdiProvider::fPassive) ? 0 : PPEdiProcessor::CreateProviderImplementation(ediprv_rec.ID, main_org_id, ediprvimp_ctr_flags); + p_prvimp = (ediprv_rec.Flags & PPEdiProvider::fPassive) ?
0 : PPEdiProcessor::CreateProviderImplementation(ediprv_rec.ID, main_org_id, ediprvimp_ctr_flags, &Logger); if(p_prvimp) { PPEdiProcessor prc(p_prvimp, &Logger); PPEdiProcessor::DocumentInfo doc_inf; diff --git a/Src/PPLib/ppedi.cpp b/Src/PPLib/ppedi.cpp index 0a46755fcf..7208e54abb 100644 --- a/Src/PPLib/ppedi.cpp +++ b/Src/PPLib/ppedi.cpp @@ -2522,8 +2522,8 @@ PPEdiProcessor::ProviderImplementation::OwnFormatContractor & PPEdiProcessor::Pr return *this; } -PPEdiProcessor::ProviderImplementation::ProviderImplementation(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags) : - Epp(rEpp), MainOrgID(mainOrgID), Flags(flags), P_BObj(BillObj) +PPEdiProcessor::ProviderImplementation::ProviderImplementation(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags, PPLogger * pLogger) : + Epp(rEpp), MainOrgID(mainOrgID), Flags(flags), P_BObj(BillObj), P_Logger(pLogger) { PPAlbatrosCfgMngr::Get(&ACfg); Arp.SetConfig(0); @@ -2532,6 +2532,7 @@ PPEdiProcessor::ProviderImplementation::ProviderImplementation(const PPEdiProvid PPEdiProcessor::ProviderImplementation::~ProviderImplementation() { + P_Logger = 0; } const SString & FASTCALL PPEdiProcessor::ProviderImplementation::EncXmlText(const char * pS) @@ -4806,7 +4807,7 @@ static const SIntToSymbTabEntry EanComIticSymbList[] = { class EdiProviderImplementation_Kontur : public PPEdiProcessor::ProviderImplementation { public: - EdiProviderImplementation_Kontur(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags); + EdiProviderImplementation_Kontur(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags, PPLogger * pLogger); virtual ~EdiProviderImplementation_Kontur(); virtual int GetDocumentList(const PPBillIterchangeFilt & rP, PPEdiProcessor::DocumentInfoList & rList); virtual int ReceiveDocument(const PPEdiProcessor::DocumentInfo * pIdent, TSCollection & rList); @@ -4846,7 +4847,7 @@ class EdiProviderImplementation_Kontur : public PPEdiProcessor::ProviderImplemen class EdiProviderImplementation_Exite : public PPEdiProcessor::ProviderImplementation { public: - EdiProviderImplementation_Exite(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags); + EdiProviderImplementation_Exite(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags, PPLogger * pLogger); virtual ~EdiProviderImplementation_Exite(); virtual int GetDocumentList(const PPBillIterchangeFilt & rP, PPEdiProcessor::DocumentInfoList & rList); virtual int ReceiveDocument(const PPEdiProcessor::DocumentInfo * pIdent, TSCollection & rList); @@ -4876,7 +4877,7 @@ class EdiProviderImplementation_SBIS : public PPEdiProcessor::ProviderImplementa /* СвОЭДОтпр ИННЮЛ="7605016030" ИдЭДО="2BE" НаимОрг="ООО "Компания "Тензор""/> */ - EdiProviderImplementation_SBIS(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags); + EdiProviderImplementation_SBIS(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags, PPLogger * pLogger); virtual ~EdiProviderImplementation_SBIS(); virtual int GetDocumentList(const PPBillIterchangeFilt & rP, PPEdiProcessor::DocumentInfoList & rList); virtual int ReceiveDocument(const PPEdiProcessor::DocumentInfo * pIdent, TSCollection & rList); @@ -4891,8 +4892,8 @@ class EdiProviderImplementation_SBIS : public PPEdiProcessor::ProviderImplementa // // // -EdiProviderImplementation_SBIS::EdiProviderImplementation_SBIS(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags) : - PPEdiProcessor::ProviderImplementation(rEpp, mainOrgID, flags) +EdiProviderImplementation_SBIS::EdiProviderImplementation_SBIS(const PPEdiProviderPacket & 
rEpp, PPID mainOrgID, long flags, PPLogger * pLogger) : + PPEdiProcessor::ProviderImplementation(rEpp, mainOrgID, flags, pLogger) { } @@ -5254,7 +5255,8 @@ int EdiProviderImplementation_SBIS::Write_ORDERRSP(xmlTextWriter * pX, const S_G int EdiProviderImplementation_SBIS::ProcessDocument(DocNalogRu_Reader::DocumentInfo * pNrDoc, TSCollection & rList) { int ok = 1; - //bool debug_mark = false; // @v12.0.2 @debug + Reference * p_ref = PPRef; + bool debug_mark = false; // @v12.0.2 @debug PPEdiProcessor::Packet * p_pack = 0; SString temp_buf; SString addendum_msg_buf; @@ -5305,12 +5307,50 @@ int EdiProviderImplementation_SBIS::ProcessDocument(DocNalogRu_Reader::DocumentI } } // } @v11.9.9 + // @debug { + /*if(strstr(pNrDoc->Code, "9695") != 0) { + debug_mark = true; + }*/ + // } @debug + if(pNrDoc->ConsigneeDivCode.NotEmpty()) { + if(p_bp->Rec.Object) { + PPID local_acs_id = 0; + PPID buyer_psn_id = ObjectToPerson(p_bp->Rec.Object, &local_acs_id); + if(buyer_psn_id) { + ObjTagItem tag_item; + if(p_ref->Ot.GetTag(PPOBJ_PERSON, buyer_psn_id, PPTAG_PERSON_EXTDLVRLOCCODETAG, &tag_item) > 0) { + PPID loc_code_tag_id = 0; + if(tag_item.GetInt(&loc_code_tag_id) > 0) { + PPIDArray loc_by_divcode_list; + if(p_ref->Ot.SearchObjectsByStr(PPOBJ_LOCATION, loc_code_tag_id, pNrDoc->ConsigneeDivCode, &loc_by_divcode_list) > 0) { + assert(loc_by_divcode_list.getCount()); + if(loc_by_divcode_list.getCount() == 1) { + PPID dlvrloc_id = loc_by_divcode_list.get(0); + debug_mark = true; // @debug + LocationTbl::Rec loc_rec; + if(PsnObj.LocObj.Fetch(dlvrloc_id, &loc_rec) > 0) { + if(loc_rec.OwnerID && loc_rec.OwnerID == buyer_psn_id) { + PPFreight freight; + p_bp->GetFreight(&freight); + freight.SetupDlvrAddr(dlvrloc_id); + p_bp->SetFreight(&freight); + } + } + } + } + } + } + } + } + } PPObjBill::MakeCodeString(&p_bp->Rec, 0, bill_text); // @v12.0.2 // @v12.0.2 @debug { /*if(strstr(p_bp->Rec.Code, "00008734")) { debug_mark = true; }*/ // } @v12.0.2 @debug + uint local_iter_error_count = 0; // @v12.0.3 Счетчик локальных ошибок. Если в конце цикла он не нулевой, то генерируется выход по ошибке. + // Введено с целью информирования об ошибках во всех строках, а не только о первой встретившейся. 
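The two added blocks above carry the substance of this change: the ConsigneeDivCode read from the document parameters is resolved to a delivery address through the buyer person's PPTAG_PERSON_EXTDLVRLOCCODETAG tag, and local_iter_error_count switches line processing from fail-fast to log-and-continue, so every unresolvable goods line is reported before the document is rejected once with PPERR_EDI_DOCNOTACCEPTED_TIFAULT. A reduced sketch of the log-and-continue pattern (Line, Logger and ResolveLine are stand-ins, not the PPLogger/GoodsItem API):

#include <vector>
#include <cstddef>

struct Line { int goods_id = 0; };        // stand-in for a goods item to resolve
struct Logger { void LogLastError() {} }; // stand-in for PPLogger
static bool ResolveLine(const Line & r) { return r.goods_id != 0; }

// With a logger attached, every bad line is logged and the failure is raised
// once at the end; without one, behavior stays fail-fast on the first bad line.
static bool ProcessLines(const std::vector<Line> & rLines, Logger * pLogger)
{
	unsigned err_count = 0;
	for(std::size_t i = 0; i < rLines.size(); i++) {
		if(!ResolveLine(rLines[i])) {
			if(!pLogger)
				return false; // old behavior: stop at the first unresolved line
			err_count++;      // new behavior: remember the failure, keep going
			pLogger->LogLastError();
		}
	}
	return (err_count == 0); // aggregate verdict, cf. THROW_PP_S(!local_iter_error_count, ...)
}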
for(uint gitemidx = 0; gitemidx < pNrDoc->GoodsItemList.getCount(); gitemidx++) { const DocNalogRu_Base::GoodsItem * p_gitem = pNrDoc->GoodsItemList.at(gitemidx); if(p_gitem) { @@ -5371,8 +5411,15 @@ int EdiProviderImplementation_SBIS::ProcessDocument(DocNalogRu_Reader::DocumentI addendum_msg_buf.CatDivIfNotEmpty('/', 0).Cat(p_gitem->SupplCode); if(p_gitem->GoodsName.NotEmpty()) addendum_msg_buf.CatDivIfNotEmpty('/', 0).Cat(p_gitem->GoodsName); - addendum_msg_buf.CatDivIfNotEmpty(':', 1).Cat(p_bp->Rec.Code).CatDiv('-', 1).Cat(p_bp->Rec.Dt, DATF_DMY); - CALLEXCEPT_PP_S(PPERR_EDI_UNBLRSLV_GOODS, addendum_msg_buf); + // @v12.0.3 addendum_msg_buf.CatDivIfNotEmpty(':', 1).Cat(p_bp->Rec.Code).CatDiv('-', 1).Cat(p_bp->Rec.Dt, DATF_DMY); + if(P_Logger) { + local_iter_error_count++; + PPSetError(PPERR_EDI_UNBLRSLV_GOODS, addendum_msg_buf); + P_Logger->LogLastError(); + } + else { + CALLEXCEPT_PP_S(PPERR_EDI_UNBLRSLV_GOODS, addendum_msg_buf); + } } ti.Init(&p_bp->Rec, 0, 0); ti.RByBill = static_cast(p_gitem->RowN); @@ -5383,6 +5430,7 @@ int EdiProviderImplementation_SBIS::ProcessDocument(DocNalogRu_Reader::DocumentI } } } + THROW_PP_S(!local_iter_error_count, PPERR_EDI_DOCNOTACCEPTED_TIFAULT, bill_text); rList.insert(p_pack); p_pack = 0; // Обнуляем указатель, поскольку владение им передано rList } @@ -6649,20 +6697,20 @@ PPEdiProcessor::Packet::~Packet() P_ExtData = 0; } -/*static*/PPEdiProcessor::ProviderImplementation * PPEdiProcessor::CreateProviderImplementation(PPID ediPrvID, PPID mainOrgID, long flags) +/*static*/PPEdiProcessor::ProviderImplementation * PPEdiProcessor::CreateProviderImplementation(PPID ediPrvID, PPID mainOrgID, long flags, PPLogger * pLogger) { ProviderImplementation * p_imp = 0; PPObjEdiProvider ep_obj; PPEdiProviderPacket ep_pack; THROW(ep_obj.GetPacket(ediPrvID, &ep_pack) > 0); if(sstreqi_ascii(ep_pack.Rec.Symb, "KONTUR") || sstreqi_ascii(ep_pack.Rec.Symb, "KONTUR-T")) { - p_imp = new EdiProviderImplementation_Kontur(ep_pack, mainOrgID, flags); + p_imp = new EdiProviderImplementation_Kontur(ep_pack, mainOrgID, flags, pLogger); } else if(sstreqi_ascii(ep_pack.Rec.Symb, "EXITE")) { // @v10.2.8 - p_imp = new EdiProviderImplementation_Exite(ep_pack, mainOrgID, flags); + p_imp = new EdiProviderImplementation_Exite(ep_pack, mainOrgID, flags, pLogger); } else if(sstreqi_ascii(ep_pack.Rec.Symb, "SBIS")) { // @v11.9.4 - p_imp = new EdiProviderImplementation_SBIS(ep_pack, mainOrgID, flags); + p_imp = new EdiProviderImplementation_SBIS(ep_pack, mainOrgID, flags, pLogger); } else { CALLEXCEPT_PP_S(PPERR_EDI_THEREISNTPRVIMP, ep_pack.Rec.Symb); @@ -7104,8 +7152,8 @@ int PPEdiProcessor::SendDESADV(int ediOp, const PPBillIterchangeFilt & rP, const // // // -EdiProviderImplementation_Kontur::EdiProviderImplementation_Kontur(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags) : - PPEdiProcessor::ProviderImplementation(rEpp, mainOrgID, flags) +EdiProviderImplementation_Kontur::EdiProviderImplementation_Kontur(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags, PPLogger * pLogger) : + PPEdiProcessor::ProviderImplementation(rEpp, mainOrgID, flags, pLogger) { } @@ -7994,8 +8042,8 @@ int EdiProviderImplementation_Kontur::SendDocument(PPEdiProcessor::DocumentInfo // // // -EdiProviderImplementation_Exite::EdiProviderImplementation_Exite(const PPEdiProviderPacket & rEpp, PPID mainOrgID, long flags) : - PPEdiProcessor::ProviderImplementation(rEpp, mainOrgID, flags) +EdiProviderImplementation_Exite::EdiProviderImplementation_Exite(const PPEdiProviderPacket & rEpp, PPID mainOrgID, 
long flags, PPLogger * pLogger) : + PPEdiProcessor::ProviderImplementation(rEpp, mainOrgID, flags, pLogger) { } diff --git a/Src/PPLib/ppsupplix.cpp b/Src/PPLib/ppsupplix.cpp index df58b8d51c..6ff2726dd3 100644 --- a/Src/PPLib/ppsupplix.cpp +++ b/Src/PPLib/ppsupplix.cpp @@ -272,17 +272,15 @@ class SoapExporter { InitExportParam(p, addLineRecType); AddLineRec = p.InrRec; } - // @v10.4.0 { if(promoLineRecType) { PromoLineRecType = promoLineRecType; InitExportParam(p, promoLineRecType); PromoLineRec = p.InrRec; } - // } @v10.4.0 HeadScheme = pHeadScheme; LineScheme = pLineScheme; AddLineScheme = pAddLineScheme; - PromoLineScheme = pPromoLineScheme; // @v10.4.0 + PromoLineScheme = pPromoLineScheme; SchemeName = pSchemeName; F.Close(); THROW_SL(F.Open(pFile, SFile::mWrite) > 0); @@ -538,17 +536,17 @@ class SoapExporter { uint HeadRecType; uint LineRecType; uint AddLineRecType; - uint PromoLineRecType; // @v10.4.0 + uint PromoLineRecType; SString HeadScheme; SString LineScheme; SString AddLineScheme; - SString PromoLineScheme; // @v10.4.0 + SString PromoLineScheme; SString SchemeName; SString FileName; SdRecord HeadRec; SdRecord LineRec; SdRecord AddLineRec; - SdRecord PromoLineRec; // @v10.4.0 + SdRecord PromoLineRec; SFile F; }; diff --git a/Src/PPLib/wsctl.cpp b/Src/PPLib/wsctl.cpp index d352035df0..fde946cb29 100644 --- a/Src/PPLib/wsctl.cpp +++ b/Src/PPLib/wsctl.cpp @@ -46,7 +46,7 @@ WsCtl_RegistrationBlock & WsCtl_RegistrationBlock::Z() // // // -WsCtl_SelfIdentityBlock::WsCtl_SelfIdentityBlock() : PrcID(0) +WsCtl_SelfIdentityBlock::WsCtl_SelfIdentityBlock() : PrcID(0), ComputerID(0), CompCatID(0) { } diff --git a/Src/PPMain/wsctl-main.cpp b/Src/PPMain/wsctl-main.cpp index 09a25e74ae..5c6cd9bc5c 100644 --- a/Src/PPMain/wsctl-main.cpp +++ b/Src/PPMain/wsctl-main.cpp @@ -538,7 +538,7 @@ class WsCtl_ImGuiSceneBlock : public ImGuiSceneBase { };*/ class ImDialog_WsRegisterComputer : public ImDialogState { public: - ImDialog_WsRegisterComputer(WsCtl_ImGuiSceneBlock & rBlk, WsCtl_SelfIdentityBlock * pCtx) : R_Blk(rBlk), ImDialogState(pCtx) + ImDialog_WsRegisterComputer(WsCtl_ImGuiSceneBlock & rBlk, WsCtl_SelfIdentityBlock * pCtx) : R_Blk(rBlk), ImDialogState(pCtx), CompCatID(0) { SString temp_buf; if(pCtx) { @@ -563,7 +563,29 @@ class WsCtl_ImGuiSceneBlock : public ImGuiSceneBase { const char * p_popup_title = "Register computer"; ImGui::OpenPopup(p_popup_title); if(ImGui::BeginPopup(p_popup_title)) { + DComputerCategoryList st_data_compcat_list; + R_Blk.St.D_CompCatList.GetData(st_data_compcat_list); + // ImGui::InputText(R_Blk.InputLabelPrefix("Computer Name"), SubstTxt_Name, sizeof(SubstTxt_Name)); + if(st_data_compcat_list.L.getCount()) { + const char * p_selected_text = 0; + if(ImGui::BeginCombo("##compcatlist", p_selected_text)) { + if(CompCatID) { + for(uint i = 0; i < st_data_compcat_list.L.getCount(); i++) { + StrAssocArray::Item comp_cat_item = st_data_compcat_list.L.Get(i); + if(comp_cat_item.Id == CompCatID) { + p_selected_text = comp_cat_item.Txt; + } + } + } + for(uint catidx = 0; catidx < st_data_compcat_list.L.getCount(); catidx++) { + StrAssocArray::Item item = st_data_compcat_list.L.Get(catidx); + if(ImGui::Selectable(item.Txt, item.Id == PgmL.GetSelectedCatSurrogateId())) + PgmL.SetSelectedCatSurrogateId(item.Id); + } + ImGui::EndCombo(); + } + } ImGui::InputText(R_Blk.InputLabelPrefix("MAC Address"), SubstTxt_MacAdr, sizeof(SubstTxt_MacAdr)); ImGui::InputText(R_Blk.InputLabelPrefix("UUID"), SubstTxt_UUID, sizeof(SubstTxt_UUID)); ImGui::NewLine(); @@ -613,6 +635,7 @@ class 
WsCtl_ImGuiSceneBlock : public ImGuiSceneBase { char SubstTxt_Name[128]; char SubstTxt_MacAdr[48]; char SubstTxt_UUID[48]; + long CompCatID; // @v12.0.3 }; private: // @@ -1973,6 +1996,7 @@ void WsCtl_ImGuiSceneBlock::WsCtl_CliSession::SendRequest(PPJobSrvClient & rCli, break; case PPSCMD_WSCTL_BEGIN_SESS: if(P_St) { + bool local_fault = true; DTSess st_data; SJson js_param(SJson::tOBJECT); js_param.InsertInt("scardid", rReq.P.SCardID); @@ -2000,31 +2024,37 @@ void WsCtl_ImGuiSceneBlock::WsCtl_CliSession::SendRequest(PPJobSrvClient & rCli, if(reply.CheckRepError()) { SJson * p_js = SJson::Parse(reply_buf); if(st_data.FromJsonObject(p_js)) { - ; + local_fault = false; } - else - st_data.SetupByLastError(); - P_St->D_TSess.SetData(st_data); ZDELETE(p_js); } - else - st_data.SetupByLastError(); } st_data.DtmActual = getcurdatetime_(); + if(local_fault) { + st_data.SetupByLastError(); + } P_St->D_TSess.SetData(st_data); } break; case reqidQueryComputerCategoryList: // @v12.0.3 if(P_St) { + bool local_fault = true; DComputerCategoryList st_data; - temp_buf.Z().Cat("SELECT").Space().Cat("COMPUTERCATEGORY"); + temp_buf.Z().Cat("SELECT").Space().Cat("COMPUTERCATEGORY").Space().Cat("BY").Space().Cat("FORMAT").DotCat("BIN").CatParStr(static_cast(0)); if(rCli.ExecSrvCmd(temp_buf, reply)) { // Ответ придет в формате xml SString reply_buf; reply.StartReading(&reply_buf); if(reply.CheckRepError()) { - + if(st_data.L.Read(reply, 0)) { + local_fault = false; + } } } + st_data.DtmActual = getcurdatetime_(); + if(local_fault) { + st_data.SetupByLastError(); + } + P_St->D_CompCatList.SetData(st_data); } break; } @@ -2922,7 +2952,7 @@ void WsCtl_ImGuiSceneBlock::BuildScene() DComputerCategoryList st_data_compcat_list; // @v12.0.3 St.D_CompCatList.GetData(st_data_compcat_list); if(!st_data_compcat_list.DtmActual) { - + P_CmdQ->Push(WsCtlReqQueue::Req(WsCtl_CliSession::reqidQueryComputerCategoryList)); } P_Dlg_RegComp = new ImDialog_WsRegisterComputer(*this, &St.SidBlk); } diff --git a/Src/PPTEST/pptest.cpp b/Src/PPTEST/pptest.cpp index b82f260861..442b5265d0 100644 --- a/Src/PPTEST/pptest.cpp +++ b/Src/PPTEST/pptest.cpp @@ -1990,8 +1990,8 @@ int DoConstructionTest() } #endif // } 0 //TestGtinStruc(); - //PPChZnPrcssr::Test(); - GumboTest(); + PPChZnPrcssr::Test(); + //GumboTest(); //Test_SSystemBackup(); //TestPow10Tab(); //ImportSpecial("D:\\DEV\\RESOURCE\\DATA\\ETC"); diff --git a/Src/Rsrc/Str/ppstr2.symb b/Src/Rsrc/Str/ppstr2.symb index 0db5b10099..5c5292aa2a 100644 --- a/Src/Rsrc/Str/ppstr2.symb +++ b/Src/Rsrc/Str/ppstr2.symb @@ -2,7 +2,7 @@ [common] LastAutoGrpCode=1001 LastSignatureCode=2147494822 -LastAutoCodeList=1,12232;102,10177;103,10072;110,10000;111,12106;112,10000;119,10000;114,10006;7,10000;4,10000;6,10000;342,10000;118,10150;120,10005;1001,10000;121,10000;122,10000;123,10322;124,10347;125,10063 +LastAutoCodeList=1,12233;102,10177;103,10072;110,10000;111,12106;112,10000;119,10000;114,10006;7,10000;4,10000;6,10000;342,10000;118,10150;120,10005;1001,10000;121,10000;122,10000;123,10322;124,10347;125,10063 [SignatureList] testsign1=2147483649 @@ -12873,6 +12873,7 @@ PPERR_EDI_SVCGETDOCLIST=77203 PPERR_EDI_SVCSENDDOC=77204 PPERR_EDI_SVCGETDOC=77205 PPERR_EDI_SVCADDRUNDEF=77206 +PPERR_EDI_DOCNOTACCEPTED_TIFAULT=77769 PPERR_EANCOM_RFFWOQ=77207 PPERR_EANCOM_NADWOQ=77208 PPERR_EANCOM_MOAWOQ=77209 diff --git a/Src/Rsrc/Str/ppstr2.txt b/Src/Rsrc/Str/ppstr2.txt index 3fceff0b0b..d3798bc2e0 100644 --- a/Src/Rsrc/Str/ppstr2.txt +++ b/Src/Rsrc/Str/ppstr2.txt @@ -1797,6 +1797,7 @@ PPERR_EDI_SVCGETDOCLIST 
"Ошибка получения списк PPERR_EDI_SVCSENDDOC "Ошибка отправки документа на транспортный сервис EDI-провайдера: %s" PPERR_EDI_SVCGETDOC "Ошибка получения документа с транспортного сервиса EDI-провайдера: %s" PPERR_EDI_SVCADDRUNDEF "Не определен адрес доступа к сервису EDI-провайдера %s" +PPERR_EDI_DOCNOTACCEPTED_TIFAULT "Документ '%s' не проведен из-за проблем в одной или нескольких строках" PPERR_EANCOM_RFFWOQ "EANCOM: в сегменте RFF не определен квалификатор" PPERR_EANCOM_NADWOQ "EANCOM: в сегменте NAD не определен квалификатор" PPERR_EANCOM_MOAWOQ "EANCOM: в сегменте MOA не определен квалификатор" @@ -4949,7 +4950,7 @@ PPTXT_GLOBALSERVICELIST "1,@{globalservice_twitter};2,@{globalservic 6,@{globalservice_instagram};7,@{globalservice_uds};8,@{globalservice_uhtt};9,@{globalservice_shopify}" PPTXT_AGGRFUNCNAMELIST "1,@{aggrfunc_count};2,@{aggrfunc_sum};3,@{aggrfunc_average};4,@{aggrfunc_min};5,@{aggrfunc_max};6,@{aggrfunc_stddev}" PPTXT_CHZNPRODUCTTYPES "1,Изделия из меха;2,Табачные изделия;3,Обувь;4,Лекарственные средства;5,Автомобильные шины;6,Текстиль;7,Парфюмерия;\ -8,Молочная продукция;9,Ювелирные изделия;10,Вода питьевая;11,Альтернативная табачная продукция;12,Пиво разливное;13,Биологически-активные добавки" +8,Молочная продукция;9,Ювелирные изделия;10,Вода питьевая;11,Альтернативная табачная продукция;12,Пиво разливное;13,Биологически-активные добавки;14,Пиво фасованное" PPTXT_DEVICETIMEDIFFROMSYS "@{attention} Время на регистраторе @datetime отличается от времени на компьютере" PPTXT_LOG_VK_NODEFIMG "Изображение по-умолчанию не найдено!" PPTXT_LOG_VK_NODESCRFORGOODS "Описание для товара '@goods_id' не найдено!" diff --git a/Src/Rsrc/Version/genver-open.dat b/Src/Rsrc/Version/genver-open.dat index d9122e6df6..3537568078 100644 --- a/Src/Rsrc/Version/genver-open.dat +++ b/Src/Rsrc/Version/genver-open.dat @@ -14,8 +14,8 @@ Demo = 0 OpenSource = 1 MajorVer =12 MinorVer =0 -Revision =2 -AssemblyVer=40228 +Revision =3 +AssemblyVer=40245 MinMajorVer =9 MinMinorVer =8 MinRevision =11 diff --git a/Src/SLib/Tdialog.cpp b/Src/SLib/Tdialog.cpp index d1cacd904a..c3df81e27a 100644 --- a/Src/SLib/Tdialog.cpp +++ b/Src/SLib/Tdialog.cpp @@ -595,7 +595,6 @@ IMPL_HANDLE_EVENT(TDialog) case TEvent::evCommand: switch(event.message.command) { case cmOK: - // @v10.7.7 { { TView * p_temp = P_Last; if(p_temp) { @@ -610,7 +609,6 @@ IMPL_HANDLE_EVENT(TDialog) } } // @fallthrough - // } @v10.7.7 case cmCancel: case cmYes: case cmNo: diff --git a/Src/SLib/hashtab.cpp b/Src/SLib/hashtab.cpp index 78b89f87ca..0e239c12a5 100644 --- a/Src/SLib/hashtab.cpp +++ b/Src/SLib/hashtab.cpp @@ -1867,13 +1867,11 @@ uint32 HashJen(const void * pKey, size_t keyLen, uint numBkts, uint * pBkt) /* The Paul Hsieh hash function */ #undef get16bits -#if(defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ - || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) -#define get16bits(d) (*((const uint16_t*)(d))) +#if(defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) + #define get16bits(d) (*((const uint16_t*)(d))) #endif - #if !defined (get16bits) -#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) + (uint32_t)(((const uint8_t *)(d))[0])) + #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) + (uint32_t)(((const uint8_t *)(d))[0])) #endif #define HASH_SFH(key, keylen, num_bkts, hashv, bkt) \ do { \ diff --git a/Src/SLib/lz4/lz4.c b/Src/SLib/lz4/lz4.c index b1771789d5..3899b664ed 100644 --- 
a/Src/SLib/lz4/lz4.c +++ b/Src/SLib/lz4/lz4.c @@ -138,7 +138,7 @@ // Basic Types // #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) - #include + //#include //typedef uint8_t BYTE__Removed; //typedef uint16_t U16__Removed; //typedef uint32_t U32__Removed; @@ -249,8 +249,7 @@ LZ4_FORCE_O2_INLINE_GCC_PPC64LE void LZ4_wildCopy(void * dstPtr, const void * sr #define WILDCOPYLENGTH 8 #define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ #define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ -#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength - without overflowing output buffer */ +#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */ static const int LZ4_minLength = (MFLIMIT+1); // @sobolev #define KB *(1 <<10) @@ -338,10 +337,12 @@ static uint FASTCALL LZ4_NbCommonBytes(reg_t val) r = 4; } else { - r = 0; val >>= by32; + r = 0; + val >>= by32; } if(!(val>>16)) { - r += 2; val >>= 8; + r += 2; + val >>= 8; } else { val >>= 24; @@ -437,8 +438,17 @@ typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; * content, and matches are found by looking in the ctx * ->dictCtx->hashTable. */ -typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; -typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; +typedef enum { + noDict = 0, + withPrefix64k, + usingExtDict, + usingDictCtx +} dict_directive; + +typedef enum { + noDictIssue = 0, + dictSmall +} dictIssue_directive; // // Local Utils // diff --git a/Src/SLib/scrypt.cpp b/Src/SLib/scrypt.cpp deleted file mode 100644 index 998a57a197..0000000000 --- a/Src/SLib/scrypt.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// SCRYPT.CPP -// Copyright (c) A.Sobolev 2007 -// -#include - -class SCryptCipher { -public: - enum Algorithm { - algSLib = 1, - algRijndael, - algAES = algRijndael - }; - static SCryptCipher * CreateInstance(Algorithm alg, uint level); - -protected: - virtual void Enc(const uint8 * pSrc, uint8 * pDest) const; - virtual void Dec(const uint8 * pSrc, uint8 * pDest) const; - - int Alg; - uint Level; -}; diff --git a/Src/SLib/sfann.cpp b/Src/SLib/sfann.cpp deleted file mode 100644 index 544a3e190e..0000000000 --- a/Src/SLib/sfann.cpp +++ /dev/null @@ -1,6901 +0,0 @@ -// SFANN.CPP -// Copyright (C) 2003-2016 Steffen Nissen (steffen.fann@gmail.com) -// -// #include "config.h" -// #undef PACKAGE // Name of package -// #undef VERSION // Version number of package -// #undef X86_64 // Define for the x86_64 CPU famyly -#include -#include -#pragma hdrstop -#include "fann.h" - -#ifdef _MSC_VER - #define vsnprintf _vsnprintf - #define snprintf _snprintf -#endif -#if defined(_WIN32) && !defined(__MINGW32__) - #define PATH_MAX _MAX_PATH -#endif -#ifndef PATH_MAX - #ifdef _POSIX_PATH_MAX - #define PATH_MAX _POSIX_PATH_MAX - #else - #define PATH_MAX 4096 - #endif -#endif - -//FANN_EXTERNAL FILE * FANN_API fann_default_error_log = (FILE*)-1; - -// #define FANN_NO_SEED -// #define DEBUGTRAIN - -#define fann_scanf(type, name, val) { \ - if(fscanf(conf, name "="type "\n", val) != 1) { \ - fann_error(NULL, SLERR_FANN_CANT_READ_CONFIG, name, configuration_file); \ - fann_destroy(ann); \ - return NULL; \ - }} - -#define fann_skip(name) { \ - if(fscanf(conf, name) != 0) { \ - fann_error(NULL, SLERR_FANN_CANT_READ_CONFIG, name, configuration_file); \ - 
fann_destroy(ann); \ - return NULL; \ - }} - -//static -const char * Fann::GetAttrText(uint attr, uint value) -{ - switch(attr) { - case attrNetType: - switch(value) { - case Fann::FANN_NETTYPE_LAYER: return "FANN_NETTYPE_LAYER"; - case Fann::FANN_NETTYPE_SHORTCUT: return "FANN_NETTYPE_SHORTCUT"; - } - break; - case attrTrainAlgorithm: - switch(value) { - case Fann::FANN_TRAIN_INCREMENTAL: return "FANN_TRAIN_INCREMENTAL"; - case Fann::FANN_TRAIN_BATCH: return "FANN_TRAIN_BATCH"; - case Fann::FANN_TRAIN_RPROP: return "FANN_TRAIN_RPROP"; - case Fann::FANN_TRAIN_QUICKPROP: return "FANN_TRAIN_QUICKPROP"; - case Fann::FANN_TRAIN_SARPROP: return "FANN_TRAIN_SARPROP"; - } - break; - case attrActivationFunc: - switch(value) { - case Fann::FANN_LINEAR: return "FANN_LINEAR"; - case Fann::FANN_THRESHOLD: return "FANN_THRESHOLD"; - case Fann::FANN_THRESHOLD_SYMMETRIC: return "FANN_THRESHOLD_SYMMETRIC"; - case Fann::FANN_SIGMOID: return "FANN_SIGMOID"; - case Fann::FANN_SIGMOID_STEPWISE: return "FANN_SIGMOID_STEPWISE"; - case Fann::FANN_SIGMOID_SYMMETRIC: return "FANN_SIGMOID_SYMMETRIC"; - case Fann::FANN_SIGMOID_SYMMETRIC_STEPWISE: return "FANN_SIGMOID_SYMMETRIC_STEPWISE"; - case Fann::FANN_GAUSSIAN: return "FANN_GAUSSIAN"; - case Fann::FANN_GAUSSIAN_SYMMETRIC: return "FANN_GAUSSIAN_SYMMETRIC"; - case Fann::FANN_GAUSSIAN_STEPWISE: return "FANN_GAUSSIAN_STEPWISE"; - case Fann::FANN_ELLIOT: return "FANN_ELLIOT"; - case Fann::FANN_ELLIOT_SYMMETRIC: return "FANN_ELLIOT_SYMMETRIC"; - case Fann::FANN_LINEAR_PIECE: return "FANN_LINEAR_PIECE"; - case Fann::FANN_LINEAR_PIECE_SYMMETRIC: return "FANN_LINEAR_PIECE_SYMMETRIC"; - case Fann::FANN_SIN_SYMMETRIC: return "FANN_SIN_SYMMETRIC"; - case Fann::FANN_COS_SYMMETRIC: return "FANN_COS_SYMMETRIC"; - case Fann::FANN_SIN: return "FANN_SIN"; - case Fann::FANN_COS: return "FANN_COS"; - } - break; - case attrErrorFunc: - switch(value) { - case Fann::FANN_ERRORFUNC_LINEAR: return "FANN_ERRORFUNC_LINEAR"; - case Fann::FANN_ERRORFUNC_TANH: return "FANN_ERRORFUNC_TANH"; - } - break; - case attrStopFunc: - switch(value) { - case Fann::FANN_STOPFUNC_MSE: return "FANN_STOPFUNC_MSE"; - case Fann::FANN_STOPFUNC_BIT: return "FANN_STOPFUNC_BIT"; - } - default: - return "unknown-attr"; - } - return "unknown"; -} - -Fann::TrainData::TrainData() -{ - State = 0; - Count = 0; - NumInput = 0; - NumOutput = 0; -} - -int Fann::TrainData::Helper_Construct(uint numInput, uint numOutput, uint numSeries) -{ - int ok = 1; - State = 0; - Count = numSeries; - NumInput = numInput; - NumOutput = numOutput; - for(uint i = 0; i < Count; i++) { - Fann::DataVector * p_new_inp = new Fann::DataVector(numInput); - THROW_S(p_new_inp, SLERR_NOMEM); - THROW(InpL.insert(p_new_inp)); - Fann::DataVector * p_new_out = new Fann::DataVector(numOutput); - THROW_S(p_new_out, SLERR_NOMEM); - THROW(OutL.insert(p_new_out)); - } - CATCH - Destroy(); - State |= stError; - ok = 0; - ENDCATCH - return ok; -} - -Fann::TrainData::TrainData(const Fann & rAnn, uint numSeries) -{ - Helper_Construct(rAnn.NumInput, rAnn.NumOutput, numSeries); -} -Fann::TrainData::TrainData(uint numInput, uint numOutput, uint numSeries) -{ - Helper_Construct(numInput, numOutput, numSeries); -} - -Fann::TrainData::~TrainData() -{ -} - -void Fann::TrainData::Destroy() -{ - State = 0; - InpL.freeAll(); - OutL.freeAll(); - Count = 0; - NumInput = 0; - NumOutput = 0; -} - -int Fann::TrainData::Read(const char * pFileName, long format) -{ - Destroy(); - - int ok = 1; - long actual_count = 0; - long _count = 0; - long _num_inp = 0; - long _num_out = 
0; - SString line_buf, temp_buf; - SFile f_in(pFileName, SFile::mRead); - THROW(f_in.IsValid()); - if(f_in.ReadLine(line_buf)) { - StringSet ss(" "); - ss.setBuf(line_buf.Chomp().Strip()); - uint ssp = 0; - int input_output = 0; // , 0 !0 - - RealArray value_list; - THROW(ss.get(&ssp, temp_buf)); - _count = temp_buf.ToLong(); - THROW(ss.get(&ssp, temp_buf)); - _num_inp = temp_buf.ToLong(); - THROW(ss.get(&ssp, temp_buf)); - _num_out = temp_buf.ToLong(); - THROW(_count > 0 && _num_inp > 0 && _num_out > 0); - while(actual_count < _count && f_in.ReadLine(line_buf)) { - value_list.clear(); - ss.setBuf(line_buf.Chomp().Strip()); - for(ssp = 0; ss.get(&ssp, temp_buf);) { - double value = temp_buf.ToReal(); - value_list.insert(&value); - } - if(input_output == 0) { - THROW((long)value_list.getCount() == _num_inp); - { - Fann::DataVector * p_vect = new Fann::DataVector(value_list); - THROW_S(p_vect, SLERR_NOMEM); - THROW(p_vect->getCount() == value_list.getCount()); - THROW(InpL.insert(p_vect)); - } - input_output = 1; - } - else { - THROW((long)value_list.getCount() == _num_out); - { - Fann::DataVector * p_vect = new Fann::DataVector(value_list); - THROW_S(p_vect, SLERR_NOMEM); - THROW(p_vect->getCount() == value_list.getCount()); - THROW(OutL.insert(p_vect)); - } - input_output = 0; - actual_count++; - } - } - THROW(InpL.getCount() == OutL.getCount()); - Count = actual_count; - NumInput = (uint)_num_inp; - NumOutput = (uint)_num_out; - } - CATCH - Destroy(); - State |= stError; - ok = 0; - ENDCATCH - return ok; -} - -int Fann::TrainData::SetInputSeries(uint seriesN, const float * pData) -{ - int ok = 1; - if(pData && seriesN < InpL.getCount() && IsValid()) { - Fann::DataVector * p_vect = InpL.at(seriesN); - for(uint i = 0; i < p_vect->getCount(); i++) - (*p_vect)[i] = pData[i]; - } - else - ok = 0; - return ok; -} - -int Fann::TrainData::SetOutputSeries(uint seriesN, const float * pData) -{ - int ok = 1; - if(pData && seriesN < OutL.getCount() && IsValid()) { - Fann::DataVector * p_vect = OutL.at(seriesN); - for(uint i = 0; i < p_vect->getCount(); i++) - (*p_vect)[i] = pData[i]; - } - else - ok = 0; - return ok; -} - -int Fann::TrainData::IsValid() const -{ - int ok = 1; - THROW(!(State & stError)); - THROW(GetCount() == InpL.getCount()); - THROW(GetCount() == OutL.getCount()); - { - for(uint i = 0; i < InpL.getCount(); i++) { - THROW(InpL.at(i)); - THROW(InpL.at(i)->getCount() == GetInputCount()); - } - } - { - for(uint i = 0; i < OutL.getCount(); i++) { - THROW(OutL.at(i)); - THROW(OutL.at(i)->getCount() == GetOutputCount()); - } - } - CATCHZOK - return ok; -} - -uint Fann::TrainData::GetCount() const -{ - return Count; -} - -uint Fann::TrainData::GetInputCount() const -{ - return NumInput; -} - -uint Fann::TrainData::GetOutputCount() const -{ - return NumOutput; -} - -FANN_EXTERNAL Fann * FANN_API fann_create_standard(uint numLayers, ...) 
-{ - Fann * ann = 0; - LongArray layers; - { - va_list layer_sizes; - va_start(layer_sizes, numLayers); - for(uint i = 0; i < numLayers; i++) { - const uint arg = va_arg(layer_sizes, uint); - THROW_S(arg >= 0 && arg <= 1000000, SLERR_FANN_CANT_ALLOCATE_MEM); - THROW(layers.add(arg)); - } - va_end(layer_sizes); - } - THROW(ann = fann_create_standard_array(layers)); - CATCH - ZDELETE(ann); - ENDCATCH - return ann; -} - -FANN_EXTERNAL Fann * FANN_API fann_create_standard_array(/*uint numLayers, const uint * pLayers*/const LongArray & rLayers) -{ - return fann_create_sparse_array(1, rLayers/*numLayers, pLayers*/); -} - -FANN_EXTERNAL Fann * FANN_API fann_create_sparse(float connection_rate, uint numLayers, ...) -{ - Fann * ann = 0; - LongArray layers; - { - va_list layer_sizes; - va_start(layer_sizes, numLayers); - for(uint i = 0; i < numLayers; i++) { - const uint arg = va_arg(layer_sizes, uint); - THROW(arg >= 0 && arg <= 1000000); - THROW(layers.add(arg)); - } - va_end(layer_sizes); - } - THROW(ann = fann_create_sparse_array(connection_rate, layers)); - CATCH - ZDELETE(ann); - ENDCATCH - return ann; -} - -FANN_EXTERNAL Fann * FANN_API fann_create_sparse_array(float connectionRate, const LongArray & rLayers/*uint numLayers, const uint * pLayers*/) -{ - Fann * p_result = new Fann(Fann::FANN_NETTYPE_LAYER, connectionRate, rLayers); - if(p_result && !p_result->IsValid()) - ZDELETE(p_result); - return p_result; -} - -FANN_EXTERNAL Fann * FANN_API fann_create_shortcut(uint num_layers, ...) -{ - Fann * ann = 0; - LongArray layers; - { - va_list layer_sizes; - va_start(layer_sizes, num_layers); - //status = 1; - for(uint i = 0; i < num_layers; i++) { - uint arg = va_arg(layer_sizes, uint); - THROW_S(arg >= 0 && arg <= 1000000, SLERR_FANN_CANT_ALLOCATE_MEM); - //p_layers[i] = arg; - THROW(layers.add(arg)); - } - va_end(layer_sizes); - } - THROW(ann = fann_create_shortcut_array(layers/*num_layers, p_layers*/)); - CATCH - ZDELETE(ann); - ENDCATCH - return ann; -} - -FANN_EXTERNAL Fann * FANN_API fann_create_shortcut_array(const LongArray & rLayers) -{ - Fann * p_result = new Fann(Fann::FANN_NETTYPE_SHORTCUT, 1.0f, rLayers); - if(p_result && !p_result->IsValid()) - ZDELETE(p_result); - return p_result; -} - -/*#define fann_activation_switch(activation_function, value, result) \ -switch(activation_function) { \ - case FANN_LINEAR: result = (float)value; break; \ - case FANN_LINEAR_PIECE: result = (float)((value < 0) ? 0 : (value > 1) ? 1 : value); break; \ - case FANN_LINEAR_PIECE_SYMMETRIC: result = (float)((value < -1) ? -1 : (value > 1) ? 
1 : value); break; \ - case FANN_SIGMOID: result = (float)fann_sigmoid_real(value); break; \ - case FANN_SIGMOID_SYMMETRIC: result = (float)fann_sigmoid_symmetric_real(value); break; \ - case FANN_SIGMOID_SYMMETRIC_STEPWISE: \ - result = (float)fann_stepwise(-2.64665293693542480469e+00, -1.47221934795379638672e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, -9.90000009536743164062e-01, -8.99999976158142089844e-01, -5.00000000000000000000e-01, 5.00000000000000000000e-01, 8.99999976158142089844e-01, 9.90000009536743164062e-01, -1, 1, value); \ - break; \ - case FANN_SIGMOID_STEPWISE: \ - result = (float)fann_stepwise(-2.64665246009826660156e+00, -1.47221946716308593750e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, 4.99999988824129104614e-03, 5.00000007450580596924e-02, 2.50000000000000000000e-01, 7.50000000000000000000e-01, 9.49999988079071044922e-01, 9.95000004768371582031e-01, 0, 1, value); \ - break; \ - case FANN_THRESHOLD: result = (float)((value < 0) ? 0 : 1); break; \ - case FANN_THRESHOLD_SYMMETRIC: result = (float)((value < 0) ? -1 : 1); break; \ - case FANN_GAUSSIAN: result = (float)fann_gaussian_real(value); break; \ - case FANN_GAUSSIAN_SYMMETRIC: result = (float)fann_gaussian_symmetric_real(value); break; \ - case FANN_ELLIOT: result = (float)fann_elliot_real(value); break; \ - case FANN_ELLIOT_SYMMETRIC: result = (float)fann_elliot_symmetric_real(value); break; \ - case FANN_SIN_SYMMETRIC: result = (float)fann_sin_symmetric_real(value); break; \ - case FANN_COS_SYMMETRIC: result = (float)fann_cos_symmetric_real(value); break; \ - case FANN_SIN: result = (float)fann_sin_real(value); break; \ - case FANN_COS: result = (float)fann_cos_real(value); break; \ - case FANN_GAUSSIAN_STEPWISE: result = 0; break; \ -}*/ - -static FORCEINLINE float ActivationSwitch(int activationFunc, float value) -{ - switch(activationFunc) { - case Fann::FANN_LINEAR: return (float)value; - case Fann::FANN_LINEAR_PIECE: return (float)((value < 0) ? 0 : (value > 1) ? 1 : value); - case Fann::FANN_LINEAR_PIECE_SYMMETRIC: return (float)((value < -1) ? -1 : (value > 1) ? 1 : value); - case Fann::FANN_SIGMOID: return (float)fann_sigmoid_real(value); - case Fann::FANN_SIGMOID_SYMMETRIC: return (float)fann_sigmoid_symmetric_real(value); - case Fann::FANN_SIGMOID_SYMMETRIC_STEPWISE: - return (float)fann_stepwise(-2.64665293693542480469e+00, -1.47221934795379638672e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, -9.90000009536743164062e-01, -8.99999976158142089844e-01, -5.00000000000000000000e-01, 5.00000000000000000000e-01, 8.99999976158142089844e-01, 9.90000009536743164062e-01, -1, 1, value); - case Fann::FANN_SIGMOID_STEPWISE: - return (float)fann_stepwise(-2.64665246009826660156e+00, -1.47221946716308593750e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, 4.99999988824129104614e-03, 5.00000007450580596924e-02, 2.50000000000000000000e-01, 7.50000000000000000000e-01, 9.49999988079071044922e-01, 9.95000004768371582031e-01, 0, 1, value); - case Fann::FANN_THRESHOLD: return (float)((value < 0) ? 0 : 1); - case Fann::FANN_THRESHOLD_SYMMETRIC: return (float)((value < 0) ? 
-1 : 1); - case Fann::FANN_GAUSSIAN: return (float)fann_gaussian_real(value); - case Fann::FANN_GAUSSIAN_SYMMETRIC: return (float)fann_gaussian_symmetric_real(value); - case Fann::FANN_ELLIOT: return (float)fann_elliot_real(value); - case Fann::FANN_ELLIOT_SYMMETRIC: return (float)fann_elliot_symmetric_real(value); - case Fann::FANN_SIN_SYMMETRIC: return (float)fann_sin_symmetric_real(value); - case Fann::FANN_COS_SYMMETRIC: return (float)fann_cos_symmetric_real(value); - case Fann::FANN_SIN: return (float)fann_sin_real(value); - case Fann::FANN_COS: return (float)fann_cos_real(value); - case Fann::FANN_GAUSSIAN_STEPWISE: return 0.0f; - } - return 0.0f; -} - -float * Fann::Run(const float * pInput) -{ - Fann::Neuron * p_neuron_it; - Fann::Neuron * p_last_neuron; - Fann::Neuron * p_neurons; - Fann::Neuron ** pp_neuron_pointers; - uint i; - uint _num_connections; - float neuron_sum; - float * p_weights; - Fann::Layer * p_layer_it; - const Fann::Layer * p_last_layer; - uint activation_function; - float steepness; - // store some variabels local for fast access - Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron; - float max_sum = 0; - { - // - // first set the input - // - const uint ci = NumInput; - for(i = 0; i != ci; i++) { - p_first_neuron[i].Value = pInput[i]; - } - } - // - // Set the bias neuron in the input layer - // - (P_FirstLayer->P_LastNeuron-1)->Value = 1; - p_last_layer = P_LastLayer; - for(p_layer_it = P_FirstLayer + 1; p_layer_it != p_last_layer; p_layer_it++) { - p_last_neuron = p_layer_it->P_LastNeuron; - for(p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) { - if(p_neuron_it->FirstCon == p_neuron_it->LastCon) { - // bias p_neurons - p_neuron_it->Value = 1; - continue; - } - activation_function = p_neuron_it->ActivationFunction; - steepness = p_neuron_it->ActivationSteepness; - neuron_sum = 0; - _num_connections = p_neuron_it->GetConCount(); - p_weights = P_Weights + p_neuron_it->FirstCon; - if(ConnectionRate >= 1) { - p_neurons = (NetworkType == FANN_NETTYPE_SHORTCUT) ? 
P_FirstLayer->P_FirstNeuron : (p_layer_it - 1)->P_FirstNeuron; - { // unrolled loop start - i = _num_connections & 3; // same as modulo 4 - switch(i) { - case 3: neuron_sum += (p_weights[2] * p_neurons[2].Value); - case 2: neuron_sum += (p_weights[1] * p_neurons[1].Value); - case 1: neuron_sum += (p_weights[0] * p_neurons[0].Value); - case 0: break; - } - for(; i != _num_connections; i += 4) { - neuron_sum += (p_weights[i] * p_neurons[i].Value) + (p_weights[i+1] * p_neurons[i+1].Value) + - (p_weights[i+2] * p_neurons[i+2].Value) + (p_weights[i+3] * p_neurons[i+3].Value); - } - } // unrolled loop end - /*for(i = 0;i != num_connections; i++){ - printf("%f += %f*%f, ", neuron_sum, weights[i], p_neurons[i].value); - neuron_sum += (weights[i] * p_neurons[i].value); - }*/ - } - else { - pp_neuron_pointers = PP_Connections + p_neuron_it->FirstCon; - i = _num_connections & 3; // same as modulo 4 - switch(i) { - case 3: neuron_sum += (p_weights[2] * pp_neuron_pointers[2]->Value); - case 2: neuron_sum += (p_weights[1] * pp_neuron_pointers[1]->Value); - case 1: neuron_sum += (p_weights[0] * pp_neuron_pointers[0]->Value); - case 0: break; - } - for(; i != _num_connections; i += 4) { - neuron_sum += (p_weights[i] * pp_neuron_pointers[i]->Value) + (p_weights[i+1] * pp_neuron_pointers[i+1]->Value) + - (p_weights[i+2] * pp_neuron_pointers[i+2]->Value) + (p_weights[i+3] * pp_neuron_pointers[i+3]->Value); - } - } - neuron_sum = (steepness * neuron_sum); - max_sum = 150/steepness; - if(neuron_sum > max_sum) - neuron_sum = max_sum; - else if(neuron_sum < -max_sum) - neuron_sum = -max_sum; - p_neuron_it->Sum = neuron_sum; - p_neuron_it->Value = ActivationSwitch(activation_function, neuron_sum); - } - } - // set the output - { - float * p_output = P_Output; - p_neurons = (P_LastLayer-1)->P_FirstNeuron; - const uint co = NumOutput; - for(i = 0; i != co; i++) { - p_output[i] = p_neurons[i].Value; - } - } - return P_Output; -} - -void Fann::RandomizeWeights(float minWeight, float maxWeight) -{ - const float * p_last_weight = P_Weights + TotalConnections; - for(float * p_weights = P_Weights; p_weights != p_last_weight; p_weights++) { - *p_weights = (float)(fann_rand(minWeight, maxWeight)); - } - if(P_PrevTrainSlopes) - ClearTrainArrays(); -} - -int Fann::IsEqualScaleVect(uint count, const float * pVect, const float * pOtherVect) const -{ - int yes = 1; - THROW(BIN(pVect) == BIN(pOtherVect)); - if(pVect) { - for(uint i = 0; i < count; i++) { - THROW(pVect[i] == pOtherVect[i]); - } - } - CATCH - yes = 0; - ENDCATCH - return yes; -} - -int Fann::IsEqual(const Fann & rS, long flags) const -{ - int yes = 1; -#define CMPF(f) THROW(f == rS.f) -#define CMPA(a) { for(uint i = 0; i < SIZEOFARRAY(a); i++) { THROW(a[i] == rS.a[i]); } } - THROW(Err.IsEqual(rS.Err)); - { - LongArray layers1, layers2; - GetLayerArray(layers1); - rS.GetLayerArray(layers2); - THROW(layers1 == layers2); - } - CMPF(Layers); - CMPF(LearningRate); - CMPF(LearningMomentum); - CMPF(ConnectionRate); - CMPF(NetworkType); - CMPF(TotalNeurons); - CMPF(NumInput); - CMPF(NumOutput); - CMPF(TrainingAlgorithm); -//#ifdef FIXEDFANN - /* - CMPF(DecimalPoint); - CMPF(Multiplier); - THROW(memcmp(SigmoidResults, rS.SigmoidResults, sizeof(SigmoidResults)) == 0); - THROW(memcmp(SigmoidValues, rS.SigmoidValues, sizeof(SigmoidValues)) == 0); - THROW(memcmp(SigmoidSymmetricResults, rS.SigmoidSymmetricResults, sizeof(SigmoidSymmetricResults)) == 0); - THROW(memcmp(SigmoidSymmetricValues, rS.SigmoidSymmetricValues, sizeof(SigmoidSymmetricValues)) == 0); - */ -//#else - 
THROW(ScaleIn.IsEqual(NumInput, rS.ScaleIn)); - THROW(ScaleOut.IsEqual(NumOutput, rS.ScaleOut)); -//#endif - CMPF(TotalConnections); - CMPF(num_MSE); - CMPF(MSE_value); - CMPF(NumBitFail); - CMPF(BitFailLimit); - CMPF(TrainErrorFunction); - CMPF(TrainStopFunction); - - CMPF(CascadeOutputChangeFraction); - CMPF(CascadeOutputStagnationEpochs); - CMPF(CascadeCandidateChangeFraction); - CMPF(CascadeCandidateStagnationEpochs); - CMPF(CascadeBestCandidate); - CMPF(CascadeCandidateLimit); - CMPF(CascadeWeightMultiplier); - CMPF(CascadeMaxOutEpochs); - CMPF(CascadeMaxCandEpochs); - CMPF(CascadeMinOutEpochs); - CMPF(CascadeMinCandEpochs); - - CMPF(CascadeNumCandidateGroups); - CMPF(TotalNeuronsAllocated); - CMPF(TotalConnectionsAllocated); - CMPF(QuickpropDecay); - CMPF(QuickpropMu); - CMPF(RpropIncreaseFactor); - CMPF(RpropDecreaseFactor); - CMPF(RpropDeltaMin); - CMPF(RpropDeltaMax); - CMPF(RpropDeltaZero); - CMPF(SarpropWeightDecayShift); - CMPF(SarpropStepErrorThresholdFactor); - CMPF(SarpropStepErrorShift); - CMPF(SarpropTemperature); - CMPF(SarpropEpoch); - CMPF(CascadeActivationFuncList); - CMPF(CascadeActivationSteepnessesList); - // - // - // - { - { - // compare the neurons - const Neuron * p_last_neuron = (P_LastLayer-1)->P_LastNeuron; - const Neuron * p_neur = P_FirstLayer->P_FirstNeuron; - const Neuron * p_s_neur = rS.P_FirstLayer->P_FirstNeuron; - for(;p_neur != p_last_neuron; p_neur++, p_s_neur++) { - THROW(p_neur->IsEqual(*p_s_neur)); - } - } - { - // compare the connections - const Neuron * p_first_neur = P_FirstLayer->P_FirstNeuron; - const Neuron * p_s_first_neur = rS.P_FirstLayer->P_FirstNeuron; - for(uint i = 0; i < TotalConnections; i++) { - const uint _input_neuron = (uint)(PP_Connections[i] - p_first_neur); - const uint _s_input_neuron = (uint)(rS.PP_Connections[i] - p_s_first_neur); - THROW(_input_neuron == _s_input_neuron); - THROW(P_Weights[i] == rS.P_Weights[i]); - } - } - { - THROW((P_TrainSlopes && rS.P_TrainSlopes) || (!P_TrainSlopes && !rS.P_TrainSlopes)); - THROW((P_PrevSteps && rS.P_PrevSteps) || (!P_PrevSteps && !rS.P_PrevSteps)); - THROW((P_PrevTrainSlopes && rS.P_PrevTrainSlopes) || (!P_PrevTrainSlopes && !rS.P_PrevTrainSlopes)); - THROW((P_PrevWeightsDeltas && rS.P_PrevWeightsDeltas) || (!P_PrevWeightsDeltas && !rS.P_PrevWeightsDeltas)); - for(uint i = 0; i < TotalConnectionsAllocated; i++) { - THROW(!P_TrainSlopes || P_TrainSlopes[i] == rS.P_TrainSlopes[i]); - THROW(!P_PrevSteps || P_PrevSteps[i] == rS.P_PrevSteps[i]); - THROW(!P_PrevTrainSlopes || P_PrevTrainSlopes[i] == rS.P_PrevTrainSlopes[i]); - THROW(!P_PrevWeightsDeltas || P_PrevWeightsDeltas[i] == rS.P_PrevWeightsDeltas[i]); - } - } - } - CATCH - yes = 0; - ENDCATCH -#undef CMPF - return yes; -} - -int Fann::Copy(const Fann & rS) -{ -#define COPYF(f) f = rS.f - int ok = 1; - uint i; - uint _input_neuron; - Fann::Neuron * p_last_neuron = 0; - Fann::Neuron * p_orig_neuron_it = 0; - Fann::Neuron * p_copy_neuron_it = 0; - Fann::Neuron * p_orig_first_neuron = 0; - Fann::Neuron * p_copy_first_neuron = 0; - Destroy(); - State = 0; - COPYF(Err); - COPYF(Layers); - COPYF(NumInput); - COPYF(NumOutput); - COPYF(LearningRate); - COPYF(LearningMomentum); - COPYF(ConnectionRate); - COPYF(NetworkType); - COPYF(num_MSE); - COPYF(MSE_value); - COPYF(NumBitFail); - COPYF(BitFailLimit); - COPYF(TrainErrorFunction); - COPYF(TrainStopFunction); - COPYF(TrainingAlgorithm); - COPYF(Callback); - COPYF(P_UserData); // user_data is not deep copied. 
user should use fann_copy_with_user_data() for that -//#ifdef FIXEDFANN - /* - COPYF(DecimalPoint); - COPYF(Multiplier); - memcpy(SigmoidResults, rS.SigmoidResults, sizeof(SigmoidResults)); - memcpy(SigmoidValues, rS.SigmoidValues, sizeof(SigmoidValues)); - memcpy(SigmoidSymmetricResults, rS.SigmoidSymmetricResults, sizeof(SigmoidSymmetricResults)); - memcpy(SigmoidSymmetricValues, rS.SigmoidSymmetricValues, sizeof(SigmoidSymmetricValues)); - */ -//#else - // copy scale parameters, when used - ScaleIn.Copy(NumInput, rS.ScaleIn); - ScaleOut.Copy(NumOutput, rS.ScaleOut); - COPYF(CascadeOutputChangeFraction); - COPYF(CascadeOutputStagnationEpochs); - COPYF(CascadeCandidateChangeFraction); - COPYF(CascadeCandidateStagnationEpochs); - COPYF(CascadeBestCandidate); - COPYF(CascadeCandidateLimit); - COPYF(CascadeWeightMultiplier); - COPYF(CascadeMaxOutEpochs); - COPYF(CascadeMaxCandEpochs); - COPYF(CascadeMinOutEpochs); - COPYF(CascadeMinCandEpochs); - COPYF(CascadeActivationFuncList); - COPYF(CascadeActivationSteepnessesList); - COPYF(CascadeNumCandidateGroups); - // copy candidate scores, if used - if(rS.P_CascadeCandidateScores == NULL) { - P_CascadeCandidateScores = NULL; - } - else { - P_CascadeCandidateScores = (float*)SAlloc::M(GetCascadeNumCandidates() * sizeof(float)); - THROW_S(P_CascadeCandidateScores, SLERR_FANN_CANT_ALLOCATE_MEM); - memcpy(P_CascadeCandidateScores, rS.P_CascadeCandidateScores, GetCascadeNumCandidates() * sizeof(float)); - } -//#endif // } FIXEDFANN - COPYF(QuickpropDecay); - COPYF(QuickpropMu); - COPYF(RpropIncreaseFactor); - COPYF(RpropDecreaseFactor); - COPYF(RpropDeltaMin); - COPYF(RpropDeltaMax); - COPYF(RpropDeltaZero); - COPYF(SarpropWeightDecayShift); - COPYF(SarpropStepErrorThresholdFactor); - COPYF(SarpropStepErrorShift); - COPYF(SarpropTemperature); - COPYF(SarpropEpoch); - { - Fann::Layer * p_orig_layer_it = 0; - Fann::Layer * p_copy_layer_it = 0; - // - // copy layer sizes, prepare for fann_allocate_neurons - // - THROW(AllocateLayers()); - for(p_orig_layer_it = rS.P_FirstLayer, p_copy_layer_it = P_FirstLayer; p_orig_layer_it != rS.P_LastLayer; p_orig_layer_it++, p_copy_layer_it++) { - const uint _layer_size = p_orig_layer_it->GetCount(); - p_copy_layer_it->P_LastNeuron = p_copy_layer_it->P_FirstNeuron + _layer_size; - TotalNeurons += _layer_size; - } - assert(TotalNeurons == rS.TotalNeurons); - } - { - // - // copy the neurons - // - THROW(AllocateNeurons()); - const uint _layer_size = (rS.P_LastLayer-1)->GetCount(); - memcpy(P_Output, rS.P_Output, _layer_size * sizeof(float)); - p_last_neuron = (rS.P_LastLayer-1)->P_LastNeuron; - for(p_orig_neuron_it = rS.P_FirstLayer->P_FirstNeuron, p_copy_neuron_it = P_FirstLayer->P_FirstNeuron; - p_orig_neuron_it != p_last_neuron; p_orig_neuron_it++, p_copy_neuron_it++) { - memcpy(p_copy_neuron_it, p_orig_neuron_it, sizeof(Fann::Neuron)); - } - } - // - // copy the connections - // - COPYF(TotalConnections); - THROW(AllocateConnections()); - p_orig_first_neuron = rS.P_FirstLayer->P_FirstNeuron; - p_copy_first_neuron = P_FirstLayer->P_FirstNeuron; - for(i = 0; i < rS.TotalConnections; i++) { - P_Weights[i] = rS.P_Weights[i]; - _input_neuron = (uint)(rS.PP_Connections[i] - p_orig_first_neuron); - PP_Connections[i] = p_copy_first_neuron + _input_neuron; - } - if(rS.P_TrainSlopes) { - P_TrainSlopes = (float *)SAlloc::M(TotalConnectionsAllocated * sizeof(float)); - THROW_S(P_TrainSlopes, SLERR_FANN_CANT_ALLOCATE_MEM); - memcpy(P_TrainSlopes, rS.P_TrainSlopes, TotalConnectionsAllocated * sizeof(float)); - } - 
if(rS.P_PrevSteps) { - P_PrevSteps = (float*)SAlloc::M(TotalConnectionsAllocated * sizeof(float)); - THROW_S(P_PrevSteps, SLERR_FANN_CANT_ALLOCATE_MEM); - memcpy(P_PrevSteps, rS.P_PrevSteps, TotalConnectionsAllocated * sizeof(float)); - } - if(rS.P_PrevTrainSlopes) { - P_PrevTrainSlopes = (float *)SAlloc::M(TotalConnectionsAllocated * sizeof(float)); - THROW_S(P_PrevTrainSlopes, SLERR_FANN_CANT_ALLOCATE_MEM); - memcpy(P_PrevTrainSlopes, rS.P_PrevTrainSlopes, TotalConnectionsAllocated * sizeof(float)); - } - if(rS.P_PrevWeightsDeltas) { - P_PrevWeightsDeltas = (float *)SAlloc::M(TotalConnectionsAllocated * sizeof(float)); - THROW_S(P_PrevWeightsDeltas, SLERR_FANN_CANT_ALLOCATE_MEM); - memcpy(P_PrevWeightsDeltas, rS.P_PrevWeightsDeltas, TotalConnectionsAllocated * sizeof(float)); - } - CATCH - Destroy(); - State |= stError; - ok = 0; - ENDCATCH - return ok; -#undef COPYF -} -// -// deep copy of the fann structure -// -FANN_EXTERNAL Fann * FANN_API fann_copy(const Fann * pOrig) -{ - Fann * p_copy = 0; - if(pOrig) { - p_copy = new Fann(*pOrig); - if(!p_copy->IsValid()) { - ZDELETE(p_copy); - } - } - return p_copy; -} - -FANN_EXTERNAL void FANN_API fann_print_connections(Fann * ann) -{ - Fann::Layer * layer_it; - Fann::Neuron * neuron_it; - uint i; - int value; - uint num_neurons = ann->GetTotalNeurons() - ann->GetNumOutput(); - char * neurons = (char*)SAlloc::M(num_neurons + 1); - if(neurons == NULL) { - fann_error(NULL, SLERR_FANN_CANT_ALLOCATE_MEM); - return; - } - neurons[num_neurons] = 0; - printf("Layer / Neuron "); - for(i = 0; i < num_neurons; i++) { - printf("%d", i % 10); - } - printf("\n"); - for(layer_it = ann->P_FirstLayer + 1; layer_it != ann->P_LastLayer; layer_it++) { - for(neuron_it = layer_it->P_FirstNeuron; neuron_it != layer_it->P_LastNeuron; neuron_it++) { - memset(neurons, (int)'.', num_neurons); - for(i = neuron_it->FirstCon; i < neuron_it->LastCon; i++) { - if(ann->P_Weights[i] < 0) { - value = (int)((ann->P_Weights[i]) - 0.5); - SETMAX(value, -25); - neurons[ann->PP_Connections[i] - ann->P_FirstLayer->P_FirstNeuron] = (char)('a' - value); - } - else { - value = (int)((ann->P_Weights[i]) + 0.5); - SETMIN(value, 25); - neurons[ann->PP_Connections[i] - ann->P_FirstLayer->P_FirstNeuron] = (char)('A' + value); - } - } - printf("L %3d / N %4d %s\n", (int)(layer_it - ann->P_FirstLayer), (int)(neuron_it - ann->P_FirstLayer->P_FirstNeuron), neurons); - } - } - SAlloc::F(neurons); -} -// -// Initialize the weights using Widrow + Nguyen's algorithm. 
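For reference, the rule implemented by the InitWeights body below: with H hidden neurons, N inputs, and input values spanning [min, max], ordinary weights are drawn from [0, s] and bias weights from [-s, s], where s = (0.7 * H)^(1/N) / (max - min). A standalone sketch of that computation (not the Fann class itself):

#include <cmath>
#include <cstdlib>

// Widrow-Nguyen scale factor, matching the _scale_factor expression below.
static double WidrowNguyenScale(unsigned numHidden, unsigned numInput, double minInp, double maxInp)
{
	return pow(0.7 * (double)numHidden, 1.0 / (double)numInput) / (maxInp - minInp);
}

// Uniform draw on [lo, hi], in the spirit of fann_rand().
static double RandUniform(double lo, double hi)
{
	return lo + (hi - lo) * ((double)rand() / (double)RAND_MAX);
}

// Connections from the bias neuron get RandUniform(-s, s); all others get RandUniform(0, s).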
-// -void Fann::InitWeights(const Fann::TrainData * pData) -{ - //float _smallest_inp = pData->input[0][0]; - //float _largest_inp = pData->input[0][0]; - float _smallest_inp; - float _largest_inp; - Fann::DataVector::GetMinMax(pData->InpL, &_smallest_inp, &_largest_inp); - const uint _num_hidden_neurons = (uint)(TotalNeurons - (NumInput + NumOutput + GetNumLayers())); - const float _scale_factor = (float)(pow((double)(0.7f * (double)_num_hidden_neurons), (double)(1.0f / (double)NumInput)) / (double)(_largest_inp - _smallest_inp)); - const Fann::Neuron * p_bias_neuron = P_FirstLayer->P_LastNeuron-1; - for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != P_LastLayer; p_layer_it++) { - const Fann::Neuron * p_last_neuron = p_layer_it->P_LastNeuron; - if(NetworkType == FANN_NETTYPE_LAYER) { - p_bias_neuron = (p_layer_it - 1)->P_LastNeuron-1; - } - for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) { - for(uint _num_connect = p_neuron_it->FirstCon; _num_connect < p_neuron_it->LastCon; _num_connect++) { - if(p_bias_neuron == PP_Connections[_num_connect]) { - P_Weights[_num_connect] = (float)fann_rand(-_scale_factor, _scale_factor); - } - else { - P_Weights[_num_connect] = (float)fann_rand(0, _scale_factor); - } - } - } - } - if(P_PrevTrainSlopes) - ClearTrainArrays(); -} - -FANN_EXTERNAL void FANN_API fann_print_parameters(Fann * ann) -{ - Fann::Layer * layer_it; - uint i; - printf("Input layer :%4d neurons, 1 bias\n", ann->NumInput); - for(layer_it = ann->P_FirstLayer + 1; layer_it != ann->P_LastLayer-1; layer_it++) { - if(ann->NetworkType == Fann::FANN_NETTYPE_SHORTCUT) { - printf(" Hidden layer :%4d neurons, 0 bias\n", (int)layer_it->GetCount()); - } - else { - printf(" Hidden layer :%4d neurons, 1 bias\n", (int)(layer_it->GetCount() - 1)); - } - } - printf("Output layer :%4d neurons\n", ann->NumOutput); - printf("Total neurons and biases :%4d\n", ann->GetTotalNeurons()); - printf("Total connections :%4d\n", ann->TotalConnections); - printf("Connection rate :%8.3f\n", ann->ConnectionRate); - printf("Network type : %s\n", Fann::GetAttrText(Fann::attrNetType, ann->NetworkType)); - printf("Training algorithm : %s\n", Fann::GetAttrText(Fann::attrTrainAlgorithm, ann->TrainingAlgorithm)); - printf("Training error function : %s\n", Fann::GetAttrText(Fann::attrErrorFunc, ann->TrainErrorFunction)); - printf("Training stop function : %s\n", Fann::GetAttrText(Fann::attrStopFunc, ann->TrainStopFunction)); - printf("Bit fail limit :%8.3f\n", ann->BitFailLimit); - printf("Learning rate :%8.3f\n", ann->LearningRate); - printf("Learning momentum :%8.3f\n", ann->LearningMomentum); - printf("Quickprop decay :%11.6f\n", ann->QuickpropDecay); - printf("Quickprop mu :%8.3f\n", ann->QuickpropMu); - printf("RPROP increase factor :%8.3f\n", ann->RpropIncreaseFactor); - printf("RPROP decrease factor :%8.3f\n", ann->RpropDecreaseFactor); - printf("RPROP delta min :%8.3f\n", ann->RpropDeltaMin); - printf("RPROP delta max :%8.3f\n", ann->RpropDeltaMax); - printf("Cascade output change fraction :%11.6f\n", ann->CascadeOutputChangeFraction); - printf("Cascade candidate change fraction :%11.6f\n", ann->CascadeCandidateChangeFraction); - printf("Cascade output stagnation epochs :%4d\n", ann->CascadeOutputStagnationEpochs); - printf("Cascade candidate stagnation epochs :%4d\n", ann->CascadeCandidateStagnationEpochs); - printf("Cascade max output epochs :%4d\n", ann->CascadeMaxOutEpochs); - printf("Cascade min output epochs :%4d\n", ann->CascadeMinOutEpochs); - 
printf("Cascade max candidate epochs :%4d\n", ann->CascadeMaxCandEpochs); - printf("Cascade min candidate epochs :%4d\n", ann->CascadeMinCandEpochs); - printf("Cascade weight multiplier :%8.3f\n", ann->CascadeWeightMultiplier); - printf("Cascade candidate limit :%8.3f\n", ann->CascadeCandidateLimit); - /* - for(i = 0; i < ann->cascade_activation_functions_count; i++) - printf("Cascade activation functions[%d] : %s\n", i, FANN_ACTIVATIONFUNC_NAMES[ann->P_CascadeActivationFunctions[i]]); - */ - for(i = 0; i < ann->CascadeActivationFuncList.getCount(); i++) { - printf("Cascade activation functions[%d] : %s\n", i, Fann::GetAttrText(Fann::attrActivationFunc, ann->CascadeActivationFuncList.get(i))); - } - /* - for(i = 0; i < ann->cascade_activation_steepnesses_count; i++) - printf("Cascade activation steepnesses[%d] :%8.3f\n", i, ann->CascadeActivationSteepnesses[i]); - */ - for(i = 0; i < ann->CascadeActivationSteepnessesList.getCount(); i++) { - printf("Cascade activation steepnesses[%d] :%8.3f\n", i, ann->CascadeActivationSteepnessesList[i]); - } - printf("Cascade candidate groups :%4d\n", ann->CascadeNumCandidateGroups); - printf("Cascade no. of candidates :%4d\n", ann->GetCascadeNumCandidates()); - /* @todo dump scale parameters */ -} - -FANN_EXTERNAL float FANN_API fann_get_connection_rate(Fann * ann) -{ - return ann->ConnectionRate; -} - -void Fann::GetLayerArray(LongArray & rList) const -{ - rList.clear(); - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - uint count = p_layer_it->GetCount(); - // Remove the bias from the count of neurons - switch(GetNetworkType()) { - case FANN_NETTYPE_LAYER: - --count; - break; - case FANN_NETTYPE_SHORTCUT: - // The bias in the first layer is reused for all layers - if(p_layer_it == P_FirstLayer) - --count; - break; - default: - // Unknown network type, assume no bias present - break; - } - rList.add((long)count); - } - assert(rList == Layers); -} - -void Fann::GetBiasArray(LongArray & rList) const -{ - rList.clear(); - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; ++p_layer_it) { - long bias = 0; // For unknown network type assume no bias present - switch(GetNetworkType()) { - case FANN_NETTYPE_LAYER: - // Report one bias in each layer except the last - bias = (p_layer_it != (P_LastLayer-1)) ? 1 : 0; - break; - case FANN_NETTYPE_SHORTCUT: - // The bias in the first layer is reused for all layers - bias = (p_layer_it == P_FirstLayer) ? 
1 : 0;
-				break;
-		}
-		rList.add(bias);
-	}
-}
-
-int Fann::GetConnectionArray(TSVector <FannConnection> & rList) const
-{
-	rList.clear();
-	int    ok = 1;
-	uint   _source_index = 0;
-	uint   _destination_index = 0;
-	const  Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron;
-	// The following assumes that the last unused bias has no connections
-	for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) {
-		for(Fann::Neuron * neuron_it = p_layer_it->P_FirstNeuron; neuron_it != p_layer_it->P_LastNeuron; neuron_it++) {
-			for(uint idx = neuron_it->FirstCon; idx < neuron_it->LastCon; idx++) {
-				// Assign the source, destination and weight
-				FannConnection c;
-				c.FromNeuron = (uint)(PP_Connections[_source_index] - p_first_neuron);
-				c.ToNeuron = _destination_index;
-				c.Weight = P_Weights[_source_index];
-				THROW(rList.insert(&c));
-				_source_index++;
-			}
-			_destination_index++;
-		}
-	}
-	CATCHZOK
-	return ok;
-}
-
-//FANN_EXTERNAL void FANN_API fann_set_weight_array(Fann * ann, FannConnection * pConnections, uint num_connections)
-void Fann::SetWeightArray(const FannConnection * pConnections, uint numConnections)
-{
-	for(uint idx = 0; idx < numConnections; idx++) {
-		SetWeight(pConnections[idx].FromNeuron, pConnections[idx].ToNeuron, pConnections[idx].Weight);
-	}
-}
-
-//FANN_EXTERNAL void FANN_API fann_set_weight(Fann * ann, uint fromNeuron, uint toNeuron, float weight)
-void Fann::SetWeight(uint fromNeuron, uint toNeuron, float weight)
-{
-	uint   _source_index = 0;
-	uint   _destination_index = 0;
-	Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron;
-	// Find the connection by a simple brute-force search through the network,
-	// matching one or more connections, to minimize data-structure dependencies.
-	// Nothing is done if the connection does not already exist in the network.
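	// Addressing scheme used here (an illustrative note, not original commentary): the
	// destination index runs over all neurons in layer order (biases included), and
	// slot i of the flat weight array belongs to the destination neuron whose
	// [FirstCon, LastCon) window contains i, while the source neuron is recovered as
	// PP_Connections[i] - p_first_neuron. SetWeight therefore scans all
	// TotalConnections slots and overwrites every (from, to) match; an O(1) lookup
	// would need an extra index that this code deliberately avoids building.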
- for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_layer_it->P_LastNeuron; p_neuron_it++) { - for(uint idx = p_neuron_it->FirstCon; idx < p_neuron_it->LastCon; idx++) { - // If the source and destination neurons match, assign the weight - if(toNeuron == _destination_index && ((int)fromNeuron == (PP_Connections[_source_index] - p_first_neuron))) { - P_Weights[_source_index] = weight; - } - _source_index++; - } - _destination_index++; - } - } -} - -size_t Fann::GetWeights(float * pWeights, size_t bufferSize) const -{ - size_t moved_size = 0; - const size_t _size = sizeof(float) * TotalConnections; - if(pWeights == 0) - moved_size = _size; - else if(bufferSize >= _size) { - memcpy(pWeights, P_Weights, _size); - moved_size = _size; - } - return moved_size; -} - -int Fann::SetWeights(const float * pWeights) -{ - memcpy(P_Weights, pWeights, sizeof(float) * TotalConnections); - return 1; -} - -FannError::FannError() -{ - errno_f = SLERR_SUCCESS; - //error_log = fann_default_error_log; -} - -FannError & FannError::Copy(const FannError & rS) -{ - errno_f = rS.errno_f; - //error_log = rS.error_log; - Msg = rS.Msg; - return *this; -} - -IMPL_INVARIANT_C(Fann) -{ - S_INVARIANT_PROLOG(pInvP); - const uint num_layers = Layers.getCount(); - S_ASSERT_P(num_layers > 1 && num_layers <= 1000, pInvP); - { - for(uint i = 0; i < num_layers; i++) { - const long neurons_in_layer = Layers.get(i); - S_ASSERT_P(neurons_in_layer >= 0 && neurons_in_layer <= 1000000, pInvP); - } - } - S_ASSERT_P(Layers.at(0) == NumInput, pInvP); - S_ASSERT_P(Layers.at(num_layers-1) == NumOutput, pInvP); - { - uint ln = 0; - uint total_neurons = 0; - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - S_ASSERT_P(Layers.at(ln) == p_layer_it->_Dim, pInvP); - ln++; - total_neurons += p_layer_it->GetCount(); - } - S_ASSERT_P(ln == Layers.getCount(), pInvP); - S_ASSERT_P(total_neurons == TotalNeurons, pInvP); - } - S_INVARIANT_EPILOG(pInvP); -} - -int Fann::AllocateLayers() -{ - int ok = 1; - uint i = 0; - THROW_S(P_FirstLayer = new Layer[Layers.getCount()], SLERR_NOMEM); - P_LastLayer = P_FirstLayer + Layers.getCount(); - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - p_layer_it->_Dim = Layers.get(i++); - p_layer_it->P_FirstNeuron = NULL; - p_layer_it->P_LastNeuron = NULL; - } - assert(i == Layers.getCount()); - CATCHZOK - return ok; -} - -int Fann::Helper_Construct(int type, float connectionRate, const LongArray & rLayers) -{ - int ok = 1; - State = 0; - { - const uint _lc = rLayers.getCount(); - assert(_lc > 1 && _lc <= 1000); - THROW_S(_lc > 1 && _lc <= 1000, SLERR_FANN_INVLAYERCOUNT); - for(uint i = 0; i < _lc; i++) { - const long v = rLayers.get(i); - assert(v >= 0 && v <= 1000000); - THROW_S(v >= 0 && v <= 1000000, SLERR_FANN_INVLAYERSIZE); - } - } - fann_seed_rand(); - NetworkType = type; // FANN_NETTYPE_LAYER; - ConnectionRate = MINMAX(connectionRate, 0.00001f, 1.0f); - Layers = rLayers; - LearningRate = 0.7f; - LearningMomentum = 0.0; - TotalNeurons = 0; - TotalConnections = 0; - NumInput = 0; - NumOutput = 0; - P_TrainErrors = NULL; - P_TrainSlopes = NULL; - P_PrevSteps = NULL; - P_PrevTrainSlopes = NULL; - P_PrevWeightsDeltas = NULL; - TrainingAlgorithm = FANN_TRAIN_RPROP; - num_MSE = 0; - MSE_value = 0; - NumBitFail = 0; - BitFailLimit = (float)0.35; - TrainErrorFunction = FANN_ERRORFUNC_TANH; - TrainStopFunction = FANN_STOPFUNC_MSE; 
- Callback = NULL; - P_UserData = NULL; // User is responsible for deallocation - P_Weights = NULL; - PP_Connections = NULL; - P_Output = NULL; - ScaleIn.Destroy(); - ScaleOut.Destroy(); - // variables used for cascade correlation (reasonable defaults) - CascadeOutputChangeFraction = 0.01f; - CascadeCandidateChangeFraction = 0.01f; - CascadeOutputStagnationEpochs = 12; - CascadeCandidateStagnationEpochs = 12; - CascadeNumCandidateGroups = 2; - CascadeWeightMultiplier = (float)0.4; - CascadeCandidateLimit = (float)1000.0; - CascadeMaxOutEpochs = 150; - CascadeMaxCandEpochs = 150; - CascadeMinOutEpochs = 50; - CascadeMinCandEpochs = 50; - P_CascadeCandidateScores = NULL; - { - CascadeActivationFuncList.clear(); - CascadeActivationFuncList.addzlist(FANN_SIGMOID, FANN_SIGMOID_SYMMETRIC, FANN_GAUSSIAN, FANN_GAUSSIAN_SYMMETRIC, - FANN_ELLIOT, FANN_ELLIOT_SYMMETRIC, FANN_SIN_SYMMETRIC, FANN_COS_SYMMETRIC, FANN_SIN, FANN_COS, 0); - assert(CascadeActivationFuncList.getCount() == 10); - } - { - CascadeActivationSteepnessesList.clear(); - CascadeActivationSteepnessesList.add(0.25f); - CascadeActivationSteepnessesList.add(0.5f); - CascadeActivationSteepnessesList.add(0.75f); - CascadeActivationSteepnessesList.add(1.0f); - assert(CascadeActivationSteepnessesList.getCount() == 4); - } - // Variables for use with with Quickprop training (reasonable defaults) - QuickpropDecay = -0.0001f; - QuickpropMu = 1.75; - // Variables for use with with RPROP training (reasonable defaults) - RpropIncreaseFactor = 1.2f; - RpropDecreaseFactor = 0.5; - RpropDeltaMin = 0.0; - RpropDeltaMax = 50.0; - RpropDeltaZero = 0.1f; - // Variables for use with SARPROP training (reasonable defaults) - SarpropWeightDecayShift = -6.644f; - SarpropStepErrorThresholdFactor = 0.1f; - SarpropStepErrorShift = 1.385f; - SarpropTemperature = 0.015f; - SarpropEpoch = 0; - Err.Msg = 0; - Err.errno_f = SLERR_SUCCESS; - // allocate room for the layers - //THROW_S(P_FirstLayer = (Fann::Layer *)SAlloc::C(rLayers.getCount(), sizeof(Fann::Layer)), SLERR_NOMEM); - THROW(AllocateLayers()); - // - //const int multiplier = Multiplier; - if(NetworkType == FANN_NETTYPE_LAYER) { - { - // determine how many neurons there should be in each layer - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - // we do not allocate room here, but we make sure that - // last_neuron - first_neuron is the number of neurons - p_layer_it->P_LastNeuron = p_layer_it->P_FirstNeuron + p_layer_it->_Dim + 1; // +1 for bias - TotalNeurons += p_layer_it->GetCount(); - } - } - NumOutput = ((P_LastLayer-1)->GetCount() - 1); - NumInput = (P_FirstLayer->GetCount() - 1); - // allocate room for the actual neurons - THROW(AllocateNeurons()); - { - uint _num_neurons_in = NumInput; - for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != P_LastLayer; p_layer_it++) { - const uint _num_neurons_out = (p_layer_it->GetCount() - 1); - // if all neurons in each layer should be connected to at least one neuron - // in the previous layer, and one neuron in the next layer. - // and the bias node should be connected to the all neurons in the next layer. 
-				// Then this is the minimum number of connections
-				const uint _min_connections = MAX(_num_neurons_in, _num_neurons_out); // not counting bias
-				const uint _max_connections = _num_neurons_in * _num_neurons_out; // not counting bias
-				const uint _num_connections = MAX(_min_connections, (uint)(0.5 + (connectionRate * _max_connections))) + _num_neurons_out;
-				const uint _connections_per_neuron = _num_connections / _num_neurons_out;
-				uint   _allocated_connections = 0;
-				// Now split out the connections on the different neurons
-				uint   i = 0;
-				for(i = 0; i != _num_neurons_out; i++) {
-					p_layer_it->P_FirstNeuron[i].FirstCon = TotalConnections + _allocated_connections;
-					_allocated_connections += _connections_per_neuron;
-					p_layer_it->P_FirstNeuron[i].LastCon = TotalConnections + _allocated_connections;
-					p_layer_it->P_FirstNeuron[i].ActivationFunction = Fann::FANN_SIGMOID_STEPWISE;
-					p_layer_it->P_FirstNeuron[i].ActivationSteepness = 0.5;
-					if(_allocated_connections < (_num_connections * (i + 1)) / _num_neurons_out) {
-						p_layer_it->P_FirstNeuron[i].LastCon++;
-						_allocated_connections++;
-					}
-				}
-				// the bias neuron also gets connection slots
-				p_layer_it->P_FirstNeuron[i].FirstCon = TotalConnections + _allocated_connections;
-				p_layer_it->P_FirstNeuron[i].LastCon = TotalConnections + _allocated_connections;
-				TotalConnections += _num_connections;
-				// used in the next run of the loop
-				_num_neurons_in = _num_neurons_out;
-			}
-		}
-		THROW(AllocateConnections());
-		if(connectionRate >= 1) {
-			uint   prev_layer_size = NumInput + 1; // @debug
-			const  Fann::Layer * p_prev_layer = P_FirstLayer;
-			const  Fann::Layer * p_last_layer = P_LastLayer;
-			for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != p_last_layer; p_layer_it++) {
-				const Fann::Neuron * last_neuron = p_layer_it->P_LastNeuron - 1;
-				for(Fann::Neuron * neuron_it = p_layer_it->P_FirstNeuron; neuron_it != last_neuron; neuron_it++) {
-					const uint _tmp_con = neuron_it->LastCon - 1;
-					for(uint i = neuron_it->FirstCon; i != _tmp_con; i++) {
-						P_Weights[i] = (float)fann_random_weight();
-						// these connections are still initialized for fully connected networks, to allow
-						// operations that are not optimized for fully connected networks to keep working.
-						PP_Connections[i] = p_prev_layer->P_FirstNeuron + (i - neuron_it->FirstCon);
-					}
-					// bias weight
-					P_Weights[_tmp_con] = (float)fann_random_bias_weight();
-					PP_Connections[_tmp_con] = p_prev_layer->P_FirstNeuron + (_tmp_con - neuron_it->FirstCon);
-				}
-				prev_layer_size = p_layer_it->GetCount(); // @debug
-				p_prev_layer = p_layer_it;
-			}
-		}
-		else {
-			// make connections for a network that is not fully connected
-			//
-			// Generally, what we do is first connect all the input
-			// neurons to an output neuron, respecting the number of
-			// available input neurons for each output neuron. Then
-			// we go through all the output neurons and connect the
-			// rest of the connections to input neurons that they are not already connected to.
-			//
-			// All the connections are cleared by calloc, because we want to
-			// be able to see which connections are already connected.
-			//
-			for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != P_LastLayer; p_layer_it++) {
-				const uint num_neurons_out = (p_layer_it->GetCount() - 1);
-				const uint num_neurons_in = ((p_layer_it - 1)->GetCount() - 1);
-				// first connect the bias neuron
-				Fann::Neuron * p_bias_neuron = (p_layer_it - 1)->P_LastNeuron - 1;
-				const Fann::Neuron * last_neuron = p_layer_it->P_LastNeuron - 1;
-				{
-					for(Fann::Neuron * neuron_it = p_layer_it->P_FirstNeuron; neuron_it != last_neuron; neuron_it++) {
-						PP_Connections[neuron_it->FirstCon] = p_bias_neuron;
-						P_Weights[neuron_it->FirstCon] = (float)fann_random_bias_weight();
-					}
-				}
-				// then connect all neurons in the input layer
-				last_neuron = (p_layer_it - 1)->P_LastNeuron - 1;
-				{
-					for(Fann::Neuron * neuron_it = (p_layer_it - 1)->P_FirstNeuron; neuron_it != last_neuron; neuron_it++) {
-						// random neuron in the output layer that has space for more connections
-						Fann::Neuron * p_random_neuron;
-						do {
-							const uint _random_number = (int)(0.5 + fann_rand(0, num_neurons_out - 1));
-							p_random_neuron = p_layer_it->P_FirstNeuron + _random_number;
-							// checks the last slot in the connections array for room
-						} while(PP_Connections[p_random_neuron->LastCon-1]);
-						// find an empty slot in the connection array and connect
-						for(uint i = p_random_neuron->FirstCon; i < p_random_neuron->LastCon; i++) {
-							if(PP_Connections[i] == NULL) {
-								PP_Connections[i] = neuron_it;
-								P_Weights[i] = (float)fann_random_weight();
-								break;
-							}
-						}
-					}
-				}
-				// then connect the rest of the unconnected neurons
-				last_neuron = p_layer_it->P_LastNeuron - 1;
-				{
-					for(Fann::Neuron * neuron_it = p_layer_it->P_FirstNeuron; neuron_it != last_neuron; neuron_it++) {
-						// find an empty slot in the connection array and connect
-						for(uint i = neuron_it->FirstCon; i < neuron_it->LastCon; i++) {
-							if(PP_Connections[i] == NULL) { // an empty slot; non-empty slots are already connected
-								uint   _found_connection;
-								Fann::Neuron * p_random_neuron = 0;
-								do {
-									_found_connection = 0;
-									const uint _random_number = (int)(0.5 + fann_rand(0, num_neurons_in - 1));
-									p_random_neuron = (p_layer_it - 1)->P_FirstNeuron + _random_number;
-									// check to see if this connection is already there
-									for(uint j = neuron_it->FirstCon; j < i; j++) {
-										if(p_random_neuron == PP_Connections[j]) {
-											_found_connection = 1;
-											break;
-										}
-									}
-								} while(_found_connection);
-								// we have found a neuron that is not already connected to us, connect it
-								PP_Connections[i] = p_random_neuron;
-								P_Weights[i] = (float)fann_random_weight();
-							}
-						}
-					}
-				}
-			}
-			//
-			// @todo it would be nice to have the randomly created connections sorted for smoother memory access.
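			// A worked example of the connection-count arithmetic above (illustrative
			// numbers): for a layer pair with 4 inputs, 3 outputs and connectionRate = 0.5:
			//   min_connections        = MAX(4, 3)                   = 4
			//   max_connections        = 4 * 3                       = 12
			//   num_connections        = MAX(4, round(0.5 * 12)) + 3 = 9   (+3 = one bias slot per output neuron)
			//   connections_per_neuron = 9 / 3                       = 3
			// The remainder check inside the split loop tops up individual neurons so
			// that all nine slots are handed out even when the division is not exact.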
- // - } - } - else if(NetworkType == FANN_NETTYPE_SHORTCUT) { - { - // determine how many neurons there should be in each layer - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - // we do not allocate room here, but we make sure that - // last_neuron - P_FirstNeuron is the number of neurons - p_layer_it->P_LastNeuron = p_layer_it->P_FirstNeuron + p_layer_it->_Dim/*pLayers[i++]*/; - if(p_layer_it == P_FirstLayer) - p_layer_it->P_LastNeuron++; // there is a bias neuron in the first layer - TotalNeurons += p_layer_it->GetCount(); - } - } - NumOutput = (P_LastLayer-1)->GetCount(); - NumInput = (P_FirstLayer->GetCount() - 1); - THROW(AllocateNeurons()); - { - uint num_neurons_in = NumInput; - for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != P_LastLayer; p_layer_it++) { - const uint num_neurons_out = p_layer_it->GetCount(); - // Now split out the connections on the different neurons - for(uint i = 0; i != num_neurons_out; i++) { - p_layer_it->P_FirstNeuron[i].FirstCon = TotalConnections; - TotalConnections += num_neurons_in + 1; - p_layer_it->P_FirstNeuron[i].LastCon = TotalConnections; - p_layer_it->P_FirstNeuron[i].ActivationFunction = Fann::FANN_SIGMOID_STEPWISE; - p_layer_it->P_FirstNeuron[i].ActivationSteepness = 0.5; - } - num_neurons_in += num_neurons_out; // used in the next run of the loop - } - } - THROW(AllocateConnections()); - { - // Connections are created from all neurons to all neurons in later layers - uint num_neurons_in = NumInput + 1; - for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != P_LastLayer; p_layer_it++) { - for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_layer_it->P_LastNeuron; p_neuron_it++) { - uint i = p_neuron_it->FirstCon; - for(Fann::Layer * p_layer_it2 = P_FirstLayer; p_layer_it2 != p_layer_it; p_layer_it2++) { - for(Fann::Neuron * p_neuron_it2 = p_layer_it2->P_FirstNeuron; p_neuron_it2 != p_layer_it2->P_LastNeuron; p_neuron_it2++) { - P_Weights[i] = (float)fann_random_weight(); - PP_Connections[i] = p_neuron_it2; - i++; - } - } - } - num_neurons_in += p_layer_it->GetCount(); - } - } - } - { - SInvariantParam ip; - assert(InvariantC(&ip)); - } - CATCH - // ZFREE(P_CascadeActivationFunctions); - // ZFREE(CascadeActivationSteepnesses); - Destroy(); - State |= stError; - ok = 0; - ENDCATCH - return ok; -} - -void Fann::Helper_Init() -{ - P_Weights = 0; - PP_Connections = 0; - P_FirstLayer = 0; - P_LastLayer = 0; - P_Output = 0; - P_TrainErrors = 0; - P_TrainSlopes = 0; - P_PrevTrainSlopes = 0; - P_PrevSteps = 0; - P_PrevWeightsDeltas = 0; - P_CascadeCandidateScores = 0; - TotalNeurons = 0; - TotalNeuronsAllocated = 0; - TotalConnections = 0; - TotalConnectionsAllocated = 0; -} - -Fann::Fann(int type, float connectionRate, const LongArray & rLayers) -{ - Helper_Init(); - Helper_Construct(type, connectionRate, rLayers); -} - -Fann::Fann(const Fann & rS) -{ - Helper_Init(); - Copy(rS); -} - -Fann::Fann(SBuffer & rBuf, SSerializeContext * pSCtx) -{ - Helper_Init(); - if(!Serialize(-1, rBuf, pSCtx)) { - Destroy(); - State |= stError; - } -} - -Fann::~Fann() -{ - Destroy(); -} - -void Fann::Destroy() -{ - State = 0; - ZFREE(P_Weights); - ZFREE(PP_Connections); - if(P_FirstLayer) { - ZFREE(P_FirstLayer->P_FirstNeuron); - delete [] P_FirstLayer; - P_FirstLayer = 0; - } - P_LastLayer = 0; - ZFREE(P_Output); - ZFREE(P_TrainErrors); - ZFREE(P_TrainSlopes); - ZFREE(P_PrevTrainSlopes); - ZFREE(P_PrevSteps); - ZFREE(P_PrevWeightsDeltas); - ZFREE(P_CascadeCandidateScores); - 
Layers.freeAll(); - ScaleIn.Destroy(); - ScaleOut.Destroy(); - TotalNeurons = 0; - TotalConnectionsAllocated = 0; - TotalConnections = 0; - TotalConnectionsAllocated = 0; - Callback = 0; - P_UserData = 0; - CascadeActivationFuncList.freeAll(); - CascadeActivationSteepnessesList.freeAll(); -} - -FANN_EXTERNAL void FANN_API fann_destroy(Fann * pAnn) -{ - ZDELETE(pAnn); -} - -float * Fann::ScaleAllocate(uint c, float defValue) -{ - float * p_list = (float *)SAlloc::C(c, sizeof(float)); - if(p_list == NULL) { - fann_error(&Err, SLERR_FANN_CANT_ALLOCATE_MEM); - //fann_destroy(ann); - } - else { - for(uint i = 0; i < c; i++) - p_list[i] = defValue; - } - return p_list; - /* - ann->what ## _ ## where = (float*)SAlloc::C(ann->num_ ## where ## put, sizeof( float )); \ - if(ann->what ## _ ## where == NULL) { \ - fann_error(NULL, SLERR_FANN_CANT_ALLOCATE_MEM); \ - fann_destroy(ann); \ - return 1; \ - } \ - for(i = 0; i < ann->num_ ## where ## put; i++) \ - ann->what ## _ ## where[ i ] = ( default_value ); - */ -} -// -// INTERNAL FUNCTION -// Allocates room for the scaling parameters. -// -//int fann_allocate_scale(Fann * ann) -int Fann::AllocateScale() -{ - // todo this should only be allocated when needed - /* - uint i = 0; -#define SCALE_ALLOCATE(what, where, default_value) \ - ann->what ## _ ## where = (float*)SAlloc::C(ann->num_ ## where ## put, sizeof( float )); \ - if(ann->what ## _ ## where == NULL) { \ - fann_error(NULL, SLERR_FANN_CANT_ALLOCATE_MEM); \ - fann_destroy(ann); \ - return 1; \ - } \ - for(i = 0; i < ann->num_ ## where ## put; i++) \ - ann->what ## _ ## where[ i ] = ( default_value ); - SCALE_ALLOCATE(scale_mean, in, 0.0) - SCALE_ALLOCATE(scale_deviation, in, 1.0) - SCALE_ALLOCATE(scale_new_min, in, -1.0) - SCALE_ALLOCATE(scale_factor, in, 1.0) - SCALE_ALLOCATE(scale_mean, out, 0.0) - SCALE_ALLOCATE(scale_deviation, out, 1.0) - SCALE_ALLOCATE(scale_new_min, out, -1.0) - SCALE_ALLOCATE(scale_factor, out, 1.0) -#undef SCALE_ALLOCATE - */ - THROW(ScaleIn.Allocate(NumInput)); - THROW(ScaleOut.Allocate(NumOutput)); - CATCH - fann_destroy(this); // @bad - return 0; - ENDCATCH - return 1; -} - -int Fann::AllocateNeurons() -{ - int ok = 1; - EXCEPTVAR(*(int *)Err.errno_f); - uint _num_neurons_so_far = 0; - uint _num_neurons = 0; - // all the neurons is allocated in one long array (calloc clears mem) - Fann::Neuron * p_neurons = (Fann::Neuron *)SAlloc::C(TotalNeurons, sizeof(Fann::Neuron)); - TotalNeuronsAllocated = TotalNeurons; - THROW_V(p_neurons, SLERR_FANN_CANT_ALLOCATE_MEM); - for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) { - _num_neurons = p_layer_it->GetCount(); - p_layer_it->P_FirstNeuron = p_neurons + _num_neurons_so_far; - p_layer_it->P_LastNeuron = p_layer_it->P_FirstNeuron + _num_neurons; - _num_neurons_so_far += _num_neurons; - } - THROW_V(P_Output = (float *)SAlloc::C(_num_neurons, sizeof(float)), SLERR_FANN_CANT_ALLOCATE_MEM); - CATCH - fann_error(&Err, Err.errno_f); - ok = 0; - ENDCATCH - return ok; -} - -int Fann::AllocateConnections() -{ - int ok = 1; - THROW(P_Weights = (float *)SAlloc::C(TotalConnections, sizeof(float))); - TotalConnectionsAllocated = TotalConnections; - // @todo make special cases for all places where the connections - // is used, so that it is not needed for fully connected networks - THROW(PP_Connections = (Fann::Neuron **)SAlloc::C(TotalConnectionsAllocated, sizeof(Fann::Neuron *))); - CATCH - ok = 0; - fann_error(&Err, SLERR_FANN_CANT_ALLOCATE_MEM); - ENDCATCH - return ok; -} - -#ifdef FANN_NO_SEED - int 
FANN_SEED_RAND = 0; -#else - int FANN_SEED_RAND = 1; -#endif - -FANN_EXTERNAL void FANN_API fann_disable_seed_rand() -{ - FANN_SEED_RAND = 0; -} - -FANN_EXTERNAL void FANN_API fann_enable_seed_rand() -{ - FANN_SEED_RAND = 1; -} -// -// INTERNAL FUNCTION -// Seed the random function. -// -void fann_seed_rand() -{ -#ifndef _WIN32 - FILE * fp = fopen("/dev/urandom", "r"); - uint foo; - struct timeval t; - if(!fp) { - gettimeofday(&t, 0); - foo = t.tv_usec; - } - else { - if(fread(&foo, sizeof(foo), 1, fp) != 1) { - gettimeofday(&t, 0); - foo = t.tv_usec; - } - fclose(fp); - } - if(FANN_SEED_RAND) { - srand(foo); - } -#else - /* COMPAT_TIME REPLACEMENT */ - if(FANN_SEED_RAND) { - srand(GetTickCount()); - } -#endif -} -// -// FANN_CASCADE -// -/* #define CASCADE_DEBUG */ -/* #define CASCADE_DEBUG_FULL */ - -void fann_print_connections_raw(Fann * ann) -{ - for(uint i = 0; i < ann->TotalConnectionsAllocated; i++) { - if(i == ann->TotalConnections) - printf("* "); - printf("%f ", ann->P_Weights[i]); - } - printf("\n\n"); -} -// -// Cascade training directly on the training data. -// The connected_neurons pointers are not valid during training, -// but they will be again after training. -// -//FANN_EXTERNAL void FANN_API fann_cascadetrain_on_data(Fann * ann, Fann::TrainData * pData, uint maxNeurons, uint neuronsBetweenReports, float desiredError) -int Fann::CascadeTrainOnData(const Fann::TrainData * pData, uint maxNeurons, uint neuronsBetweenReports, float desiredError) -{ - int ok = 1; - float _error; - uint _total_epochs = 0; - int is_desired_error_reached = 0; - if(neuronsBetweenReports && !Callback) { - printf("Max neurons %3d. Desired error: %.6f\n", maxNeurons, desiredError); - } - for(uint i = 1; i <= maxNeurons; i++) { - // train output neurons - _total_epochs += TrainOutputs(pData, desiredError); - _error = GetMSE(); - is_desired_error_reached = DesiredErrorReached(desiredError); - // print current error - if(neuronsBetweenReports && (i % neuronsBetweenReports == 0 || i == maxNeurons || i == 1 || is_desired_error_reached)) { - if(!Callback) { - printf("Neurons %3d. Current error: %.6f. Total error:%8.4f. Epochs %5d. Bit fail %3d", i-1, _error, MSE_value, _total_epochs, NumBitFail); - if((P_LastLayer-2) != P_FirstLayer) { - printf(". candidate steepness %.2f. function %s", (P_LastLayer-2)->P_FirstNeuron->ActivationSteepness, - Fann::GetAttrText(Fann::attrActivationFunc, (P_LastLayer-2)->P_FirstNeuron->ActivationFunction)); - } - printf("\n"); - } - else if((*Callback)(this, pData, maxNeurons, neuronsBetweenReports, desiredError, _total_epochs) == -1) { - break; // you can break the training by returning -1 - } - } - if(is_desired_error_reached) - break; - else { - THROW(InitializeCandidates()); - // train new candidates - _total_epochs += TrainCandidates(pData); - //InstallCandidate(); // this installs the best candidate - //void fann_install_candidate(Fann * ann) - //void Fann::InstallCandidate() - { - Fann::Layer * p_new_layer = AddLayer(P_LastLayer-1); - THROW(p_new_layer); - AddCandidateNeuron(p_new_layer); - } - } - } - // Train outputs one last time but without any desired error - _total_epochs += TrainOutputs(pData, 0.0); - if(neuronsBetweenReports && !Callback) { - printf("Train outputs Current error: %.6f. Epochs %6d\n", GetMSE(), _total_epochs); - } - // Set pointers in connected_neurons - // This is ONLY done in the end of cascade training, - // since there is no need for them during training. 
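	// In a shortcut-connected net every neuron past the input layer receives input from
	// all neurons that precede it, so the deferred pointer fix-up is trivial (a sketch,
	// using the names from SetShortcutConnections further below):
	//   for each hidden/output neuron n:
	//       for k in [0, n->GetConCount()): PP_Connections[n->FirstCon + k] = first_neuron + k
	// i.e. slot k of a neuron's connection window simply points at the k-th neuron of
	// the whole network.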
- SetShortcutConnections(); - CATCHZOK - return ok; -} - -/*FANN_EXTERNAL void FANN_API fann_cascadetrain_on_file(Fann * ann, const char * filename, uint max_neurons, uint neurons_between_reports, float desired_error) -{ - Fann::TrainData * p_data = fann_read_train_from_file(filename); - if(p_data) { - ann->CascadeTrainOnData(p_data, max_neurons, neurons_between_reports, desired_error); - delete p_data; - } -}*/ - -//int fann_train_outputs(Fann * ann, Fann::TrainData * pData, float desiredError) -int Fann::TrainOutputs(const Fann::TrainData * pData, float desiredError) -{ - float _target_improvement = 0.0; - float _backslide_improvement = -1.0e20f; - const uint _max_epochs = CascadeMaxOutEpochs; - const uint _min_epochs = CascadeMinOutEpochs; - uint _stagnation = _max_epochs; - // @todo should perhaps not clear all arrays - ClearTrainArrays(); - // run an initial epoch to set the initital error - const float initial_error = TrainOutputsEpoch(pData); - if(DesiredErrorReached(desiredError)) - return 1; - else { - for(uint i = 1; i < _max_epochs; i++) { - const float _error = TrainOutputsEpoch(pData); - // printf("Epoch %6d. Current error: %.6f. Bit fail %d.\n", i, error, ann->num_bit_fail); - if(DesiredErrorReached(desiredError)) { - return (i + 1); - } - else { - // Improvement since start of train - const float error_improvement = initial_error - _error; - // After any significant change, set a new goal and allow a new quota of epochs to reach it - if((_target_improvement >= 0 && (error_improvement > _target_improvement || error_improvement < _backslide_improvement)) || - (_target_improvement < 0 && (error_improvement < _target_improvement || error_improvement > _backslide_improvement))) { - /*printf("error_improvement=%f, target_improvement=%f, backslide_improvement=%f, - stagnation=%d\n", error_improvement, target_improvement, backslide_improvement, stagnation); - */ - _target_improvement = error_improvement * (1.0f + CascadeOutputChangeFraction); - _backslide_improvement = error_improvement * (1.0f - CascadeOutputChangeFraction); - _stagnation = i + CascadeOutputStagnationEpochs; - } - // No improvement in allotted period, so quit - if(i >= _stagnation && i >= _min_epochs) { - return (i + 1); - } - } - } - return _max_epochs; - } -} - -//float fann_train_outputs_epoch(Fann * ann, Fann::TrainData * data) -float Fann::TrainOutputsEpoch(const Fann::TrainData * pData) -{ - ResetMSE(); - for(uint i = 0; i < pData->GetCount(); i++) { - Run((const float *)pData->InpL.at(i)->dataPtr()); - ComputeMSE((const float *)pData->OutL.at(i)->dataPtr()); - UpdateSlopesBatch(P_LastLayer-1, P_LastLayer-1); - } - switch(TrainingAlgorithm) { - case FANN_TRAIN_RPROP: - UpdateWeightsIrpropm((P_LastLayer-1)->P_FirstNeuron->FirstCon, TotalConnections); - break; - case FANN_TRAIN_SARPROP: - UpdateWeightsSarprop(SarpropEpoch, (P_LastLayer-1)->P_FirstNeuron->FirstCon, TotalConnections); - ++(SarpropEpoch); - break; - case FANN_TRAIN_QUICKPROP: - UpdateWeightsQuickprop(pData->GetCount(), (P_LastLayer-1)->P_FirstNeuron->FirstCon, TotalConnections); - break; - case FANN_TRAIN_BATCH: - case FANN_TRAIN_INCREMENTAL: - fann_error(&Err, SLERR_FANN_CANT_USE_TRAIN_ALG); - } - return GetMSE(); -} - -//int fann_reallocate_connections(Fann * ann, uint totalConnections) -int Fann::ReallocateConnections(uint totalConnections) -{ - int ok = 1; - // The connections are allocated, but the pointers inside are - // first moved in the end of the cascade training session. 
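	// A note on the SAlloc::R (realloc) calls below: the returned block may move, which
	// is harmless for the connection arrays because they are only ever indexed, never
	// pointed into. Neurons are different: ReallocateNeurons below must re-aim every
	// layer's P_FirstNeuron/P_LastNeuron at the new base address so that they keep
	// naming the same logical neurons after the move.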
-	THROW(PP_Connections = (Fann::Neuron**)SAlloc::R(PP_Connections, totalConnections * sizeof(Fann::Neuron *)));
-	THROW(P_Weights = (float*)SAlloc::R(P_Weights, totalConnections * sizeof(float)));
-	THROW(P_TrainSlopes = (float*)SAlloc::R(P_TrainSlopes, totalConnections * sizeof(float)));
-	THROW(P_PrevSteps = (float*)SAlloc::R(P_PrevSteps, totalConnections * sizeof(float)));
-	THROW(P_PrevTrainSlopes = (float*)SAlloc::R(P_PrevTrainSlopes, totalConnections * sizeof(float)));
-	TotalConnectionsAllocated = totalConnections;
-	CATCHZOK
-	return ok;
-}
-
-//int fann_reallocate_neurons(Fann * pAnn, uint totalNeurons)
-int Fann::ReallocateNeurons(uint totalNeurons)
-{
-	int    ok = 1;
-	Fann::Neuron * p_neurons = (Fann::Neuron*)SAlloc::R(P_FirstLayer->P_FirstNeuron, totalNeurons * sizeof(Fann::Neuron));
-	TotalNeuronsAllocated = totalNeurons;
-	THROW(p_neurons);
-	// Also allocate room for more train_errors
-	THROW(P_TrainErrors = (float*)SAlloc::R(P_TrainErrors, totalNeurons * sizeof(float)));
-	if(p_neurons != P_FirstLayer->P_FirstNeuron) {
-		// The memory has moved, so the pointers must move too
-		// Move pointers from layers to neurons
-		uint   _num_neurons_so_far = 0;
-		for(Fann::Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) {
-			uint _num_neurons = p_layer_it->GetCount();
-			p_layer_it->P_FirstNeuron = p_neurons + _num_neurons_so_far;
-			p_layer_it->P_LastNeuron = p_layer_it->P_FirstNeuron + _num_neurons;
-			_num_neurons_so_far += _num_neurons;
-		}
-	}
-	CATCHZOK
-	return ok;
-}
-
-int FASTCALL Fann::DesiredErrorReached(float desired_error) const
-{
-	int    yes = 0;
-	switch(TrainStopFunction) {
-		case FANN_STOPFUNC_MSE:
-			if(GetMSE() <= desired_error)
-				yes = 1;
-			break;
-		case FANN_STOPFUNC_BIT:
-			if(NumBitFail <= (uint)desired_error)
-				yes = 1;
-			break;
-	}
-	return yes;
-}
-
-//void initialize_candidate_weights(Fann * ann, uint first_con, uint last_con, float scale_factor)
-void Fann::InitializeCandidateWeights(uint firstCon, uint lastCon, float scaleFactor)
-{
-	const uint _bias_weight = (firstCon + P_FirstLayer->GetCount() - 1);
-	float  _prev_step = (TrainingAlgorithm == FANN_TRAIN_RPROP) ? RpropDeltaZero : 0;
-	for(uint i = firstCon; i < lastCon; i++) {
-		if(i == _bias_weight)
-			P_Weights[i] = fann_rand(-scaleFactor, scaleFactor);
-		else
-			P_Weights[i] = fann_rand(0, scaleFactor);
-		P_TrainSlopes[i] = 0;
-		P_PrevSteps[i] = _prev_step;
-		P_PrevTrainSlopes[i] = 0;
-	}
-}
-
-//int fann_initialize_candidates(Fann * ann)
-int Fann::InitializeCandidates()
-{
-	int    ok = 1;
-	// The candidates are allocated after the normal neurons and connections,
-	// but there is an empty place between the real neurons and the candidate neurons,
-	// so that it will be possible to make room when the chosen candidate is copied into the desired place.
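	// A worked example of the sizing below, assuming the constructor defaults
	// (10 cascade activation functions x 4 steepnesses x 2 candidate groups):
	//   num_candidates = 10 * 4 * 2 = 80             (GetCascadeNumCandidates())
	//   num_neurons    = TotalNeurons + 80 + 1       (+1 is the gap neuron)
	//   connections    = TotalConnections + TotalNeurons * (80 + 1)
	// since each candidate owns (TotalNeurons - NumOutput) input slots plus NumOutput
	// output slots, i.e. exactly TotalNeurons connection slots per candidate.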
- uint _neurons_to_allocate; - uint _connections_to_allocate; - const uint _num_candidates = GetCascadeNumCandidates(); - const uint _num_neurons = TotalNeurons + _num_candidates + 1; - const uint _num_hidden_neurons = TotalNeurons - NumInput - NumOutput; - const uint _candidate_connections_in = TotalNeurons - NumOutput; - const uint _candidate_connections_out = NumOutput; - // - // the number of connections going into a and out of a candidate is ann->TotalNeurons - // - const uint _num_connections = TotalConnections + (TotalNeurons * (_num_candidates + 1)); - const uint _first_candidate_connection = TotalConnections + TotalNeurons; - const uint _first_candidate_neuron = TotalNeurons + 1; - // First make sure that there is enough room, and if not then allocate a - // bit more so that we do not need to allocate more room each time. - if(_num_neurons > TotalNeuronsAllocated) { - // Then we need to allocate more neurons - // Allocate half as many neurons as already exist (at least ten) - _neurons_to_allocate = _num_neurons + _num_neurons / 2; - SETMAX(_neurons_to_allocate, _num_neurons + 10); - THROW(ReallocateNeurons(_neurons_to_allocate)); - } - if(_num_connections > TotalConnectionsAllocated) { - // Then we need to allocate more connections. Allocate half as many connections as already exist (at least enough for ten neurons) - _connections_to_allocate = _num_connections + _num_connections / 2; - if(_connections_to_allocate < (_num_connections + TotalNeurons * 10)) - _connections_to_allocate = _num_connections + TotalNeurons * 10; - THROW(ReallocateConnections(_connections_to_allocate)); - } - { - // Some code to do semi Widrow + Nguyen initialization - float _scale_factor = (float)(2.0 * pow(0.7f * (float)_num_hidden_neurons, 1.0f / (float)NumInput)); - if(_scale_factor > 8) - _scale_factor = 8; - else if(_scale_factor < 0.5) - _scale_factor = 0.5; - { - // - // Set the neurons - // - uint connection_it = _first_candidate_connection; - Fann::Neuron * p_neurons = P_FirstLayer->P_FirstNeuron; - uint candidate_index = _first_candidate_neuron; - for(uint i = 0; i < CascadeActivationFuncList.getCount(); i++) { - for(uint j = 0; j < CascadeActivationSteepnessesList.getCount(); j++) { - const float steepness = CascadeActivationSteepnessesList[j]; - for(uint k = 0; k < CascadeNumCandidateGroups; k++) { - // @todo candidates should actually be created both in - // the last layer before the output layer, and in a new layer. - p_neurons[candidate_index].Value = 0; - p_neurons[candidate_index].Sum = 0; - p_neurons[candidate_index].ActivationFunction = (ActivationFunc)CascadeActivationFuncList.get(i); - p_neurons[candidate_index].ActivationSteepness = steepness; //CascadeActivationSteepnesses[j]; - p_neurons[candidate_index].FirstCon = connection_it; - connection_it += _candidate_connections_in; - p_neurons[candidate_index].LastCon = connection_it; - // We have no specific pointers to the output weights, but they are available after last_con - connection_it += _candidate_connections_out; - P_TrainErrors[candidate_index] = 0; - InitializeCandidateWeights(p_neurons[candidate_index].FirstCon, p_neurons[candidate_index].LastCon+_candidate_connections_out, _scale_factor); - candidate_index++; - } - } - } - } - } - // - // Now randomize the weights and zero out the arrays that needs zeroing out. - // - /* - #ifdef CASCADE_DEBUG_FULL - printf("random cand weight [%d ... 
%d]\n", first_candidate_connection, num_connections - 1); - #endif - for(i = first_candidate_connection; i < num_connections; i++) { - //P_Weights[i] = fann_random_weight(); - P_Weights[i] = fann_rand(-2.0,2.0); - P_TrainSlopes[i] = 0; - P_PrevSteps[i] = 0; - P_PrevTrainSlopes[i] = initial_slope; - } - */ - CATCHZOK - return ok; -} - -//int fann_train_candidates(Fann * ann, Fann::TrainData * pData) -int Fann::TrainCandidates(const Fann::TrainData * pData) -{ - float _best_cand_score = 0.0; - float _target_cand_score = 0.0; - float _backslide_cand_score = -1.0e20f; - const uint _max_epochs = CascadeMaxCandEpochs; - const uint _min_epochs = CascadeMinCandEpochs; - uint _stagnation = _max_epochs; - if(P_CascadeCandidateScores == NULL) { - P_CascadeCandidateScores = (float*)SAlloc::M(GetCascadeNumCandidates() * sizeof(float)); - if(P_CascadeCandidateScores == NULL) { - fann_error(&Err, SLERR_FANN_CANT_ALLOCATE_MEM); - return 0; - } - } - for(uint i = 0; i < _max_epochs; i++) { - _best_cand_score = TrainCandidatesEpoch(pData); - if((_best_cand_score / MSE_value) > CascadeCandidateLimit) { - return i + 1; - } - else { - if((_best_cand_score > _target_cand_score) || (_best_cand_score < _backslide_cand_score)) { - _target_cand_score = _best_cand_score * (1.0f + CascadeCandidateChangeFraction); - _backslide_cand_score = _best_cand_score * (1.0f - CascadeCandidateChangeFraction); - _stagnation = i + CascadeCandidateStagnationEpochs; - } - // No improvement in allotted period, so quit - if(i >= _stagnation && i >= _min_epochs) { - return i + 1; - } - } - } - return _max_epochs; -} - -void Fann::UpdateCandidateSlopes() -{ - Fann::Neuron * p_neurons = P_FirstLayer->P_FirstNeuron; - Fann::Neuron * p_first_cand = p_neurons + TotalNeurons + 1; - Fann::Neuron * p_last_cand = p_first_cand + GetCascadeNumCandidates(); - const uint _num_output = NumOutput; - const float * p_output_train_errors = P_TrainErrors + (TotalNeurons - NumOutput); - for(Fann::Neuron * p_cand_it = p_first_cand; p_cand_it < p_last_cand; p_cand_it++) { - float cand_score = P_CascadeCandidateScores[p_cand_it - p_first_cand]; - // code more or less stolen from fann_run to fast forward pass - float cand_sum = 0.0; - const uint num_connections = p_cand_it->GetConCount(); - const float * p_weights = P_Weights + p_cand_it->FirstCon; - { - // unrolled loop start - uint i = num_connections & 3; // same as modulo 4 - switch(i) { - case 3: cand_sum += p_weights[2] * p_neurons[2].Value; - case 2: cand_sum += p_weights[1] * p_neurons[1].Value; - case 1: cand_sum += p_weights[0] * p_neurons[0].Value; - case 0: break; - } - for(; i != num_connections; i += 4) { - cand_sum += - p_weights[i] * p_neurons[i].Value + - p_weights[i+1] * p_neurons[i+1].Value + - p_weights[i+2] * p_neurons[i+2].Value + - p_weights[i+3] * p_neurons[i+3].Value; - } - /*for(i = 0; i < num_connections; i++) { - cand_sum += p_weights[i] * p_neurons[i].value; - }*/ - // unrolled loop end - } - { - const float _max_sum = 150.0f/p_cand_it->ActivationSteepness; - if(cand_sum > _max_sum) - cand_sum = _max_sum; - else if(cand_sum < -_max_sum) - cand_sum = -_max_sum; - } - { - float _error_value = 0.0; - const float activation = Activation(p_cand_it->ActivationFunction, p_cand_it->ActivationSteepness, cand_sum); - // printf("%f = sigmoid(%f);\n", activation, cand_sum); - p_cand_it->Sum = cand_sum; - p_cand_it->Value = activation; - const float derived = p_cand_it->ActivationDerived(activation, cand_sum); - // The output weights is located right after the input weights in the weight 
array. - const float * p_cand_out_weights = p_weights + num_connections; - float * p_cand_out_slopes = P_TrainSlopes + p_cand_it->FirstCon + num_connections; - for(uint j = 0; j < _num_output; j++) { - const float diff = (activation * p_cand_out_weights[j]) - p_output_train_errors[j]; - p_cand_out_slopes[j] -= 2.0f * diff * activation; - _error_value += diff * p_cand_out_weights[j]; - cand_score -= (diff * diff); - } - P_CascadeCandidateScores[p_cand_it-p_first_cand] = cand_score; - _error_value *= derived; - { - float * p_cand_slopes = P_TrainSlopes + p_cand_it->FirstCon; - for(uint cidx = 0; cidx < num_connections; cidx++) - p_cand_slopes[cidx] -= _error_value * p_neurons[cidx].Value; - } - } - } -} - -//void fann_update_candidate_weights(Fann * ann, uint numData) -void Fann::UpdateCandidateWeights(uint numData) -{ - const Fann::Neuron * p_first_cand = (P_LastLayer-1)->P_LastNeuron + 1; // there is an empty neuron between the actual neurons and the candidate neuron - const Fann::Neuron * p_last_cand = p_first_cand + GetCascadeNumCandidates() - 1; - switch(TrainingAlgorithm) { - case FANN_TRAIN_RPROP: - UpdateWeightsIrpropm(p_first_cand->FirstCon, p_last_cand->LastCon + NumOutput); - break; - case FANN_TRAIN_SARPROP: - // @todo increase epoch? - UpdateWeightsSarprop(SarpropEpoch, p_first_cand->FirstCon, p_last_cand->LastCon + NumOutput); - break; - case FANN_TRAIN_QUICKPROP: - UpdateWeightsQuickprop(numData, p_first_cand->FirstCon, p_last_cand->LastCon + NumOutput); - break; - case FANN_TRAIN_BATCH: - case FANN_TRAIN_INCREMENTAL: - fann_error(&Err, SLERR_FANN_CANT_USE_TRAIN_ALG); - break; - } -} - -//float fann_train_candidates_epoch(Fann * ann, Fann::TrainData * pData) -float Fann::TrainCandidatesEpoch(const Fann::TrainData * pData) -{ - float _best_score = 0; - uint i; - const uint _num_cand = GetCascadeNumCandidates(); - float * p_output_train_errors = P_TrainErrors + (TotalNeurons - NumOutput); - Fann::Neuron * p_output_neurons = (P_LastLayer-1)->P_FirstNeuron; - for(i = 0; i < _num_cand; i++) { - // The ann->MSE_value is actually the sum squared error - P_CascadeCandidateScores[i] = MSE_value; - } - for(i = 0; i < pData->GetCount(); i++) { - const DataVector * p_inp_vect = pData->InpL.at(i); - const DataVector * p_out_vect = pData->OutL.at(i); - Run((const float *)p_inp_vect->dataPtr()); - for(uint j = 0; j < NumOutput; j++) { - /* @todo only debug, but the error is in opposite direction, this might be usefull info */ - /* if(output_train_errors[j] != (ann->output[j] - data->output[i][j])){ - * printf("difference in calculated error at %f != %f; %f = %f - %f;\n", output_train_errors[j], - *(ann->output[j] - data->output[i][j]), output_train_errors[j], ann->output[j], - *data->output[i][j]); - * } */ - - /* - * output_train_errors[j] = (data->output[i][j] - ann->output[j])/2; - * output_train_errors[j] = ann->output[j] - data->output[i][j]; - */ - p_output_train_errors[j] = (p_out_vect->at(j) - P_Output[j]); - switch(p_output_neurons[j].ActivationFunction) { - case FANN_LINEAR_PIECE_SYMMETRIC: - case FANN_SIGMOID_SYMMETRIC: - case FANN_SIGMOID_SYMMETRIC_STEPWISE: - case FANN_THRESHOLD_SYMMETRIC: - case FANN_ELLIOT_SYMMETRIC: - case FANN_GAUSSIAN_SYMMETRIC: - case FANN_SIN_SYMMETRIC: - case FANN_COS_SYMMETRIC: - p_output_train_errors[j] /= 2.0; - break; - case FANN_LINEAR: - case FANN_THRESHOLD: - case FANN_SIGMOID: - case FANN_SIGMOID_STEPWISE: - case FANN_GAUSSIAN: - case FANN_GAUSSIAN_STEPWISE: - case FANN_ELLIOT: - case FANN_LINEAR_PIECE: - case FANN_SIN: - case FANN_COS: - break; 
- } - } - UpdateCandidateSlopes(); - } - UpdateCandidateWeights(pData->GetCount()); - { - // find the best candidate score - uint _best_candidate = 0; - _best_score = P_CascadeCandidateScores[_best_candidate]; - for(i = 1; i < _num_cand; i++) { - // Fann::Neuron *cand = ann->P_FirstLayer->P_FirstNeuron + ann->TotalNeurons + 1 + i; - // printf("candidate[%d] = activation: %s, steepness: %f, score: %f\n", - // i, FANN_ACTIVATIONFUNC_NAMES[cand->activation_function], - // cand->ActivationSteepness, ann->P_CascadeCandidateScores[i]); - if(P_CascadeCandidateScores[i] > _best_score) { - _best_candidate = i; - _best_score = P_CascadeCandidateScores[_best_candidate]; - } - } - CascadeBestCandidate = TotalNeurons + _best_candidate + 1; - } - return _best_score; -} -// -// add a layer at the position pointed to by *layer -// -Fann::Layer * Fann::AddLayer(Fann::Layer * pLayer) -{ - Fann::Layer * p_result = 0; - const int _layer_pos = (int)(pLayer - P_FirstLayer); - const int _num_layers = (int)(GetNumLayers() + 1); - // allocate the layer - Fann::Layer * p_layers = (Fann::Layer*)SAlloc::R(P_FirstLayer, _num_layers * sizeof(Fann::Layer)); - THROW(p_layers); - // copy layers so that the free space is at the right location - for(int i = _num_layers - 1; i >= _layer_pos; i--) { - p_layers[i] = p_layers[i-1]; - } - // the newly allocated layer is empty - p_layers[_layer_pos].P_FirstNeuron = p_layers[_layer_pos+1].P_FirstNeuron; - p_layers[_layer_pos].P_LastNeuron = p_layers[_layer_pos+1].P_FirstNeuron; - // Set the ann pointers correctly - P_FirstLayer = p_layers; - P_LastLayer = p_layers + _num_layers; - p_result = (p_layers + _layer_pos); - CATCH - p_result = 0; - ENDCATCH - return p_result; -} - -//void fann_set_shortcut_connections(Fann * ann) -void Fann::SetShortcutConnections() -{ - uint _num_connections = 0; - Fann::Neuron ** pp_neuron_pointers = PP_Connections; - Fann::Neuron * p_neurons = P_FirstLayer->P_FirstNeuron; - for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != P_LastLayer; p_layer_it++) { - for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_layer_it->P_LastNeuron; p_neuron_it++) { - pp_neuron_pointers += _num_connections; - _num_connections = p_neuron_it->GetConCount(); - for(uint i = 0; i != _num_connections; i++) { - pp_neuron_pointers[i] = p_neurons + i; - } - } - } -} - -//void fann_add_candidate_neuron(Fann * ann, Fann::Layer * pLayer) -void Fann::AddCandidateNeuron(Fann::Layer * pLayer) -{ - const uint num_connections_in = (uint)(pLayer->P_FirstNeuron - P_FirstLayer->P_FirstNeuron); - const uint num_connections_out = (uint)((P_LastLayer-1)->P_LastNeuron - (pLayer+1)->P_FirstNeuron); - uint num_connections_move = num_connections_out + num_connections_in; - uint candidate_con; - uint candidate_output_weight; - Fann::Neuron * p_neuron_place; - Fann::Neuron * p_candidate; - // We know that there is enough room for the new neuron - // (the candidates are in the same arrays), so move - // the last neurons to make room for this neuron. 
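	// The shuffle below, in one picture (a hypothetical small net):
	//   before: [ ...earlier layers... | out0 out1 | gap | candidates... ]
	//   after:  [ ...earlier layers... | new | out0 out1 | ... ]
	// Each output neuron is copied one slot to the right, its weight window is moved
	// up and widened by one, and the extra slot holds its connection from the freshly
	// installed neuron, weighted with the candidate's trained output weight times
	// CascadeWeightMultiplier.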
- // - // first move the pointers to neurons in the layer structs - { - for(Fann::Layer * p_layer_it = P_LastLayer-1; p_layer_it != pLayer; p_layer_it--) { - p_layer_it->P_FirstNeuron++; - p_layer_it->P_LastNeuron++; - } - } - // also move the last neuron in the layer that needs the neuron added - pLayer->P_LastNeuron++; - // this is the place that should hold the new neuron - p_neuron_place = pLayer->P_LastNeuron - 1; - p_candidate = P_FirstLayer->P_FirstNeuron + CascadeBestCandidate; - // the output weights for the candidates are located after the input weights - candidate_output_weight = p_candidate->LastCon; - { - // move the actual output neurons and the indexes to the connection arrays - for(Fann::Neuron * p_neuron_it = (P_LastLayer-1)->P_LastNeuron - 1; p_neuron_it != p_neuron_place; p_neuron_it--) { - *p_neuron_it = *(p_neuron_it-1); - // move the weights - for(int i = p_neuron_it->LastCon-1; i >= (int)p_neuron_it->FirstCon; i--) { - P_Weights[i+num_connections_move-1] = P_Weights[i]; - } - // move the indexes to weights - p_neuron_it->LastCon += num_connections_move; - num_connections_move--; - p_neuron_it->FirstCon += num_connections_move; - // set the new weight to the newly allocated neuron - P_Weights[p_neuron_it->LastCon-1] = (P_Weights[candidate_output_weight]) * CascadeWeightMultiplier; - candidate_output_weight++; - } - } - // Now inititalize the actual neuron - p_neuron_place->Value = 0; - p_neuron_place->Sum = 0; - p_neuron_place->ActivationFunction = p_candidate->ActivationFunction; - p_neuron_place->ActivationSteepness = p_candidate->ActivationSteepness; - p_neuron_place->LastCon = (p_neuron_place + 1)->FirstCon; - p_neuron_place->FirstCon = p_neuron_place->LastCon - num_connections_in; - candidate_con = p_candidate->FirstCon; - // initialize the input weights at random - { - for(uint i = 0; i < num_connections_in; i++) { - P_Weights[i+p_neuron_place->FirstCon] = P_Weights[i+candidate_con]; - } - } - // Change some of main variables - TotalNeurons++; - TotalConnections += num_connections_in + num_connections_out; -} - -/*FANN_EXTERNAL uint FANN_API fann_get_cascade_num_candidates(const Fann * pAnn) -{ - return pAnn->GetCascadeNumCandidates(); -}*/ - -FANN_EXTERNAL void FANN_API fann_set_cascade_activation_functions(Fann * ann, const Fann::ActivationFunc * cascade_activation_functions, uint cascade_activation_functions_count) -{ - /* - if(ann->cascade_activation_functions_count != cascade_activation_functions_count) { - ann->cascade_activation_functions_count = cascade_activation_functions_count; - // reallocate mem - ann->P_CascadeActivationFunctions = (fann_activationfunc_enum*)SAlloc::R(ann->P_CascadeActivationFunctions, - ann->cascade_activation_functions_count * sizeof(fann_activationfunc_enum)); - if(ann->P_CascadeActivationFunctions == NULL) { - fann_error(&ann->Err, SLERR_FANN_CANT_ALLOCATE_MEM); - return; - } - } - memmove(ann->P_CascadeActivationFunctions, cascade_activation_functions, ann->cascade_activation_functions_count * sizeof(fann_activationfunc_enum)); - */ - ann->SetCascadeActivationFunctions(cascade_activation_functions, cascade_activation_functions_count); -} - -void Fann::SetCascadeActivationFunctions(const Fann::ActivationFunc * pCascadeActivationFunctions, uint cascadeActivationFunctionsCount) -{ - /* - if(ann->cascade_activation_functions_count != cascade_activation_functions_count) { - ann->cascade_activation_functions_count = cascade_activation_functions_count; - // reallocate mem - ann->P_CascadeActivationFunctions = 
(fann_activationfunc_enum*)SAlloc::R(ann->P_CascadeActivationFunctions, - ann->cascade_activation_functions_count * sizeof(fann_activationfunc_enum)); - if(ann->P_CascadeActivationFunctions == NULL) { - fann_error(&ann->Err, SLERR_FANN_CANT_ALLOCATE_MEM); - return; - } - } - memmove(ann->P_CascadeActivationFunctions, cascade_activation_functions, ann->cascade_activation_functions_count * sizeof(fann_activationfunc_enum)); - */ - CascadeActivationFuncList.clear(); - for(uint i = 0; i < cascadeActivationFunctionsCount; i++) { - CascadeActivationFuncList.add(pCascadeActivationFunctions[i]); - } -} - -//FANN_GET(uint, cascade_activation_steepnesses_count) -//FANN_GET(float *, cascade_activation_steepnesses) - -/*FANN_EXTERNAL void FANN_API fann_set_cascade_activation_steepnesses(Fann * ann, float * cascade_activation_steepnesses, uint cascade_activation_steepnesses_count) -{ - if(ann->cascade_activation_steepnesses_count != cascade_activation_steepnesses_count) { - ann->cascade_activation_steepnesses_count = cascade_activation_steepnesses_count; - // reallocate mem - ann->CascadeActivationSteepnesses = (float*)SAlloc::R(ann->CascadeActivationSteepnesses, ann->cascade_activation_steepnesses_count * sizeof(float)); - if(ann->CascadeActivationSteepnesses == NULL) { - fann_error(&ann->Err, SLERR_FANN_CANT_ALLOCATE_MEM); - return; - } - } - memmove(ann->CascadeActivationSteepnesses, cascade_activation_steepnesses, ann->cascade_activation_steepnesses_count * sizeof(float)); -}*/ - -void Fann::SetCascadeActivationSteepnesses(const FloatArray & rList) -{ - CascadeActivationSteepnessesList = rList; -} -// -// FANN_ERROR -// -// -// resets the last error number -// -FANN_EXTERNAL void FANN_API fann_reset_errno(FannError * errdat) -{ - errdat->errno_f = SLERR_SUCCESS; -} -// -// resets the last errstr -// -FANN_EXTERNAL void FANN_API fann_reset_errstr(FannError * pErr) -{ - if(pErr) - pErr->Msg = 0; -} -// -// returns the last error number - // -FANN_EXTERNAL int FANN_API fann_get_errno(const FannError * pErr) -{ - return pErr->errno_f; -} -// -// returns the last errstr -// -FANN_EXTERNAL const char * FANN_API fann_get_errstr(const FannError * pErr) -{ - return pErr ? pErr->Msg.cptr() : 0; -} -// -// change where errors are logged to -// -/*FANN_EXTERNAL void FANN_API fann_set_error_log(FannError * errdat, FILE * log_file) -{ - if(errdat == NULL) - fann_default_error_log = log_file; - else - errdat->error_log = log_file; -}*/ - -/* prints the last error to stderr - */ -FANN_EXTERNAL void FANN_API fann_print_error(FannError * errdat) -{ - if(errdat->errno_f && errdat->Msg.NotEmpty()) { - fprintf(stderr, "FANN Error %d: %s", errdat->errno_f, errdat->Msg.cptr()); - } -} -// -// INTERNAL FUNCTION -// Populate the error information -// -void fann_error(FannError * errdat, int errno_f, ...) 
-{
-	va_list ap;
-	size_t errstr_max = FANN_ERRSTR_MAX + PATH_MAX - 1;
-	char   errstr[FANN_ERRSTR_MAX + PATH_MAX];
-	FILE * error_log = 0; // fann_default_error_log;
-	if(errdat)
-		errdat->errno_f = errno_f;
-	va_start(ap, errno_f);
-	switch(errno_f) {
-		case SLERR_SUCCESS:
-			return;
-		case SLERR_FANN_CANT_OPEN_CONFIG_R:
-			vsnprintf(errstr, errstr_max, "Unable to open configuration file \"%s\" for reading.\n", ap);
-			break;
-		case SLERR_FANN_CANT_OPEN_CONFIG_W:
-			vsnprintf(errstr, errstr_max, "Unable to open configuration file \"%s\" for writing.\n", ap);
-			break;
-		case SLERR_FANN_WRONG_CONFIG_VERSION:
-			vsnprintf(errstr, errstr_max, "Wrong version of configuration file, aborting read of configuration file \"%s\".\n", ap);
-			break;
-		case SLERR_FANN_CANT_READ_CONFIG:
-			vsnprintf(errstr, errstr_max, "Error reading \"%s\" from configuration file \"%s\".\n", ap);
-			break;
-		case SLERR_FANN_CANT_READ_NEURON:
-			vsnprintf(errstr, errstr_max, "Error reading neuron info from configuration file \"%s\".\n", ap);
-			break;
-		case SLERR_FANN_CANT_READ_CONNECTIONS:
-			vsnprintf(errstr, errstr_max, "Error reading connections from configuration file \"%s\".\n", ap);
-			break;
-		case SLERR_FANN_WRONG_NUM_CONNECTIONS:
-			vsnprintf(errstr, errstr_max, "ERROR connections_so_far=%d, total_connections=%d\n", ap);
-			break;
-		case SLERR_FANN_CANT_OPEN_TD_W:
-			vsnprintf(errstr, errstr_max, "Unable to open train data file \"%s\" for writing.\n", ap);
-			break;
-		case SLERR_FANN_CANT_OPEN_TD_R:
-			vsnprintf(errstr, errstr_max, "Unable to open train data file \"%s\" for reading.\n", ap);
-			break;
-		case SLERR_FANN_CANT_READ_TD:
-			vsnprintf(errstr, errstr_max, "Error reading info from train data file \"%s\", line: %d.\n", ap);
-			break;
-		case SLERR_NOMEM:
-		case SLERR_FANN_CANT_ALLOCATE_MEM:
-			STRNSCPY(errstr, "Unable to allocate memory.\n");
-			break;
-		case SLERR_FANN_CANT_TRAIN_ACTIVATION:
-			STRNSCPY(errstr, "Unable to train with the selected activation function.\n");
-			break;
-		case SLERR_FANN_CANT_USE_ACTIVATION:
-			STRNSCPY(errstr, "Unable to use the selected activation function.\n");
-			break;
-		case SLERR_FANN_TRAIN_DATA_MISMATCH:
-			STRNSCPY(errstr, "Training data must be of equivalent structure.\n");
-			break;
-		case SLERR_FANN_CANT_USE_TRAIN_ALG:
-			STRNSCPY(errstr, "Unable to use the selected training algorithm.\n");
-			break;
-		case SLERR_FANN_TRAIN_DATA_SUBSET:
-			vsnprintf(errstr, errstr_max, "Subset from %d of length %d not valid in training set of length %d.\n", ap);
-			break;
-		case SLERR_FANN_INDEX_OUT_OF_BOUND:
-			vsnprintf(errstr, errstr_max, "Index %d is out of bounds.\n", ap);
-			break;
-		case SLERR_FANN_SCALE_NOT_PRESENT:
-			strcpy(errstr, "Scaling parameters not present.\n");
-			break;
-		case SLERR_FANN_INPUT_NO_MATCH:
-			vsnprintf(errstr, errstr_max, "The number of input neurons in the ann (%d) and data (%d) don't match\n", ap);
-			break;
-		case SLERR_FANN_OUTPUT_NO_MATCH:
-			vsnprintf(errstr, errstr_max, "The number of output neurons in the ann (%d) and data (%d) don't match\n", ap);
-			break;
-		case SLERR_FANN_WRONG_PARAMETERS_FOR_CREATE:
-			strcpy(errstr, "The parameters for create_standard are wrong, either too few parameters provided or a negative/very high value provided.\n");
-			break;
-	}
-	va_end(ap);
-	if(errdat) {
-		errdat->Msg = errstr;
-		//error_log = errdat->error_log;
-	}
-	/*if(error_log == (FILE*)-1) { // This is the default behavior and will give stderr
-		fprintf(stderr, "FANN Error %d: %s", errno_f, errstr);
-	}
-	else*/
-	if(error_log && ((int)error_log) != -1)
-		fprintf(error_log, "FANN Error %d:
%s", errno_f, errstr); -} -// -// INTERNAL FUNCTION -// Initialize an error data strcuture -// -/*void fann_init_error_data(FannError * errdat) -{ - errdat->Msg = 0; - errdat->errno_f = SLERR_SUCCESS; - //errdat->error_log = fann_default_error_log; -}*/ -// -// FANN_IO -// -// -// Create a network from a configuration file. -// -/*FANN_EXTERNAL Fann * FANN_API fann_create_from_file(const char * configuration_file) -{ - Fann * ann = 0; - FILE * conf = fopen(configuration_file, "r"); - if(!conf) { - fann_error(NULL, SLERR_FANN_CANT_OPEN_CONFIG_R, configuration_file); - } - else { - ann = fann_create_from_fd(conf, configuration_file); - fclose(conf); - } - return ann; -}*/ -// -// Save the network. -// -FANN_EXTERNAL int FANN_API fann_save(Fann * ann, const char * configuration_file) -{ - return fann_save_internal(ann, configuration_file, 0); -} -// -// Save the network as fixed point data. -// -FANN_EXTERNAL int FANN_API fann_save_to_fixed(Fann * ann, const char * configuration_file) -{ - return fann_save_internal(ann, configuration_file, 1); -} -// -// INTERNAL FUNCTION -// Used to save the network to a file. -// -int fann_save_internal(Fann * ann, const char * configuration_file, uint save_as_fixed) -{ - FILE * conf = fopen(configuration_file, "w+"); - if(!conf) { - fann_error(&ann->Err, SLERR_FANN_CANT_OPEN_CONFIG_W, configuration_file); - return -1; - } - else { - int retval = fann_save_internal_fd(ann, conf, configuration_file, save_as_fixed); - fclose(conf); - return retval; - } -} - -void Fann::ScaleSave(FILE * pF, uint c, const float * pList, const char * pField) const -{ - /* - #define SCALE_SAVE(what, where) fprintf(conf, # what "_" # where "="); \ - for(i = 0; i < ann->num_ ## where ## put; i++) \ - fprintf(conf, "%f ", ann->what ## _ ## where[ i ]); \ - fprintf(conf, "\n"); - */ - fprintf(pF, "%s=", pField); - for(uint i = 0; i < c; i++) - fprintf(pF, "%f ", pList[i]); - fprintf(pF, "\n"); -} - -int Fann::ScaleLoad(FILE * pF, uint c, float * pList, const char * pField) -{ - /* -#define SCALE_LOAD(what, where) \ - fann_skip(# what "_" # where "="); \ - for(i = 0; i < ann->num_ ## where ## put; i++) { \ - if(fscanf(conf, "%f ", (float*)&ann->what ## _ ## where[ i ]) != 1) { \ - fann_error(&ann->Err, SLERR_FANN_CANT_READ_CONFIG, # what "_" # where, configuration_file); \ - fann_destroy(ann); \ - return NULL; \ - } \ - } -#define fann_skip(name) { \ - if(fscanf(conf, name) != 0) { \ - fann_error(NULL, SLERR_FANN_CANT_READ_CONFIG, name, configuration_file); \ - fann_destroy(ann); \ - return NULL; \ - }} - */ - int ok = 1; - SString temp_buf; - char _buf[256]; - (temp_buf = pField).CatChar('='); - STRNSCPY(_buf, temp_buf); - THROW(fscanf(pF, _buf) == 0); - for(uint i = 0; i < c; i++) { - THROW(fscanf(pF, "%f ", &pList[i]) == 1); - } - CATCH - fann_error(&Err, SLERR_FANN_CANT_READ_CONFIG, pField, ""/*configuration_file*/); - ok = 0; - ENDCATCH - return ok; -} - -Fann::StorageHeader::StorageHeader() -{ - THISZERO(); - Sign[0] = 'S'; - Sign[1] = 'F'; - Sign[2] = 'N'; - Sign[3] = 'N'; - Ver = 1; - if(sizeof(float) == 4) - Type = 1; - else if(sizeof(float) == 8) - Type = 2; - else - Type = 0; // error -} - -int Fann::Serialize(int dir, SBuffer & rBuf, SSerializeContext * pSCtx) -{ - int ok = 1; - /*struct StorageHeader { - StorageHeader(); - - uint8 Sign[4]; // - uint32 Crc32; - uint32 Ver; // - uint32 Type; // 1 - float, 2 - double, 3 - fixed (int32) - uint8 Reserve[48]; - };*/ - StorageHeader pattern; - StorageHeader hdr; - if(dir < 0) { - THROW(pSCtx->SerializeBlock(dir, 
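For reference, ScaleSave/ScaleLoad above exchange each scale vector as a single text line of the form name=v0 v1 v2 .... A minimal standalone sketch of that round trip (plain stdio; the field name scale_mean_in and the temp file name are hypothetical):

    #include <cstdio>
    int main()
    {
        const float out[3] = {0.5f, 1.25f, -2.0f};
        FILE * f = fopen("scale.tmp", "w+");
        fprintf(f, "scale_mean_in=");           // field name, '=' appended as in ScaleSave
        for(int i = 0; i < 3; i++)
            fprintf(f, "%f ", out[i]);
        fprintf(f, "\n");
        rewind(f);
        float in[3];
        // a format with no conversions returns 0 after consuming the literal prefix,
        // mirroring ScaleLoad's THROW(fscanf(pF, _buf) == 0) check
        if(fscanf(f, "scale_mean_in=") == 0) {
            for(int i = 0; i < 3; i++)
                fscanf(f, "%f ", &in[i]);
        }
        fclose(f);
        return 0;
    }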
-int Fann::Serialize(int dir, SBuffer & rBuf, SSerializeContext * pSCtx)
-{
-	int ok = 1;
-	/*struct StorageHeader {
-		StorageHeader();
-
-		uint8  Sign[4]; //
-		uint32 Crc32;
-		uint32 Ver; //
-		uint32 Type; // 1 - float, 2 - double, 3 - fixed (int32)
-		uint8  Reserve[48];
-	};*/
-	StorageHeader pattern;
-	StorageHeader hdr;
-	if(dir < 0) {
-		THROW(pSCtx->SerializeBlock(dir, sizeof(StorageHeader), &hdr, rBuf, 0));
-		THROW(memcmp(pattern.Sign, hdr.Sign, sizeof(pattern.Sign)) == 0);
-		THROW(hdr.Ver >= 1);
-		THROW(hdr.Type == pattern.Type);
-	}
-	else if(dir > 0) {
-		THROW(pSCtx->SerializeBlock(dir, sizeof(StorageHeader), &hdr, rBuf, 0));
-	}
-	THROW(pSCtx->Serialize(dir, NetworkType, rBuf));
-	THROW(pSCtx->Serialize(dir, &Layers, rBuf));
-	THROW(pSCtx->Serialize(dir, NumInput, rBuf));
-	THROW(pSCtx->Serialize(dir, NumOutput, rBuf));
-	THROW(pSCtx->Serialize(dir, LearningRate, rBuf));
-	THROW(pSCtx->Serialize(dir, LearningMomentum, rBuf));
-	THROW(pSCtx->Serialize(dir, ConnectionRate, rBuf));
-	THROW(pSCtx->Serialize(dir, TotalNeurons, rBuf));
-	THROW(pSCtx->Serialize(dir, TrainingAlgorithm, rBuf));
-	/*
-	THROW(pSCtx->Serialize(dir, DecimalPoint, rBuf));
-	THROW(pSCtx->Serialize(dir, Multiplier, rBuf));
-	THROW(pSCtx->SerializeBlock(dir, sizeof(SigmoidResults), SigmoidResults, rBuf, 0));
-	THROW(pSCtx->SerializeBlock(dir, sizeof(SigmoidValues), SigmoidValues, rBuf, 0));
-	THROW(pSCtx->SerializeBlock(dir, sizeof(SigmoidSymmetricResults), SigmoidSymmetricResults, rBuf, 0));
-	THROW(pSCtx->SerializeBlock(dir, sizeof(SigmoidSymmetricValues), SigmoidSymmetricValues, rBuf, 0));
-	*/
-	THROW(pSCtx->Serialize(dir, TotalConnections, rBuf));
-	THROW(pSCtx->Serialize(dir, num_MSE, rBuf));
-	THROW(pSCtx->Serialize(dir, MSE_value, rBuf));
-	THROW(pSCtx->Serialize(dir, NumBitFail, rBuf));
-	THROW(pSCtx->Serialize(dir, BitFailLimit, rBuf));
-	THROW(pSCtx->Serialize(dir, TrainErrorFunction, rBuf));
-	THROW(pSCtx->Serialize(dir, TrainStopFunction, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeOutputChangeFraction, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeOutputStagnationEpochs, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeCandidateChangeFraction, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeCandidateStagnationEpochs, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeBestCandidate, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeCandidateLimit, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeWeightMultiplier, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeMaxOutEpochs, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeMaxCandEpochs, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeMinOutEpochs, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeMinCandEpochs, rBuf));
-	THROW(pSCtx->Serialize(dir, &CascadeActivationFuncList, rBuf));
-	THROW(pSCtx->Serialize(dir, &CascadeActivationSteepnessesList, rBuf));
-	THROW(pSCtx->Serialize(dir, CascadeNumCandidateGroups, rBuf));
-	THROW(pSCtx->Serialize(dir, TotalNeuronsAllocated, rBuf));
-	THROW(pSCtx->Serialize(dir, TotalConnectionsAllocated, rBuf));
-	THROW(pSCtx->Serialize(dir, QuickpropDecay, rBuf));
-	THROW(pSCtx->Serialize(dir, QuickpropMu, rBuf));
-	THROW(pSCtx->Serialize(dir, RpropIncreaseFactor, rBuf));
-	THROW(pSCtx->Serialize(dir, RpropDecreaseFactor, rBuf));
-	THROW(pSCtx->Serialize(dir, RpropDeltaMin, rBuf));
-	THROW(pSCtx->Serialize(dir, RpropDeltaMax, rBuf));
-	THROW(pSCtx->Serialize(dir, RpropDeltaZero, rBuf));
-	THROW(pSCtx->Serialize(dir, SarpropWeightDecayShift, rBuf));
-	THROW(pSCtx->Serialize(dir, SarpropStepErrorThresholdFactor, rBuf));
-	THROW(pSCtx->Serialize(dir, SarpropStepErrorShift, rBuf));
-	THROW(pSCtx->Serialize(dir, SarpropTemperature, rBuf));
-	THROW(pSCtx->Serialize(dir, SarpropEpoch, rBuf));
-	{
-		LongArray layer_sizes;
-		if(dir > 0) {
-			for(Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) {
-				// the number of neurons in the layers (in the last layer, there is always one too many neurons, because of an unused bias)
-				THROW(layer_sizes.add((long)p_layer_it->GetCount()));
-			}
-			THROW(pSCtx->Serialize(dir, &layer_sizes, rBuf));
-		}
-		else if(dir < 0) {
-			THROW(pSCtx->Serialize(dir, &layer_sizes, rBuf));
-			THROW(layer_sizes.getCount() == Layers.getCount());
-			THROW(AllocateLayers());
-			{
-				uint layer_no = 0;
-				uint _local_total_neurons = 0;
-				for(Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) {
-					const long layer_size = layer_sizes.get(layer_no);
-					p_layer_it->_Dim = Layers.get(layer_no);
-					p_layer_it->P_FirstNeuron = NULL;
-					p_layer_it->P_LastNeuron = p_layer_it->P_FirstNeuron + layer_size;
-					_local_total_neurons += layer_size;
-					layer_no++;
-				}
-				THROW(TotalNeurons == _local_total_neurons);
-			}
-		}
-	}
-	THROW(ScaleIn.Serialize(dir, NumInput, rBuf, pSCtx));
-	THROW(ScaleOut.Serialize(dir, NumOutput, rBuf, pSCtx));
-	{
-		if(dir > 0) {
-			for(Layer * p_layer_it = P_FirstLayer; p_layer_it != P_LastLayer; p_layer_it++) {
-				for(Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_layer_it->P_LastNeuron; p_neuron_it++) {
-					uint _con_count = p_neuron_it->GetConCount();
-					THROW(pSCtx->Serialize(dir, _con_count, rBuf));
-					THROW(pSCtx->Serialize(dir, p_neuron_it->Sum, rBuf));
-					THROW(pSCtx->Serialize(dir, p_neuron_it->Value, rBuf));
-					THROW(pSCtx->Serialize(dir, p_neuron_it->ActivationFunction, rBuf));
-					THROW(pSCtx->Serialize(dir, p_neuron_it->ActivationSteepness, rBuf));
-				}
-			}
-			//
-			for(uint i = 0; i < TotalConnections; i++) {
-				// save the connection "(source weight) "
-				uint32 nc = (uint32)(PP_Connections[i] - P_FirstLayer->P_FirstNeuron);
-				THROW(pSCtx->Serialize(dir, nc, rBuf));
-				THROW(pSCtx->Serialize(dir, P_Weights[i], rBuf));
-			}
-		}
-		else if(dir < 0) {
-			uint _local_total_conn = 0;
-			THROW(AllocateNeurons());
-			for(Neuron * p_neuron_it = P_FirstLayer->P_FirstNeuron; p_neuron_it != (P_LastLayer-1)->P_LastNeuron; p_neuron_it++) {
-				uint _con_count = 0;
-				THROW(pSCtx->Serialize(dir, _con_count, rBuf));
-				THROW(pSCtx->Serialize(dir, p_neuron_it->Sum, rBuf));
-				THROW(pSCtx->Serialize(dir, p_neuron_it->Value, rBuf));
-				THROW(pSCtx->Serialize(dir, p_neuron_it->ActivationFunction, rBuf));
-				THROW(pSCtx->Serialize(dir, p_neuron_it->ActivationSteepness, rBuf));
-				p_neuron_it->FirstCon = _local_total_conn;
-				_local_total_conn += _con_count;
-				p_neuron_it->LastCon = _local_total_conn;
-			}
-			THROW(TotalConnections == _local_total_conn);
-			//
-			THROW(AllocateConnections());
-			for(uint i = 0; i < TotalConnections; i++) {
-				uint32 nc = 0;
-				THROW(pSCtx->Serialize(dir, nc, rBuf));
-				THROW(pSCtx->Serialize(dir, P_Weights[i], rBuf));
-				PP_Connections[i] = P_FirstLayer->P_FirstNeuron + nc;
-			}
-		}
-	}
-	{
-		/*
-		THROW((P_TrainSlopes && rS.P_TrainSlopes) || (!P_TrainSlopes && !rS.P_TrainSlopes));
-		THROW((P_PrevSteps && rS.P_PrevSteps) || (!P_PrevSteps && !rS.P_PrevSteps));
-		THROW((P_PrevTrainSlopes && rS.P_PrevTrainSlopes) || (!P_PrevTrainSlopes && !rS.P_PrevTrainSlopes));
-		THROW((P_PrevWeightsDeltas && rS.P_PrevWeightsDeltas) || (!P_PrevWeightsDeltas && !rS.P_PrevWeightsDeltas));
-		for(uint i = 0; i < TotalConnectionsAllocated; i++) {
-			THROW(!P_TrainSlopes || P_TrainSlopes[i] == rS.P_TrainSlopes[i]);
-			THROW(!P_PrevSteps || P_PrevSteps[i] == rS.P_PrevSteps[i]);
-			THROW(!P_PrevTrainSlopes || P_PrevTrainSlopes[i] == rS.P_PrevTrainSlopes[i]);
-			THROW(!P_PrevWeightsDeltas || P_PrevWeightsDeltas[i] == rS.P_PrevWeightsDeltas[i]);
-		}
-		*/
-		uint32 _pc = 0;
-		if(dir > 0) {
-			_pc = P_TrainSlopes ? TotalConnectionsAllocated : 0;
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_TrainSlopes[0]), P_TrainSlopes, rBuf, 0));
-			}
-			//
-			_pc = P_PrevSteps ? TotalConnectionsAllocated : 0;
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_PrevSteps[0]), P_PrevSteps, rBuf, 0));
-			}
-			//
-			_pc = P_PrevTrainSlopes ? TotalConnectionsAllocated : 0;
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_PrevTrainSlopes[0]), P_PrevTrainSlopes, rBuf, 0));
-			}
-			//
-			_pc = P_PrevWeightsDeltas ? TotalConnectionsAllocated : 0;
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_PrevWeightsDeltas[0]), P_PrevWeightsDeltas, rBuf, 0));
-			}
-		}
-		else if(dir < 0) {
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				ZDELETE(P_TrainSlopes);
-				THROW(P_TrainSlopes = (float *)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_TrainSlopes[0]), P_TrainSlopes, rBuf, 0));
-			}
-			//
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				ZDELETE(P_PrevSteps);
-				THROW(P_PrevSteps = (float *)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_PrevSteps[0]), P_PrevSteps, rBuf, 0));
-			}
-			//
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				ZDELETE(P_PrevTrainSlopes);
-				THROW(P_PrevTrainSlopes = (float *)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_PrevTrainSlopes[0]), P_PrevTrainSlopes, rBuf, 0));
-			}
-			//
-			THROW(pSCtx->Serialize(dir, _pc, rBuf));
-			if(_pc) {
-				ZDELETE(P_PrevWeightsDeltas);
-				THROW(P_PrevWeightsDeltas = (float *)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-				THROW(pSCtx->SerializeBlock(dir, _pc * sizeof(P_PrevWeightsDeltas[0]), P_PrevWeightsDeltas, rBuf, 0));
-			}
-		}
-	}
-	CATCHZOK
-	return ok;
-}
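Serialize guards the stream with a four-byte signature, a version and a float-width tag before any field is touched. A simplified sketch of the same acceptance test over a raw byte buffer (the real StorageHeader also carries Crc32 and a Reserve block, omitted here; the names below are otherwise hypothetical):

    #include <cstring>
    struct SfnnHdr { char Sign[4]; unsigned Ver; unsigned Type; };
    // returns true when the buffer starts with a header this reader can accept
    static bool CheckHeader(const void * pBuf)
    {
        SfnnHdr hdr;
        memcpy(&hdr, pBuf, sizeof(hdr));
        if(memcmp(hdr.Sign, "SFNN", 4) != 0)
            return false; // wrong signature: not a serialized Fann image
        if(hdr.Ver < 1)
            return false; // unknown version
        return hdr.Type == ((sizeof(float) == 4) ? 1u : 2u); // float width must match the writer
    }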
-//
-// INTERNAL FUNCTION
-// Used to save the network to a file descriptor.
-//
-int fann_save_internal_fd(Fann * ann, FILE * conf, const char * configuration_file, uint save_as_fixed)
-{
-	Fann::Layer * layer_it;
-	int calculated_decimal_point = 0;
-	Fann::Neuron * neuron_it, * first_neuron;
-	float * weights;
-	Fann::Neuron ** connected_neurons;
-	uint i = 0;
-	// variables for use when saving floats as fixed point variables
-	uint decimal_point = 0;
-	uint fixed_multiplier = 0;
-	float max_possible_value = 0;
-	uint bits_used_for_max = 0;
-	float current_max_value = 0;
-	// save the version information
-	if(save_as_fixed)
-		fprintf(conf, FANN_FIX_VERSION "\n");
-	else
-		fprintf(conf, FANN_FLO_VERSION "\n");
-	if(save_as_fixed) {
-		// calculate the maximal possible shift value
-		for(layer_it = ann->P_FirstLayer + 1; layer_it != ann->P_LastLayer; layer_it++) {
-			for(neuron_it = layer_it->P_FirstNeuron; neuron_it != layer_it->P_LastNeuron; neuron_it++) {
-				// look at all connections to each neuron, and see how high a value we can get
-				current_max_value = 0;
-				for(i = neuron_it->FirstCon; i != neuron_it->LastCon; i++) {
-					current_max_value += fabsf(ann->P_Weights[i]);
-				}
-				if(current_max_value > max_possible_value) {
-					max_possible_value = current_max_value;
-				}
-			}
-		}
-		for(bits_used_for_max = 0; max_possible_value >= 1; bits_used_for_max++) {
-			max_possible_value /= 2.0;
-		}
-		//
-		// The maximum number of bits we shift the fix point, is the number
-		// of bits in an integer, minus one for the sign, one for the minus
-		// in stepwise, and minus the bits used for the maximum.
-		// This is divided by two, to allow multiplication of two fixed point numbers.
-		//
-		calculated_decimal_point = (sizeof(int) * 8 - 2 - bits_used_for_max) / 2;
-		decimal_point = (calculated_decimal_point < 0) ? 0 : calculated_decimal_point;
-		fixed_multiplier = 1 << decimal_point;
-		fprintf(conf, "decimal_point=%u\n", decimal_point); // save the decimal_point on a separate line
-	}
-	// Save network parameters
-	fprintf(conf, "num_layers=%d\n", (int)ann->GetNumLayers());
-	fprintf(conf, "learning_rate=%f\n", ann->LearningRate);
-	fprintf(conf, "connection_rate=%f\n", ann->ConnectionRate);
-	fprintf(conf, "network_type=%u\n", ann->NetworkType);
-	fprintf(conf, "learning_momentum=%f\n", ann->LearningMomentum);
-	fprintf(conf, "training_algorithm=%u\n", ann->TrainingAlgorithm);
-	fprintf(conf, "train_error_function=%u\n", ann->TrainErrorFunction);
-	fprintf(conf, "train_stop_function=%u\n", ann->TrainStopFunction);
-	fprintf(conf, "cascade_output_change_fraction=%f\n", ann->CascadeOutputChangeFraction);
-	fprintf(conf, "quickprop_decay=%f\n", ann->QuickpropDecay);
-	fprintf(conf, "quickprop_mu=%f\n", ann->QuickpropMu);
-	fprintf(conf, "rprop_increase_factor=%f\n", ann->RpropIncreaseFactor);
-	fprintf(conf, "rprop_decrease_factor=%f\n", ann->RpropDecreaseFactor);
-	fprintf(conf, "rprop_delta_min=%f\n", ann->RpropDeltaMin);
-	fprintf(conf, "rprop_delta_max=%f\n", ann->RpropDeltaMax);
-	fprintf(conf, "rprop_delta_zero=%f\n", ann->RpropDeltaZero);
-	fprintf(conf, "cascade_output_stagnation_epochs=%u\n", ann->CascadeOutputStagnationEpochs);
-	fprintf(conf, "cascade_candidate_change_fraction=%f\n", ann->CascadeCandidateChangeFraction);
-	fprintf(conf, "cascade_candidate_stagnation_epochs=%u\n", ann->CascadeCandidateStagnationEpochs);
-	fprintf(conf, "cascade_max_out_epochs=%u\n", ann->CascadeMaxOutEpochs);
-	fprintf(conf, "cascade_min_out_epochs=%u\n", ann->CascadeMinOutEpochs);
-	fprintf(conf, "cascade_max_cand_epochs=%u\n", ann->CascadeMaxCandEpochs);
-	fprintf(conf, "cascade_min_cand_epochs=%u\n", ann->CascadeMinCandEpochs);
-	fprintf(conf, "cascade_num_candidate_groups=%u\n", ann->CascadeNumCandidateGroups);
-	if(save_as_fixed) {
-		fprintf(conf, "bit_fail_limit=%u\n", (int)floor((ann->BitFailLimit * fixed_multiplier) + 0.5));
-		fprintf(conf, "cascade_candidate_limit=%u\n", (int)floor((ann->CascadeCandidateLimit * fixed_multiplier) + 0.5));
-		fprintf(conf, "cascade_weight_multiplier=%u\n", (int)floor((ann->CascadeWeightMultiplier * fixed_multiplier) + 0.5));
-	}
-	else {
-		fprintf(conf, "bit_fail_limit=\"%.20e \"\n", ann->BitFailLimit);
-		fprintf(conf, "cascade_candidate_limit=\"%.20e\"\n", ann->CascadeCandidateLimit);
-		fprintf(conf, "cascade_weight_multiplier=\"%.20e\"\n", ann->CascadeWeightMultiplier);
-	}
-	fprintf(conf, "cascade_activation_functions_count=%u\n", /*ann->cascade_activation_functions_count*/ann->CascadeActivationFuncList.getCount());
-	fprintf(conf, "cascade_activation_functions=");
-	for(i = 0; i < ann->CascadeActivationFuncList.getCount(); i++)
-		fprintf(conf, "%u ", /*ann->P_CascadeActivationFunctions[i]*/ann->CascadeActivationFuncList.get(i));
-	fprintf(conf, "\n");
-	fprintf(conf, "cascade_activation_steepnesses_count=%u\n", /*ann->cascade_activation_steepnesses_count*/ann->CascadeActivationSteepnessesList.getCount());
-	fprintf(conf, "cascade_activation_steepnesses=");
-	for(i = 0; i < ann->CascadeActivationSteepnessesList.getCount(); i++) {
-/*
-		if(save_as_fixed)
-			fprintf(conf, "%u ", (int)floor((ann->CascadeActivationSteepnesses[i] * fixed_multiplier) + 0.5));
-		else
-*/
-		fprintf(conf, "%.20e ", ann->CascadeActivationSteepnessesList[i]);
-	}
-	fprintf(conf, "\n");
-	fprintf(conf, "layer_sizes=");
-	for(layer_it = ann->P_FirstLayer; layer_it != ann->P_LastLayer; layer_it++) {
-		// the number of neurons in the layers (in the last layer, there is always one too many neurons, because of an unused bias)
-		fprintf(conf, "%d ", (int)layer_it->GetCount());
-	}
-	fprintf(conf, "\n");
-	/* 2.1 */
-	/*
-	#define SCALE_SAVE(what, where) fprintf(conf, # what "_" # where "="); \
-		for(i = 0; i < ann->num_ ## where ## put; i++) \
-			fprintf(conf, "%f ", ann->what ## _ ## where[ i ]); \
-		fprintf(conf, "\n");
-	*/
-	if(!save_as_fixed) {
-		if(ann->ScaleIn.IsPresent()) {
-			fprintf(conf, "scale_included=1\n");
-			ann->ScaleIn.Save(conf, ann->NumInput, "in");
-			ann->ScaleOut.Save(conf, ann->NumOutput, "out");
-			/*SCALE_SAVE(scale_mean, in)
-			SCALE_SAVE(scale_deviation, in)
-			SCALE_SAVE(scale_new_min, in)
-			SCALE_SAVE(scale_factor, in)
-			SCALE_SAVE(scale_mean, out)
-			SCALE_SAVE(scale_deviation, out)
-			SCALE_SAVE(scale_new_min, out)
-			SCALE_SAVE(scale_factor, out)*/
-		}
-		else
-			fprintf(conf, "scale_included=0\n");
-	}
-//#undef SCALE_SAVE
-	/* 2.0 */
-	fprintf(conf, "neurons (num_inputs, activation_function, activation_steepness)=");
-	for(layer_it = ann->P_FirstLayer; layer_it != ann->P_LastLayer; layer_it++) {
-		/* the neurons */
-		for(neuron_it = layer_it->P_FirstNeuron; neuron_it != layer_it->P_LastNeuron; neuron_it++) {
-			if(save_as_fixed) {
-				fprintf(conf, "(%u, %u, %u) ", neuron_it->GetConCount(), neuron_it->ActivationFunction, (int)floor((neuron_it->ActivationSteepness * fixed_multiplier) + 0.5));
-			}
-			else {
-				fprintf(conf, "(%u, %u, %.20e) ", neuron_it->GetConCount(), neuron_it->ActivationFunction, neuron_it->ActivationSteepness);
-			}
-		}
-	}
-	fprintf(conf, "\n");
-	connected_neurons = ann->PP_Connections;
-	weights = ann->P_Weights;
-	first_neuron = ann->P_FirstLayer->P_FirstNeuron;
-	//
-	// Now save all the connections.
-	// We only need to save the source and the weight, since the destination is given by the order.
-	//
-	// The weight is not saved binary due to differences in binary definition of floating point numbers.
-	// Especially an iPAQ does not use the same binary representation as an i386 machine.
-	//
-	fprintf(conf, "connections (connected_to_neuron, weight)=");
-	for(i = 0; i < ann->TotalConnections; i++) {
-		// save the connection "(source weight) "
-		if(save_as_fixed) {
-			fprintf(conf, "(%d, %d) ", (int)(connected_neurons[i] - first_neuron), (int)floor((weights[i] * fixed_multiplier) + 0.5));
-		}
-		else {
-			fprintf(conf, "(%d, %.20e) ", (int)(connected_neurons[i] - first_neuron), weights[i]);
-		}
-	}
-	fprintf(conf, "\n");
-	return calculated_decimal_point;
-}
-//
-// FANN_TRAIN
-//
-int FASTCALL Fann::Neuron::IsEqual(const Fann::Neuron & rS) const
-{
-	int yes = 1;
-#define CMPF(f) THROW(f == rS.f)
-	CMPF(FirstCon);
-	CMPF(LastCon);
-	CMPF(Sum);
-	CMPF(Value);
-	CMPF(ActivationSteepness);
-	CMPF(ActivationFunction);
-#undef CMPF
-	CATCH
-		yes = 0;
-	ENDCATCH
-	return yes;
-}
-//
-// Calculates the derivative of a value, given an activation function and a steepness
-//
-float Fann::Neuron::ActivationDerived(float value, float sum) const
-{
-	switch(ActivationFunction) {
-		case Fann::FANN_LINEAR:
-		case Fann::FANN_LINEAR_PIECE:
-		case Fann::FANN_LINEAR_PIECE_SYMMETRIC: return (float)fann_linear_derive(ActivationSteepness, value);
-		case Fann::FANN_SIGMOID:
-		case Fann::FANN_SIGMOID_STEPWISE:
-			value = MINMAX(value, 0.01f, 0.99f);
-			return (float)fann_sigmoid_derive(ActivationSteepness, value);
-		case Fann::FANN_SIGMOID_SYMMETRIC:
-		case Fann::FANN_SIGMOID_SYMMETRIC_STEPWISE:
-			value = MINMAX(value, -0.98f, 0.98f);
-			return (float)fann_sigmoid_symmetric_derive(ActivationSteepness, value);
-		case Fann::FANN_GAUSSIAN:
-			// value = MINMAX(value, 0.01f, 0.99f);
-			return (float)fann_gaussian_derive(ActivationSteepness, value, sum);
-		case Fann::FANN_GAUSSIAN_SYMMETRIC:
-			// value = MINMAX(value, -0.98f, 0.98f);
-			return (float)fann_gaussian_symmetric_derive(ActivationSteepness, value, sum);
-		case Fann::FANN_ELLIOT:
-			value = MINMAX(value, 0.01f, 0.99f);
-			return (float)fann_elliot_derive(ActivationSteepness, value, sum);
-		case Fann::FANN_ELLIOT_SYMMETRIC:
-			value = MINMAX(value, -0.98f, 0.98f);
-			return (float)fann_elliot_symmetric_derive(ActivationSteepness, value, sum);
-		case Fann::FANN_SIN_SYMMETRIC: return (float)fann_sin_symmetric_derive(ActivationSteepness, sum);
-		case Fann::FANN_COS_SYMMETRIC: return (float)fann_cos_symmetric_derive(ActivationSteepness, sum);
-		case Fann::FANN_SIN: return (float)fann_sin_derive(ActivationSteepness, sum);
-		case Fann::FANN_COS: return (float)fann_cos_derive(ActivationSteepness, sum);
-		case Fann::FANN_THRESHOLD: fann_error(NULL, SLERR_FANN_CANT_TRAIN_ACTIVATION);
-	}
-	return (float)0;
-}
-//
-// INTERNAL FUNCTION
-// Calculates the activation of a value, given an activation function and a steepness
-//
-float Fann::Activation(uint activationFunction, float steepness, float value) const
-{
-	return ActivationSwitch(activationFunction, steepness * value);
-}
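The fann_*_derive macros used by ActivationDerived are defined elsewhere and are not part of this hunk. For the plain sigmoid case the conventional form, expressed through the clamped output value v, would look like the sketch below; the 2*steepness factor assumes FANN's sigmoid 1/(1+exp(-2*s*x)), which is an assumption here, not a statement about this codebase:

    #include <algorithm>
    // derivative of the sigmoid expressed through its output value v,
    // clamped the same way ActivationDerived clamps it (0.01 .. 0.99)
    static float SigmoidDerive(float steepness, float v)
    {
        v = std::min(std::max(v, 0.01f), 0.99f);
        return 2.0f * steepness * v * (1.0f - v);
    }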
-//
-// Trains the network with the backpropagation algorithm.
-//
-int Fann::Train(const float * pInput, const float * pDesiredOutput)
-{
-	return TrainWithOutput(pInput, pDesiredOutput, 0);
-}
-
-int Fann::TrainWithOutput(const float * pInput, const float * pDesiredOutput, float * pResult)
-{
-	int ok = 1;
-	const float * p_result = Run(pInput);
-	if(pResult) {
-		memcpy(pResult, p_result, NumOutput * sizeof(float));
-	}
-	THROW(ComputeMSE(pDesiredOutput));
-	BackpropagateMSE();
-	THROW(UpdateWeights());
-	CATCHZOK
-	return ok;
-}
-//
-// INTERNAL FUNCTION
-// Helper function to update the MSE value and return a diff which takes symmetric functions into account
-//
-//float fann_update_MSE(Fann * ann, Fann::Neuron * pNeuron, float neuronDiff)
-float Fann::UpdateMSE(Fann::Neuron * pNeuron, float neuronDiff)
-{
-	switch(pNeuron->ActivationFunction) {
-		case FANN_LINEAR_PIECE_SYMMETRIC:
-		case FANN_THRESHOLD_SYMMETRIC:
-		case FANN_SIGMOID_SYMMETRIC:
-		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-		case FANN_ELLIOT_SYMMETRIC:
-		case FANN_GAUSSIAN_SYMMETRIC:
-		case FANN_SIN_SYMMETRIC:
-		case FANN_COS_SYMMETRIC: neuronDiff /= (float)2.0; break;
-		case FANN_THRESHOLD:
-		case FANN_LINEAR:
-		case FANN_SIGMOID:
-		case FANN_SIGMOID_STEPWISE:
-		case FANN_GAUSSIAN:
-		case FANN_GAUSSIAN_STEPWISE:
-		case FANN_ELLIOT:
-		case FANN_LINEAR_PIECE:
-		case FANN_SIN:
-		case FANN_COS: break;
-	}
-	float _neuron_diff2 = (float)(neuronDiff * neuronDiff);
-	MSE_value += _neuron_diff2;
-	// printf("neuron_diff %f = (%f - %f)[/2], neuron_diff2=%f, sum=%f, MSE_value=%f, num_MSE=%d\n", neuron_diff, *desired_output, neuron_value, neuron_diff2, last_layer_begin->sum, ann->MSE_value, ann->num_MSE);
-	if(fabsf(neuronDiff) >= BitFailLimit) {
-		NumBitFail++;
-	}
-	return neuronDiff;
-}
-//
-// Tests the network.
-//
-//FANN_EXTERNAL float * FANN_API fann_test(Fann * ann, float * pInput, float * pDesiredOutput)
-/*float * Fann::Test_(const float * pInput, const float * pDesiredOutput)
-{
-	float * p_output_begin = Run(pInput);
-	const float * p_output_end = p_output_begin + NumOutput;
-	Fann::Neuron * p_output_neuron = (P_LastLayer-1)->P_FirstNeuron;
-	// calculate the error
-	for(float * p_output_it = p_output_begin; p_output_it != p_output_end; p_output_it++) {
-		const float neuron_value = *p_output_it;
-		float neuron_diff = (*pDesiredOutput - neuron_value);
-		neuron_diff = UpdateMSE(p_output_neuron, neuron_diff);
-		pDesiredOutput++;
-		p_output_neuron++;
-		num_MSE++;
-	}
-	return p_output_begin;
-}*/
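UpdateMSE halves the diff for symmetric activations so that a network with [-1,1] outputs reports an MSE comparable to a [0,1] one: a worst-case miss of 2.0 contributes (2.0/2)^2 = 1.0, the same as a worst-case miss of 1.0 on a plain sigmoid. A hypothetical standalone accumulator mirroring that bookkeeping:

    // mirrors UpdateMSE's bookkeeping: MSE_value, num_MSE and NumBitFail
    struct MseAcc {
        float Sum = 0.0f;     // running sum of squared diffs
        unsigned N = 0;       // number of accumulated outputs
        unsigned BitFail = 0; // outputs missing by BitFailLimit or more
        void Add(float diff, bool symmetric, float bitFailLimit)
        {
            if(symmetric)
                diff /= 2.0f; // keep symmetric ranges comparable
            Sum += diff * diff;
            if((diff < 0.0f ? -diff : diff) >= bitFailLimit)
                BitFail++;
            N++;
        }
        float Get() const { return N ? Sum / N : 0.0f; }
    };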
-int Fann::ComputeMSE(const float * pDesiredOutput)
-{
-	int ok = 1;
-	float * p_error_it = 0;
-	float * p_error_begin = 0;
-	Fann::Neuron * p_last_layer_begin = (P_LastLayer-1)->P_FirstNeuron;
-	const Fann::Neuron * p_last_layer_end = p_last_layer_begin + NumOutput;
-	const Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron;
-	// if no room allocated for the error variables, allocate it now
-	if(P_TrainErrors == NULL) {
-		THROW(P_TrainErrors = (float*)SAlloc::C(TotalNeurons, sizeof(float)));
-	}
-	else {
-		// clear the error variables
-		memzero(P_TrainErrors, TotalNeurons * sizeof(float));
-	}
-	p_error_begin = P_TrainErrors;
-	// calculate the error and place it in the output layer
-	p_error_it = p_error_begin + (p_last_layer_begin - p_first_neuron);
-	for(; p_last_layer_begin != p_last_layer_end; p_last_layer_begin++) {
-		const float neuron_value = p_last_layer_begin->Value;
-		float neuron_diff = *pDesiredOutput - neuron_value;
-		// @todo neuron_diff == 0 => *p_error_it = 0
-		neuron_diff = UpdateMSE(p_last_layer_begin, neuron_diff);
-		if(TrainErrorFunction) { // @todo make switch when more functions
-			if(neuron_diff < -0.9999999)
-				neuron_diff = -17.0;
-			else if(neuron_diff > 0.9999999)
-				neuron_diff = 17.0;
-			else
-				neuron_diff = (float)log((1.0 + neuron_diff) / (1.0 - neuron_diff));
-		}
-		*p_error_it = p_last_layer_begin->ActivationDerived(neuron_value, p_last_layer_begin->Sum) * neuron_diff;
-		pDesiredOutput++;
-		p_error_it++;
-		num_MSE++;
-	}
-	CATCHZOK
-	return ok;
-}
-
-void Fann::BackpropagateMSE()
-{
-	float * p_error_begin = P_TrainErrors;
-	float * p_error_prev_layer;
-	const Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron;
-	const Fann::Layer * p_second_layer = P_FirstLayer + 1;
-	// go through all the layers, from last to first.
-	// And propagate the error backwards
-	for(Fann::Layer * p_layer_it = P_LastLayer-1; p_layer_it > p_second_layer; --p_layer_it) {
-		const Fann::Neuron * p_last_neuron = p_layer_it->P_LastNeuron;
-		// for each connection in this layer, propagate the error backwards
-		if(ConnectionRate >= 1.0f) {
-			if(NetworkType == FANN_NETTYPE_LAYER) {
-				p_error_prev_layer = p_error_begin + ((p_layer_it-1)->P_FirstNeuron - p_first_neuron);
-			}
-			else {
-				p_error_prev_layer = p_error_begin;
-			}
-			for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				const float tmp_error = p_error_begin[p_neuron_it - p_first_neuron];
-				const float * p_weights = P_Weights + p_neuron_it->FirstCon;
-				for(uint i = p_neuron_it->GetConCount(); i--;) {
-					p_error_prev_layer[i] += tmp_error * p_weights[i];
-				}
-			}
-		}
-		else {
-			for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				const float tmp_error = p_error_begin[p_neuron_it - p_first_neuron];
-				const float * p_weights = P_Weights + p_neuron_it->FirstCon;
-				Fann::Neuron ** pp_connections = PP_Connections + p_neuron_it->FirstCon;
-				for(uint i = p_neuron_it->GetConCount(); i--;) {
-					p_error_begin[pp_connections[i] - p_first_neuron] += tmp_error * p_weights[i];
-				}
-			}
-		}
-		{
-			// then calculate the actual errors in the previous layer
-			p_error_prev_layer = p_error_begin + ((p_layer_it-1)->P_FirstNeuron - p_first_neuron);
-			p_last_neuron = (p_layer_it-1)->P_LastNeuron;
-			for(Fann::Neuron * p_neuron_it = (p_layer_it-1)->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				*p_error_prev_layer *= p_neuron_it->ActivationDerived(p_neuron_it->Value, p_neuron_it->Sum);
-				p_error_prev_layer++;
-			}
-		}
-	}
-}
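For a fully connected layer the inner loop of BackpropagateMSE reduces to err_prev[j] += err[i] * w[i][j], after which each previous-layer error is scaled by its own activation derivative. A toy pass with two neurons fed by three inputs:

    // toy: propagate the errors of a 2-neuron layer back to its 3 inputs
    int main()
    {
        const float err[2] = {0.1f, -0.2f};          // this layer's entries in P_TrainErrors
        const float w[2][3] = {{0.5f, -1.0f, 0.25f}, // weights, one row per neuron
                               {0.75f, 0.1f, -0.5f}};
        float err_prev[3] = {0, 0, 0};
        for(int i = 0; i < 2; i++)
            for(int j = 0; j < 3; j++)
                err_prev[j] += err[i] * w[i][j];
        // the real code then multiplies each err_prev[j] by ActivationDerived(value, sum)
        return 0;
    }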
-int Fann::UpdateWeights()
-{
-	int ok = 1;
-	Fann::Neuron * p_neuron_it;
-	Fann::Neuron * p_last_neuron;
-	Fann::Neuron * p_prev_neurons;
-	Fann::Layer * p_layer_it;
-	// store some variables locally for fast access
-	const float learning_rate = LearningRate;
-	const float learning_momentum = LearningMomentum;
-	Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron;
-	Fann::Layer * p_first_layer = P_FirstLayer;
-	const Fann::Layer * p_last_layer = P_LastLayer;
-	float * p_error_begin = P_TrainErrors;
-	float * p_deltas_begin;
-	// if no room allocated for the deltas, allocate it now
-	if(P_PrevWeightsDeltas == NULL) {
-		THROW(P_PrevWeightsDeltas = (float*)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-	}
-	p_deltas_begin = P_PrevWeightsDeltas;
-	p_prev_neurons = p_first_neuron;
-	for(p_layer_it = (p_first_layer + 1); p_layer_it != p_last_layer; p_layer_it++) {
-		p_last_neuron = p_layer_it->P_LastNeuron;
-		if(ConnectionRate >= 1) {
-			if(NetworkType == FANN_NETTYPE_LAYER) {
-				p_prev_neurons = (p_layer_it-1)->P_FirstNeuron;
-			}
-			for(p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				const float tmp_error = p_error_begin[p_neuron_it - p_first_neuron] * learning_rate;
-				const uint num_connections = p_neuron_it->GetConCount();
-				float * p_weights = P_Weights + p_neuron_it->FirstCon;
-				float * p_weights_deltas = p_deltas_begin + p_neuron_it->FirstCon;
-				for(uint i = 0; i != num_connections; i++) {
-					const float delta_w = tmp_error * p_prev_neurons[i].Value + learning_momentum * p_weights_deltas[i];
-					p_weights[i] += delta_w;
-					p_weights_deltas[i] = delta_w;
-				}
-			}
-		}
-		else {
-			for(p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				const float tmp_error = p_error_begin[p_neuron_it - p_first_neuron] * learning_rate;
-				const uint num_connections = p_neuron_it->GetConCount();
-				float * p_weights = P_Weights + p_neuron_it->FirstCon;
-				float * p_weights_deltas = p_deltas_begin + p_neuron_it->FirstCon;
-				for(uint i = 0; i != num_connections; i++) {
-					const float delta_w = tmp_error * p_prev_neurons[i].Value + learning_momentum * p_weights_deltas[i];
-					p_weights[i] += delta_w;
-					p_weights_deltas[i] = delta_w;
-				}
-			}
-		}
-	}
-	CATCHZOK
-	return ok;
-}
-//
-// INTERNAL FUNCTION
-// Update slopes for batch training
-// layer_begin = ann->P_FirstLayer+1 and layer_end = ann->last_layer-1 will update all slopes.
-//
-int Fann::UpdateSlopesBatch(const Fann::Layer * pLayerBegin, const Fann::Layer * pLayerEnd)
-{
-	int ok = 1;
-	// store some variables locally for fast access
-	Fann::Neuron * p_first_neuron = P_FirstLayer->P_FirstNeuron;
-	const float * p_error_begin = P_TrainErrors;
-	// if no room allocated for the slope variables, allocate it now
-	if(P_TrainSlopes == NULL) {
-		THROW(P_TrainSlopes = (float *)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-	}
-	SETIFZ(pLayerBegin, (P_FirstLayer + 1));
-	SETIFZ(pLayerEnd, (P_LastLayer - 1));
-	float * p_slope_begin = P_TrainSlopes;
-	Fann::Neuron * p_prev_neurons = p_first_neuron;
-	for(; pLayerBegin <= pLayerEnd; pLayerBegin++) {
-		const Fann::Neuron * p_last_neuron = pLayerBegin->P_LastNeuron;
-		if(ConnectionRate >= 1) {
-			if(NetworkType == FANN_NETTYPE_LAYER) {
-				p_prev_neurons = (pLayerBegin-1)->P_FirstNeuron;
-			}
-			for(Fann::Neuron * p_neuron_it = pLayerBegin->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				const float tmp_error = p_error_begin[p_neuron_it - p_first_neuron];
-				float * p_neuron_slope = p_slope_begin + p_neuron_it->FirstCon;
-				const uint cc = p_neuron_it->GetConCount(); // num_connections
-				for(uint i = 0; i != cc; i++) {
-					p_neuron_slope[i] += tmp_error * p_prev_neurons[i].Value;
-				}
-			}
-		}
-		else {
-			for(Fann::Neuron * p_neuron_it = pLayerBegin->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-				const float tmp_error = p_error_begin[p_neuron_it - p_first_neuron];
-				float * p_neuron_slope = p_slope_begin + p_neuron_it->FirstCon;
-				const uint cc = p_neuron_it->GetConCount(); // num_connections
-				Fann::Neuron ** pp_connections = PP_Connections + p_neuron_it->FirstCon;
-				for(uint i = 0; i != cc; i++) {
-					p_neuron_slope[i] += tmp_error * pp_connections[i]->Value;
-				}
-			}
-		}
-	}
-	CATCHZOK
-	return ok;
-}
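The incremental rule in UpdateWeights is classic backpropagation with momentum: delta_w = lr * err * input + momentum * prev_delta. Two consecutive updates of a single weight, with lr = 0.7 and momentum = 0.9 chosen purely for illustration:

    // two consecutive momentum updates of one weight
    int main()
    {
        const float lr = 0.7f, momentum = 0.9f;
        float w = 0.5f, prev_delta = 0.0f;
        const float err_x_input[2] = {0.2f, 0.15f}; // tmp_error * prev_neuron.Value per step
        for(int step = 0; step < 2; step++) {
            const float delta_w = lr * err_x_input[step] + momentum * prev_delta;
            w += delta_w;         // step 0: +0.14, step 1: +0.105 + 0.126 = +0.231
            prev_delta = delta_w; // stored in P_PrevWeightsDeltas in the real code
        }
        return 0;
    }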
-//
-// INTERNAL FUNCTION
-// Clears arrays used for training before a new training session.
-// Also creates the arrays that do not exist yet.
-//
-int Fann::ClearTrainArrays()
-{
-	int ok = 1;
-	// if no room allocated for the slope variables, allocate it now (calloc clears mem)
-	if(P_TrainSlopes == NULL) {
-		THROW(P_TrainSlopes = (float*)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-	}
-	else
-		memzero(P_TrainSlopes, TotalConnectionsAllocated * sizeof(float));
-	// if no room allocated for the variables, allocate it now
-	if(P_PrevSteps == NULL) {
-		THROW(P_PrevSteps = (float *)SAlloc::M(TotalConnectionsAllocated * sizeof(float)));
-	}
-	if(TrainingAlgorithm == FANN_TRAIN_RPROP) {
-		const float _delta_zero = RpropDeltaZero;
-		for(uint i = 0; i < TotalConnectionsAllocated; i++)
-			P_PrevSteps[i] = _delta_zero;
-	}
-	else
-		memzero(P_PrevSteps, TotalConnectionsAllocated * sizeof(float));
-	// if no room allocated for the variables, allocate it now
-	if(P_PrevTrainSlopes == NULL) {
-		THROW(P_PrevTrainSlopes = (float *)SAlloc::C(TotalConnectionsAllocated, sizeof(float)));
-	}
-	else
-		memzero(P_PrevTrainSlopes, TotalConnectionsAllocated * sizeof(float));
-	CATCHZOK
-	return ok;
-}
-//
-// INTERNAL FUNCTION
-// Update weights for batch training
-//
-void Fann::UpdateWeightsBatch(uint numData, uint firstWeight, uint pastEnd)
-{
-	float * p_train_slopes = P_TrainSlopes;
-	float * p_weights = P_Weights;
-	const float _epsilon = LearningRate / numData;
-	for(uint i = firstWeight; i != pastEnd; i++) {
-		p_weights[i] += p_train_slopes[i] * _epsilon;
-		p_train_slopes[i] = 0.0;
-	}
-}
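UpdateWeightsBatch applies the slopes accumulated over a whole epoch in one step, scaled by LearningRate/numData so the step size does not grow with the size of the training set. A minimal free-function sketch of the same rule:

    // apply an epoch's accumulated slopes to the weights, then clear them
    static void BatchStep(float * w, float * slopes, unsigned n, float lr, unsigned numData)
    {
        const float epsilon = lr / numData; // per-pattern learning rate
        for(unsigned i = 0; i < n; i++) {
            w[i] += slopes[i] * epsilon;
            slopes[i] = 0.0f; // ready for the next epoch
        }
    }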
-//
-// INTERNAL FUNCTION
-// The quickprop training algorithm
-//
-void Fann::UpdateWeightsQuickprop(uint numData, uint firstWeight, uint pastEnd)
-{
-	float * p_train_slopes = P_TrainSlopes;
-	float * p_weights = P_Weights;
-	float * p_prev_steps = P_PrevSteps;
-	float * p_prev_train_slopes = P_PrevTrainSlopes;
-	const float _epsilon = LearningRate / numData;
-	const float _decay = QuickpropDecay; // -0.0001
-	const float _mu = QuickpropMu; // 1.75
-	const float _shrink_factor = (float)(_mu / (1.0 + _mu));
-	for(uint i = firstWeight; i != pastEnd; i++) {
-		float w = p_weights[i];
-		const float _prev_step = p_prev_steps[i];
-		const float _slope = p_train_slopes[i] + _decay * w;
-		const float _prev_slope = p_prev_train_slopes[i];
-		float _next_step = 0.0;
-		// The step must always be in direction opposite to the slope
-		if(_prev_step > 0.001) {
-			// If last step was positive...
-			if(_slope > 0.0) // Add in linear term if current slope is still positive
-				_next_step += _epsilon * _slope;
-			// If current slope is close to or larger than prev slope...
-			if(_slope > (_shrink_factor * _prev_slope))
-				_next_step += _mu * _prev_step; // Take maximum size negative step
-			else
-				_next_step += _prev_step * _slope / (_prev_slope - _slope); // Else, use quadratic estimate
-		}
-		else if(_prev_step < -0.001) {
-			// If last step was negative...
-			if(_slope < 0.0) // Add in linear term if current slope is still negative
-				_next_step += _epsilon * _slope;
-			// If current slope is close to or more negative than prev slope...
-			if(_slope < (_shrink_factor * _prev_slope))
-				_next_step += _mu * _prev_step; // Take maximum size negative step
-			else
-				_next_step += _prev_step * _slope / (_prev_slope - _slope); // Else, use quadratic estimate
-		}
-		else // Last step was zero, so use only linear term
-			_next_step += _epsilon * _slope;
-		/*
-		if(next_step > 1000 || next_step < -1000) {
-			printf("quickprop[%d] weight=%f, slope=%f, prev_slope=%f, next_step=%f, prev_step=%f\n",
-				i, weights[i], slope, prev_slope, next_step, prev_step);
-			if(next_step > 1000)
-				next_step = 1000;
-			else
-				next_step = -1000;
-		}
-		*/
-		//
-		// update global data arrays
-		//
-		p_prev_steps[i] = _next_step;
-		w += _next_step;
-		if(w > 1500)
-			p_weights[i] = 1500;
-		else if(w < -1500)
-			p_weights[i] = -1500;
-		else
-			p_weights[i] = w;
-		// weights[i] = w;
-		p_prev_train_slopes[i] = _slope;
-		p_train_slopes[i] = 0.0;
-	}
-}
-//
-// INTERNAL FUNCTION
-// The iRprop- algorithm
-//
-void Fann::UpdateWeightsIrpropm(uint firstWeight, uint pastEnd)
-{
-	float * p_train_slopes = P_TrainSlopes;
-	float * p_weights = P_Weights;
-	float * p_prev_steps = P_PrevSteps;
-	float * p_prev_train_slopes = P_PrevTrainSlopes;
-	const float _increase_factor = RpropIncreaseFactor; /*1.2;*/
-	const float _decrease_factor = RpropDecreaseFactor; /*0.5;*/
-	const float _delta_min = RpropDeltaMin; /*0.0;*/
-	const float _delta_max = RpropDeltaMax; /*50.0;*/
-	for(uint i = firstWeight; i != pastEnd; i++) {
-		const float _prev_step = MAX(p_prev_steps[i], (float)0.0001); // prev_step may not be zero because then the training will stop
-		const float _prev_slope = p_prev_train_slopes[i];
-		float _slope = p_train_slopes[i];
-		const float _same_sign = _prev_slope * _slope;
-		float _next_step;
-		if(_same_sign >= 0.0)
-			_next_step = MIN(_prev_step * _increase_factor, _delta_max);
-		else {
-			_next_step = MAX(_prev_step * _decrease_factor, _delta_min);
-			_slope = 0;
-		}
-		if(_slope < 0) {
-			p_weights[i] -= _next_step;
-			SETMAX(p_weights[i], -1500);
-		}
-		else {
-			p_weights[i] += _next_step;
-			SETMIN(p_weights[i], 1500);
-		}
-		/*if(i == 2) {
-			printf("weight=%f, slope=%f, next_step=%f, prev_step=%f\n", weights[i], slope, next_step, prev_step);
-		}*/
-		// update global data arrays
-		p_prev_steps[i] = _next_step;
-		p_prev_train_slopes[i] = _slope;
-		p_train_slopes[i] = 0.0;
-	}
-}
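iRprop- keeps one adaptive step size per weight: grow it by RpropIncreaseFactor while the slope keeps its sign, shrink it by RpropDecreaseFactor (and zero the slope) on a sign change, then step by the adapted amount and clamp to +-1500 as above. A per-weight sketch with the stock 1.2/0.5 factors:

    #include <algorithm>
    // one iRprop- update of a single weight (stock factors, clamp as in the code above)
    static void IrpropMinusStep(float & w, float & step, float & prev_slope, float slope)
    {
        const float inc = 1.2f, dec = 0.5f, delta_min = 0.0f, delta_max = 50.0f;
        if(prev_slope * slope >= 0.0f)
            step = std::min(step * inc, delta_max); // sign kept: accelerate
        else {
            step = std::max(step * dec, delta_min); // sign flip: back off
            slope = 0.0f;                           // and skip this step (the "minus" in iRprop-)
        }
        w = std::clamp(w + (slope < 0.0f ? -step : step), -1500.0f, 1500.0f);
        prev_slope = slope;                         // remembered for the next sign test
    }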
-//
-// INTERNAL FUNCTION
-// The SARprop- algorithm
-//
-//void fann_update_weights_sarprop(Fann * ann, uint epoch, uint firstWeight, uint pastEnd)
-void Fann::UpdateWeightsSarprop(uint epoch, uint firstWeight, uint pastEnd)
-{
-	assert(firstWeight <= pastEnd);
-	float * p_train_slopes = P_TrainSlopes;
-	float * p_weights = P_Weights;
-	float * p_prev_steps = P_PrevSteps;
-	float * p_prev_train_slopes = P_PrevTrainSlopes;
-	// These should be set from variables
-	const float _increase_factor = RpropIncreaseFactor; /*1.2;*/
-	const float _decrease_factor = RpropDecreaseFactor; /*0.5;*/
-	// @todo why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993)
-	const float _delta_min = 0.000001f;
-	const float _delta_max = RpropDeltaMax; /*50.0;*/
-	const float _weight_decay_shift = SarpropWeightDecayShift; /* ld 0.01 = -6.644 */
-	const float _step_error_threshold_factor = SarpropStepErrorThresholdFactor; /* 0.1 */
-	const float _step_error_shift = SarpropStepErrorShift; /* ld 3 = 1.585 */
-	const float _T = SarpropTemperature;
-	const float _MSE = GetMSE();
-	const float _RMSE = sqrtf(_MSE);
-	// for all weights; TODO: are biases included?
-	for(uint i = firstWeight; i != pastEnd; i++) {
-		// @todo confirm whether 1x10^-6 == delta_min is really better
-		const float _prev_step = MAX(p_prev_steps[i], (float)0.000001); // prev_step may not be zero because then the training will stop
-		// calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper)
-		float _slope = -p_train_slopes[i] - p_weights[i] * (float)fann_exp2(-_T * epoch + _weight_decay_shift);
-		// @todo is prev_train_slopes[i] 0.0 in the beginning?
-		const float _prev_slope = p_prev_train_slopes[i];
-		const float _same_sign = _prev_slope * _slope;
-		float _next_step = 0;
-		if(_same_sign > 0.0) {
-			_next_step = MIN(_prev_step * _increase_factor, _delta_max);
-			// @todo are the signs inverted? see differences between SARPROP paper and iRprop
-			if(_slope < 0.0)
-				p_weights[i] += _next_step;
-			else
-				p_weights[i] -= _next_step;
-		}
-		else if(_same_sign < 0.0) {
-			if(_prev_step < _step_error_threshold_factor * _MSE)
-				_next_step = _prev_step * _decrease_factor + (float)rand() / RAND_MAX * _RMSE * (float)fann_exp2(-_T * epoch + _step_error_shift);
-			else
-				_next_step = MAX(_prev_step * _decrease_factor, _delta_min);
-			_slope = 0.0;
-		}
-		else {
-			if(_slope < 0.0)
-				p_weights[i] += _prev_step;
-			else
-				p_weights[i] -= _prev_step;
-		}
-		// if(i == 2) {
-		//	printf("weight=%f, slope=%f, next_step=%f, prev_step=%f\n", weights[i], slope, next_step, prev_step);
-		// }
-		// update global data arrays
-		p_prev_steps[i] = _next_step;
-		p_prev_train_slopes[i] = _slope;
-		p_train_slopes[i] = 0.0;
-	}
-}
-
-void Fann::SetActivationFunctionHidden(Fann::ActivationFunc activationFunction)
-{
-	const Fann::Layer * p_last_layer = P_LastLayer - 1; /* -1 to not update the output layer */
-	for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != p_last_layer; p_layer_it++) {
-		const Fann::Neuron * p_last_neuron = p_layer_it->P_LastNeuron;
-		for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++)
-			p_neuron_it->ActivationFunction = activationFunction;
-	}
-}
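Both SARPROP terms in UpdateWeightsSarprop, the weight decay and the random restart noise, fade as 2^(-T*epoch + shift). Assuming fann_exp2 is a plain base-2 exponential (its definition is not in this hunk, so treat that as an assumption), the schedule can be tabulated like this:

    #include <cmath>
    #include <cstdio>
    int main()
    {
        const float T = 0.015f, step_error_shift = 1.585f; // ld 3, as in the comments above
        for(unsigned epoch = 0; epoch <= 400; epoch += 100) {
            // multiplier applied to the random restart step at this epoch:
            // ~3.0 at epoch 0, ~1.06 at 100, ~0.05 by 400
            printf("epoch %3u -> noise scale %f\n", epoch, std::exp2(-T * epoch + step_error_shift));
        }
        return 0;
    }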
-Fann::ActivationFunc Fann::GetActivationFunction(uint layer, int neuron) const
-{
-	Fann::Neuron * p_neuron_it = GetNeuron(layer, neuron);
-	return p_neuron_it ? (Fann::ActivationFunc)p_neuron_it->ActivationFunction : (Fann::ActivationFunc)-1;
-}
-
-int Fann::SetActivationFunction(Fann::ActivationFunc activationFunction, uint layer, int neuron)
-{
-	int ok = 1;
-	Fann::Neuron * p_neuron_it = GetNeuron(layer, neuron);
-	if(p_neuron_it)
-		p_neuron_it->ActivationFunction = activationFunction;
-	else
-		ok = 0;
-	return ok;
-}
-
-void Fann::SetActivationFunctionLayer(Fann::ActivationFunc activationFunction, uint layer)
-{
-	Fann::Layer * p_layer_it = GetLayer(layer);
-	if(p_layer_it) {
-		Fann::Neuron * p_last_neuron = p_layer_it->P_LastNeuron;
-		for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++)
-			p_neuron_it->ActivationFunction = activationFunction;
-	}
-}
-
-void Fann::SetActivationFunctionOutput(Fann::ActivationFunc activationFunction)
-{
-	const Fann::Layer * p_last_layer = P_LastLayer - 1;
-	Fann::Neuron * p_last_neuron = p_last_layer->P_LastNeuron;
-	for(Fann::Neuron * p_neuron_it = p_last_layer->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++) {
-		p_neuron_it->ActivationFunction = activationFunction;
-	}
-}
-
-void Fann::SetActivationSteepnessHidden(float steepness)
-{
-	const Fann::Layer * p_last_layer = P_LastLayer - 1; // -1 to not update the output layer
-	for(Fann::Layer * p_layer_it = P_FirstLayer + 1; p_layer_it != p_last_layer; p_layer_it++) {
-		const Fann::Neuron * p_last_neuron = p_layer_it->P_LastNeuron;
-		for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++)
-			p_neuron_it->ActivationSteepness = steepness;
-	}
-}
-
-float Fann::GetActivationSteepness(uint layer, int neuron) const
-{
-	const Fann::Neuron * p_neuron_it = GetNeuron(layer, neuron);
-	return p_neuron_it ? p_neuron_it->ActivationSteepness : -1 /* layer or neuron out of bounds */;
-}
-
-void Fann::SetActivationSteepness(float steepness, uint layer, int neuron)
-{
-	Fann::Neuron * p_neur = GetNeuron(layer, neuron);
-	if(p_neur)
-		p_neur->ActivationSteepness = steepness;
-}
-
-//FANN_EXTERNAL void FANN_API fann_set_activation_steepness_layer(Fann * ann, float steepness, int layer)
-void Fann::SetActivationSteepnessLayer(float steepness, uint layer)
-{
-	Fann::Layer * p_layer_it = GetLayer(layer);
-	if(p_layer_it) {
-		Fann::Neuron * p_last_neuron = p_layer_it->P_LastNeuron;
-		for(Fann::Neuron * p_neuron_it = p_layer_it->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++)
-			p_neuron_it->ActivationSteepness = steepness;
-	}
-}
-
-//FANN_EXTERNAL void FANN_API fann_set_activation_steepness_output(Fann * ann, float steepness)
-void Fann::SetActivationSteepnessOutput(float steepness)
-{
-	const Fann::Layer * p_last_layer = P_LastLayer - 1;
-	const Fann::Neuron * p_last_neuron = p_last_layer->P_LastNeuron;
-	for(Fann::Neuron * p_neuron_it = p_last_layer->P_FirstNeuron; p_neuron_it != p_last_neuron; p_neuron_it++)
-		p_neuron_it->ActivationSteepness = steepness;
-}
-/*
-FANN_GET_SET(void *, user_data)
-FANN_GET_SET(float, cascade_output_change_fraction)
-FANN_GET_SET(uint, cascade_output_stagnation_epochs)
-FANN_GET_SET(float, cascade_candidate_change_fraction)
-FANN_GET_SET(uint, cascade_candidate_stagnation_epochs)
-FANN_GET_SET(uint, cascade_num_candidate_groups)
-FANN_GET_SET(float, cascade_weight_multiplier)
-FANN_GET_SET(float, cascade_candidate_limit)
-FANN_GET_SET(uint, cascade_max_out_epochs)
-FANN_GET_SET(uint, cascade_max_cand_epochs)
-FANN_GET_SET(uint, cascade_min_out_epochs)
-FANN_GET_SET(uint, cascade_min_cand_epochs)
-FANN_GET_SET(fann_train_enum, training_algorithm)
-FANN_GET_SET(fann_errorfunc_enum, train_error_function)
-FANN_GET_SET(fann_callback_type, callback)
-FANN_GET_SET(float, quickprop_decay)
-FANN_GET_SET(float, quickprop_mu)
-FANN_GET_SET(float, rprop_increase_factor)
-FANN_GET_SET(float, rprop_decrease_factor)
-FANN_GET_SET(float, rprop_delta_min)
-FANN_GET_SET(float, rprop_delta_max)
-FANN_GET_SET(float, rprop_delta_zero)
-FANN_GET_SET(float, sarprop_weight_decay_shift)
-FANN_GET_SET(float, sarprop_step_error_threshold_factor)
-FANN_GET_SET(float, sarprop_step_error_shift)
-FANN_GET_SET(float, sarprop_temperature)
-FANN_GET_SET(fann_stopfunc_enum, train_stop_function)
-FANN_GET_SET(float, bit_fail_limit)
-FANN_GET_SET(float, LearningRate)
-FANN_GET_SET(float, LearningMomentum)
-
-FANN_GET(uint, cascade_activation_functions_count)
-FANN_GET(fann_activationfunc_enum *, cascade_activation_functions)
-FANN_GET(uint, num_input)
-FANN_GET(uint, num_output)
-FANN_GET(uint, total_connections)
-*/
-//
-// FANN_TRAIN_DATA
-//
-//
-// Reads training data from a file.
-//
-/*FANN_EXTERNAL Fann::TrainData * FANN_API fann_read_train_from_file(const char * configuration_file)
-{
-	Fann::TrainData * data = 0;
-	FILE * file = fopen(configuration_file, "r");
-	if(!file) {
-		fann_error(NULL, SLERR_FANN_CANT_OPEN_CONFIG_R, configuration_file);
-	}
-	else {
-		data = fann_read_train_from_fd(file, configuration_file);
-		fclose(file);
-	}
-	return data;
-}*/
-//
-// Save training data to a file
-//
-FANN_EXTERNAL int FANN_API fann_save_train(Fann::TrainData * data, const char * filename)
-{
-	return fann_save_train_internal(data, filename, 0, 0);
-}
-//
-// Save training data to a file in fixed point algebra. (Good for testing a network in fixed point)
-//
-FANN_EXTERNAL int FANN_API fann_save_train_to_fixed(Fann::TrainData * data, const char * filename, uint decimal_point)
-{
-	return fann_save_train_internal(data, filename, 1, decimal_point);
-}
-//
-// deallocate the train data structure.
-//
-FANN_EXTERNAL void FANN_API fann_destroy_train(Fann::TrainData * pData)
-{
-	delete pData;
-}
-//
-// Test a set of training data and calculate the MSE
-//
-//FANN_EXTERNAL float FANN_API fann_test_data(Fann * ann, Fann::TrainData * data)
-float Fann::TestData(const Fann::TrainData * pData)
-{
-	if(!CheckInputOutputSizes(pData))
-		return 0;
-	else {
-		ResetMSE();
-		for(uint i = 0; i != pData->GetCount(); i++) {
-			const float * p_input = (const float *)pData->InpL.at(i)->dataPtr();
-			const float * p_output = (const float *)pData->OutL.at(i)->dataPtr();
-			//Test(p_input, p_output);
-			//float * Fann::Test_(const float * pInput, const float * pDesiredOutput)
-			{
-				const float * p_output_begin = Run(p_input);
-				const float * p_output_end = p_output_begin + NumOutput;
-				Fann::Neuron * p_output_neuron = (P_LastLayer-1)->P_FirstNeuron;
-				// calculate the error
-				for(const float * p_output_it = p_output_begin; p_output_it != p_output_end; p_output_it++) {
-					const float neuron_value = *p_output_it;
-					float neuron_diff = (*p_output - neuron_value);
-					neuron_diff = UpdateMSE(p_output_neuron, neuron_diff);
-					p_output++;
-					p_output_neuron++;
-					num_MSE++;
-				}
-				//return p_output_begin;
-			}
-		}
-		return GetMSE();
-	}
-}
-
-int Fann::Helper_TrainData(const Fann::TrainData * pData)
-{
-	int ok = 1;
-	ResetMSE();
-	for(uint i = 0; i < pData->GetCount(); i++) {
-		Run((const float *)pData->InpL.at(i)->dataPtr());
-		THROW(ComputeMSE((const float *)pData->OutL.at(i)->dataPtr()));
-		BackpropagateMSE();
-		THROW(UpdateSlopesBatch(P_FirstLayer+1, P_LastLayer-1));
-	}
-	CATCHZOK
-	return ok;
-}
-int Fann::TrainOnData(const Fann::TrainData * pData, uint maxEpochs, uint epochsBetweenReports, float desiredError)
-{
-	int ok = 1;
-	THROW(CheckInputOutputSizes(pData));
-	if(epochsBetweenReports && !Callback)
-		printf("Max epochs %8d. Desired error: %.10f.\n", maxEpochs, desiredError);
-	int is_desired_error_reached = 0;
-	for(uint i = 1; !is_desired_error_reached && i <= maxEpochs; i++) {
-		//
-		// Train for one epoch with the selected training algorithm
-		//
-		switch(TrainingAlgorithm) {
-			case FANN_TRAIN_QUICKPROP:
-				if(P_PrevTrainSlopes == NULL)
-					THROW(ClearTrainArrays());
-				THROW(Helper_TrainData(pData));
-				UpdateWeightsQuickprop(pData->GetCount(), 0, TotalConnections);
-				break;
-			case FANN_TRAIN_RPROP:
-				if(P_PrevTrainSlopes == NULL)
-					THROW(ClearTrainArrays());
-				THROW(Helper_TrainData(pData));
-				UpdateWeightsIrpropm(0, TotalConnections);
-				break;
-			case FANN_TRAIN_SARPROP:
-				if(P_PrevTrainSlopes == NULL)
-					THROW(ClearTrainArrays());
-				THROW(Helper_TrainData(pData));
-				UpdateWeightsSarprop(SarpropEpoch, 0, TotalConnections);
-				++(SarpropEpoch);
-				break;
-			case FANN_TRAIN_BATCH:
-				THROW(Helper_TrainData(pData));
-				UpdateWeightsBatch(pData->GetCount(), 0, TotalConnections);
-				break;
-			case FANN_TRAIN_INCREMENTAL:
-				{
-					ResetMSE();
-					for(uint didx = 0; didx != pData->GetCount(); didx++) {
-						Train((const float *)pData->InpL.at(didx)->dataPtr(), (const float *)pData->OutL.at(didx)->dataPtr());
-					}
-				}
-				break;
-			default:
-				CALLEXCEPT_S(SLERR_FANN_INVTRAINALG);
-				break;
-		}
-		const float _error = GetMSE();
-		//
-		is_desired_error_reached = DesiredErrorReached(desiredError);
-		//
-		// print current output
-		//
-		if(epochsBetweenReports && (i % epochsBetweenReports == 0 || i == maxEpochs || i == 1 || is_desired_error_reached)) {
-			if(!Callback) {
-				printf("Epochs %8d. Current error: %.10f. Bit fail %d.\n", i, _error, NumBitFail);
-			}
-			else if(((*Callback)(this, pData, maxEpochs, epochsBetweenReports, desiredError, i)) == -1)
-				break; // you can break the training by returning -1
-		}
-	}
-	CATCHZOK
-	return ok;
-}
-//
-// shuffles training data, randomizing the order
-//
-void Fann::TrainData::Shuffle()
-{
-	for(uint dat = 0; dat < GetCount(); dat++) {
-		uint swap = (uint)(rand() % GetCount());
-		if(swap != dat) {
-			uint elem;
-			Fann::DataVector * p_inp_vect_swap = InpL.at(swap);
-			Fann::DataVector * p_out_vect_swap = OutL.at(swap);
-			Fann::DataVector * p_inp_vect = InpL.at(dat);
-			Fann::DataVector * p_out_vect = OutL.at(dat);
-			for(elem = 0; elem < p_inp_vect->getCount(); elem++) {
-				Exchange(&p_inp_vect->at(elem), &p_inp_vect_swap->at(elem));
-			}
-			for(elem = 0; elem < p_out_vect->getCount(); elem++) {
-				Exchange(&p_out_vect->at(elem), &p_out_vect_swap->at(elem));
-			}
-		}
-	}
-}
-//
-// INTERNAL FUNCTION calculates min and max of train data
-//
-/*void fann_get_min_max_data(float ** data, uint num_data, uint num_elem, float * min, float * max)
-{
-	*min = *max = data[0][0];
-	for(uint dat = 0; dat < num_data; dat++) {
-		for(uint elem = 0; elem < num_elem; elem++) {
-			const float temp = data[dat][elem];
-			if(temp < *min)
-				*min = temp;
-			else if(temp > *max)
-				*max = temp;
-		}
-	}
-}*/
-
-//static
-void Fann::DataVector::GetMinMax(const TSCollection <Fann::DataVector> & rData, float * pMin, float * pMax)
-{
-	float _min = rData.at(0)->at(0);
-	float _max = _min;
-	for(uint dat = 0; dat < rData.getCount(); dat++) {
-		const DataVector * p_vect = rData.at(dat);
-		for(uint elem = 0; elem < p_vect->getCount(); elem++) {
-			const float v = p_vect->at(elem);
-			SETMIN(_min, v);
-			SETMAX(_max, v);
-		}
-	}
-	ASSIGN_PTR(pMin, _min);
-	ASSIGN_PTR(pMax, _max);
-}
-
-//FANN_EXTERNAL float FANN_API fann_get_min_train_input(Fann::TrainData * train_data)
-float Fann::TrainData::GetMinInput() const
-{
-	float min, max;
-	//fann_get_min_max_data(train_data->input, train_data->num_data, train_data->num_input, &min, &max);
-	Fann::DataVector::GetMinMax(InpL, &min, &max);
-	return min;
-}
-
-//FANN_EXTERNAL float FANN_API fann_get_max_train_input(Fann::TrainData * train_data)
-float Fann::TrainData::GetMaxInput() const
-{
-	float min, max;
-	//fann_get_min_max_data(train_data->input, train_data->num_data, train_data->num_input, &min, &max);
-	Fann::DataVector::GetMinMax(InpL, &min, &max);
-	return max;
-}
-
-//FANN_EXTERNAL float FANN_API fann_get_min_train_output(Fann::TrainData * train_data)
-float Fann::TrainData::GetMinOutput() const
-{
-	float min, max;
-	//fann_get_min_max_data(train_data->output, train_data->num_data, train_data->num_output, &min, &max);
-	Fann::DataVector::GetMinMax(OutL, &min, &max);
-	return min;
-}
-
-//FANN_EXTERNAL float FANN_API fann_get_max_train_output(Fann::TrainData * train_data)
-float Fann::TrainData::GetMaxOutput() const
-{
-	float min, max;
-	//fann_get_min_max_data(train_data->output, train_data->num_data, train_data->num_output, &min, &max);
-	Fann::DataVector::GetMinMax(OutL, &min, &max);
-	return max;
-}
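TrainData::Shuffle above draws partners with rand() % n, which is slightly biased and swaps element by element. Where C++11 is acceptable, an unbiased alternative is a Fisher-Yates index shuffle; a sketch, not this library's API (order and rng are hypothetical names):

    #include <algorithm>
    #include <numeric>
    #include <random>
    #include <vector>
    int main()
    {
        std::vector<unsigned> order(1000);
        std::iota(order.begin(), order.end(), 0u);     // 0..999
        std::mt19937 rng(42);
        std::shuffle(order.begin(), order.end(), rng); // unbiased Fisher-Yates
        // visiting InpL/OutL through 'order' would then shuffle input/output pairs together
        return 0;
    }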
-//
-// INTERNAL FUNCTION Scales data to a specific range
-//
-/*void fann_scale_data(float ** data, uint num_data, uint num_elem, float new_min, float new_max)
-{
-	float old_min, old_max;
-	fann_get_min_max_data(data, num_data, num_elem, &old_min, &old_max);
-	fann_scale_data_to_range(data, num_data, num_elem, old_min, old_max, new_min, new_max);
-}*/
-
-//static
-//void Fann::ScaleData(TSCollection <Fann::DataVector> & rData, float newMin, float newMax)
-//
-// INTERNAL FUNCTION Scales data to a specific range
-//
-/*FANN_EXTERNAL void FANN_API fann_scale_data_to_range(float ** data, uint num_data, uint num_elem,
-	float old_min, float old_max, float new_min, float new_max)
-{
-	const float old_span = old_max - old_min;
-	const float new_span = new_max - new_min;
-	const float factor = new_span / old_span;
-	// printf("max %f, min %f, factor %f\n", old_max, old_min, factor);
-	for(uint dat = 0; dat < num_data; dat++) {
-		for(uint elem = 0; elem < num_elem; elem++) {
-			const float temp = (data[dat][elem] - old_min) * factor + new_min;
-			if(temp < new_min) {
-				data[dat][elem] = new_min;
-				// printf("error %f < %f\n", temp, new_min);
-			}
-			else if(temp > new_max) {
-				data[dat][elem] = new_max;
-				// printf("error %f > %f\n", temp, new_max);
-			}
-			else {
-				data[dat][elem] = temp;
-			}
-		}
-	}
-}*/
-
-void Fann::DataVector::ScaleToRange(TSCollection <Fann::DataVector> & rData, float oldMin, float oldMax, float newMin, float newMax)
-{
-	const float old_span = oldMax - oldMin;
-	const float new_span = newMax - newMin;
-	const float factor = new_span / old_span;
-	// printf("max %f, min %f, factor %f\n", old_max, old_min, factor);
-	for(uint dat = 0; dat < rData.getCount(); dat++) {
-		const DataVector * p_vect = rData.at(dat);
-		for(uint elem = 0; elem < p_vect->getCount(); elem++) {
-			const float temp = (p_vect->at(elem) - oldMin) * factor + newMin;
-			if(temp < newMin) {
-				p_vect->at(elem) = newMin;
-				// printf("error %f < %f\n", temp, new_min);
-			}
-			else if(temp > newMax) {
-				p_vect->at(elem) = newMax;
-				// printf("error %f > %f\n", temp, new_max);
-			}
-			else {
-				p_vect->at(elem) = temp;
-			}
-		}
-	}
-}
-//
-// Scales the inputs in the training data to the specified range
-//
-void Fann::TrainData::ScaleInput(float newMin, float newMax)
-{
-	//fann_scale_data(train_data->input, train_data->num_data, train_data->num_input, new_min, new_max);
-	Fann::DataVector::Scale(InpL, newMin, newMax);
-}
-//
-// Scales the outputs in the training data to the specified range
-//
-void Fann::TrainData::ScaleOutput(float newMin, float newMax)
-{
-	//fann_scale_data(train_data->output, train_data->num_data, train_data->num_output, new_min, new_max);
-	Fann::DataVector::Scale(OutL, newMin, newMax);
-}
-//
-// Scales both the inputs and the outputs in the training data to the specified range
-//
-void Fann::TrainData::Scale(float newMin, float newMax)
-{
-	//fann_scale_data(train_data->input, train_data->num_data, train_data->num_input, new_min, new_max);
-	//fann_scale_data(train_data->output, train_data->num_data, train_data->num_output, new_min, new_max);
-	ScaleInput(newMin, newMax);
-	ScaleOutput(newMin, newMax);
-}
-//
-// INTERNAL FUNCTION
-// Save the train data structure.
-//
-int fann_save_train_internal(Fann::TrainData * data, const char * filename, uint save_as_fixed, uint decimal_point)
-{
-	int retval = 0;
-	FILE * file = fopen(filename, "w");
-	if(!file) {
-		fann_error((FannError *)data, SLERR_FANN_CANT_OPEN_TD_W, filename);
-		retval = -1;
-	}
-	else {
-		retval = fann_save_train_internal_fd(data, file, filename, save_as_fixed, decimal_point);
-		fclose(file);
-	}
-	return retval;
-}
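ScaleToRange is the usual linear map (x - old_min) * new_span/old_span + new_min with clamping; e.g. mapping 0..255 into -1..1 sends 128 to 128 * (2/255) - 1, about 0.0039. A free-function sketch of the per-value step:

    #include <algorithm>
    // map one value from [old_min, old_max] into [new_min, new_max], clamped at the edges
    static float ScaleValue(float x, float old_min, float old_max, float new_min, float new_max)
    {
        const float factor = (new_max - new_min) / (old_max - old_min);
        return std::clamp((x - old_min) * factor + new_min, new_min, new_max);
    }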
-// -int fann_save_train_internal_fd(Fann::TrainData * data, FILE * file, const char * filename, uint save_as_fixed, uint decimal_point) -{ - uint num_data = data->GetCount(); - uint num_input = data->GetInputCount(); - uint num_output = data->GetOutputCount(); - uint i, j; - int retval = 0; - uint multiplier = 1 << decimal_point; - fprintf(file, "%u %u %u\n", data->GetCount(), data->GetInputCount(), data->GetOutputCount()); - for(i = 0; i < num_data; i++) { - const Fann::DataVector * p_inp_vect = data->InpL.at(i); - const Fann::DataVector * p_out_vect = data->OutL.at(i); - for(j = 0; j < num_input; j++) { - const float iv = p_inp_vect->at(j); - if(save_as_fixed) { - fprintf(file, "%d ", (int)(iv * multiplier)); - } - else { - if(((int)floor(iv + 0.5) * 1000000) == ((int)floor(iv * 1000000.0 + 0.5))) { - fprintf(file, "%d ", (int)iv); - } - else { - fprintf(file, "%f ", iv); - } - } - } - fprintf(file, "\n"); - - for(j = 0; j < num_output; j++) { - const float ov = p_out_vect->at(j); - if(save_as_fixed) { - fprintf(file, "%d ", (int)(ov * multiplier)); - } - else { - if(((int)floor(ov + 0.5) * 1000000) == ((int)floor(ov * 1000000.0 + 0.5))) { - fprintf(file, "%d ", (int)ov); - } - else { - fprintf(file, "%f ", ov); - } - } - } - fprintf(file, "\n"); - } - return retval; -} -// -// -// -Fann::ScaleParam::ScaleParam() : P_Mean(0), P_Deviation(0), P_NewMin(0), P_Factor(0) -{ -} - -Fann::ScaleParam::~ScaleParam() -{ - Destroy(); -} - -int Fann::ScaleParam::Copy(uint count, const ScaleParam & rS) -{ - int ok = 1; - Destroy(); - if(rS.P_Mean) { - THROW(P_Mean = Helper_Allocate(count, 0.0)); - memcpy(P_Mean, rS.P_Mean, count * sizeof(float)); - } - if(rS.P_Deviation) { - THROW(P_Deviation = Helper_Allocate(count, 0.0)); - memcpy(P_Deviation, rS.P_Deviation, count * sizeof(float)); - } - if(rS.P_NewMin) { - THROW(P_NewMin = Helper_Allocate(count, 0.0)); - memcpy(P_NewMin, rS.P_NewMin, count * sizeof(float)); - } - if(rS.P_Factor) { - THROW(P_Factor = Helper_Allocate(count, 0.0)); - memcpy(P_Factor, rS.P_Factor, count * sizeof(float)); - } - CATCHZOK - return ok; -} - -void Fann::ScaleParam::Destroy() -{ - ZFREE(P_Mean); - ZFREE(P_Deviation); - ZFREE(P_NewMin); - ZFREE(P_Factor); -} - -int Fann::ScaleParam::Allocate(uint count) -{ - int ok = 1; - Destroy(); - THROW(P_Mean = Helper_Allocate(count, 0.0)); - THROW(P_Deviation = Helper_Allocate(count, 1.0)); - THROW(P_NewMin = Helper_Allocate(count, -1.0)); - THROW(P_Factor = Helper_Allocate(count, 1.0)); - CATCH - Destroy(); - ok = 0; - ENDCATCH - return ok; -} - -int Fann::ScaleParam::ScaleVector(Fann::DataVector * pV) const -{ - int ok = 1; - THROW_S(P_Mean && P_Deviation && P_Factor && P_NewMin, SLERR_FANN_SCALE_NOT_PRESENT); - for(uint i = 0; i < pV->getCount(); i++) { - pV->at(i) = (float)(((pV->at(i) - P_Mean[i]) / P_Deviation[i] - (-1.0f) /* This is old_min */) * P_Factor[i] + P_NewMin[i]); - } - CATCHZOK - return ok; -} - -int Fann::ScaleParam::DescaleVector(Fann::DataVector * pV) const -{ - int ok = 1; - THROW_S(P_Mean && P_Deviation && P_Factor && P_NewMin, SLERR_FANN_SCALE_NOT_PRESENT); - for(uint i = 0; i < pV->getCount(); i++) { - pV->at(i) = (float)((((float)pV->at(i) - P_NewMin[i]) / P_Factor[i] + (-1.0f) /* This is old_min */) * P_Deviation[i] + P_Mean[i]); - } - CATCHZOK - return ok; -} - -int Fann::ScaleParam::IsEqualVect(uint count, const float * pVect, const float * pOtherVect) const -{ - int ok = 1; - THROW(BIN(pVect) == BIN(pOtherVect)); - if(pVect) { - for(uint i = 0; i < count; i++) { - THROW(pVect[i] == pOtherVect[i]); - } - } 
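// (editor's note) any THROW() above jumps here on the first mismatch; CATCHZOK
// turns that into ok = 0, so this routine reports inequality rather than
// signalling a hard error.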
- CATCHZOK - return ok; -} - -int Fann::ScaleParam::IsEqual(uint c, const ScaleParam & rS) const -{ - int ok = 1; - THROW(IsEqualVect(c, P_Mean, rS.P_Mean)); - THROW(IsEqualVect(c, P_Deviation, rS.P_Deviation)); - THROW(IsEqualVect(c, P_NewMin, rS.P_NewMin)); - THROW(IsEqualVect(c, P_Factor, rS.P_Factor)); - CATCHZOK - return ok; -} - -void Fann::ScaleParam::Set(uint c, const TSCollection & rData, float newMin, float newMax) -{ - uint cur_neuron, cur_sample; - // Calculate mean: sum(x)/length - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - P_Mean[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < rData.getCount(); cur_sample++) { - const Fann::DataVector * p_vect = rData.at(cur_sample); - P_Mean[cur_neuron] += p_vect->at(cur_neuron); - } - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - P_Mean[cur_neuron] /= (float)rData.getCount(); - // Calculate deviation: sqrt(sum((x-mean)^2)/length) - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - P_Deviation[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < rData.getCount(); cur_sample++) { - const Fann::DataVector * p_vect = rData.at(cur_sample); - P_Deviation[cur_neuron] += (p_vect->at(cur_neuron) - P_Mean[cur_neuron]) * (p_vect->at(cur_neuron) - P_Mean[cur_neuron]); - } - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - P_Deviation[cur_neuron] = sqrtf(P_Deviation[cur_neuron] / (float)rData.getCount()); - // Calculate factor: (new_max-new_min)/(old_max(1)-old_min(-1)) - // Looks like we dont need whole array of factors? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - P_Factor[cur_neuron] = (newMax - newMin) / (1.0f - (-1.0f)); - // Copy new minimum. - // Looks like we dont need whole array of new minimums? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - P_NewMin[cur_neuron] = newMin; -} - -int Fann::ScaleParam::Serialize(int dir, uint c, SBuffer & rBuf, SSerializeContext * pSCtx) -{ - int ok = 1; - uint _count = (dir > 0) ? (P_Mean ? 
c : 0) : 0; - THROW(pSCtx->Serialize(dir, _count, rBuf)); - if(dir > 0) { - if(_count) { - assert(P_Mean && P_Deviation && P_NewMin && P_Factor); - for(uint i = 0; i < _count; i++) { - THROW(pSCtx->Serialize(dir, P_Mean[i], rBuf)); - THROW(pSCtx->Serialize(dir, P_Deviation[i], rBuf)); - THROW(pSCtx->Serialize(dir, P_NewMin[i], rBuf)); - THROW(pSCtx->Serialize(dir, P_Factor[i], rBuf)); - } - } - } - else if(dir < 0) { - THROW(_count == 0 || _count == c); - if(_count) { - THROW(Allocate(_count)); - for(uint i = 0; i < _count; i++) { - THROW(pSCtx->Serialize(dir, P_Mean[i], rBuf)); - THROW(pSCtx->Serialize(dir, P_Deviation[i], rBuf)); - THROW(pSCtx->Serialize(dir, P_NewMin[i], rBuf)); - THROW(pSCtx->Serialize(dir, P_Factor[i], rBuf)); - } - } - else - Destroy(); - } - CATCHZOK - return ok; -} - -int Fann::ScaleParam::Save(FILE * pF, uint c, const char * pSuffix) -{ - int ok = 1; - SString temp_buf; - (temp_buf = "scale_mean").CatChar('_').Cat(pSuffix); - SaveVector(pF, c, P_Mean, temp_buf); - (temp_buf = "scale_deviation").CatChar('_').Cat(pSuffix); - SaveVector(pF, c, P_Deviation, temp_buf); - (temp_buf = "scale_new_min").CatChar('_').Cat(pSuffix); - SaveVector(pF, c, P_NewMin, temp_buf); - (temp_buf = "scale_factor").CatChar('_').Cat(pSuffix); - SaveVector(pF, c, P_Factor, temp_buf); - return ok; -} - -int Fann::ScaleParam::Load(FILE * pF, uint c, const char * pSuffix) -{ - int ok = 1; - SString temp_buf; - (temp_buf = "scale_mean").CatChar('_').Cat(pSuffix); - THROW(LoadVector(pF, c, P_Mean, temp_buf)); - (temp_buf = "scale_deviation").CatChar('_').Cat(pSuffix); - THROW(LoadVector(pF, c, P_Deviation, temp_buf)); - (temp_buf = "scale_new_min").CatChar('_').Cat(pSuffix); - THROW(LoadVector(pF, c, P_NewMin, temp_buf)); - (temp_buf = "scale_factor").CatChar('_').Cat(pSuffix); - THROW(LoadVector(pF, c, P_Factor, temp_buf)); - CATCHZOK - return ok; -} - -float * Fann::ScaleParam::Helper_Allocate(uint c, float defValue) -{ - float * p_list = (float *)SAlloc::M(c * sizeof(float)); - if(p_list) { - for(uint i = 0; i < c; i++) - p_list[i] = defValue; - } - return p_list; -} - -void Fann::ScaleParam::SaveVector(FILE * pF, uint c, const float * pList, const char * pField) const -{ - fprintf(pF, "%s=", pField); - for(uint i = 0; i < c; i++) - fprintf(pF, "%f ", pList[i]); - fprintf(pF, "\n"); -} - -int Fann::ScaleParam::LoadVector(FILE * pF, uint c, float * pList, const char * pField) -{ - int ok = 1; - SString temp_buf; - char _buf[256]; - (temp_buf = pField).CatChar('='); - STRNSCPY(_buf, temp_buf); - THROW_S(fscanf(pF, _buf) == 0, SLERR_FANN_CANT_READ_CONFIG); - for(uint i = 0; i < c; i++) { - THROW_S(fscanf(pF, "%f ", &pList[i]) == 1, SLERR_FANN_CANT_READ_CONFIG); - } - CATCHZOK - return ok; -} -// -// Creates an empty set of training data -// -FANN_EXTERNAL Fann::TrainData * FANN_API fann_create_train(uint numData, uint numInput, uint numOutput) -{ - Fann::TrainData * p_data = new Fann::TrainData(numInput, numOutput, numData); - if(p_data && !p_data->IsValid()) { - ZDELETE(p_data); - } - return p_data; -} -// -// INTERNAL FUNCTION Reads training data from a file descriptor. 
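//
// (editor's sketch) A matching stand-alone reader for the "num_data num_input
// num_output" header-plus-samples layout, again assuming plain float data;
// readTrainData reuses the hypothetical Sample struct from the writer sketch
// earlier and is not part of this file.
//
static int readTrainData(FILE * pF, std::vector<Sample> & rSamples)
{
	unsigned num_data, num_input, num_output;
	if(fscanf(pF, "%u %u %u", &num_data, &num_input, &num_output) != 3)
		return -1; // malformed header
	rSamples.assign(num_data, Sample());
	for(Sample & r_s : rSamples) {
		r_s.in.resize(num_input);
		r_s.out.resize(num_output);
		for(float & r_v : r_s.in)
			if(fscanf(pF, "%f", &r_v) != 1)
				return -1; // truncated sample
		for(float & r_v : r_s.out)
			if(fscanf(pF, "%f", &r_v) != 1)
				return -1; // truncated sample
	}
	return 0;
}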
-// -/*Fann::TrainData * fann_read_train_from_fd(FILE * file, const char * filename) -{ - Fann::TrainData * data = 0; - uint num_input, num_output, num_data, i, j; - uint line = 1; - if(fscanf(file, "%u %u %u\n", &num_data, &num_input, &num_output) != 3) { - fann_error(NULL, SLERR_FANN_CANT_READ_TD, filename, line); - } - else { - line++; - data = fann_create_train(num_data, num_input, num_output); - if(data) { - for(i = 0; i != num_data; i++) { - Fann::DataVector * p_inp_vect = data->InpL.at(i); - Fann::DataVector * p_out_vect = data->OutL.at(i); - for(j = 0; j != num_input; j++) { - THROW_S(fscanf(file, FANNSCANF " ", &p_inp_vect->at(j)) == 1, SLERR_FANN_CANT_READ_TD); - } - line++; - for(j = 0; j != num_output; j++) { - THROW_S(fscanf(file, FANNSCANF " ", &p_out_vect->at(j)) == 1, SLERR_FANN_CANT_READ_TD); - } - line++; - } - } - } - CATCH - ZDELETE(data); - ENDCATCH - return data; -}*/ -// -// INTERNAL FUNCTION returns 1 if the desired error is reached and 0 if it is not reached -// -/*int fann_desired_error_reached(const Fann * ann, float desired_error) - { return ann->DesiredErrorReached(desired_error); }*/ -/* - * Scale input and output data based on previously calculated parameters. - */ -//FANN_EXTERNAL void FANN_API fann_scale_train(Fann * ann, Fann::TrainData * data) -int Fann::ScaleTrain(Fann::TrainData * pData) -{ - int ok = 1; - THROW_S(ScaleIn.IsPresent(), SLERR_FANN_SCALE_NOT_PRESENT); - // Check that we have good training data. - THROW(CheckInputOutputSizes(pData)); - { - for(uint i = 0; i < pData->InpL.getCount(); i++) { - THROW(ScaleIn.ScaleVector(pData->InpL.at(i))); - } - } - { - for(uint i = 0; i < pData->OutL.getCount(); i++) { - THROW(ScaleOut.ScaleVector(pData->OutL.at(i))); - } - } - CATCHZOK - return ok; -} -// -// Scale input and output data based on previously calculated parameters. 
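//
// (editor's note) ScaleVector() and DescaleVector() above are exact inverses:
// scaling maps y = ((x - mean)/dev - (-1)) * factor + new_min, and descaling
// maps x = ((y - new_min)/factor + (-1)) * dev + mean. A tiny self-check with
// made-up parameters (checkScaleRoundTrip is a hypothetical helper, not part
// of this file):
//
#include <cmath>
static bool checkScaleRoundTrip()
{
	const float mean = 2.0f, dev = 0.5f, factor = 1.5f, new_min = -1.0f;
	const float x = 3.25f;
	const float y = ((x - mean) / dev - (-1.0f)) * factor + new_min;    // scale: y == 4.25
	const float back = ((y - new_min) / factor + (-1.0f)) * dev + mean; // descale: back == 3.25
	return fabsf(back - x) < 1e-5f; // true up to float rounding
}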
-// -int Fann::DescaleTrain(Fann::TrainData * pData) -{ - int ok = 1; - THROW_S(ScaleIn.IsPresent(), SLERR_FANN_SCALE_NOT_PRESENT); - // Check that we have good training data - THROW(CheckInputOutputSizes(pData)); - { - for(uint i = 0; i < pData->InpL.getCount(); i++) { - THROW(ScaleIn.DescaleVector(pData->InpL.at(i))); - } - } - { - for(uint i = 0; i < pData->OutL.getCount(); i++) { - THROW(ScaleOut.DescaleVector(pData->OutL.at(i))); - } - } - CATCHZOK - return ok; -} - -void Fann::ScaleReset(uint c, float * pArray, float value) -{ - for(uint i = 0; i < c; i++) - pArray[i] = value; -} - -#if 0 // { -#define SCALE_RESET(what, where, default_value) \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->what ## _ ## where[cur_neuron] = ( default_value ); - -#define SCALE_SET_PARAM(where) \ - /* Calculate mean: sum(x)/length */ \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->scale_mean_ ## where[cur_neuron] = 0.0f; \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - for(cur_sample = 0; cur_sample < data->num_data; cur_sample++) \ - ann->scale_mean_ ## where[cur_neuron] += (float)data->where ## put[ cur_sample ][cur_neuron]; \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->scale_mean_ ## where[cur_neuron] /= (float)data->num_data; \ - /* Calculate deviation: sqrt(sum((x-mean)^2)/length) */ \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->scale_deviation_ ## where[cur_neuron] = 0.0f; \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - for(cur_sample = 0; cur_sample < data->num_data; cur_sample++) \ - ann->scale_deviation_ ## where[cur_neuron] += \ - /* Another local variable in macro? Oh no! */ \ - ((float)data->where ## put[ cur_sample ][cur_neuron] - ann->scale_mean_ ## where[cur_neuron] \ - ) \ - * \ - ((float)data->where ## put[ cur_sample ][cur_neuron] - ann->scale_mean_ ## where[cur_neuron] \ - ); \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->scale_deviation_ ## where[cur_neuron] = sqrtf(ann->scale_deviation_ ## where[cur_neuron] / (float)data->num_data); \ - /* Calculate factor: (new_max-new_min)/(old_max(1)-old_min(-1)) */ \ - /* Looks like we dont need whole array of factors? */ \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->scale_factor_ ## where[cur_neuron] = \ - ( new_ ## where ## put_max - new_ ## where ## put_min ) \ - / \ - ( 1.0f - ( -1.0f ) ); \ - /* Copy new minimum. */ \ - /* Looks like we dont need whole array of new minimums? 
*/ \ - for(cur_neuron = 0; cur_neuron < ann->num_ ## where ## put; cur_neuron++) \ - ann->scale_new_min_ ## where[cur_neuron] = new_ ## where ## put_min; -#endif // } 0 - -void Fann::ScaleSetParam(uint c, uint numData, float ** const ppData, float newMin, float newMax, - float * pScaleMean, float * pScaleDeviation, float * pScaleNewMin, float * pScaleFactor) -{ - uint cur_neuron, cur_sample; - // Calculate mean: sum(x)/length - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - pScaleMean[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < numData; cur_sample++) - pScaleMean[cur_neuron] += (float)ppData[cur_sample][cur_neuron]; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - pScaleMean[cur_neuron] /= (float)numData; - // Calculate deviation: sqrt(sum((x-mean)^2)/length) - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - pScaleDeviation[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < numData; cur_sample++) { - pScaleDeviation[cur_neuron] += ((float)ppData[cur_sample][cur_neuron] - pScaleMean[cur_neuron]) * ((float)ppData[cur_sample][cur_neuron] - pScaleMean[cur_neuron]); - } - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - pScaleDeviation[cur_neuron] = sqrtf(pScaleDeviation[cur_neuron] / (float)numData); - // Calculate factor: (new_max-new_min)/(old_max(1)-old_min(-1)) - // Looks like we dont need whole array of factors? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - pScaleFactor[cur_neuron] = (newMax - newMin) / (1.0f - (-1.0f)); - // Copy new minimum. - // Looks like we dont need whole array of new minimums? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - pScaleNewMin[cur_neuron] = newMin; -} - -/*void Fann::ScaleSetParam2(uint c, uint numData, float ** const ppData, float newMin, float newMax, Fann::ScaleParam & rParam) -{ - uint cur_neuron, cur_sample; - // Calculate mean: sum(x)/length - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Mean[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < numData; cur_sample++) - rParam.P_Mean[cur_neuron] += (float)ppData[cur_sample][cur_neuron]; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Mean[cur_neuron] /= (float)numData; - // Calculate deviation: sqrt(sum((x-mean)^2)/length) - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Deviation[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < numData; cur_sample++) { - rParam.P_Deviation[cur_neuron] += ((float)ppData[cur_sample][cur_neuron] - rParam.P_Mean[cur_neuron]) * ((float)ppData[cur_sample][cur_neuron] - rParam.P_Mean[cur_neuron]); - } - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Deviation[cur_neuron] = sqrtf(rParam.P_Deviation[cur_neuron] / (float)numData); - // Calculate factor: (new_max-new_min)/(old_max(1)-old_min(-1)) - // Looks like we dont need whole array of factors? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Factor[cur_neuron] = (newMax - newMin) / (1.0f - (-1.0f)); - // Copy new minimum. - // Looks like we dont need whole array of new minimums? 
- for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_NewMin[cur_neuron] = newMin; -}*/ - -/*void Fann::ScaleSetParam3(uint c, const TSCollection & rData, float newMin, float newMax, Fann::ScaleParam & rParam) -{ - uint cur_neuron, cur_sample; - // Calculate mean: sum(x)/length - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Mean[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < rData.getCount(); cur_sample++) { - const Fann::DataVector * p_vect = rData.at(cur_sample); - rParam.P_Mean[cur_neuron] += p_vect->at(cur_neuron); - } - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Mean[cur_neuron] /= (float)rData.getCount(); - // Calculate deviation: sqrt(sum((x-mean)^2)/length) - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Deviation[cur_neuron] = 0.0f; - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - for(cur_sample = 0; cur_sample < rData.getCount(); cur_sample++) { - const Fann::DataVector * p_vect = rData.at(cur_sample); - rParam.P_Deviation[cur_neuron] += (p_vect->at(cur_neuron) - rParam.P_Mean[cur_neuron]) * (p_vect->at(cur_neuron) - rParam.P_Mean[cur_neuron]); - } - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Deviation[cur_neuron] = sqrtf(rParam.P_Deviation[cur_neuron] / (float)rData.getCount()); - // Calculate factor: (new_max-new_min)/(old_max(1)-old_min(-1)) - // Looks like we dont need whole array of factors? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_Factor[cur_neuron] = (newMax - newMin) / (1.0f - (-1.0f)); - // Copy new minimum. - // Looks like we dont need whole array of new minimums? - for(cur_neuron = 0; cur_neuron < c; cur_neuron++) - rParam.P_NewMin[cur_neuron] = newMin; -}*/ - -/* FANN_EXTERNAL int FANN_API fann_set_input_scaling_params(Fann * ann, const Fann::TrainData * pData, float new_input_min, float new_input_max) -{ - //uint cur_neuron, cur_sample; - // Check that we have good training data. - // No need for if( !params || !ann ) - if(pData->num_input != ann->NumInput || pData->num_output != ann->NumOutput) { - fann_error(&ann->Err, SLERR_FANN_TRAIN_DATA_MISMATCH); - return -1; - } - if(ann->scale_mean_in == NULL) - ann->AllocateScale(); - if(ann->scale_mean_in == NULL) - return -1; - const uint c = ann->NumInput; - if(!pData->num_data) { - //SCALE_RESET(scale_mean, in, 0.0) - //SCALE_RESET(scale_deviation, in, 1.0) - //SCALE_RESET(scale_new_min, in, -1.0) - //SCALE_RESET(scale_factor, in, 1.0) - ann->ScaleReset(c, ann->scale_mean_in, 0.0f); - ann->ScaleReset(c, ann->scale_deviation_in, 1.0f); - ann->ScaleReset(c, ann->scale_new_min_in, -1.0f); - ann->ScaleReset(c, ann->scale_factor_in, 1.0f); - } - else { - //SCALE_SET_PARAM(in); - ann->ScaleSetParam(c, pData->num_input, pData->input, new_input_min, new_input_max, - ann->scale_mean_in, ann->scale_deviation_in, ann->scale_new_min_in, ann->scale_factor_in); - } - return 0; -}*/ - -int Fann::SetInputScalingParams(const Fann::TrainData * pData, float newInputMin, float newInputMax) -{ - int ok = 1; - //uint cur_neuron, cur_sample; - // Check that we have good training data. 
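// (editor's note) In the empty-data branch below, Allocate() alone suffices:
// its defaults (mean 0, deviation 1, factor 1, new_min -1) make ScaleVector()
// the identity mapping, since ((x - 0)/1 - (-1)) * 1 + (-1) == x.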
- // No need for if( !params || !ann ) - THROW_S(pData->GetInputCount() == NumInput && pData->GetOutputCount() == NumOutput, SLERR_FANN_TRAIN_DATA_MISMATCH); - if(!pData->GetCount()) { - THROW(ScaleIn.Allocate(NumInput)); - } - else { - if(!ScaleIn.IsPresent()) - THROW(ScaleIn.Allocate(NumInput)); - //SCALE_SET_PARAM(in); - //ScaleSetParam(c, pData->num_input, pData->input, newInputMin, newInputMax, scale_mean_in, scale_deviation_in, scale_new_min_in, scale_factor_in); - //ScaleSetParam2(NumInput, pData->GetInputCount(), pData->input, newInputMin, newInputMax, ScaleIn); - //ScaleSetParam3(NumInput, pData->InpL, newInputMin, newInputMax, ScaleIn); - ScaleIn.Set(NumInput, pData->InpL, newInputMin, newInputMax); - } - CATCHZOK - return ok; -} - - -/*FANN_EXTERNAL int FANN_API fann_set_output_scaling_params(Fann * ann, const Fann::TrainData * data, float new_output_min, float new_output_max) -{ - //uint cur_neuron, cur_sample; - // Check that we have good training data. - // No need for if( !params || !ann ) - if(data->num_input != ann->NumInput || data->num_output != ann->NumOutput) { - fann_error(&ann->Err, SLERR_FANN_TRAIN_DATA_MISMATCH); - return -1; - } - if(ann->scale_mean_out == NULL) - ann->AllocateScale(); - if(ann->scale_mean_out == NULL) - return -1; - const uint c = ann->NumOutput; - if(!data->num_data) { - //SCALE_RESET(scale_mean, out, 0.0) - //SCALE_RESET(scale_deviation, out, 1.0) - //SCALE_RESET(scale_new_min, out, -1.0) - //SCALE_RESET(scale_factor, out, 1.0) - ann->ScaleReset(c, ann->scale_mean_out, 0.0f); - ann->ScaleReset(c, ann->scale_deviation_out, 1.0f); - ann->ScaleReset(c, ann->scale_new_min_out, -1.0f); - ann->ScaleReset(c, ann->scale_factor_out, 1.0f); - } - else { - //SCALE_SET_PARAM(out); - ann->ScaleSetParam(c, data->num_output, data->output, new_output_min, new_output_max, - ann->scale_mean_out, ann->scale_deviation_out, ann->scale_new_min_out, ann->scale_factor_out); - } - return 0; -}*/ - -int Fann::SetOutputScalingParams(const Fann::TrainData * pData, float newOutputMin, float newOutputMax) -{ - int ok = 1; - //uint cur_neuron, cur_sample; - // Check that we have good training data. - // No need for if( !params || !ann ) - THROW_S(pData->GetInputCount() == NumInput && pData->GetOutputCount() == NumOutput, SLERR_FANN_TRAIN_DATA_MISMATCH); - if(!pData->GetCount()) { - THROW(ScaleOut.Allocate(NumOutput)); - } - else { - if(!ScaleOut.IsPresent()) - THROW(ScaleOut.Allocate(NumOutput)); - //SCALE_SET_PARAM(out); - //ScaleSetParam(c, pData->num_output, pData->output, newOutputMin, newOutputMax, scale_mean_out, scale_deviation_out, scale_new_min_out, scale_factor_out); - //ScaleSetParam2(NumOutput, pData->GetOutputCount(), pData->output, newOutputMin, newOutputMax, ScaleOut); - //ScaleSetParam3(NumOutput, pData->OutL, newOutputMin, newOutputMax, ScaleOut); - ScaleOut.Set(NumOutput, pData->OutL, newOutputMin, newOutputMax); - } - CATCHZOK - return ok; -} -// -// Calculate scaling parameters for future use based on training data. -// -/*FANN_EXTERNAL int FANN_API fann_set_scaling_params(Fann * ann, - const Fann::TrainData * data, float new_input_min, float new_input_max, float new_output_min, float new_output_max) -{ - return (ann->SetInputScalingParams(data, new_input_min, new_input_max) == 0) ? 
ann->SetOutputScalingParams(data, new_output_min, new_output_max) : -1; -}*/ - -int Fann::SetScalingParams(const Fann::TrainData * pData, float newInputMin, float newInputMax, float newOutputMin, float newOutputMax) -{ - return (SetInputScalingParams(pData, newInputMin, newInputMax) == 0) ? SetOutputScalingParams(pData, newOutputMin, newOutputMax) : -1; -} -// -// Clears scaling parameters. -// -/*FANN_EXTERNAL int FANN_API fann_clear_scaling_params(Fann * ann) -{ - //uint cur_neuron; - if(ann->scale_mean_out == NULL) - ann->AllocateScale(); - if(ann->scale_mean_out == NULL) - return -1; - //SCALE_RESET(scale_mean, in, 0.0) - //SCALE_RESET(scale_deviation, in, 1.0) - //SCALE_RESET(scale_new_min, in, -1.0) - //SCALE_RESET(scale_factor, in, 1.0) - ann->ScaleReset(ann->NumInput, ann->scale_mean_in, 0.0f); - ann->ScaleReset(ann->NumInput, ann->scale_deviation_in, 1.0f); - ann->ScaleReset(ann->NumInput, ann->scale_new_min_in, -1.0f); - ann->ScaleReset(ann->NumInput, ann->scale_factor_in, 1.0f); - //SCALE_RESET(scale_mean, out, 0.0) - //SCALE_RESET(scale_deviation, out, 1.0) - //SCALE_RESET(scale_new_min, out, -1.0) - //SCALE_RESET(scale_factor, out, 1.0) - ann->ScaleReset(ann->NumOutput, ann->scale_mean_out, 0.0f); - ann->ScaleReset(ann->NumOutput, ann->scale_deviation_out, 1.0f); - ann->ScaleReset(ann->NumOutput, ann->scale_new_min_out, -1.0f); - ann->ScaleReset(ann->NumOutput, ann->scale_factor_out, 1.0f); - return 0; -}*/ - -int Fann::ClearScalingParams() -{ - //uint cur_neuron; - int ok = 1; - THROW(ScaleIn.Allocate(NumInput)); - THROW(ScaleOut.Allocate(NumOutput)); - /* - if(scale_mean_out == NULL) - AllocateScale(); - if(scale_mean_out == NULL) - return -1; - //SCALE_RESET(scale_mean, in, 0.0) - //SCALE_RESET(scale_deviation, in, 1.0) - //SCALE_RESET(scale_new_min, in, -1.0) - //SCALE_RESET(scale_factor, in, 1.0) - ScaleReset(NumInput, scale_mean_in, 0.0f); - ScaleReset(NumInput, scale_deviation_in, 1.0f); - ScaleReset(NumInput, scale_new_min_in, -1.0f); - ScaleReset(NumInput, scale_factor_in, 1.0f); - //SCALE_RESET(scale_mean, out, 0.0) - //SCALE_RESET(scale_deviation, out, 1.0) - //SCALE_RESET(scale_new_min, out, -1.0) - //SCALE_RESET(scale_factor, out, 1.0) - ScaleReset(NumOutput, scale_mean_out, 0.0f); - ScaleReset(NumOutput, scale_deviation_out, 1.0f); - ScaleReset(NumOutput, scale_new_min_out, -1.0f); - ScaleReset(NumOutput, scale_factor_out, 1.0f); - */ - CATCHZOK - return ok; -} - -//int fann_check_input_output_sizes(Fann * ann, Fann::TrainData * pData) -int Fann::CheckInputOutputSizes(const Fann::TrainData * pData) -{ - int ok = 1; - THROW_S(NumInput == pData->GetInputCount(), SLERR_FANN_INPUT_NO_MATCH); //fann_error(&Err, SLERR_FANN_INPUT_NO_MATCH, NumInput, pData->num_input); - THROW_S(NumOutput == pData->GetOutputCount(), SLERR_FANN_OUTPUT_NO_MATCH); // fann_error(&Err, SLERR_FANN_OUTPUT_NO_MATCH, NumOutput, pData->num_output); - CATCHZOK - return ok; -} - -float Fann::ExamineTrain(/*Fann::TrainAlg tal, Fann::ActivationFunc hact, Fann::ActivationFunc oact*/ - const ExamineTrainParam & rParam, const Fann::TrainData * pTrainData, const Fann::TrainData * pTestData) -{ - SetTrainingAlgorithm(rParam.TrAlg); - SetActivationFunctionHidden(rParam.HiddActF); - SetActivationFunctionOutput(rParam.OutpActF); - //fann_set_activation_function_output(ann, oact); - //fann_set_callback(ann, LogOut ); - TrainOnData(pTrainData, NZOR(rParam.MaxEpoch, 2000), 0, 0.0f); - //fann_train_on_data(ann, TrainData, 2000, 250, 0.0); - float train_mse = GetMSE(); //fann_get_MSE(ann); - float test_mse = 
-1.0f; - if(pTestData /*&& ft.overtraining*/) { - //fann_reset_MSE(ann); - ResetMSE(); - //fann_test_data(ann,ft.TestData); - TestData(pTestData); - test_mse = GetMSE(); // fann_get_MSE(ann); - return (train_mse + test_mse) / 2.0f; - } - else - return train_mse; -} - -//static -int Fann::DetectOptimal(Fann::DetectOptimalParam & rParam) -{ - int ok = 1; - float * p_preserve_weights = 0; - THROW(rParam.P_TrainData); - { - Fann test_ann(rParam.NetworkType, rParam.ConnectionRate, rParam.Layers); - THROW(test_ann.IsValid()); - const size_t weights_buffer_size = test_ann.GetWeights(0, 0); - THROW(p_preserve_weights = (float *)SAlloc::M(weights_buffer_size)); - THROW(test_ann.GetWeights(p_preserve_weights, weights_buffer_size)); - if(rParam.Flags & rParam.fDetectActivationFunc) { - uint best_hi = 0; - uint best_oi = 0; - float best_mse = MAXFLOAT; - - LongArray activation_func_list; - activation_func_list.add(Fann::FANN_LINEAR); - // (Can NOT be used during training) activation_func_list.add(Fann::FANN_THRESHOLD); - // (Can NOT be used during training) activation_func_list.add(Fann::FANN_THRESHOLD_SYMMETRIC); - activation_func_list.add(Fann::FANN_SIGMOID); - activation_func_list.add(Fann::FANN_SIGMOID_STEPWISE); - activation_func_list.add(Fann::FANN_SIGMOID_SYMMETRIC); - activation_func_list.add(Fann::FANN_SIGMOID_SYMMETRIC_STEPWISE); - activation_func_list.add(Fann::FANN_GAUSSIAN); - activation_func_list.add(Fann::FANN_GAUSSIAN_SYMMETRIC); - activation_func_list.add(Fann::FANN_GAUSSIAN_STEPWISE); - activation_func_list.add(Fann::FANN_ELLIOT); - activation_func_list.add(Fann::FANN_ELLIOT_SYMMETRIC); - activation_func_list.add(Fann::FANN_LINEAR_PIECE); - activation_func_list.add(Fann::FANN_LINEAR_PIECE_SYMMETRIC); - activation_func_list.add(Fann::FANN_SIN_SYMMETRIC); - activation_func_list.add(Fann::FANN_COS_SYMMETRIC); - activation_func_list.add(Fann::FANN_SIN); - activation_func_list.add(Fann::FANN_COS); - - for(uint hi = 0; hi < activation_func_list.getCount(); hi++) { - for(uint oi = 0; oi < activation_func_list.getCount(); oi++) { - test_ann.SetWeights(p_preserve_weights); - ExamineTrainParam etp; - etp.HiddActF = (Fann::ActivationFunc)activation_func_list.at(hi); - etp.OutpActF = (Fann::ActivationFunc)activation_func_list.at(oi); - const float mse = test_ann.ExamineTrain(etp, rParam.P_TrainData, rParam.P_TestData); - //ysa, (fann_train_enum)Method->value(),(fann_activationfunc_enum) Act[i],(fann_activationfunc_enum) Act[j],TrainData); - if(mse < best_mse) { - best_mse = mse; - best_hi = hi; - best_oi = oi; - } - } - } - rParam.BestHiddActF = activation_func_list.get(best_hi); - rParam.BestOutpActF = activation_func_list.get(best_oi); - rParam.ResultFlags |= rParam.rfHiddActFuncDetected; - rParam.ResultFlags |= rParam.rfOutpActFuncDetected; - } - if(rParam.Flags & rParam.fDetectTrainAlg) { - uint best_ai = 0; - float best_mse = MAXFLOAT; - - LongArray alg_list; - alg_list.add(FANN_TRAIN_INCREMENTAL); - alg_list.add(FANN_TRAIN_BATCH); - alg_list.add(FANN_TRAIN_RPROP); - alg_list.add(FANN_TRAIN_QUICKPROP); - alg_list.add(FANN_TRAIN_SARPROP); - for(uint ai = 0; ai < alg_list.getCount(); ai++) { - test_ann.SetWeights(p_preserve_weights); - ExamineTrainParam etp; - etp.HiddActF = (Fann::ActivationFunc)((rParam.BestHiddActF >= 0) ? rParam.BestHiddActF : rParam.HiddActF); - etp.OutpActF = (Fann::ActivationFunc)((rParam.BestOutpActF >= 0) ? 
rParam.BestOutpActF : rParam.OutpActF); - etp.TrAlg = (Fann::TrainAlg)alg_list.get(ai); - const float mse = test_ann.ExamineTrain(etp, rParam.P_TrainData, rParam.P_TestData); - //ysa, (fann_train_enum)Method->value(),(fann_activationfunc_enum) Act[i],(fann_activationfunc_enum) Act[j],TrainData); - if(mse < best_mse) { - best_mse = mse; - best_ai = ai; - } - } - rParam.BestTrainAlg = alg_list.get(best_ai); - rParam.ResultFlags |= rParam.rfTrainAlgDetected; - } - } - CATCHZOK - ZFREE(p_preserve_weights); - return ok; - -/* - if(TrainData==NULL){ - fl_alert("Firstly Load Train Data !"); - return; - } - if(working) return; - int best_ha,best_oa; - Out->clear(); - ActivateStop(); - if(Layer->value()==5) - ysa = fann_create_sparse(ConnectionRate->value(),5,(int)Input->value(),(int)Hid1->value(),(int)Hid2->value(),(int)Hid3->value(),(int)Output->value()); - if(Layer->value()==4) - ysa = fann_create_sparse(ConnectionRate->value(),4,(int)Input->value(),(int)Hid1->value(),(int)Hid2->value(),(int)Output->value()); - else - ysa = fann_create_sparse(ConnectionRate->value(),3,(int)Input->value(),(int)Hid1->value(),(int)Output->value()); - fann_type *w=GetWeigths(ysa); - - int best_ta; - fann_type min=1,mse; - char Buf[512]; - for(int i=0; i<13 && !stop;i++) { - for(int j=0; j<13&& !stop;j++) { - SetWeights(ysa,w); - sprintf(Buf,"@C4Hid Activation Func. : %s --- Out Activation Func. : %s ",FANN_ACTIVATIONFUNC_NAMES[Act[i]],FANN_ACTIVATIONFUNC_NAMES[Act[j]]); - Out->add(Buf); - mse=ExamineTrain(ysa,(fann_train_enum)Method->value(),(fann_activationfunc_enum) Act[i],(fann_activationfunc_enum) Act[j],TrainData); - if(mse -//#include "parallel_fann.h" -//#include "config.h" -//#include "fann.h" - -FANN_EXTERNAL float FANN_API fann_train_epoch_batch_parallel(Fann * ann, Fann::TrainData * data, const uint threadnumb) -{ - /*vector ann_vect(threadnumb);*/ - Fann** ann_vect = (Fann**)SAlloc::M(threadnumb * sizeof(Fann*)); - int i = 0, j = 0; - ann->ResetMSE(); - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i = 0; i<(int)threadnumb; i++) { - ann_vect[i] = fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j = omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - //parallel update of the weights - { - const uint num_data = data->num_data; - const uint first_weight = 0; - const uint past_end = ann->TotalConnections; - float * weights = ann->P_Weights; - const float epsilon = ann->LearningRate / num_data; - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel - { - #pragma omp for schedule(static) - for(i = first_weight; i < (int)past_end; i++) { - float temp_slopes = 0.0; - uint k; - float * train_slopes; - for(k = 0; kP_TrainSlopes; - temp_slopes += train_slopes[i]; - train_slopes[i] = 0.0; - } - weights[i] += temp_slopes*epsilon; - } - } - } - //merge of MSEs - for(i = 0; i<(int)threadnumb; ++i) { - ann->MSE_value += ann_vect[i]->MSE_value; - ann->num_MSE += ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - SAlloc::F(ann_vect); - return ann->GetMSE(); -} - -FANN_EXTERNAL float FANN_API fann_train_epoch_irpropm_parallel(Fann * ann, Fann::TrainData * data, const uint threadnumb) -{ - Fann** 
ann_vect = (Fann**)SAlloc::M(threadnumb * sizeof(Fann*)); - int i = 0, j = 0; - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - //#define THREADNUM 1 - ann->ResetMSE(); - /*vector ann_vect(threadnumb);*/ - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i = 0; i<(int)threadnumb; i++) { - ann_vect[i] = fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j = omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float * weights = ann->P_Weights; - float * prev_steps = ann->P_PrevSteps; - float * prev_train_slopes = ann->P_PrevTrainSlopes; - float next_step; - const float increase_factor = ann->RpropIncreaseFactor; //1.2; - const float decrease_factor = ann->RpropDecreaseFactor; //0.5; - const float delta_min = ann->RpropDeltaMin; //0.0; - const float delta_max = ann->RpropDeltaMax; //50.0; - const uint first_weight = 0; - const uint past_end = ann->TotalConnections; - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(next_step) - { - #pragma omp for schedule(static) - for(i = first_weight; i < (int)past_end; i++) { - float prev_slope, same_sign; - const float prev_step = MAX(prev_steps[i], (float)0.0001); // prev_step may not be zero because - // then the training will stop - float temp_slopes = 0.0; - uint k; - float * train_slopes; - for(k = 0; kP_TrainSlopes; - temp_slopes += train_slopes[i]; - train_slopes[i] = 0.0; - } - prev_slope = prev_train_slopes[i]; - same_sign = prev_slope * temp_slopes; - if(same_sign >= 0.0) - next_step = MIN(prev_step * increase_factor, delta_max); - else { - next_step = MAX(prev_step * decrease_factor, delta_min); - temp_slopes = 0; - } - if(temp_slopes < 0) { - weights[i] -= next_step; - SETMAX(weights[i], -1500); - } - else { - weights[i] += next_step; - SETMIN(weights[i], 1500); - } - // update global data arrays - prev_steps[i] = next_step; - prev_train_slopes[i] = temp_slopes; - } - } - } - //merge of MSEs - for(i = 0; i<(int)threadnumb; ++i) { - ann->MSE_value += ann_vect[i]->MSE_value; - ann->num_MSE += ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - SAlloc::F(ann_vect); - return ann->GetMSE(); -} - -FANN_EXTERNAL float FANN_API fann_train_epoch_quickprop_parallel(Fann * ann, Fann::TrainData * data, const uint threadnumb) -{ - Fann** ann_vect = (Fann**)SAlloc::M(threadnumb * sizeof(Fann*)); - int i = 0, j = 0; - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - //#define THREADNUM 1 - ann->ResetMSE(); - /*vector ann_vect(threadnumb);*/ - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i = 0; i<(int)threadnumb; i++) { - ann_vect[i] = fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j = omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float * weights = ann->P_Weights; - float * prev_steps = 
ann->P_PrevSteps; - float * prev_train_slopes = ann->P_PrevTrainSlopes; - const uint first_weight = 0; - const uint past_end = ann->TotalConnections; - - float w = 0.0, next_step; - - const float epsilon = ann->LearningRate / data->num_data; - const float decay = ann->QuickpropDecay; // -0.0001 - const float mu = ann->QuickpropMu; // 1.75 - const float shrink_factor = (float)(mu / (1.0 + mu)); - - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(w, next_step) - { - #pragma omp for schedule(static) - for(i = first_weight; i < (int)past_end; i++) { - float temp_slopes = 0.0; - uint k; - float * train_slopes; - float prev_step, prev_slope; - w = weights[i]; - for(k = 0; kP_TrainSlopes; - temp_slopes += train_slopes[i]; - train_slopes[i] = 0.0; - } - temp_slopes += decay * w; - prev_step = prev_steps[i]; - prev_slope = prev_train_slopes[i]; - next_step = 0.0; - /* The step must always be in direction opposite to the slope. */ - if(prev_step > 0.001) { - // If last step was positive... - if(temp_slopes > 0.0) // Add in linear term if current slope is still positive - next_step += epsilon * temp_slopes; - // If current slope is close to or larger than prev slope... - if(temp_slopes > (shrink_factor * prev_slope)) - next_step += mu * prev_step; /* Take maximum size negative step. */ - else - next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); // Else, use quadratic estimate - } - else if(prev_step < -0.001) { - /* If last step was negative... */ - if(temp_slopes < 0.0) // Add in linear term if current slope is still negative. - next_step += epsilon * temp_slopes; - /* If current slope is close to or more neg than prev slope... */ - if(temp_slopes < (shrink_factor * prev_slope)) - next_step += mu * prev_step; // Take maximum size negative step - else - next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); // Else, use quadratic estimate - } - else // Last step was zero, so use only linear term - next_step += epsilon * temp_slopes; - /* update global data arrays */ - prev_steps[i] = next_step; - prev_train_slopes[i] = temp_slopes; - w += next_step; - if(w > 1500) - weights[i] = 1500; - else if(w < -1500) - weights[i] = -1500; - else - weights[i] = w; - } - } - } - //merge of MSEs - for(i = 0; i<(int)threadnumb; ++i) { - ann->MSE_value += ann_vect[i]->MSE_value; - ann->num_MSE += ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - SAlloc::F(ann_vect); - return ann->GetMSE(); -} - -FANN_EXTERNAL float FANN_API fann_train_epoch_sarprop_parallel(Fann * ann, Fann::TrainData * data, const uint threadnumb) -{ - Fann** ann_vect = (Fann**)SAlloc::M(threadnumb * sizeof(Fann*)); - int i = 0, j = 0; - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - //#define THREADNUM 1 - ann->ResetMSE(); - /*vector ann_vect(threadnumb);*/ - - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i = 0; i<(int)threadnumb; i++) { - ann_vect[i] = fann_copy(ann); - } - //parallel computing of the updates -#pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j = omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float * weights = ann->P_Weights; - float * prev_steps = ann->P_PrevSteps; - float * prev_train_slopes = 
ann->P_PrevTrainSlopes; - const uint first_weight = 0; - const uint past_end = ann->TotalConnections; - const uint epoch = ann->SarpropEpoch; - - float next_step; - - /* These should be set from variables */ - const float increase_factor = ann->RpropIncreaseFactor; /*1.2; */ - const float decrease_factor = ann->RpropDecreaseFactor; /*0.5; */ - /* @todo why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */ - const float delta_min = 0.000001f; - const float delta_max = ann->RpropDeltaMax; /*50.0; */ - const float weight_decay_shift = ann->SarpropWeightDecayShift; /* ld 0.01 = -6.644 */ - const float step_error_threshold_factor = ann->SarpropStepErrorThresholdFactor; /* 0.1 */ - const float step_error_shift = ann->SarpropStepErrorShift; /* ld 3 = 1.585 */ - const float T = ann->SarpropTemperature; - float MSE, RMSE; - //merge of MSEs - for(i = 0; i<(int)threadnumb; ++i) { - ann->MSE_value += ann_vect[i]->MSE_value; - ann->num_MSE += ann_vect[i]->num_MSE; - } - MSE = ann->GetMSE(); - RMSE = sqrtf(MSE); - // for all weights; TODO: are biases included? - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(next_step) - { - #pragma omp for schedule(static) - for(i = first_weight; i < (int)past_end; i++) { - /* @todo confirm whether 1x10^-6 == delta_min is really better */ - const float prev_step = MAX(prev_steps[i], (float)0.000001); // prev_step may not be zero because then the training will stop - // calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper) - float prev_slope, same_sign; - float temp_slopes = 0.0; - uint k; - float * train_slopes; - for(k = 0; k < threadnumb; ++k) { - train_slopes = ann_vect[k]->P_TrainSlopes; - temp_slopes += train_slopes[i]; - train_slopes[i] = 0.0; - } - temp_slopes = -temp_slopes - weights[i] * (float)fann_exp2(-T * epoch + weight_decay_shift); - next_step = 0.0; - // @todo is prev_train_slopes[i] 0.0 in the beginning? - prev_slope = prev_train_slopes[i]; - same_sign = prev_slope * temp_slopes; - if(same_sign > 0.0) { - next_step = MIN(prev_step * increase_factor, delta_max); - // @todo are the signs inverted? 
see differences between SARPROP paper and iRprop - if(temp_slopes < 0.0) - weights[i] += next_step; - else - weights[i] -= next_step; - } - else if(same_sign < 0.0) { -#ifndef RAND_MAX - #define RAND_MAX 0x7fffffff -#endif - if(prev_step < step_error_threshold_factor * MSE) - next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (float)fann_exp2(-T * epoch + step_error_shift); - else - next_step = MAX(prev_step * decrease_factor, delta_min); - temp_slopes = 0.0; - } - else { - if(temp_slopes < 0.0) - weights[i] += prev_step; - else - weights[i] -= prev_step; - } - /* update global data arrays */ - prev_steps[i] = next_step; - prev_train_slopes[i] = temp_slopes; - } - } - } - ++(ann->SarpropEpoch); - //already computed before - /*/ /merge of MSEs - for(i = 0; iMSE_value += ann_vect[i]->MSE_value; - ann->num_MSE += ann_vect[i]->num_MSE; - } - */ - //destroy the copies of the ann - for(i = 0; i<(int)threadnumb; i++) { - fann_destroy(ann_vect[i]); - } - SAlloc::F(ann_vect); - return ann->GetMSE(); -} - -FANN_EXTERNAL float FANN_API fann_train_epoch_incremental_mod(Fann * ann, Fann::TrainData * data) -{ - ann->ResetMSE(); - for(uint i = 0; i != data->num_data; i++) - ann->Train(data->input[i], data->output[i]); - return ann->GetMSE(); -} - -#endif // DISABLE_PARALLEL_FANN -// -// parallel_FANN.cpp -// Author: Alessandro Pietro Bardelli -// -#ifndef DISABLE_PARALLEL_FANN -//#include "parallel_fann.hpp" -#include -using namespace std; -namespace parallel_fann { -// @todo rewrite all these functions in c++ using fann_cpp interface - -float train_epoch_batch_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb) -{ - ann->ResetMSE(); - vector ann_vect(threadnumb); - int i=0,j=0; - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i=0; i<(int)threadnumb; i++) { - ann_vect[i]=fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j=omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - //parallel update of the weights - { - const uint num_data=data->num_data; - const uint first_weight=0; - const uint past_end=ann->TotalConnections; - float *weights = ann->P_Weights; - const float epsilon = ann->LearningRate / num_data; - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel - { - #pragma omp for schedule(static) - for(i=first_weight; i < (int)past_end; i++) { - float temp_slopes=0.0; - uint k; - float *train_slopes; - for(k=0; k < threadnumb; ++k) { - train_slopes=ann_vect[k]->P_TrainSlopes; - temp_slopes+= train_slopes[i]; - train_slopes[i]=0.0; - } - weights[i] += temp_slopes*epsilon; - } - } - } - //merge of MSEs - for(i=0;i<(int)threadnumb;++i) { - ann->MSE_value+= ann_vect[i]->MSE_value; - ann->num_MSE+=ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - return ann->GetMSE(); -} - -float train_epoch_irpropm_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb) -{ - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - //#define THREADNUM 1 - ann->ResetMSE(); - vector ann_vect(threadnumb); - int i=0,j=0; - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - 
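// (editor's note) Common pattern in all of these *_parallel epoch functions:
// inside the parallel region each thread first takes its own fann_copy() of the
// network, the samples are then split statically across the copies, and each
// copy accumulates its slopes and MSE privately, so no locking is needed until
// the copies are merged (MSE summed, slopes folded into the weight update) and
// destroyed after the region.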
#pragma omp for schedule(static) - for(i=0; i<(int)threadnumb; i++) { - ann_vect[i]=fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j=omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float *weights = ann->P_Weights; - float *prev_steps = ann->P_PrevSteps; - float *prev_train_slopes = ann->P_PrevTrainSlopes; - float next_step; - const float increase_factor = ann->RpropIncreaseFactor; //1.2; - const float decrease_factor = ann->RpropDecreaseFactor; //0.5; - const float delta_min = ann->RpropDeltaMin; //0.0; - const float delta_max = ann->RpropDeltaMax; //50.0; - const uint first_weight=0; - const uint past_end=ann->TotalConnections; - - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(next_step) - { - #pragma omp for schedule(static) - for(i=first_weight; i < (int)past_end; i++) { - const float prev_step = MAX(prev_steps[i], (float) 0.0001); // prev_step may not be zero because then the training will stop - float temp_slopes=0.0; - uint k; - float *train_slopes; - for(k=0;kP_TrainSlopes; - temp_slopes+= train_slopes[i]; - train_slopes[i]=0.0; - } - const float prev_slope = prev_train_slopes[i]; - const float same_sign = prev_slope * temp_slopes; - if(same_sign >= 0.0) - next_step = MIN(prev_step * increase_factor, delta_max); - else { - next_step = MAX(prev_step * decrease_factor, delta_min); - temp_slopes = 0; - } - if(temp_slopes < 0) { - weights[i] -= next_step; - SETMAX(weights[i], -1500); - } - else { - weights[i] += next_step; - SETMIN(weights[i], 1500); - } - // update global data arrays - prev_steps[i] = next_step; - prev_train_slopes[i] = temp_slopes; - } - } - } - //merge of MSEs - for(i=0;i<(int)threadnumb;++i) { - ann->MSE_value+= ann_vect[i]->MSE_value; - ann->num_MSE+=ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - return ann->GetMSE(); -} - -float train_epoch_quickprop_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb) -{ - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - //#define THREADNUM 1 - ann->ResetMSE(); - vector ann_vect(threadnumb); - int i=0,j=0; - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i=0; i<(int)threadnumb; i++) { - ann_vect[i]=fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j=omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float *weights = ann->P_Weights; - float *prev_steps = ann->P_PrevSteps; - float *prev_train_slopes = ann->P_PrevTrainSlopes; - const uint first_weight=0; - const uint past_end=ann->TotalConnections; - float w=0.0, next_step; - const float epsilon = ann->LearningRate / data->num_data; - const float decay = ann->QuickpropDecay; // -0.0001 - const float mu = ann->QuickpropMu; // 1.75 - const float shrink_factor = (float) (mu / (1.0 + mu)); - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(w, next_step) - { - #pragma omp for schedule(static) - 
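// (editor's note) Quickprop step rule below: a linear term (epsilon * slope)
// plus a quadratic estimate prev_step * slope / (prev_slope - slope), replaced
// by the maximum step mu * prev_step when the current slope is close to or
// beyond the previous one; the resulting weight is clamped to [-1500, 1500].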
for(i=first_weight; i < (int)past_end; i++) { - w = weights[i]; - float temp_slopes=0.0; - uint k; - float *train_slopes; - for(k=0;kP_TrainSlopes; - temp_slopes+= train_slopes[i]; - train_slopes[i]=0.0; - } - temp_slopes+= decay * w; - const float prev_step = prev_steps[i]; - const float prev_slope = prev_train_slopes[i]; - next_step = 0.0; - /* The step must always be in direction opposite to the slope. */ - if(prev_step > 0.001) { - /* If last step was positive... */ - if(temp_slopes > 0.0) /* Add in linear term if current slope is still positive. */ - next_step += epsilon * temp_slopes; - /*If current slope is close to or larger than prev slope... */ - if(temp_slopes > (shrink_factor * prev_slope)) - next_step += mu * prev_step; /* Take maximum size negative step. */ - else - next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); /* Else, use quadratic estimate. */ - } - else if(prev_step < -0.001) { - /* If last step was negative... */ - if(temp_slopes < 0.0) /* Add in linear term if current slope is still negative. */ - next_step += epsilon * temp_slopes; - /* If current slope is close to or more neg than prev slope... */ - if(temp_slopes < (shrink_factor * prev_slope)) - next_step += mu * prev_step; /* Take maximum size negative step. */ - else - next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); /* Else, use quadratic estimate. */ - } - else /* Last step was zero, so use only linear term. */ - next_step += epsilon * temp_slopes; - /* update global data arrays */ - prev_steps[i] = next_step; - prev_train_slopes[i] = temp_slopes; - w += next_step; - if(w > 1500) - weights[i] = 1500; - else if(w < -1500) - weights[i] = -1500; - else - weights[i] = w; - } - } - } - //merge of MSEs - for(i=0;i<(int)threadnumb;++i) { - ann->MSE_value+= ann_vect[i]->MSE_value; - ann->num_MSE+=ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - return ann->GetMSE(); -} - -float train_epoch_sarprop_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb) -{ - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - //#define THREADNUM 1 - ann->ResetMSE(); - vector ann_vect(threadnumb); - int i=0,j=0; - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - #pragma omp for schedule(static) - for(i=0; i<(int)threadnumb; i++) { - ann_vect[i]=fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j=omp_get_thread_num(); - ann_vect[j]->Run(data->input[i]); - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float *weights = ann->P_Weights; - float *prev_steps = ann->P_PrevSteps; - float *prev_train_slopes = ann->P_PrevTrainSlopes; - const uint first_weight=0; - const uint past_end=ann->TotalConnections; - const uint epoch=ann->SarpropEpoch; - float next_step; - /* These should be set from variables */ - const float increase_factor = ann->RpropIncreaseFactor; /*1.2; */ - const float decrease_factor = ann->RpropDecreaseFactor; /*0.5; */ - /* @todo why is delta_min 0.0 in iRprop? 
SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */ - const float delta_min = 0.000001f; - const float delta_max = ann->RpropDeltaMax; /*50.0; */ - const float weight_decay_shift = ann->SarpropWeightDecayShift; /* ld 0.01 = -6.644 */ - const float step_error_threshold_factor = ann->SarpropStepErrorThresholdFactor; /* 0.1 */ - const float step_error_shift = ann->SarpropStepErrorShift; /* ld 3 = 1.585 */ - const float T = ann->SarpropTemperature; - //merge of MSEs - for(i=0;i<(int)threadnumb;++i) { - ann->MSE_value+= ann_vect[i]->MSE_value; - ann->num_MSE+=ann_vect[i]->num_MSE; - } - const float MSE = ann->GetMSE(); - const float RMSE = sqrtf(MSE); - /* for all weights; TODO: are biases included? */ - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(next_step) - { - #pragma omp for schedule(static) - for(i=first_weight; i < (int)past_end; i++) { - /* @todo confirm whether 1x10^-6 == delta_min is really better */ - const float prev_step = MAX(prev_steps[i], (float) 0.000001); /* prev_step may not be zero because then the training will stop */ - /* calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper)*/ - float temp_slopes=0.0; - uint k; - float *train_slopes; - for(k=0;kP_TrainSlopes; - temp_slopes+= train_slopes[i]; - train_slopes[i]=0.0; - } - temp_slopes= -temp_slopes - weights[i] * (float)fann_exp2(-T * epoch + weight_decay_shift); - next_step=0.0; - /* @todo is prev_train_slopes[i] 0.0 in the beginning? */ - const float prev_slope = prev_train_slopes[i]; - const float same_sign = prev_slope * temp_slopes; - if(same_sign > 0.0) { - next_step = MIN(prev_step * increase_factor, delta_max); - /* @todo are the signs inverted? see differences between SARPROP paper and iRprop */ - if(temp_slopes < 0.0) - weights[i] += next_step; - else - weights[i] -= next_step; - } - else if(same_sign < 0.0) { -#ifndef RAND_MAX - #define RAND_MAX 0x7fffffff -#endif - if(prev_step < step_error_threshold_factor * MSE) - next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (float)fann_exp2(-T * epoch + step_error_shift); - else - next_step = MAX(prev_step * decrease_factor, delta_min); - temp_slopes = 0.0; - } - else { - if(temp_slopes < 0.0) - weights[i] += prev_step; - else - weights[i] -= prev_step; - } - /* update global data arrays */ - prev_steps[i] = next_step; - prev_train_slopes[i] = temp_slopes; - } - } - } - ++(ann->SarpropEpoch); - //already computed before - /*//merge of MSEs - for(i=0;iMSE_value+= ann_vect[i]->MSE_value; - ann->num_MSE+=ann_vect[i]->num_MSE; - }*/ - //destroy the copies of the ann - for(i=0; i<(int)threadnumb; i++) { - fann_destroy(ann_vect[i]); - } - return ann->GetMSE(); -} - -float train_epoch_incremental_mod(Fann *ann, Fann::TrainData *data) -{ - ann->ResetMSE(); - for(uint i = 0; i != data->num_data; i++) { - ann->Train(data->input[i], data->output[i]); - } - return ann->GetMSE(); -} - -//the following versions returns also the outputs via the predicted_outputs parameter - -float train_epoch_batch_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb,vector< vector >& predicted_outputs) -{ - ann->ResetMSE(); - predicted_outputs.resize(data->num_data,vector (data->num_output)); - vector ann_vect(threadnumb); - int i=0,j=0; - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - - #pragma omp for schedule(static) - for(i=0; i<(int)threadnumb; i++) { - ann_vect[i]=fann_copy(ann); - } - //parallel computing of the 
updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j=omp_get_thread_num(); - float* temp_predicted_output=ann_vect[j]->Run(data->input[i]); - for(uint k=0;knum_output;++k) { - predicted_outputs[i][k]=temp_predicted_output[k]; - } - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - //parallel update of the weights - { - const uint num_data=data->num_data; - const uint first_weight=0; - const uint past_end=ann->TotalConnections; - float *weights = ann->P_Weights; - const float epsilon = ann->LearningRate / num_data; - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel - { - #pragma omp for schedule(static) - for(i=first_weight; i < (int)past_end; i++) { - float temp_slopes=0.0; - float * train_slopes; - for(uint k = 0; k < threadnumb; ++k) { - train_slopes = ann_vect[k]->P_TrainSlopes; - temp_slopes+= train_slopes[i]; - train_slopes[i]=0.0; - } - weights[i] += temp_slopes*epsilon; - } - } - } - //merge of MSEs - for(i=0;i<(int)threadnumb;++i) { - ann->MSE_value+= ann_vect[i]->MSE_value; - ann->num_MSE+=ann_vect[i]->num_MSE; - fann_destroy(ann_vect[i]); - } - return ann->GetMSE(); -} - -float train_epoch_irpropm_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb, vector< vector >& predicted_outputs) -{ - if(!ann->P_PrevTrainSlopes) { - ann->ClearTrainArrays(); - } - ann->ResetMSE(); - predicted_outputs.resize(data->num_data,vector (data->num_output)); - vector ann_vect(threadnumb); - int i=0,j=0; - //generate copies of the ann - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(j) - { - - #pragma omp for schedule(static) - for(i=0; i<(int)threadnumb; i++) { - ann_vect[i]=fann_copy(ann); - } - //parallel computing of the updates - #pragma omp for schedule(static) - for(i = 0; i < (int)data->num_data; i++) { - j=omp_get_thread_num(); - float * temp_predicted_output=ann_vect[j]->Run(data->input[i]); - for(uint k=0;knum_output;++k) { - predicted_outputs[i][k]=temp_predicted_output[k]; - } - ann_vect[j]->ComputeMSE(data->output[i]); - ann_vect[j]->BackpropagateMSE(); - ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1); - } - } - { - float *weights = ann->P_Weights; - float *prev_steps = ann->P_PrevSteps; - float *prev_train_slopes = ann->P_PrevTrainSlopes; - float next_step; - const float increase_factor = ann->RpropIncreaseFactor; //1.2; - const float decrease_factor = ann->RpropDecreaseFactor; //0.5; - const float delta_min = ann->RpropDeltaMin; //0.0; - const float delta_max = ann->RpropDeltaMax; //50.0; - const uint first_weight=0; - const uint past_end=ann->TotalConnections; - omp_set_dynamic(0); - omp_set_num_threads(threadnumb); - #pragma omp parallel private(next_step) - { - #pragma omp for schedule(static) - for(i=first_weight; i < (int)past_end; i++) { - const float prev_step = MAX(prev_steps[i], (float) 0.0001); // prev_step may not be zero because then the training will stop - float temp_slopes=0.0; - uint k; - float *train_slopes; - for(k=0;kP_TrainSlopes; - temp_slopes+= train_slopes[i]; - train_slopes[i]=0.0; - } - const float prev_slope = prev_train_slopes[i]; - const float same_sign = prev_slope * temp_slopes; - if(same_sign >= 0.0) - next_step = MIN(prev_step * increase_factor, delta_max); - else { - next_step = MAX(prev_step * decrease_factor, delta_min); - temp_slopes = 0; - } - if(temp_slopes 
-
-float train_epoch_quickprop_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb, vector< vector<float> >& predicted_outputs)
-{
-	if(!ann->P_PrevTrainSlopes) {
-		ann->ClearTrainArrays();
-	}
-	ann->ResetMSE();
-	predicted_outputs.resize(data->num_data, vector<float>(data->num_output));
-	vector<Fann *> ann_vect(threadnumb);
-	int i=0,j=0;
-	//generate copies of the ann
-	omp_set_dynamic(0);
-	omp_set_num_threads(threadnumb);
-	#pragma omp parallel private(j)
-	{
-		#pragma omp for schedule(static)
-		for(i=0; i<(int)threadnumb; i++) {
-			ann_vect[i]=fann_copy(ann);
-		}
-		//parallel computing of the updates
-		#pragma omp for schedule(static)
-		for(i = 0; i < (int)data->num_data; i++) {
-			j=omp_get_thread_num();
-			float* temp_predicted_output=ann_vect[j]->Run(data->input[i]);
-			for(uint k=0;k<data->num_output;++k) {
-				predicted_outputs[i][k]=temp_predicted_output[k];
-			}
-			ann_vect[j]->ComputeMSE(data->output[i]);
-			ann_vect[j]->BackpropagateMSE();
-			ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1);
-		}
-	}
-	{
-		float *weights = ann->P_Weights;
-		float *prev_steps = ann->P_PrevSteps;
-		float *prev_train_slopes = ann->P_PrevTrainSlopes;
-		const uint first_weight=0;
-		const uint past_end=ann->TotalConnections;
-		float w=0.0, next_step;
-		const float epsilon = ann->LearningRate / data->num_data;
-		const float decay = ann->QuickpropDecay; // -0.0001
-		const float mu = ann->QuickpropMu; // 1.75
-		const float shrink_factor = (float) (mu / (1.0 + mu));
-		omp_set_dynamic(0);
-		omp_set_num_threads(threadnumb);
-		#pragma omp parallel private(w, next_step)
-		{
-			#pragma omp for schedule(static)
-			for(i=first_weight; i < (int)past_end; i++) {
-				w = weights[i];
-				float temp_slopes=0.0;
-				uint k;
-				float *train_slopes;
-				for(k=0;k<threadnumb;++k) {
-					train_slopes = ann_vect[k]->P_TrainSlopes;
-					temp_slopes+= train_slopes[i];
-					train_slopes[i]=0.0;
-				}
-				temp_slopes+= decay * w;
-
-				const float prev_step = prev_steps[i];
-				const float prev_slope = prev_train_slopes[i];
-				next_step = 0.0;
-				/* The step must always be in direction opposite to the slope. */
-				if(prev_step > 0.001) {
-					/* If last step was positive... */
-					if(temp_slopes > 0.0) /* Add in linear term if current slope is still positive. */
-						next_step += epsilon * temp_slopes;
-					/*If current slope is close to or larger than prev slope... */
-					if(temp_slopes > (shrink_factor * prev_slope))
-						next_step += mu * prev_step; /* Take maximum size negative step. */
-					else
-						next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); /* Else, use quadratic estimate. */
-				}
-				else if(prev_step < -0.001) {
-					/* If last step was negative... */
-					if(temp_slopes < 0.0) /* Add in linear term if current slope is still negative. */
-						next_step += epsilon * temp_slopes;
-
-					/* If current slope is close to or more neg than prev slope... */
-					if(temp_slopes < (shrink_factor * prev_slope))
-						next_step += mu * prev_step; /* Take maximum size negative step. */
-					else
-						next_step += prev_step * temp_slopes / (prev_slope - temp_slopes); /* Else, use quadratic estimate. */
-				}
-				else /* Last step was zero, so use only linear term. */
-					next_step += epsilon * temp_slopes;
-
-				/* update global data arrays */
-				prev_steps[i] = next_step;
-				prev_train_slopes[i] = temp_slopes;
-
-				w += next_step;
-
-				if(w > 1500)
-					weights[i] = 1500;
-				else if(w < -1500)
-					weights[i] = -1500;
-				else
-					weights[i] = w;
-			}
-		}
-	}
-	//merge of MSEs
-	for(i=0;i<(int)threadnumb;++i) {
-		ann->MSE_value+= ann_vect[i]->MSE_value;
-		ann->num_MSE+=ann_vect[i]->num_MSE;
-		fann_destroy(ann_vect[i]);
-	}
-	return ann->GetMSE();
-}
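// Aside: quickprop models the error curve of each weight as a parabola through
// the two most recent slopes and jumps toward its minimum:
//   step = prev_step * slope / (prev_slope - slope)
// clamped to mu * prev_step when the denominator would make the jump explode,
// plus a plain gradient term while the slope has not changed sign. A condensed
// restatement of the branch logic above; the function name and signature are
// illustrative, not library API:
static float quickprop_step(float slope, float prev_slope, float prev_step, float epsilon, float mu)
{
	const float shrink = mu / (1.0f + mu);
	float step = 0.0f;
	if(prev_step > 0.001f || prev_step < -0.001f) {
		const bool was_positive = (prev_step > 0.0f);
		if(was_positive ? (slope > 0.0f) : (slope < 0.0f))
			step += epsilon * slope; // linear term: slope still points the same way
		if(was_positive ? (slope > shrink * prev_slope) : (slope < shrink * prev_slope))
			step += mu * prev_step; // slope barely changed: take the maximum-size jump
		else
			step += prev_step * slope / (prev_slope - slope); // quadratic estimate
	}
	else
		step = epsilon * slope; // previous step was ~zero: fall back to a gradient step
	return step;
}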
-
-float train_epoch_sarprop_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb, vector< vector<float> >& predicted_outputs)
-{
-	if(!ann->P_PrevTrainSlopes) {
-		ann->ClearTrainArrays();
-	}
-	ann->ResetMSE();
-	predicted_outputs.resize(data->num_data, vector<float>(data->num_output));
-	vector<Fann *> ann_vect(threadnumb);
-	int i=0,j=0;
-
-	//generate copies of the ann
-	omp_set_dynamic(0);
-	omp_set_num_threads(threadnumb);
-	#pragma omp parallel private(j)
-	{
-		#pragma omp for schedule(static)
-		for(i=0; i<(int)threadnumb; i++) {
-			ann_vect[i]=fann_copy(ann);
-		}
-
-		//parallel computing of the updates
-		#pragma omp for schedule(static)
-		for(i = 0; i < (int)data->num_data; i++) {
-			j=omp_get_thread_num();
-			float* temp_predicted_output=ann_vect[j]->Run(data->input[i]);
-			for(uint k=0;k<data->num_output;++k) {
-				predicted_outputs[i][k]=temp_predicted_output[k];
-			}
-			ann_vect[j]->ComputeMSE(data->output[i]);
-			ann_vect[j]->BackpropagateMSE();
-			ann_vect[j]->UpdateSlopesBatch(ann_vect[j]->P_FirstLayer + 1, ann_vect[j]->P_LastLayer - 1);
-		}
-	}
-	{
-		float *weights = ann->P_Weights;
-		float *prev_steps = ann->P_PrevSteps;
-		float *prev_train_slopes = ann->P_PrevTrainSlopes;
-		const uint first_weight=0;
-		const uint past_end=ann->TotalConnections;
-		const uint epoch=ann->SarpropEpoch;
-		float next_step;
-		/* These should be set from variables */
-		const float increase_factor = ann->RpropIncreaseFactor; /*1.2; */
-		const float decrease_factor = ann->RpropDecreaseFactor; /*0.5; */
-		/* @todo why is delta_min 0.0 in iRprop? SARPROP uses 1x10^-6 (Braun and Riedmiller, 1993) */
-		const float delta_min = 0.000001f;
-		const float delta_max = ann->RpropDeltaMax; /*50.0; */
-		const float weight_decay_shift = ann->SarpropWeightDecayShift; /* ld 0.01 = -6.644 */
-		const float step_error_threshold_factor = ann->SarpropStepErrorThresholdFactor; /* 0.1 */
-		const float step_error_shift = ann->SarpropStepErrorShift; /* ld 3 = 1.585 */
-		const float T = ann->SarpropTemperature;
-		//merge of MSEs
-		for(i=0;i<(int)threadnumb;++i) {
-			ann->MSE_value+= ann_vect[i]->MSE_value;
-			ann->num_MSE+=ann_vect[i]->num_MSE;
-		}
-		const float MSE = ann->GetMSE();
-		const float RMSE = (float)sqrt(MSE);
-		/* for all weights; TODO: are biases included? */
-		omp_set_dynamic(0);
-		omp_set_num_threads(threadnumb);
-		#pragma omp parallel private(next_step)
-		{
-			#pragma omp for schedule(static)
-			for(i=first_weight; i < (int)past_end; i++) {
-				/* @todo confirm whether 1x10^-6 == delta_min is really better */
-				const float prev_step = MAX(prev_steps[i], (float) 0.000001); /* prev_step may not be zero because then the training will stop */
-				/* calculate SARPROP slope; TODO: better as new error function? (see SARPROP paper)*/
-				float temp_slopes=0.0;
-				uint k;
-				float *train_slopes;
-				for(k=0;k<threadnumb;++k) {
-					train_slopes = ann_vect[k]->P_TrainSlopes;
-					temp_slopes+= train_slopes[i];
-					train_slopes[i]=0.0;
-				}
-				temp_slopes= -temp_slopes - weights[i] * (float)fann_exp2(-T * epoch + weight_decay_shift);
-				next_step=0.0;
-				/* @todo is prev_train_slopes[i] 0.0 in the beginning? */
-				const float prev_slope = prev_train_slopes[i];
-				const float same_sign = prev_slope * temp_slopes;
-				if(same_sign > 0.0) {
-					next_step = MIN(prev_step * increase_factor, delta_max);
-					/* @todo are the signs inverted? see differences between SARPROP paper and iRprop */
-					if(temp_slopes < 0.0)
-						weights[i] += next_step;
-					else
-						weights[i] -= next_step;
-				}
-				else if(same_sign < 0.0) {
-					#ifndef RAND_MAX
-						#define RAND_MAX 0x7fffffff
-					#endif
-					if(prev_step < step_error_threshold_factor * MSE)
-						next_step = prev_step * decrease_factor + (float)rand() / RAND_MAX * RMSE * (float)fann_exp2(-T * epoch + step_error_shift);
-					else
-						next_step = MAX(prev_step * decrease_factor, delta_min);
-
-					temp_slopes = 0.0;
-				}
-				else {
-					if(temp_slopes < 0.0)
-						weights[i] += prev_step;
-					else
-						weights[i] -= prev_step;
-				}
-
-				/* update global data arrays */
-				prev_steps[i] = next_step;
-				prev_train_slopes[i] = temp_slopes;
-
-			}
-		}
-	}
-	++(ann->SarpropEpoch);
-	//already computed before
-	/*//merge of MSEs
-	for(i=0;i<(int)threadnumb;++i) {
-		ann->MSE_value+= ann_vect[i]->MSE_value;
-		ann->num_MSE+=ann_vect[i]->num_MSE;
-	}*/
-	//destroy the copies of the ann
-	for(i=0; i<(int)threadnumb; i++) {
-		fann_destroy(ann_vect[i]);
-	}
-	return ann->GetMSE();
-}
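// Aside: SARPROP is Rprop plus simulated annealing: a weight-decay term and a
// random step component are both damped by 2^(-T*epoch), so the noise that helps
// escape poor local minima dies out as training progresses. The two annealed
// terms of the loop above, restated in isolation; the helper names are
// illustrative, not library API:
#include <cmath>
#include <cstdlib>

static float sarprop_slope(float raw_slope, float weight, float T, unsigned epoch, float weight_decay_shift)
{
	// annealed weight decay folded into the slope
	return -raw_slope - weight * (float)std::exp2(-T * (float)epoch + weight_decay_shift);
}

static float sarprop_noise_step(float rmse, float T, unsigned epoch, float step_error_shift)
{
	// annealed random component added to the shrunken step after a slope sign flip
	return (float)std::rand() / (float)RAND_MAX * rmse * (float)std::exp2(-T * (float)epoch + step_error_shift);
}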
-
-float train_epoch_incremental_mod(Fann *ann, Fann::TrainData *data, vector< vector<float> >& predicted_outputs)
-{
-	predicted_outputs.resize(data->num_data, vector<float>(data->num_output));
-	ann->ResetMSE();
-	for(uint i = 0; i < data->num_data; ++i) {
-		float * temp_predicted_output=ann->Run(data->input[i]);
-		for(uint k=0;k<data->num_output;++k) {
-			predicted_outputs[i][k]=temp_predicted_output[k];
-		}
-		ann->ComputeMSE(data->output[i]);
-		ann->BackpropagateMSE();
-		ann->UpdateWeights();
-	}
-	return ann->GetMSE();
-}
-
-float test_data_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb)
-{
-	if(!ann->CheckInputOutputSizes(data))
-		return 0;
-	ann->ResetMSE();
-	vector<Fann *> ann_vect(threadnumb);
-	int i=0,j=0;
-
-	//generate copies of the ann
-	omp_set_dynamic(0);
-	omp_set_num_threads(threadnumb);
-	#pragma omp parallel private(j)
-	{
-		#pragma omp for schedule(static)
-		for(i=0; i<(int)threadnumb; i++) {
-			ann_vect[i]=fann_copy(ann);
-		}
-		//parallel computing of the updates
-		#pragma omp for schedule(static)
-		for(i = 0; i < (int)data->num_data; ++i) {
-			j=omp_get_thread_num();
-			ann_vect[j]->Test(data->input[i],data->output[i]);
-		}
-	}
-	//merge of MSEs
-	for(i=0;i<(int)threadnumb;++i) {
-		ann->MSE_value+= ann_vect[i]->MSE_value;
-		ann->num_MSE+=ann_vect[i]->num_MSE;
-		fann_destroy(ann_vect[i]);
-	}
-	return ann->GetMSE();
-}
-
-float test_data_parallel(Fann *ann, Fann::TrainData *data, const uint threadnumb, vector< vector<float> >& predicted_outputs)
-{
-	if(!ann->CheckInputOutputSizes(data))
-		return 0;
-	predicted_outputs.resize(data->num_data, vector<float>(data->num_output));
-	ann->ResetMSE();
-	vector<Fann *> ann_vect(threadnumb);
-	int i=0,j=0;
-	//generate copies of the ann
-	omp_set_dynamic(0);
-	omp_set_num_threads(threadnumb);
-	#pragma omp parallel private(j)
-	{
-		#pragma omp for schedule(static)
-		for(i=0; i<(int)threadnumb; i++) {
-			ann_vect[i]=fann_copy(ann);
-		}
-		//parallel computing of the updates
-		#pragma omp for schedule(static)
-		for(i = 0; i < (int)data->num_data; ++i) {
-			j=omp_get_thread_num();
-			float* temp_predicted_output = ann_vect[j]->Test(data->input[i],data->output[i]);
-			for(uint k=0;k<data->num_output;++k) {
-				predicted_outputs[i][k]=temp_predicted_output[k];
-			}
-
-		}
-	}
-	//merge of MSEs
-	for(i=0;i<(int)threadnumb;++i) {
-		ann->MSE_value+= ann_vect[i]->MSE_value;
-		ann->num_MSE+=ann_vect[i]->num_MSE;
-		fann_destroy(ann_vect[i]);
-	}
-	return ann->GetMSE();
-}
-} // namespace parallel_fann
-
-#endif // DISABLE_PARALLEL_FANN
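// Aside: every routine in this namespace follows the same OpenMP recipe: clone
// the network once per thread, let each thread accumulate slopes and MSE into
// its private clone, then serially fold the per-thread buffers back into the
// master and destroy the clones. The skeleton of that pattern, stripped of the
// FANN specifics (State must be copyable; the functor names are illustrative):
#include <omp.h>
#include <vector>

template <class State, class Worker, class Merger>
void parallel_accumulate(State & rMaster, unsigned threadCount, unsigned jobCount, Worker work, Merger merge)
{
	std::vector<State> clones(threadCount, rMaster); // one private copy per thread
	omp_set_dynamic(0);
	omp_set_num_threads((int)threadCount);
	#pragma omp parallel for schedule(static)
	for(int i = 0; i < (int)jobCount; i++)
		work(clones[omp_get_thread_num()], (unsigned)i); // each thread touches only its clone
	for(unsigned t = 0; t < threadCount; t++)
		merge(rMaster, clones[t]); // serial reduction, mirroring the "merge of MSEs" loops
}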
-
-#if 0 // @construction {
-//
-// FANN_TEST.CPP
-//
-#include
-//#include "fann_test.h"
-#include "gtest/gtest.h"
-#include "doublefann.h"
-#include "fann_cpp.h"
-//#include "fann_test_data.h"
-#include "gtest/gtest.h"
-#include "doublefann.h"
-#include "fann_cpp.h"
-//#include "fann_test_train.h"
-//#include "fann_test.h"
-
-using namespace FANN;
-
-class FannTest : public testing::Test {
-protected:
-	neural_net net;
-	training_data data;
-	void AssertCreateAndCopy(neural_net &net, uint numLayers, const uint *layers, uint neurons, uint connections);
-	void AssertCreate(neural_net &net, uint numLayers, const uint *layers, uint neurons, uint connections);
-	void AssertWeights(neural_net &net, float min, float max, float avg);
-	virtual void SetUp();
-	virtual void TearDown();
-};
-
-class FannTestData : public FannTest {
-protected:
-	uint numData;
-	uint numInput;
-	uint numOutput;
-	float inputValue;
-	float outputValue;
-	float **inputData;
-	float **outputData;
-	virtual void SetUp();
-	virtual void TearDown();
-	void AssertTrainData(FANN::training_data &trainingData, uint numData, uint numInput, uint numOutput, float inputValue, float outputValue);
-	void InitializeTrainDataStructure(uint numData, uint numInput, uint numOutput, float inputValue, float outputValue, float **inputData, float **outputData);
-};
-
-using namespace std;
-
-void FannTest::SetUp()
-{
-	//ensure random generator is seeded at a known value to ensure reproducible results
-	srand(0);
-	fann_disable_seed_rand();
-}
-
-void FannTest::TearDown()
-{
-	net.destroy();
-	data.destroy_train();
-}
-
-void FannTest::AssertCreate(neural_net &net, uint numLayers, const uint *layers, uint neurons, uint connections)
-{
-	EXPECT_EQ(numLayers, net.get_num_layers());
-	EXPECT_EQ(layers[0], net.get_num_input());
-	EXPECT_EQ(layers[numLayers - 1], net.get_num_output());
-	uint *layers_res = new uint[numLayers];
-	net.get_layer_array(layers_res);
-	for(uint i = 0; i < numLayers; i++) {
-		EXPECT_EQ(layers[i], layers_res[i]);
-	}
-	delete layers_res;
-	EXPECT_EQ(neurons, net.get_total_neurons());
-	EXPECT_EQ(connections, net.get_total_connections());
-	AssertWeights(net, -0.09, 0.09, 0.0);
-}
-
-void FannTest::AssertCreateAndCopy(neural_net &net, uint numLayers, const uint *layers, uint neurons, uint connections)
-{
-	AssertCreate(net, numLayers, layers, neurons, connections);
-	neural_net net_copy(net);
-	AssertCreate(net_copy, numLayers, layers, neurons, connections);
-}
-
-void FannTest::AssertWeights(neural_net &net, float min, float max, float avg)
-{
-	connection *connections = new connection[net.get_total_connections()];
-	net.get_connection_array(connections);
-	float minWeight = connections[0].weight;
-	float maxWeight = connections[0].weight;
-	float totalWeight = 0.0;
-	for(int i = 1; i < net.get_total_connections(); ++i) {
-		if(connections[i].weight < minWeight)
-			minWeight = connections[i].weight;
-		if(connections[i].weight > maxWeight)
-			maxWeight = connections[i].weight;
-		totalWeight += connections[i].weight;
-	}
-	EXPECT_NEAR(min, minWeight, 0.05);
-	EXPECT_NEAR(max, maxWeight, 0.05);
-	EXPECT_NEAR(avg, totalWeight / (float) net.get_total_connections(), 0.5);
-}
-
-TEST_F(FannTest, CreateStandardThreeLayers)
-{
-	neural_net net(LAYER, 3, 2, 3, 4);
-	AssertCreateAndCopy(net, 3, (const uint[]) {2, 3, 4}, 11, 25);
-}
-
-TEST_F(FannTest, CreateStandardThreeLayersUsingCreateMethod)
-{
-	ASSERT_TRUE(net.create_standard(3, 2, 3, 4));
-	uint layers[] = {2, 3, 4};
-	AssertCreateAndCopy(net, 3, layers, 11, 25);
-}
-
-TEST_F(FannTest, CreateStandardFourLayersArray)
-{
-	uint layers[] = {2, 3, 4, 5};
-	neural_net net(LAYER, 4, layers);
-	AssertCreateAndCopy(net, 4, layers, 17, 50);
-}
-
-TEST_F(FannTest, CreateStandardFourLayersArrayUsingCreateMethod)
-{
-	uint layers[] = {2, 3, 4, 5};
-	ASSERT_TRUE(net.create_standard_array(4, layers));
-	AssertCreateAndCopy(net, 4, layers, 17, 50);
-}
-
-TEST_F(FannTest, CreateStandardFourLayersVector)
-{
-	vector<uint> layers{2, 3, 4, 5};
-	neural_net net(LAYER, layers.begin(), layers.end());
-	AssertCreateAndCopy(net, 4, layers.data(), 17, 50);
-}
-
-TEST_F(FannTest, CreateSparseFourLayers)
-{
-	neural_net net(0.5, 4, 2, 3, 4, 5);
-	AssertCreateAndCopy(net, 4, (const uint[]){2, 3, 4, 5}, 17, 31);
-}
-
-TEST_F(FannTest, CreateSparseFourLayersUsingCreateMethod)
-{
-	ASSERT_TRUE(net.create_sparse(0.5f, 4, 2, 3, 4, 5));
-	AssertCreateAndCopy(net, 4, (const uint[]){2, 3, 4, 5}, 17, 31);
-}
-
-TEST_F(FannTest, CreateSparseArrayFourLayers)
-{
-	uint layers[] = {2, 3, 4, 5};
-	neural_net net(0.5f, 4, layers);
-	AssertCreateAndCopy(net, 4, layers, 17, 31);
-}
-
-TEST_F(FannTest, CreateSparseArrayFourLayersUsingCreateMethod)
-{
-	uint layers[] = {2, 3, 4, 5};
-	ASSERT_TRUE(net.create_sparse_array(0.5f, 4, layers));
-	AssertCreateAndCopy(net, 4, layers, 17, 31);
-}
-
-TEST_F(FannTest, CreateSparseArrayWithMinimalConnectivity)
-{
-	uint layers[] = {2, 2, 2};
-	neural_net net(0.01f, 3, layers);
-	AssertCreateAndCopy(net, 3, layers, 8, 8);
-}
-
-TEST_F(FannTest, CreateShortcutFourLayers)
-{
-	neural_net net(SHORTCUT, 4, 2, 3, 4, 5);
-	AssertCreateAndCopy(net, 4, (const uint[]){2, 3, 4, 5}, 15, 83);
-	EXPECT_EQ(SHORTCUT, net.get_network_type());
-}
-
-TEST_F(FannTest, CreateShortcutFourLayersUsingCreateMethod)
-{
-	ASSERT_TRUE(net.create_shortcut(4, 2, 3, 4, 5));
-	AssertCreateAndCopy(net, 4, (const uint[]){2, 3, 4, 5}, 15, 83);
-	EXPECT_EQ(SHORTCUT, net.get_network_type());
-}
-
-TEST_F(FannTest, CreateShortcutArrayFourLayers)
-{
-	uint layers[] = {2, 3, 4, 5};
-	neural_net net(SHORTCUT, 4, layers);
-	AssertCreateAndCopy(net, 4, layers, 15, 83);
-	EXPECT_EQ(SHORTCUT, net.get_network_type());
-}
-
-TEST_F(FannTest, CreateShortcutArrayFourLayersUsingCreateMethod)
-{
-	uint layers[] = {2, 3, 4, 5};
-	ASSERT_TRUE(net.create_shortcut_array(4, layers));
-	AssertCreateAndCopy(net, 4, layers, 15, 83);
-	EXPECT_EQ(SHORTCUT, net.get_network_type());
-}
-
-TEST_F(FannTest, CreateFromFile)
-{
-	ASSERT_TRUE(net.create_standard(3, 2, 3, 4));
-	neural_net netToBeSaved(LAYER, 3, 2, 3, 4);
-	ASSERT_TRUE(netToBeSaved.save("tmpfile"));
-	neural_net netToBeLoaded("tmpfile");
-	AssertCreateAndCopy(netToBeLoaded, 3, (const uint[]){2, 3, 4}, 11, 25);
-}
-
-TEST_F(FannTest, CreateFromFileUsingCreateMethod)
-{
-	ASSERT_TRUE(net.create_standard(3, 2, 3, 4));
-	neural_net inputNet(LAYER, 3, 2, 3, 4);
-	ASSERT_TRUE(inputNet.save("tmpfile"));
-	ASSERT_TRUE(net.create_from_file("tmpfile"));
-	AssertCreateAndCopy(net, 3, (const uint[]){2, 3, 4}, 11, 25);
-}
-
-TEST_F(FannTest, RandomizeWeights) {
-	neural_net net(LAYER, 2, 20, 10);
-	net.randomize_weights(-1.0, 1.0);
-	AssertWeights(net, -1.0, 1.0, 0);
-}
-//
-// FANN_TEST_DATA.CPP
-//
-void FannTestData::SetUp()
-{
-	FannTest::SetUp();
-	numData = 2;
-	numInput = 3;
-	numOutput = 1;
-	inputValue = 1.1;
-	outputValue = 2.2;
-	inputData = new float *[numData];
-	outputData = new float *[numData];
-	InitializeTrainDataStructure(numData, numInput, numOutput, inputValue, outputValue, inputData, outputData);
-}
-
-void FannTestData::TearDown()
-{
-	FannTest::TearDown();
-	delete(inputData);
-	delete(outputData);
-}
-
-void FannTestData::InitializeTrainDataStructure(uint numData, uint numInput, uint numOutput, float inputValue, float outputValue, float **inputData, float **outputData)
-{
-	for(uint i = 0; i < numData; i++) {
-		inputData[i] = new float[numInput];
-		outputData[i] = new float[numOutput];
-		for(uint j = 0; j < numInput; j++)
-			inputData[i][j] = inputValue;
-		for(uint j = 0; j < numOutput; j++)
-			outputData[i][j] = outputValue;
-	}
-}
-
-void FannTestData::AssertTrainData(training_data &trainingData, uint numData, uint numInput,
-	uint numOutput, float inputValue, float outputValue)
-{
-	EXPECT_EQ(numData, trainingData.length_train_data());
-	EXPECT_EQ(numInput, trainingData.num_input_train_data());
-	EXPECT_EQ(numOutput, trainingData.num_output_train_data());
-	for (int i = 0; i < numData; i++) {
-		for (int j = 0; j < numInput; j++)
-			EXPECT_DOUBLE_EQ(inputValue, trainingData.get_input()[i][j]);
-		for (int j = 0; j < numOutput; j++)
-			EXPECT_DOUBLE_EQ(outputValue, trainingData.get_output()[i][j]);
-	}
-}
-
-TEST_F(FannTestData, CreateTrainDataFromPointerArrays)
-{
-	data.set_train_data(numData, numInput, inputData, numOutput, outputData);
-	AssertTrainData(data, numData, numInput, numOutput, inputValue, outputValue);
-}
-
-TEST_F(FannTestData, CreateTrainDataFromArrays)
-{
-	float input[] = {inputValue, inputValue, inputValue, inputValue, inputValue, inputValue};
-	float output[] = {outputValue, outputValue};
-	data.set_train_data(numData, numInput, input, numOutput, output);
-	AssertTrainData(data, numData, numInput, numOutput, inputValue, outputValue);
-}
-
-TEST_F(FannTestData, CreateTrainDataFromCopy)
-{
-	data.set_train_data(numData, numInput, inputData, numOutput, outputData);
-	training_data dataCopy(data);
-	AssertTrainData(dataCopy, numData, numInput, numOutput, inputValue, outputValue);
-}
-
-TEST_F(FannTestData, CreateTrainDataFromFile)
-{
-	data.set_train_data(numData, numInput, inputData, numOutput, outputData);
-	data.save_train("tmpFile");
-	training_data dataCopy;
-	dataCopy.read_train_from_file("tmpFile");
-	AssertTrainData(dataCopy, numData, numInput, numOutput, inputValue, outputValue);
-}
-
-void callBack(uint pos, uint numInput, uint numOutput, float *input, float *output)
-{
-	for(uint i = 0; i < numInput; i++)
-		input[i] = (float) 1.2;
-	for(uint i = 0; i < numOutput; i++)
-		output[i] = (float) 2.3;
-}
-
-TEST_F(FannTestData, CreateTrainDataFromCallback)
-{
-	data.create_train_from_callback(numData, numInput, numOutput, callBack);
-	AssertTrainData(data, numData, numInput, numOutput, 1.2, 2.3);
-}
-
-TEST_F(FannTestData, ShuffleTrainData)
-{
-	//only really ensures that the data doesn't get corrupted, a more complete test would need to check
-	//that this was indeed a permutation of the original data
-	data.set_train_data(numData, numInput, inputData, numOutput, outputData);
-	data.shuffle_train_data();
-	AssertTrainData(data, numData, numInput, numOutput, inputValue, outputValue);
-}
-
-TEST_F(FannTestData, MergeTrainData)
-{
-	data.set_train_data(numData, numInput, inputData, numOutput, outputData);
-	training_data dataCopy(data);
-	data.merge_train_data(dataCopy);
-	AssertTrainData(data, numData*2, numInput, numOutput, inputValue, outputValue);
-}
-
-TEST_F(FannTestData, SubsetTrainData)
-{
-	data.set_train_data(numData, numInput, inputData, numOutput, outputData);
-	//call merge 2 times to get 8 data samples
-	data.merge_train_data(data);
-	data.merge_train_data(data);
-	data.subset_train_data(2, 5);
-	AssertTrainData(data, 5, numInput, numOutput, inputValue, outputValue);
-}
-
-TEST_F(FannTestData, ScaleOutputData)
-{
-	float input[] = {0.0, 1.0, 0.5, 0.0, 1.0, 0.5};
-	float output[] = {0.0, 1.0};
-	data.set_train_data(2, 3, input, 1, output);
-	data.scale_output_train_data(-1.0, 2.0);
-	EXPECT_DOUBLE_EQ(0.0, data.get_min_input());
-	EXPECT_DOUBLE_EQ(1.0, data.get_max_input());
-	EXPECT_DOUBLE_EQ(-1.0, data.get_min_output());
-	EXPECT_DOUBLE_EQ(2.0, data.get_max_output());
-}
-
-TEST_F(FannTestData, ScaleInputData)
-{
-	float input[] = {0.0, 1.0, 0.5, 0.0, 1.0, 0.5};
-	float output[] = {0.0, 1.0};
-	data.set_train_data(2, 3, input, 1, output);
-	data.scale_input_train_data(-1.0, 2.0);
-	EXPECT_DOUBLE_EQ(-1.0, data.get_min_input());
-	EXPECT_DOUBLE_EQ(2.0, data.get_max_input());
-	EXPECT_DOUBLE_EQ(0.0, data.get_min_output());
-	EXPECT_DOUBLE_EQ(1.0, data.get_max_output());
-}
-
-TEST_F(FannTestData, ScaleData)
-{
-	float input[] = {0.0, 1.0, 0.5, 0.0, 1.0, 0.5};
-	float output[] = {0.0, 1.0};
-	data.set_train_data(2, 3, input, 1, output);
-	data.scale_train_data(-1.0, 2.0);
-	for(uint i = 0; i < 2; i++) {
-		float *train_input = data.get_train_input(i);
-		EXPECT_DOUBLE_EQ(-1.0, train_input[0]);
-		EXPECT_DOUBLE_EQ(2.0, train_input[1]);
-		EXPECT_DOUBLE_EQ(0.5, train_input[2]);
-	}
-	EXPECT_DOUBLE_EQ(-1.0, data.get_train_output(0)[0]);
-	EXPECT_DOUBLE_EQ(2.0, data.get_train_output(0)[1]);
-}
-//
-// FANN_TEST_TRAIN.CPP
-//
-class FannTestTrain : public FannTest {
-protected:
-	float xorInput[8] = {
-		0.0, 0.0,
-		0.0, 1.0,
-		1.0, 0.0,
-		1.0, 1.0};
-	float xorOutput[4] = {
-		0.0,
-		1.0,
-		1.0,
-		0.0};
-	virtual void SetUp();
-	virtual void TearDown();
-};
-
-void FannTestTrain::SetUp()
-{
-	FannTest::SetUp();
-}
-
-void FannTestTrain::TearDown()
-{
-	FannTest::TearDown();
-}
-
-TEST_F(FannTestTrain, TrainOnDateSimpleXor)
-{
-	neural_net net(LAYER, 3, 2, 3, 1);
-	data.set_train_data(4, 2, xorInput, 1, xorOutput);
-	net.train_on_data(data, 100, 100, 0.001);
-	EXPECT_LT(net.get_MSE(), 0.001);
-	EXPECT_LT(net.test_data(data), 0.001);
-}
-
-TEST_F(FannTestTrain, TrainSimpleIncrementalXor)
-{
-	neural_net net(LAYER, 3, 2, 3, 1);
-	for(int i = 0; i < 100000; i++) {
-		net.train((float*) (const float[]) {0.0, 0.0}, (float*) (const float[]) {0.0});
-		net.train((float*) (const float[]) {1.0, 0.0}, (float*) (const float[]) {1.0});
-		net.train((float*) (const float[]) {0.0, 1.0}, (float*) (const float[]) {1.0});
-		net.train((float*) (const float[]) {1.0, 1.0}, (float*) (const float[]) {0.0});
-	}
-	EXPECT_LT(net.get_MSE(), 0.01);
-}
-
-int main(int argc, char **argv)
-{
-	::testing::InitGoogleTest(&argc, argv);
-	return RUN_ALL_TESTS();
-}
-
-#endif // } 0 @construction
-
-#if SLTEST_RUNNING // {
-
-static int AssertWeights(STestCase * pTc, const Fann * pNet, float min, float max, float avg)
-{
-	TSVector <Fann::Connection> connections;
-	pNet->GetConnectionArray(connections);
-	float min_weight = connections.at(0).Weight;
-	float max_weight = connections.at(0).Weight;
-	float total_weight = 0.0;
-	const uint conn_count = pNet->GetTotalConnections();
-	for(uint i = 1; i < conn_count; ++i) {
-		if(connections.at(i).Weight < min_weight)
-			min_weight = connections.at(i).Weight;
-		if(connections.at(i).Weight > max_weight)
-			max_weight = connections.at(i).Weight;
-		total_weight += connections.at(i).Weight;
-	}
-	pTc->SLTEST_CHECK_EQ_TOL(min, min_weight, 0.06f); // @v10.2.4 0.05f--0.06f
-	pTc->SLTEST_CHECK_EQ_TOL(max, max_weight, 0.06f); // @v10.2.4 0.05f--0.06f
-	pTc->SLTEST_CHECK_EQ_TOL(avg, total_weight / (float)conn_count, 0.5f);
-	return pTc->GetCurrentStatus();
-}
-
-static int AssertCreate(STestCase * pTc, Fann * pNet, uint numLayers, const uint * pLayers, uint neurons, uint connections)
-{
-	LongArray layers;
-	pTc->SLTEST_CHECK_EQ(numLayers, pNet->GetNumLayers());
-	pNet->GetLayerArray(layers);
-	pTc->SLTEST_CHECK_EQ(layers.get(0), (long)pNet->GetNumInput());
-	pTc->SLTEST_CHECK_EQ(layers.get(numLayers-1), (long)pNet->GetNumOutput());
-	for(uint i = 0; i < numLayers; i++) {
-		pTc->SLTEST_CHECK_EQ(pLayers[i], (uint)layers.get(i));
-	}
-	pTc->SLTEST_CHECK_EQ(neurons, pNet->GetTotalNeurons());
-	pTc->SLTEST_CHECK_EQ(connections, pNet->GetTotalConnections());
-	AssertWeights(pTc, pNet, -0.09f, 0.09f, 0.0f);
-	return pTc->GetCurrentStatus();
-}
-
-static int AssertCreateAndCopy(STestCase * pTc, Fann * pNet, uint numLayers, const uint * pLayers, uint neurons, uint connections)
-{
-	AssertCreate(pTc, pNet, numLayers, pLayers, neurons, connections);
-	Fann * p_copy = fann_copy(pNet);
-	AssertCreate(pTc, p_copy, numLayers, pLayers, neurons, connections);
-	pTc->SLTEST_CHECK_NZ(p_copy->IsEqual(*pNet, 0));
-	fann_destroy(p_copy);
-	return pTc->GetCurrentStatus();
-}
-
-SLTEST_R(FANN)
-{
-	LongArray layers;
-	// CreateStandardThreeLayers
-	{
-		const uint p_layer_dim[] = { 2, 3, 4 };
-		Fann * p_ann = fann_create_standard(3, 2, 3, 4);
-		AssertCreateAndCopy(this, p_ann, 3, p_layer_dim, 11, 25);
-		fann_destroy(p_ann);
-	}
-	// CreateStandardFourLayersArray
-	{
-		const uint p_layer_dim[] = { 2, 3, 4, 5 };
-		Fann * p_ann = fann_create_standard(4, 2, 3, 4, 5);
-		AssertCreateAndCopy(this, p_ann, 4, p_layer_dim, 17, 50);
-		fann_destroy(p_ann);
-	}
-	// CreateSparseFourLayers
-	{
-		const uint p_layer_dim[] = { 2, 3, 4, 5 };
-		Fann * p_ann = fann_create_sparse(0.5f, 4, 2, 3, 4, 5);
-		AssertCreateAndCopy(this, p_ann, 4, p_layer_dim, 17, 31);
-		fann_destroy(p_ann);
-	}
-	// CreateSparseArrayWithMinimalConnectivity
-	{
-		const uint p_layer_dim[] = { 2, 2, 2 };
-		Fann * p_ann = fann_create_sparse(0.01f, 3, 2, 2, 2);
-		AssertCreateAndCopy(this, p_ann, 3, p_layer_dim, 8, 8);
-		fann_destroy(p_ann);
-	}
-	// CreateShortcutFourLayers
-	{
-		const uint p_layer_dim[] = { 2, 3, 4, 5 };
-		Fann * p_ann = fann_create_shortcut(4, 2, 3, 4, 5);
-		AssertCreateAndCopy(this, p_ann, 4, p_layer_dim, 15, 83);
-		SLTEST_CHECK_EQ((long)p_ann->GetNetworkType(), (long)Fann::FANN_NETTYPE_SHORTCUT);
-		fann_destroy(p_ann);
-	}
-	/*
-	// CreateFromFile
-	{
-		ASSERT_TRUE(net.create_standard(3, 2, 3, 4));
-		neural_net netToBeSaved(LAYER, 3, 2, 3, 4);
-		ASSERT_TRUE(netToBeSaved.save("tmpfile"));
-		neural_net netToBeLoaded("tmpfile");
-		AssertCreateAndCopy(netToBeLoaded, 3, (const uint[]){2, 3, 4}, 11, 25);
-	}
-	// CreateFromFileUsingCreateMethod
-	{
-		ASSERT_TRUE(net.create_standard(3, 2, 3, 4));
-		neural_net inputNet(LAYER, 3, 2, 3, 4);
-		ASSERT_TRUE(inputNet.save("tmpfile"));
-		ASSERT_TRUE(net.create_from_file("tmpfile"));
-		AssertCreateAndCopy(net, 3, (const uint[]){2, 3, 4}, 11, 25);
-	}
-	// RandomizeWeights
-	{
-		neural_net net(LAYER, 2, 20, 10);
-		net.randomize_weights(-1.0, 1.0);
-		AssertWeights(net, -1.0, 1.0, 0);
-	}
-	*/
-	{
-		float XorInput[] = {
-			0.0, 0.0,
-			0.0, 1.0,
-			1.0, 0.0,
-			1.0, 1.0
-		};
-		float XorOutput[] = {
-			0.0,
-			1.0,
-			1.0,
-			0.0
-		};
-		// TrainOnDateSimpleXor
-		{
-			const uint c_in = 2;
-			const uint c_out = 1;
-			const uint c_d = 4;
-			Fann * p_ann = fann_create_standard(3, c_in, 3, c_out);
-			Fann::TrainData train_data(*p_ann, c_d);
-			for(uint i = 0; i < c_d; i++) {
-				SLTEST_CHECK_NZ(train_data.SetInputSeries(i, XorInput + i * c_in));
-				SLTEST_CHECK_NZ(train_data.SetOutputSeries(i, XorOutput + i * c_out));
-			}
-			p_ann->TrainOnData(&train_data, 100, 100, 0.001f);
-			SLTEST_CHECK_LT(p_ann->GetMSE(), 0.001f);
-			SLTEST_CHECK_LT(p_ann->TestData(&train_data), 0.001f);
-			{
-				SBuffer sbuf;
-				int r = 0;
-				{
-					SSerializeContext sctx;
-					SLTEST_CHECK_NZ(r = p_ann->Serialize(+1, sbuf, &sctx));
-				}
-				if(r) {
-					SSerializeContext sctx;
-					Fann new_ann(sbuf, &sctx);
-					SLTEST_CHECK_NZ(new_ann.IsValid());
-					SLTEST_CHECK_NZ(new_ann.IsEqual(*p_ann, 0));
-				}
-			}
-			fann_destroy(p_ann);
-		}
-		// TrainSimpleIncrementalXor
-		{
-			const uint c_in = 2;
-			const uint c_out = 1;
-			layers.clear();
-			layers.addzlist(2, 3, 1, 0);
-			Fann ann(Fann::FANN_NETTYPE_LAYER, 1.0f, layers);
-			SLTEST_CHECK_NZ(ann.IsValid());
-			for(int i = 0; i < 100000; i++) {
-				ann.Train(XorInput + 0 * c_in, XorOutput + 0 * c_out);
-				ann.Train(XorInput + 1 * c_in, XorOutput + 1 * c_out);
-				ann.Train(XorInput + 2 * c_in, XorOutput + 2 * c_out);
-				ann.Train(XorInput + 3 * c_in, XorOutput + 3 * c_out);
-			}
-			const float _mse = ann.GetMSE();
-			SLTEST_CHECK_LT(_mse, 0.01f);
-			SLTEST_CHECK_EQ(ann.Run(XorInput + 0 * c_in)[0], XorOutput[0 * c_out]);
-			SLTEST_CHECK_EQ(ann.Run(XorInput + 1 * c_in)[0], XorOutput[1 * c_out]);
-			SLTEST_CHECK_EQ(ann.Run(XorInput + 2 * c_in)[0], XorOutput[2 * c_out]);
-			SLTEST_CHECK_EQ(ann.Run(XorInput + 3 * c_in)[0], XorOutput[3 * c_out]);
-			{
-				SBuffer sbuf;
-				int r = 0;
-				{
-					SSerializeContext sctx;
-					SLTEST_CHECK_NZ(r = ann.Serialize(+1, sbuf, &sctx));
-				}
-				if(r) {
-					SSerializeContext sctx;
-					Fann new_ann(sbuf, &sctx);
-					SLTEST_CHECK_NZ(new_ann.IsValid());
-					SLTEST_CHECK_NZ(new_ann.IsEqual(ann, 0));
-				}
-			}
-			/*
-			neural_net net(LAYER, 3, 2, 3, 1);
-			for(int i = 0; i < 100000; i++) {
-				net.train((float*) (const float[]) {0.0, 0.0}, (float*) (const float[]) {0.0});
-				net.train((float*) (const float[]) {1.0, 0.0}, (float*) (const float[]) {1.0});
-				net.train((float*) (const float[]) {0.0, 1.0}, (float*) (const float[]) {1.0});
-				net.train((float*) (const float[]) {1.0, 1.0}, (float*) (const float[]) {0.0});
-			}
-			EXPECT_LT(net.get_MSE(), 0.01);
-			*/
-		}
-		if(0) {
-			SString temp_buf;
-			SString test_item_name;
-			SString input_file_name;
-			SString test_input_file_name;
-			for(uint ap = 0; EnumArg(&ap, test_item_name);) {
-				float local_err = 0.0f;
-				input_file_name = MakeInputFilePath((temp_buf = test_item_name).Dot().Cat("train"));
-				test_input_file_name = MakeInputFilePath((temp_buf = test_item_name).Dot().Cat("test"));
-				if(fileExists(input_file_name) && fileExists(test_input_file_name)) {
-					Fann::TrainData train_data;
-					Fann::TrainData test_data;
-					int trr_ = 0;
-					int tsr_ = 0;
-					int dor_ = 0;
-					SLTEST_CHECK_NZ(trr_ = train_data.Read(input_file_name, 0));
-					SLTEST_CHECK_NZ(tsr_ = test_data.Read(test_input_file_name, 0));
-					if(trr_ && tsr_) {
-						layers.clear();
-						layers.addzlist(train_data.GetInputCount(), /*train_data.GetInputCount() * 10*/30, train_data.GetOutputCount(), 0);
-						Fann::DetectOptimalParam dop;
-						dop.Layers = layers;
-						dop.Flags |= (dop.fDetectActivationFunc|dop.fDetectTrainAlg);
-						dop.P_TrainData = &train_data;
-						SLTEST_CHECK_NZ(dor_ = Fann::DetectOptimal(dop));
-						{
-							Fann ann(Fann::FANN_NETTYPE_LAYER, 1.0f, layers);
-							SLTEST_CHECK_NZ(ann.IsValid());
-							if(dop.ResultFlags & dop.rfTrainAlgDetected && dop.BestTrainAlg >= 0)
-								ann.SetTrainingAlgorithm((Fann::TrainAlg)dop.BestTrainAlg);
-							if(dop.ResultFlags & dop.rfHiddActFuncDetected && dop.BestHiddActF >= 0)
-								ann.SetActivationFunctionHidden((Fann::ActivationFunc)dop.BestHiddActF);
-							if(dop.ResultFlags & dop.rfOutpActFuncDetected && dop.BestOutpActF >= 0)
-								ann.SetActivationFunctionOutput((Fann::ActivationFunc)dop.BestOutpActF);
-							ann.TrainOnData(&train_data, 1000, 0, 0.000001f);
-							{
-								local_err = ann.TestData(&test_data);
-							}
-						}
-					}
-				}
-			}
-		}
-	}
-	CATCH
-		CurrentStatus = 0;
-	ENDCATCH
-	return CurrentStatus;
-}
-
-#endif // } SLTEST_RUNNING
diff --git a/Src/SLib/slport.c b/Src/SLib/slport.c
deleted file mode 100644
index 2bcc1500d9..0000000000
--- a/Src/SLib/slport.c
+++ /dev/null
@@ -1,3 +0,0 @@
-// SLPORT.C
-// Copyright (c) A.Sobolev 2020
-//
diff --git a/Src/SLib/slrecmgr.cpp b/Src/SLib/slrecmgr.cpp
index 789f32e765..d50c4045ca 100644
--- a/Src/SLib/slrecmgr.cpp
+++ b/Src/SLib/slrecmgr.cpp
@@ -173,7 +173,7 @@ bool SDataPageHeader::IsValid() const
 	return (Signature == SignatureValue && TotalSize > 0 && /*FreePos < TotalSize &&*/(!FixedChunkSize || Type == tFixedChunkPool));
 }
 
-bool SDataPageHeader::GetStat(Stat & rStat) const
+bool SDataPageHeader::GetStat(Stat & rStat, TSVector <SRecPageFreeList::Entry> * pUsableBlockList) const
 {
 	bool ok = true;
 	rStat.Z();
@@ -192,6 +192,14 @@ bool SDataPageHeader::GetStat(Stat & rStat) const
 			if(pfx.PayloadSize >= 8) {
 				rStat.UsableBlockCount++;
 				rStat.UsableBlockSize += pfx.PayloadSize;
+				if(pUsableBlockList) {
+					uint64 row_id = SRecPageManager::MakeRowId(TotalSize, Seq, offs);
+					THROW(row_id);
+					{
+						SRecPageFreeList::Entry ue(row_id, pfx.PayloadSize);
+						pUsableBlockList->insert(&ue);
+					}
+				}
 			}
 		}
 		offs += pfx.TotalSize;
@@ -640,6 +648,22 @@ int SRecPageFreeList::Put(uint32 type, uint64 rowId, uint32 freeSize)
 	return ok;
 }
 
+int SRecPageFreeList::GetListForPage(uint pageSeq, TSVector <Entry> & rList) const // @debug
+{
+	// @unfinished
+	int ok = -1;
+	for(uint i = 0; i < L.getCount(); i++) {
+		const SingleTypeList * p_stl = L.at(i);
+		if(p_stl) {
+			for(uint j = 0; j < p_stl->getCount(); j++) {
+				const Entry & r_entry = p_stl->at(j);
+				//SRecPageManager::SplitRowId(r_entry.RowId, )
+			}
+		}
+	}
+	return ok;
+}
+
 const SRecPageFreeList::Entry * SRecPageFreeList::Get(uint32 type, uint32 reqSize) const
 {
 	const Entry * p_result = 0;
@@ -652,3 +676,18 @@ const SRecPageFreeList::Entry * SRecPageFreeList::Get(uint32 type, uint32 reqSiz
 	}
 	return p_result;
 }
+
+/*static*/bool SRecPageManager::TestSinglePage(uint pageSize)
+{
+	bool ok = true;
+	SRecPageManager mgr(pageSize);
+	SDataPageHeader * p_page = mgr.AllocatePage(SDataPageHeader::tRecord);
+	THROW(p_page);
+	{
+		SDataPageHeader::Stat stat;
+		TSVector <SRecPageFreeList::Entry> free_list;
+		THROW(p_page->GetStat(stat, &free_list));
+	}
+	CATCHZOK
+	return ok;
+}
\ No newline at end of file
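// Aside: the extended GetStat() lets a caller pull a page's usable free blocks
// as a list of SRecPageFreeList::Entry items, which is what TestSinglePage()
// exercises above. A hypothetical caller-side walk, kept as a comment because
// Entry's size field is not shown in this hunk (only RowId is attested; the
// iteration style mirrors this file):
//
//	SDataPageHeader::Stat stat;
//	TSVector <SRecPageFreeList::Entry> usable_list;
//	if(p_page->GetStat(stat, &usable_list)) {
//		for(uint i = 0; i < usable_list.getCount(); i++) {
//			const SRecPageFreeList::Entry & r_entry = usable_list.at(i);
//			// r_entry.RowId addresses the free block within the page
//		}
//	}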
diff --git a/Src/SLib/tcontrol.cpp b/Src/SLib/tcontrol.cpp
index 2e27830dd0..189c0ccdfa 100644
--- a/Src/SLib/tcontrol.cpp
+++ b/Src/SLib/tcontrol.cpp
@@ -10,9 +10,7 @@
 TStaticText::TStaticText(const TRect & bounds, const char * pText) : TView(bounds), Text(pText)
 {
 	SubSign = TV_SUBSIGN_STATIC;
-	Text.ShiftLeftChr(3); // @v10.7.7
-	// @v10.7.7 if(pText && *pText == 3) pText++;
-	// @v10.7.7 Text = pText;
+	Text.ShiftLeftChr(3);
 }
 
 int TStaticText::handleWindowsMessage(UINT uMsg, WPARAM wParam, LPARAM lParam)
diff --git a/Src/SLib/test-openssl.cpp b/Src/SLib/test-openssl.cpp
deleted file mode 100644
index 9a3cc71518..0000000000
--- a/Src/SLib/test-openssl.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-// TEST-OPENSSL.CPP
-//
-#include
-#pragma hdrstop
-//
-// @construction
-// Test of openssl module
-//
\ No newline at end of file
diff --git a/Src/SLib/uri.cpp b/Src/SLib/uri.cpp
index cef85be95d..e8eea783b9 100644
--- a/Src/SLib/uri.cpp
+++ b/Src/SLib/uri.cpp
@@ -2673,9 +2673,9 @@ const char * STDCALL UriParserState::ParseIpLit2(const char * first, const char
 */
 const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const char * afterLast)
 {
-	int zipperEver = 0;
-	int quadsDone = 0;
-	int digitCount = 0;
+	int zipper_ever = 0;
+	int quads_done = 0;
+	int digit_count = 0;
 	uchar digitHistory[4];
 	int ip4OctetsDone = 0;
 	uchar quadsAfterZipper[14];
@@ -2690,19 +2690,19 @@ const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const
 	for(;;) {
 		switch(*first) {
 			case URI_SET_DIGIT:
-				if(digitCount == 4) {
+				if(digit_count == 4) {
 					return StopSyntax(first);
 				}
-				digitHistory[digitCount++] = static_cast<uchar>(9+*first-'9');
+				digitHistory[digit_count++] = static_cast<uchar>(9+*first-'9');
 				break;
 			case '.':
-				if((ip4OctetsDone == 4) || /* NOTE! */ (digitCount == 0) || (digitCount == 4))
+				if((ip4OctetsDone == 4) || /* NOTE! */ (digit_count == 0) || (digit_count == 4))
 					return StopSyntax(first); // Invalid digit or octet count
-				else if((digitCount > 1) && (digitHistory[0] == 0))
-					return StopSyntax(first-digitCount); // Leading zero
-				else if((digitCount > 2) && (digitHistory[1] == 0))
-					return StopSyntax(first-digitCount+1); // Leading zero
-				else if((digitCount == 3) && (100*digitHistory[0]+10*digitHistory[1]+digitHistory[2] > 255)) {
+				else if((digit_count > 1) && (digitHistory[0] == 0))
+					return StopSyntax(first-digit_count); // Leading zero
+				else if((digit_count > 2) && (digitHistory[1] == 0))
+					return StopSyntax(first-digit_count+1); // Leading zero
+				else if((digit_count == 3) && (100*digitHistory[0]+10*digitHistory[1]+digitHistory[2] > 255)) {
 					// Octet value too large
 					if(digitHistory[0] > 2)
 						StopSyntax(first-3);
@@ -2713,18 +2713,18 @@ const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const
 					return NULL;
 				}
 				// Copy IPv4 octet
-				P_Uri->HostData.ip6->data[16-4+ip4OctetsDone] = uriGetOctetValue(digitHistory, digitCount);
-				digitCount = 0;
+				P_Uri->HostData.ip6->data[16-4+ip4OctetsDone] = uriGetOctetValue(digitHistory, digit_count);
+				digit_count = 0;
 				ip4OctetsDone++;
 				break;
 			case ']':
-				if((ip4OctetsDone != 3) || /* NOTE! */ (digitCount == 0) || (digitCount == 4))
+				if((ip4OctetsDone != 3) || /* NOTE! */ (digit_count == 0) || (digit_count == 4))
 					return StopSyntax(first); // Invalid digit or octet count
-				else if((digitCount > 1) && (digitHistory[0] == 0))
-					return StopSyntax(first-digitCount); // Leading zero
-				else if((digitCount > 2) && (digitHistory[1] == 0))
-					return StopSyntax(first-digitCount+1); // Leading zero
-				else if((digitCount == 3) && (100*digitHistory[0]+10*digitHistory[1]+digitHistory[2] > 255)) {
+				else if((digit_count > 1) && (digitHistory[0] == 0))
+					return StopSyntax(first-digit_count); // Leading zero
+				else if((digit_count > 2) && (digitHistory[1] == 0))
+					return StopSyntax(first-digit_count+1); // Leading zero
+				else if((digit_count == 3) && (100*digitHistory[0]+10*digitHistory[1]+digitHistory[2] > 255)) {
 					// Octet value too large
 					if(digitHistory[0] > 2)
 						StopSyntax(first-3);
@@ -2738,7 +2738,7 @@ const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const
 				// Copy missing quads right before IPv4
 				memcpy(P_Uri->HostData.ip6->data+16-4-2*quadsAfterZipperCount, quadsAfterZipper, 2*quadsAfterZipperCount);
 				// Copy last IPv4 octet
-				P_Uri->HostData.ip6->data[16-4+3] = uriGetOctetValue(digitHistory, digitCount);
+				P_Uri->HostData.ip6->data[16-4+3] = uriGetOctetValue(digitHistory, digit_count);
 				return first+1;
 			default: return StopSyntax(first);
 		}
@@ -2753,36 +2753,36 @@ const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const
 		switch(*first) {
 			case URI_SET_HEX_LETTER_LOWER:
 				letterAmong = 1;
-				if(digitCount == 4)
+				if(digit_count == 4)
 					return StopSyntax(first);
-				digitHistory[digitCount] = static_cast<uchar>(15+*first-'f');
-				digitCount++;
+				digitHistory[digit_count] = static_cast<uchar>(15+*first-'f');
+				digit_count++;
 				break;
 			case URI_SET_HEX_LETTER_UPPER:
 				letterAmong = 1;
-				if(digitCount == 4)
+				if(digit_count == 4)
 					return StopSyntax(first);
-				digitHistory[digitCount] = static_cast<uchar>(15+*first-'F');
-				digitCount++;
+				digitHistory[digit_count] = static_cast<uchar>(15+*first-'F');
+				digit_count++;
 				break;
 			case URI_SET_DIGIT:
-				if(digitCount == 4)
+				if(digit_count == 4)
 					return StopSyntax(first);
-				digitHistory[digitCount] = static_cast<uchar>(9+*first-'9');
-				digitCount++;
+				digitHistory[digit_count] = static_cast<uchar>(9+*first-'9');
+				digit_count++;
 				break;
 			case ':':
 			{
 				int setZipper = 0;
-				if(quadsDone > 8-zipperEver) // Too many quads?
+				if(quads_done > 8-zipper_ever) // Too many quads?
 					return StopSyntax(first);
 				else if(first+1 >= afterLast) // "::"?
 					return StopSyntax(first+1);
 				else {
 					if(first[1] == ':') {
-						const int resetOffset = 2*(quadsDone+(digitCount > 0));
+						const int resetOffset = 2*(quads_done+(digit_count > 0));
 						first++;
-						if(zipperEver)
+						if(zipper_ever)
 							return StopSyntax(first); // "::.+::"
 						else {
 							// Zero everything after zipper
@@ -2795,32 +2795,32 @@ const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const
 							return StopSyntax(first+1); // ":::+ "
 						}
 					}
-					if(digitCount > 0) {
-						if(zipperEver) {
-							uriWriteQuadToDoubleByte(digitHistory, digitCount, quadsAfterZipper+2*quadsAfterZipperCount);
+					if(digit_count > 0) {
+						if(zipper_ever) {
+							uriWriteQuadToDoubleByte(digitHistory, digit_count, quadsAfterZipper+2*quadsAfterZipperCount);
 							quadsAfterZipperCount++;
 						}
 						else {
-							uriWriteQuadToDoubleByte(digitHistory, digitCount, P_Uri->HostData.ip6->data+2*quadsDone);
+							uriWriteQuadToDoubleByte(digitHistory, digit_count, P_Uri->HostData.ip6->data+2*quads_done);
 						}
-						quadsDone++;
-						digitCount = 0;
+						quads_done++;
+						digit_count = 0;
 					}
 					letterAmong = 0;
 					if(setZipper) {
-						zipperEver = 1;
+						zipper_ever = 1;
 					}
 				}
 			}
 			break;
 			case '.':
-				if((quadsDone > 6) || /* NOTE */(!zipperEver &&(quadsDone < 6)) || letterAmong ||(digitCount == 0) ||(digitCount == 4))
+				if((quads_done > 6) || /* NOTE */(!zipper_ever &&(quads_done < 6)) || letterAmong ||(digit_count == 0) ||(digit_count == 4))
 					return StopSyntax(first); // Invalid octet before
-				else if((digitCount > 1) &&(digitHistory[0] == 0))
-					return StopSyntax(first-digitCount); // Leading zero
-				else if((digitCount > 2) &&(digitHistory[1] == 0))
-					return StopSyntax(first-digitCount+1); // Leading zero
-				else if((digitCount == 3) &&(100*digitHistory[0]+10*digitHistory[1]+digitHistory[2] > 255)) {
+				else if((digit_count > 1) &&(digitHistory[0] == 0))
+					return StopSyntax(first-digit_count); // Leading zero
+				else if((digit_count > 2) &&(digitHistory[1] == 0))
+					return StopSyntax(first-digit_count+1); // Leading zero
+				else if((digit_count == 3) &&(100*digitHistory[0]+10*digitHistory[1]+digitHistory[2] > 255)) {
 					// Octet value too large
 					if(digitHistory[0] > 2)
 						StopSyntax(first-3);
@@ -2831,26 +2831,26 @@ const char * STDCALL UriParserState::ParseIPv6address2(const char * first, const
 					return NULL;
 				}
 				// Copy first IPv4 octet
-				P_Uri->HostData.ip6->data[16-4] = uriGetOctetValue(digitHistory, digitCount);
-				digitCount = 0;
+				P_Uri->HostData.ip6->data[16-4] = uriGetOctetValue(digitHistory, digit_count);
+				digit_count = 0;
 				// Switch over to IPv4 loop
 				ip4OctetsDone = 1;
 				walking = 0;
 				break;
 			case ']':
 				// Too little quads?
-				if(!zipperEver && !((quadsDone == 7) && (digitCount > 0)))
+				if(!zipper_ever && !((quads_done == 7) && (digit_count > 0)))
 					return StopSyntax(first);
-				if(digitCount > 0) {
-					if(zipperEver) {
-						uriWriteQuadToDoubleByte(digitHistory, digitCount, quadsAfterZipper+2*quadsAfterZipperCount);
+				if(digit_count > 0) {
+					if(zipper_ever) {
+						uriWriteQuadToDoubleByte(digitHistory, digit_count, quadsAfterZipper+2*quadsAfterZipperCount);
 						quadsAfterZipperCount++;
 					}
 					else {
-						uriWriteQuadToDoubleByte(digitHistory, digitCount, P_Uri->HostData.ip6->data+2*quadsDone);
+						uriWriteQuadToDoubleByte(digitHistory, digit_count, P_Uri->HostData.ip6->data+2*quads_done);
 					}
 					/*
-					quadsDone++;
-					digitCount = 0;
+					quads_done++;
+					digit_count = 0;
 					*/
 				}
 				// Copy missing quads to the end
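// Aside: the "zipper" this parser tracks is the "::" run of an IPv6 literal: it
// stands for as many zero quads as are needed to reach eight groups and may
// appear only once. Quads seen after it are buffered (quadsAfterZipper) and are
// planted at the tail of the 16-byte address only when the closing ']' fixes
// their final position, which is what "copy missing quads" refers to. A
// stripped-down model of that back-fill, with illustrative names rather than
// the parser's API:
#include <cstring>

static void zipper_backfill(unsigned char addr[16], int quadsBefore, const unsigned char * pAfter, int quadsAfter)
{
	// quads parsed before "::" already occupy addr[0 .. 2*quadsBefore)
	std::memset(addr + 2 * quadsBefore, 0, 16 - 2 * quadsBefore - 2 * quadsAfter); // the zipper's zeros
	std::memcpy(addr + 16 - 2 * quadsAfter, pAfter, 2 * quadsAfter); // buffered tail quads
}
// e.g. "1:2::7:8": quadsBefore == 2, pAfter holds {0,7,0,8}, quadsAfter == 2.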