From 0fe1a8e1e04333a22ad1b62b6c231b20ec3c565d Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Fri, 8 Sep 2023 16:56:18 +0530 Subject: [PATCH 01/20] Issue KN-000 merge: Merge Release 5.6.0 into Master (#984) * Issue #IQ-45 feat: CSP changes * Issue #KN-585 feat: Single click local installation of knowledge platform * Issue #KN-603 fix: CSP changes * Issue #KN-603 fix: remove gcloud hard dependency * Issue #KN-15 fix:Postman search-service API positive test cases - * feat (doc): local setup using kubernetes. Setup of DBs and Taxonomy-Service. * Issue #KN-15 fix:Postman search-service API Negative test cases * feat (doc): local setup using kubernetes. * Issue #KN-439 feat: Handles domain agnostic for Neo4J. * Issue #KN-439 feat: Handles domain agnostic for Neo4J. * Issue #KN-439 feat: Handles domain agnostic for Neo4J. * Issue #KN-439 test: Added the config for unit test. * Issue #KN-439 feat: Fixed the replacement issue. * Issue #KN-439 feat: Handles domain agnostic for Cassandra. * Issue #KN-439 feat: Handles domain agnostic for Cassandra. * Issue #KN-579 feat: Properties update * Issue #KN-579 feat: Properties update * Issue #KN-439 fix: Fixed update scenario where response is return with metadata without reading it again. * Issue #KN-439 test: Fixed update scenario where response is return with metadata without reading it again. * Issue #KN-439 merge: csp-migration to release-5.2.0 (#900) * Issue #KN-439 fix: Fixed saveExternalRelativePath issue for ArrayList * Issue #KN-439 feat: Added the variable for relative_path_prefix Co-authored-by: anilgupta Co-authored-by: vinukumar-vs * Issue #KN-427 feat: Test case update * Issue #KN-427 feat: Test case update * Issue #KN-439 feat: Fixed ClassCast issue. 
* Issue #KN-439 feat: Fixed the NullPointer issue * Issue #IQ-193 fix: updated code for cname * Issue #IQ-193 fix: updated code * Issue #IQ-193 feat: added test cases * Issue #KN-439 chore: added the migrationVersion in asset, content and collection schema.json * Issue #KN-439 chore: Added the more fields in metadata list * Issue #KN-667 fix: Search-service: Search API - Part 2 * Issue #CO-173 debug: Adding debug statements * Issue #CO-173 debug: Adding debug statements * Issue #CO-173 debug: Adding debug statements * Issue #CO-173 debug: Adding debug statements * Issue #CO-173 fix: Schema validation fix * Issue #CO-173 fix: Schema validation fix * Issue #CO-173 fix: Schema validation fix * Issue #KN- fix: relational metadata backward compatibility fix * Issue #KN-730 fix: QR Code and QRRequired inconsistency debug * Issue #KN-751 fix: Content and collection image publish issue * Issue #KN-754 feat: Postman DOCS publishing. * Issue #KN-754 fix: Sunbird License API collection name updated * Issue KN-767 feat (doc): content service local setup using kubernetes * Issue KN-767 feat (doc): content service local setup using kubernetes * Issue KN-767 feat (doc): content service local setup using kubernetes * Issue KN-767 fix: content service config map updated * Corrected the typo in line 20 Refer to https://github.com/orgs/Sunbird-inQuiry/discussions/62 * feat : Updated scala version to 2.12 * Issue KN-767 feat: Added Terraform scripts and Helmcharts for one click installation * Issue KN-767 feat: Removed the unused helmcharts * Issue KN-767 feat: Updated Readme file . * Issue KN-767 feat: Updated Readme file . * Issue KN-767 feat: Removed Unused files . * Issue KN-767 feat: Added gitignore. * Issue KN-767 feat: Added Helm chart and terraform file for content-service and added few changes to taxonomy-service. 
* Issue KN-767 feat: Added Helm chart and terraform file for search-service * Issue KN-767 feat: Added Helm chart and terraform file for learning-service ,added configmaps and updated changes in the helmcharts. * Issue KN-767 feat:Added changes in Neo4j to connect with logstash * Delete terraform.tfstate * Delete terraform.tfstate.backup * Issue KN-767 feat:Added learning service terraform script * Issue #KN-767 fix: knowlg one click installation for azure added * Issue #KN-767 fix: dial services helm charts added * Issue #KN-767 fix: Updated README.md file * Issue #KN-767 fix:Added Flink Helm Chart and Terraform script * Issue #KN-767 fix: knowlg one click installation for flink jobs in azure added * Revert "Merge branch 'Sunbird-Knowlg:master' into KN-848" This reverts commit 9c7c619b585ebea9cdd3a82ed92e36db73446551, reversing changes made to 59e42ecb04d66c53f170dff329ccc3d5676f12cb. * Delete schema.json * Issue KN-848 fix: Removed the deprecated import and added the alternative * Issue #KN-767 fix: search service config updated * Issue #KN-10 feat: Added create framework API * Issue #KN-10 fix: Updated youtubeurl * Delete StorageService.scala * Revert "Delete StorageService.scala" This reverts commit f633e69a66451f79d4cffb2837576b41068290bf. 
* Updated Cloud-store-sdk version * Update YouTubeUrlUtilTest.java * Update YouTubeURLManagerImplTest.java * Updated YoutubeUrls * Issue #KN-10 feat: Added framework cache for read framework * Issue #KN-10 feat: Updated create framework API * Undo logger changes * Update DataNode.scala * Update DataNode , DefinitionNode * Updated Youtube URLs * Update schema * Issue #KN-10 feat: Added update framework API * Issue #KN-10 feat: retire framework API added * Issue #KN-10 feat: Added copy framework API * Issue #KN-10 fix: create framework API fixes * Issue #KN-10 feat: Added update and retire framework APIs * Issue #KN-10 feat: Added update and retire framework APIsIssue #KN-10 feat: Added Publish framework APIs * Issue #KN-10 feat: framework publish and read API changes * feat: refactoring jacoco version update * Issue #KN-10 feat: category, categoryInstance and term APIs Added * Issue #KN-848 fix: updated scala version and test dependencies * Issue #KN-848 fix: updated jsonschemavalidator * Issue #KN-848 fix: updated schema path * Issue #KN-848 fix: updated schema path * Issue #KN-848 fix: updated schema path * Issue #KN-10 feat:Added TestCases for Master Category * feat: handle in-relationship metadata to set the index For FrameworkCategory create it uses in relation with Framework. This is not handled properly to set the index value... This commit has the required changes. 
* Issue #KN-10 feat:Bug fix in Create Category API * Issue #KN-10 feat:Added TestCases for CategoryInstance ,Framework Term * Issue KN-10 fix: get node inRelations data issue fix * KN-10 fix: index value populating in relation metadata * Issue IQ-436 feat: code changes for question & questionset v2 api's * Issue #IQ-436 fix: fixed HttpUrlUtilTest tests * KN-10 fix: index value populating in relation metadata for term api * Issue KN-10 fix: Validate translations and bug fix in category * Issue #IQ-436 fix: updated code to handle NullPointerException * Issue #IQ-436 fix: fixed build issue * Issue #IQ-436 fix: fixed build issue * Issue #KN-848 fix: updated test cases * Issue #IQ-436 feat: added test cases * Issue #IQ-436 fix: updated test cases * Issue #IQ-436 fix: updated test case * Issue KN-848 feat: taxonomy service migration to SBT. * Issue #IQ436 feat: updated code for test * Issue #KN-848 fix: updated test cases * Issue KN-10 fix: Added Master Category Validation in Category Instance Create API * Issue KN-10 fix: Added Test Cases * Issue KN-10 fix: framework read initial commit * Issue KN-10 fix: read and publish API changes * Issue KN-10 fix: read framework hierarchy fix * Issue KN-10 fix: Test Cases for Framework * Issue KN-10 feat: category terms set in redis on publish * Issue KN-10 fix: framework copy API changes * Issue KN-10 fix: master category required schema properties added * Issue KN-10 fix: Test Cases for Framework * Issue KN-10 fix: category instance validation issue fix * Issue #KN-848 fix: updated test cases * Issue KN-10 fix: framework copy API validation issue fix * Issue #KN-848 fix: updated application.conf * Issue #KN-848 fix: updated application.conf and error message * Issue KN-10 fix: removed commented code * Issue #KN-848 fix: removed imports * Issue KN-10 fix: removed println statements * Issue #KN-848 fix: removed imports * Issue #KN-848 fix: dummy commit * Issue #KN-848 fix: test commit * Issue #KN-848 fix: updated test * fix: added 
connection exception scenario. * fix: remove Matchers trait from BaseSpec * fix: upgrade plowermock * Issue KN-10 fix: DataSubGraph moved to correct folder * Issue KN-10 fix: test cases issue fixed * Issue KN-10 test: unit testes added for read & publish apis * Issue KN-10 fix: Jackson dependency exclusion * Issue #KN-10 fix: relation index issue fix * Issue #KN-10 fix: logs added * Issue #KN-10 fix: index issue fix for categoryInstance Currently, Framework getting from cache. While creating categoryInstance disabling the cache and getting node from DB * Issue #KN-10 fix: disable Cache true & false conditions handled * Issue #KN-10 fix: framework response sort issue fixed * Issue KN-10 fix: copy framework cache issue fix * Issue KN-10 fix: framework name issue fix * Issue KN-10 fix: DIAL code reserve issue fix * Issue KN-10 fix: DIAL code reserve issue fix * Issue KN-10 fix: DIAL code reserve issue fix * Issue KN-10 fix: category cache issue fix * Issue KN-10 fix: kafka version updated * Merge Release 5.5.0 into Release-5.6.0 (#982) * KN : CSP Changes * KN : CSP Changes - Circle ci changes * KN : CSP Changes - Circle ci changes * Update HttpUrlUtilTest.java * Issue #KN-920 fix: Jenkins Build changes * Issue #KN-920 fix: Jenkins Build changes * Issue #KN-920 fix: Jenkins Build changes * Issue #KN-920 fix: Jenkins Build changes * Issue #KN-920 fix: Jenkins Build changes * Issue #KN-920 fix: Jenkins Build changes * Issue #KN-920 fix: removed console logs * OCI changes: Added formaturl for content service (#979) * OCI changes Added formaturl for content service * ignore test case * CircleCi issue fix * CircleCi issue fix * Update StorageServiceTest.scala --------- Co-authored-by: Aiman Sharief Co-authored-by: Mahesh Kumar Gangula --------- Co-authored-by: vinukumar-vs Co-authored-by: Anil Gupta Co-authored-by: vinukumar-vs Co-authored-by: Gourav More Co-authored-by: Mahesh Kumar Gangula Co-authored-by: anilgupta Co-authored-by: Jayaprakash8887 Co-authored-by: 
Jayaprakash8887 Co-authored-by: Kumar Gauraw Co-authored-by: Surendrasingh Sucharia Co-authored-by: aimansharief <110388201+aimansharief@users.noreply.github.com> Co-authored-by: Aiman Sharief Co-authored-by: shourya-solanki Co-authored-by: Mahesh Kumar Gangula --- .circleci/config.yml | 2 +- .github/pull_request_template.md | 2 +- .github/pull_request_template.md.yaml | 2 +- KNOWLG-SETUP.md | 152 + README.md | 25 +- assessment-api/assessment-actors/pom.xml | 2 +- .../org/sunbird/actors/ItemSetActor.scala | 2 +- .../sunbird/managers/AssessmentManager.scala | 2 +- .../scala/org/sunbird/utils/RequestUtil.scala | 2 +- .../sunbird/actors/QuestionActorTest.scala | 2 +- .../sunbird/actors/QuestionSetActorTest.scala | 2 +- assessment-api/pom.xml | 2 +- assessment-api/qs-hierarchy-manager/pom.xml | 2 +- .../sunbird/managers/HierarchyManager.scala | 2 +- .../managers/UpdateHierarchyManager.scala | 3 +- .../managers/HierarchyManagerTest.scala | 2 +- build/content-service/Jenkinsfile | 7 +- .../License API(s).postman_collection.json | 1017 +++ ... Management API(s).postman_collection.json | 1179 ++++ ... Management API(s).postman_collection.json | 2921 +++++++++ ... 
Management API(s).postman_collection.json | 5667 +++++++++++++++++ content-api/collection-csv-actors/pom.xml | 2 +- .../manager/CollectionCSVManager.scala | 19 +- .../validator/CollectionCSVValidator.scala | 2 +- .../src/test/resources/application.conf | 2 +- .../TestCollectionCSVActor.scala | 1 - content-api/content-actors/pom.xml | 4 +- .../channel/managers/ChannelManager.scala | 2 +- .../sunbird/content/dial/DIALManager.scala | 6 +- .../content/publish/mgr/PublishManager.scala | 4 +- .../sunbird/content/util/DiscardManager.scala | 2 +- .../sunbird/content/util/FlagManager.scala | 2 +- .../sunbird/content/util/RetireManager.scala | 3 +- .../src/test/resources/application.conf | 5 +- .../content/actors/TestCategoryActor.scala | 25 +- .../content/actors/TestChannelActor.scala | 4 +- .../content/actors/TestContentActor.scala | 13 +- .../content/actors/TestEventActor.scala | 2 +- .../content/dial/DIALManagerTest.scala | 2 +- .../content/util/CopyManagerTest.scala | 9 +- .../content/util/DiscardManagerTest.scala | 11 +- .../content-service/conf/application.conf | 14 +- content-api/content-service/pom.xml | 6 +- content-api/hierarchy-manager/pom.xml | 2 +- .../sunbird/managers/HierarchyManager.scala | 3 +- .../managers/UpdateHierarchyManager.scala | 6 +- .../scala/org/sunbird/managers/BaseSpec.scala | 8 +- .../org/sunbird/managers/TestHierarchy.scala | 45 +- .../managers/TestUpdateHierarchy.scala | 7 +- content-api/pom.xml | 2 +- docker-compose.yml | 61 + knowlg-automation/.gitignore | 39 + knowlg-automation/LICENSE | 21 + knowlg-automation/README.md | 39 + .../helm_charts/cassandra/Chart.yaml | 5 + .../cassandra/templates/deployment.yaml | 33 + .../helm_charts/cassandra/values.yaml | 4 + .../helm_charts/content/Chart.yaml | 5 + .../content/content-service_application.conf | 657 ++ .../content/content-service_logback.xml | 30 + .../content/templates/content-configmap.yaml | 705 ++ .../content/templates/deployment.yaml | 75 + .../helm_charts/content/values.yaml | 42 + 
knowlg-automation/helm_charts/dial/Chart.yaml | 5 + .../dial/dial-service_application.conf | 189 + .../helm_charts/dial/dial-service_logback.xml | 0 .../dial/templates/deployment.yaml | 75 + .../helm_charts/dial/templates/dial-configmap | 234 + .../helm_charts/dial/values.yaml | 34 + .../helm_charts/elasticsearch/.helmignore | 21 + .../helm_charts/elasticsearch/Chart.lock | 9 + .../helm_charts/elasticsearch/Chart.yaml | 28 + .../helm_charts/elasticsearch/README.md | 969 +++ .../elasticsearch/charts/common-2.2.4.tgz | Bin 0 -> 12364 bytes .../elasticsearch/templates/NOTES.txt | 121 + .../elasticsearch/templates/_helpers.tpl | 585 ++ .../elasticsearch/templates/configmap.yaml | 23 + .../templates/coordinating/hpa.yaml | 47 + .../coordinating/serviceaccount.yaml | 22 + .../templates/coordinating/statefulset.yaml | 313 + .../templates/coordinating/svc-headless.yaml | 28 + .../elasticsearch/templates/data/hpa.yaml | 47 + .../templates/data/serviceaccount.yaml | 22 + .../templates/data/statefulset.yaml | 373 ++ .../templates/data/svc-headless.yaml | 28 + .../elasticsearch/templates/extra-list.yaml | 4 + .../elasticsearch/templates/ingest/hpa.yaml | 47 + .../templates/ingest/ingress.yaml | 65 + .../templates/ingest/service.yaml | 60 + .../templates/ingest/serviceaccount.yaml | 22 + .../templates/ingest/statefulset.yaml | 313 + .../templates/ingest/svc-headless.yaml | 28 + .../templates/ingress-tls-secrets.yaml | 94 + .../elasticsearch/templates/ingress.yaml | 65 + .../templates/initialization-configmap.yaml | 16 + .../elasticsearch/templates/master/hpa.yaml | 47 + .../templates/master/serviceaccount.yaml | 22 + .../templates/master/statefulset.yaml | 373 ++ .../templates/master/svc-headless.yaml | 28 + .../templates/metrics/deployment.yaml | 182 + .../templates/metrics/prometheusrule.yaml | 22 + .../templates/metrics/service.yaml | 24 + .../templates/metrics/servicemonitor.yaml | 46 + .../elasticsearch/templates/secrets.yaml | 43 + .../elasticsearch/templates/service.yaml 
| 66 + .../elasticsearch/templates/tls-secret.yaml | 117 + .../helm_charts/elasticsearch/values.yaml | 2145 +++++++ .../helm_charts/flink/.helmignore | 22 + .../helm_charts/flink/Chart.yaml | 21 + .../helm_charts/flink/templates/_helpers.tpl | 67 + .../flink/templates/flink_job_configmap.yaml | 14 + .../flink/templates/flink_job_deployment.yaml | 202 + .../helm_charts/flink/values.yaml | 660 ++ .../helm_charts/kafka/.helmignore | 21 + .../helm_charts/kafka/Chart.lock | 9 + .../helm_charts/kafka/Chart.yaml | 33 + knowlg-automation/helm_charts/kafka/README.md | 1050 +++ .../kafka/charts/common/.helmignore | 22 + .../kafka/charts/common/Chart.yaml | 23 + .../helm_charts/kafka/charts/common/README.md | 351 + .../charts/common/templates/_affinities.tpl | 106 + .../charts/common/templates/_capabilities.tpl | 154 + .../kafka/charts/common/templates/_errors.tpl | 23 + .../kafka/charts/common/templates/_images.tpl | 76 + .../charts/common/templates/_ingress.tpl | 68 + .../kafka/charts/common/templates/_labels.tpl | 18 + .../kafka/charts/common/templates/_names.tpl | 66 + .../charts/common/templates/_secrets.tpl | 165 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../kafka/charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../common/templates/validations/_mysql.tpl | 103 + .../templates/validations/_postgresql.tpl | 129 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../kafka/charts/common/values.yaml | 5 + .../kafka/charts/zookeeper/.helmignore | 21 + .../kafka/charts/zookeeper/Chart.lock | 6 + .../kafka/charts/zookeeper/Chart.yaml | 24 + .../kafka/charts/zookeeper/README.md | 532 ++ .../zookeeper/charts/common/.helmignore | 22 + 
.../charts/zookeeper/charts/common/Chart.yaml | 23 + .../charts/zookeeper/charts/common/README.md | 351 + .../charts/common/templates/_affinities.tpl | 106 + .../charts/common/templates/_capabilities.tpl | 154 + .../charts/common/templates/_errors.tpl | 23 + .../charts/common/templates/_images.tpl | 76 + .../charts/common/templates/_ingress.tpl | 68 + .../charts/common/templates/_labels.tpl | 18 + .../charts/common/templates/_names.tpl | 70 + .../charts/common/templates/_secrets.tpl | 165 + .../charts/common/templates/_storage.tpl | 23 + .../charts/common/templates/_tplvalues.tpl | 13 + .../charts/common/templates/_utils.tpl | 62 + .../charts/common/templates/_warnings.tpl | 14 + .../templates/validations/_cassandra.tpl | 72 + .../common/templates/validations/_mariadb.tpl | 103 + .../common/templates/validations/_mongodb.tpl | 108 + .../common/templates/validations/_mysql.tpl | 103 + .../templates/validations/_postgresql.tpl | 129 + .../common/templates/validations/_redis.tpl | 76 + .../templates/validations/_validations.tpl | 46 + .../zookeeper/charts/common/values.yaml | 5 + .../charts/zookeeper/templates/NOTES.txt | 76 + .../charts/zookeeper/templates/_helpers.tpl | 361 ++ .../charts/zookeeper/templates/configmap.yaml | 17 + .../zookeeper/templates/extra-list.yaml | 4 + .../zookeeper/templates/metrics-svc.yaml | 29 + .../zookeeper/templates/networkpolicy.yaml | 41 + .../kafka/charts/zookeeper/templates/pdb.yaml | 26 + .../zookeeper/templates/prometheusrule.yaml | 27 + .../templates/scripts-configmap.yaml | 102 + .../charts/zookeeper/templates/secrets.yaml | 77 + .../zookeeper/templates/serviceaccount.yaml | 21 + .../zookeeper/templates/servicemonitor.yaml | 53 + .../zookeeper/templates/statefulset.yaml | 532 ++ .../zookeeper/templates/svc-headless.yaml | 42 + .../kafka/charts/zookeeper/templates/svc.yaml | 71 + .../zookeeper/templates/tls-secrets.yaml | 55 + .../kafka/charts/zookeeper/values.yaml | 877 +++ knowlg-automation/helm_charts/kafka/kafka | 13 + 
.../helm_charts/kafka/templates/NOTES.txt | 310 + .../helm_charts/kafka/templates/_helpers.tpl | 509 ++ .../kafka/templates/configmap.yaml | 17 + .../kafka/templates/extra-list.yaml | 4 + .../kafka/templates/jaas-secret.yaml | 40 + .../kafka/templates/jmx-configmap.yaml | 64 + .../kafka/templates/jmx-metrics-svc.yaml | 34 + .../templates/kafka-metrics-deployment.yaml | 171 + .../kafka-metrics-serviceaccount.yaml | 16 + .../kafka/templates/kafka-metrics-svc.yaml | 34 + .../templates/kafka-provisioning-secret.yaml | 19 + .../kafka-provisioning-serviceaccount.yaml | 15 + .../kafka/templates/kafka-provisioning.yaml | 260 + .../kafka/templates/log4j-configmap.yaml | 17 + .../kafka/templates/networkpolicy-egress.yaml | 22 + .../templates/networkpolicy-ingress.yaml | 53 + .../kafka/templates/poddisruptionbudget.yaml | 26 + .../kafka/templates/prometheusrule.yaml | 20 + .../helm_charts/kafka/templates/role.yaml | 24 + .../kafka/templates/rolebinding.yaml | 23 + .../kafka/templates/scripts-configmap.yaml | 202 + .../kafka/templates/serviceaccount.yaml | 20 + .../templates/servicemonitor-jmx-metrics.yaml | 53 + .../templates/servicemonitor-metrics.yaml | 53 + .../kafka/templates/statefulset.yaml | 610 ++ .../kafka/templates/svc-external-access.yaml | 63 + .../kafka/templates/svc-headless.yaml | 37 + .../helm_charts/kafka/templates/svc.yaml | 63 + .../kafka/templates/tls-secrets.yaml | 30 + .../helm_charts/kafka/values.yaml | 1808 ++++++ .../helm_charts/learning/Chart.yaml | 5 + .../learning-service_application.conf | 315 + .../learning/templates/deployment.yaml | 64 + .../templates/learning-configmap.yaml | 323 + .../helm_charts/learning/values.yaml | 54 + .../helm_charts/neo4j/Chart.yaml | 5 + .../neo4j/templates/deployment.yaml | 72 + .../neo4j/templates/logstash-configmap.yaml | 39 + .../helm_charts/neo4j/values.yaml | 20 + .../helm_charts/redis/Chart.yaml | 5 + .../redis/templates/deployment.yaml | 34 + .../helm_charts/redis/values.yaml | 5 + 
.../helm_charts/search/Chart.yaml | 5 + .../search/search-service_application.conf | 315 + .../search/search-service_logback.xml | 0 .../search/templates/deployment.yaml | 75 + .../search/templates/search-configmap.yaml | 393 ++ .../helm_charts/search/values.yaml | 28 + .../helm_charts/taxonomy/Chart.yaml | 5 + .../taxonomy-service_application.conf | 399 ++ .../taxonomy/taxonomy-service_logback.xml | 31 + .../taxonomy/templates/deployment.yaml | 75 + .../templates/taxonomy-configmap.yaml | 446 ++ .../helm_charts/taxonomy/values.yaml | 31 + knowlg-automation/terraform/aks.yaml | 4 + knowlg-automation/terraform/azure/aks.tf | 21 + .../terraform/azure/cassandra-provision.tf | 11 + .../terraform/azure/content-provision.tf | 11 + knowlg-automation/terraform/azure/data.tf | 1 + .../terraform/azure/dial-provision.tf | 10 + .../azure/elasticsearch-provision.tf | 11 + .../terraform/azure/flink-provision.tf | 10 + knowlg-automation/terraform/azure/kafka.tf | 1 + .../terraform/azure/kafka_vars.tf | 1 + .../terraform/azure/learning-provision.tf | 10 + knowlg-automation/terraform/azure/local.tf | 18 + .../terraform/azure/local_file.tf | 4 + .../terraform/azure/neo4j-provision.tf | 10 + knowlg-automation/terraform/azure/provider.tf | 17 + .../terraform/azure/redis-provision.tf | 10 + .../terraform/azure/search-provision.tf | 10 + .../terraform/azure/storage_account.tf | 12 + .../terraform/azure/storage_container.tf | 5 + knowlg-automation/terraform/azure/subnets.tf | 7 + .../terraform/azure/taxonomy-provision.tf | 11 + .../terraform/azure/variables.tf | 250 + .../terraform/azure/virtual_network.tf | 10 + knowlg-automation/terraform/create | 16 + knowlg-automation/terraform/destroy | 5 + knowlg-automation/terraform/helm/kafka.tf | 36 + .../terraform/helm/kafka_exporter.tf | 15 + .../terraform/helm/kafka_exporter_vars.tf | 35 + .../terraform/helm/kafka_vars.tf | 107 + .../terraform/local/cassandra-provision.tf | 11 + .../terraform/local/content-provision.tf | 11 + 
.../terraform/local/dial-provision.tf | 10 + .../local/elasticsearch-provision.tf | 11 + .../terraform/local/flink-provision.tf | 9 + .../terraform/local/kafka-provision.tf | 72 + .../terraform/local/learning-provision.tf | 10 + knowlg-automation/terraform/local/main.tf | 51 + .../terraform/local/neo4j-provision.tf | 10 + .../terraform/local/redis-provision.tf | 10 + .../terraform/local/search-provision.tf | 10 + .../terraform/local/taxonomy-provision.tf | 11 + knowlg-automation/terraform/local/vars.tf | 195 + knowlg-automation/terraform/local/versions.tf | 21 + knowlg-docker-image.sh | 23 + kubernetes/content/Chart.yaml | 5 + .../content/content-service_application.conf | 657 ++ .../content/content-service_logback.xml | 30 + kubernetes/content/templates/deployment.yaml | 74 + kubernetes/content/values.yaml | 22 + kubernetes/sunbird-dbs/cassandra/Chart.yaml | 5 + .../cassandra/templates/deployment.yaml | 33 + kubernetes/sunbird-dbs/cassandra/values.yaml | 4 + kubernetes/sunbird-dbs/neo4j/Chart.yaml | 5 + .../neo4j/templates/deployment.yaml | 51 + kubernetes/sunbird-dbs/neo4j/values.yaml | 18 + kubernetes/sunbird-dbs/redis/Chart.yaml | 5 + .../redis/templates/deployment.yaml | 34 + kubernetes/sunbird-dbs/redis/values.yaml | 5 + kubernetes/taxonomy/Chart.yaml | 5 + .../taxonomy-service_application.conf | 399 ++ .../taxonomy/taxonomy-service_logback.xml | 31 + kubernetes/taxonomy/templates/deployment.yaml | 74 + kubernetes/taxonomy/values.yaml | 28 + local-setup.sh | 6 + ontology-engine/graph-common/pom.xml | 2 +- .../src/test/resources/application.conf | 2 +- .../org/sunbird/graph/GraphService.scala | 74 - .../pom.xml | 4 +- .../org/sunbird/graph/GraphService.scala | 101 + .../sunbird/graph/OntologyEngineContext.scala | 0 .../graph/exception/GraphErrorCodes.scala | 0 .../graph/external/ExternalPropsManager.scala | 0 .../graph/external/store/ExternalStore.scala | 10 +- .../external/store/ExternalStoreFactory.scala | 0 .../org/sunbird/graph/util/CSPMetaUtil.scala | 176 
+ .../sunbird/graph/util/ScalaJsonUtil.scala | 37 + .../graph/validator/NodeValidator.scala | 2 +- .../src/test/resources/application.conf | 9 +- .../src/test/resources/cassandra-unit.yaml | 0 .../src/test/resources/logback.xml | 28 + .../scala/org/sunbird/graph/BaseSpec.scala | 0 .../external/ExternalPropsManagerTest.scala | 33 +- .../sunbird/graph/util/CSPMetaUtilTest.scala | 234 + .../graph/util/ScalaJsonUtilTest.scala | 52 + ontology-engine/graph-dac-api/pom.xml | 15 +- .../org/sunbird/graph/dac/model/Node.java | 8 +- .../org/sunbird/graph/dac/model/Relation.java | 6 + .../org/sunbird/graph/dac/model/SubGraph.java | 33 + .../operation/GraphAsyncOperations.java | 94 +- .../operation/SearchAsyncOperations.java | 4 +- .../util/GraphQueryGenerationUtil.java | 7 + .../NodeAsyncOperationsExceptionTest.java | 5 +- .../operation/NodeAsyncOperationsTest.java | 3 +- .../sunbird/graph/schema/DefinitionNode.scala | 306 - .../pom.xml | 6 +- .../sunbird/graph/engine/CaseClasses.scala | 0 .../graph/external/store/ExternalStore.scala | 10 +- .../graph/health/HealthCheckManager.scala | 0 .../org/sunbird/graph/nodes/DataNode.scala | 13 +- .../org/sunbird/graph/path/DataSubGraph.scala | 136 + .../graph/relations/AbstractRelation.scala | 0 .../graph/relations/AssociationRelation.scala | 0 .../sunbird/graph/relations/IRelation.scala | 0 .../graph/relations/RelationHandler.scala | 0 .../SequenceMembershipRelation.scala | 0 .../schema/CategoryDefinitionValidator.scala | 0 .../graph/schema/CoreDomainObject.scala | 0 .../sunbird/graph/schema/DefinitionDTO.scala | 5 +- .../graph/schema/DefinitionFactory.scala | 0 .../sunbird/graph/schema/DefinitionNode.scala | 321 + .../schema/FrameworkMasterCategoryMap.scala | 0 .../sunbird/graph/schema/IDefinition.scala | 2 +- .../schema/ObjectCategoryDefinition.scala | 0 .../schema/ObjectCategoryDefinitionMap.scala | 0 .../schema/validator/BaseDefinitionNode.scala | 9 +- .../schema/validator/FrameworkValidator.scala | 2 +- 
.../validator/PropAsEdgeValidator.scala | 2 +- .../schema/validator/RelationValidator.scala | 0 .../schema/validator/SchemaValidator.scala | 0 .../validator/VersionKeyValidator.scala | 0 .../schema/validator/VersioningNode.scala | 28 +- .../org/sunbird/graph/utils/NodeUtil.scala | 35 +- .../sunbird/graph/utils/ScalaJsonUtils.scala | 0 .../src/test/resources/application.conf | 2 +- .../src/test/resources/cassandra-unit.yaml | 0 .../src/test/resources/logback.xml | 28 + .../scala/org/sunbird/graph/BaseSpec.scala | 3 +- .../graph/health/TestHealthCheckManager.scala | 3 +- .../sunbird/graph/nodes/TestDataNode.scala | 43 +- .../graph/schema/TestDefinitionNode.scala | 12 + .../TestObjectCategoryDefinitionMap.scala | 3 +- .../validator/TestSchemaValidator.scala | 5 +- .../sunbird/graph/utils/NodeUtilTest.scala | 5 +- .../graph/utils/ScalaJsonUtilsTest.scala | 0 ontology-engine/pom.xml | 4 +- platform-core/platform-common/pom.xml | 2 +- platform-core/platform-telemetry/pom.xml | 2 +- platform-core/pom.xml | 2 +- platform-core/schema-validator/pom.xml | 2 +- .../src/test/resources/application.conf | 2 +- platform-modules/import-manager/pom.xml | 4 +- platform-modules/mimetype-manager/pom.xml | 10 +- .../sunbird/cloudstore/StorageService.scala | 53 +- .../mgr/impl/CollectionMimeTypeMgrImpl.scala | 2 +- .../mgr/impl/EcmlMimeTypeMgrImpl.scala | 2 +- .../src/test/resources/application.conf | 11 +- .../cloudstore/StorageServiceTest.scala | 90 +- .../mgr/impl/EcmlMimeTypeMgrImplTest.scala | 19 +- .../mgr/impl/H5PMimeTypeMgrImplTest.scala | 2 +- .../mgr/impl/PluginMimeTypeMgrImplTest.scala | 2 +- platform-modules/pom.xml | 2 +- platform-modules/url-manager/pom.xml | 2 +- .../sunbird/url/util/GoogleDriveUrlUtil.java | 2 - .../org/sunbird/url/util/HTTPUrlUtil.java | 7 +- .../mgr/impl/YouTubeURLManagerImplTest.java | 3 +- .../sunbird/url/util/GoogleDriveUtilTest.java | 6 +- .../org/sunbird/url/util/HttpUrlUtilTest.java | 5 +- .../sunbird/url/util/YouTubeUrlUtilTest.java | 5 +- 
pom.xml | 6 +- schemas/asset/1.0/schema.json | 3 + schemas/category/1.0/schema.json | 19 +- schemas/collection/1.0/schema.json | 3 + schemas/content/1.0/schema.json | 2819 ++++---- schemas/framework/1.0/config.json | 10 +- schemas/relationalmetadata/1.0/schema.json | 8 +- ...mposite Search API.postman_collection.json | 714 +++ ...Composite Search API.postman_test_run.json | 661 ++ search-api/pom.xml | 2 +- search-api/search-actors/pom.xml | 8 +- .../org/sunbird/actors/HealthActorTest.java | 2 - .../src/test/resources/application.conf | 2 +- search-api/search-core/pom.xml | 4 +- .../sunbird/search/ElasticSearchUtilTest.java | 1 + .../sunbird/search/SearchProcessorTest.java | 2 +- .../search/util/DefinitionUtilTest.java | 1 - .../src/test/resources/application.conf | 2 +- .../controllers/SearchBaseController.scala | 2 +- .../search-service/conf/application.conf | 2 +- search-api/search-service/pom.xml | 2 +- .../Framework API(s).postman_collection.json | 4507 +++++++++++++ ...ct Category API(s).postman_collection.json | 1854 ++++++ taxonomy-api/pom.xml | 2 +- taxonomy-api/taxonomy-actors/pom.xml | 2 +- .../org/sunbird/actors/CategoryActor.scala | 68 + .../actors/CategoryInstanceActor.scala | 148 + .../org/sunbird/actors/FrameworkActor.scala | 168 + .../scala/org/sunbird/actors/TermActor.scala | 163 + .../org/sunbird/mangers/CategoryManager.scala | 27 + .../sunbird/mangers/FrameworkManager.scala | 266 + .../org/sunbird/utils/CategoryCache.scala | 55 + .../scala/org/sunbird/utils/Constants.scala | 43 + .../org/sunbird/utils/FrameworkCache.scala | 65 + .../src/test/resources/application.conf | 2 +- .../scala/org/sunbird/actors/BaseSpec.scala | 30 + .../sunbird/actors/CategoryActorTest.scala | 219 + .../actors/CategoryInstanceActorTest.scala | 364 ++ .../sunbird/actors/FrameworkActorTest.scala | 315 + .../org/sunbird/actors/TermActorTest.scala | 350 + .../org/sunbird/utils/RequestUtilTest.scala | 34 + .../app/controllers/BaseController.scala | 2 +- 
.../controllers/v3/CategoryController.scala | 57 + .../v3/CategoryInstanceController.scala | 67 + .../controllers/v3/FrameworkController.scala | 86 +- .../v3/FrameworkTermController.scala | 68 + .../app/modules/TaxonomyModule.scala | 6 +- .../app/utils/ActorNames.scala | 4 + .../taxonomy-service/app/utils/ApiId.scala | 20 + .../taxonomy-service/conf/application.conf | 20 +- taxonomy-api/taxonomy-service/conf/routes | 16 +- .../controllers/v3/CategoryInstanceSpec.scala | 39 + .../test/controllers/v3/CategorySpec.scala | 39 + .../test/controllers/v3/FrameworkSpec.scala | 2 +- .../controllers/v3/FrameworkTermSpec.scala | 45 + .../test/modules/TestModule.scala | 4 + .../app/controllers/BaseController.scala | 75 + .../app/controllers/HealthController.scala | 23 + .../controllers/v3/FrameworkController.scala | 54 + .../v4/ObjectCategoryController.scala | 49 + .../ObjectCategoryDefinitionController.scala | 69 + .../app/filters/AccessLogFilter.scala | 45 + .../app/handlers/SignalHandler.scala | 33 + .../app/modules/TaxonomyModule.scala | 17 + .../app/utils/ActorNames.scala | 9 + taxonomy-service-sbt/app/utils/ApiId.scala | 13 + .../app/utils/ItemSetOperations.scala | 5 + .../app/utils/JavaJsonUtils.scala | 38 + taxonomy-service-sbt/build.sbt | 30 + taxonomy-service-sbt/conf/application.conf | 339 + taxonomy-service-sbt/conf/logback.xml | 28 + taxonomy-service-sbt/conf/routes | 30 + taxonomy-service-sbt/project/build.properties | 1 + taxonomy-service-sbt/project/plugins.sbt | 12 + .../test/controllers/base/BaseSpec.scala | 38 + .../test/controllers/v3/FrameworkSpec.scala | 65 + .../controllers/v3/HealthControllerSpec.scala | 18 + .../v4/ObjectCategoryDefinitionSpec.scala | 40 + .../controllers/v4/ObjectCategorySpec.scala | 37 + .../test/modules/TestModule.scala | 28 + vmsetup.sh | 2 +- 478 files changed, 53197 insertions(+), 2166 deletions(-) create mode 100644 KNOWLG-SETUP.md create mode 100644 content-api/api-tests/Collections/License API(s).postman_collection.json create 
mode 100644 content-api/api-tests/Collections/Sunbird Asset Management API(s).postman_collection.json create mode 100644 content-api/api-tests/Collections/Sunbird Collection Management API(s).postman_collection.json create mode 100644 content-api/api-tests/Collections/Sunbird Content Management API(s).postman_collection.json create mode 100755 docker-compose.yml create mode 100644 knowlg-automation/.gitignore create mode 100644 knowlg-automation/LICENSE create mode 100644 knowlg-automation/README.md create mode 100644 knowlg-automation/helm_charts/cassandra/Chart.yaml create mode 100644 knowlg-automation/helm_charts/cassandra/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/cassandra/values.yaml create mode 100644 knowlg-automation/helm_charts/content/Chart.yaml create mode 100644 knowlg-automation/helm_charts/content/content-service_application.conf create mode 100644 knowlg-automation/helm_charts/content/content-service_logback.xml create mode 100644 knowlg-automation/helm_charts/content/templates/content-configmap.yaml create mode 100644 knowlg-automation/helm_charts/content/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/content/values.yaml create mode 100644 knowlg-automation/helm_charts/dial/Chart.yaml create mode 100644 knowlg-automation/helm_charts/dial/dial-service_application.conf rename ontology-engine/graph-core_2.11/src/test/resources/logback.xml => knowlg-automation/helm_charts/dial/dial-service_logback.xml (100%) create mode 100644 knowlg-automation/helm_charts/dial/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/dial/templates/dial-configmap create mode 100644 knowlg-automation/helm_charts/dial/values.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/.helmignore create mode 100644 knowlg-automation/helm_charts/elasticsearch/Chart.lock create mode 100644 knowlg-automation/helm_charts/elasticsearch/Chart.yaml create mode 100644 
knowlg-automation/helm_charts/elasticsearch/README.md create mode 100644 knowlg-automation/helm_charts/elasticsearch/charts/common-2.2.4.tgz create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/NOTES.txt create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/_helpers.tpl create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/configmap.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/coordinating/hpa.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/coordinating/serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/coordinating/statefulset.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/coordinating/svc-headless.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/data/hpa.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/data/serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/data/statefulset.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/data/svc-headless.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/extra-list.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingest/hpa.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingest/ingress.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingest/service.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingest/serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingest/statefulset.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingest/svc-headless.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/ingress-tls-secrets.yaml create mode 100644 
knowlg-automation/helm_charts/elasticsearch/templates/ingress.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/initialization-configmap.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/master/hpa.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/master/serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/master/statefulset.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/master/svc-headless.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/metrics/deployment.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/metrics/prometheusrule.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/metrics/service.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/metrics/servicemonitor.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/secrets.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/service.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/templates/tls-secret.yaml create mode 100644 knowlg-automation/helm_charts/elasticsearch/values.yaml create mode 100644 knowlg-automation/helm_charts/flink/.helmignore create mode 100644 knowlg-automation/helm_charts/flink/Chart.yaml create mode 100644 knowlg-automation/helm_charts/flink/templates/_helpers.tpl create mode 100644 knowlg-automation/helm_charts/flink/templates/flink_job_configmap.yaml create mode 100644 knowlg-automation/helm_charts/flink/templates/flink_job_deployment.yaml create mode 100644 knowlg-automation/helm_charts/flink/values.yaml create mode 100644 knowlg-automation/helm_charts/kafka/.helmignore create mode 100644 knowlg-automation/helm_charts/kafka/Chart.lock create mode 100644 knowlg-automation/helm_charts/kafka/Chart.yaml create mode 100644 
knowlg-automation/helm_charts/kafka/README.md create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/.helmignore create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/Chart.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/README.md create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_affinities.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_capabilities.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_errors.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_images.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_ingress.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_labels.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_names.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_secrets.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_storage.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_tplvalues.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_utils.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/_warnings.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_cassandra.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mariadb.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mongodb.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mysql.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_postgresql.tpl create mode 100644 
knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_redis.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_validations.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/common/values.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/.helmignore create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.lock create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/README.md create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/.helmignore create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/Chart.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/README.md create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl create 
mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/values.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/NOTES.txt create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/_helpers.tpl create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/configmap.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/extra-list.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/metrics-svc.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/networkpolicy.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/pdb.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/prometheusrule.yaml create mode 100644 
knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/secrets.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/servicemonitor.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/statefulset.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc-headless.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/tls-secrets.yaml create mode 100644 knowlg-automation/helm_charts/kafka/charts/zookeeper/values.yaml create mode 100644 knowlg-automation/helm_charts/kafka/kafka create mode 100644 knowlg-automation/helm_charts/kafka/templates/NOTES.txt create mode 100644 knowlg-automation/helm_charts/kafka/templates/_helpers.tpl create mode 100644 knowlg-automation/helm_charts/kafka/templates/configmap.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/extra-list.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/jaas-secret.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/jmx-configmap.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/jmx-metrics-svc.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/kafka-metrics-deployment.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/kafka-metrics-serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/kafka-metrics-svc.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-secret.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-serviceaccount.yaml create mode 100644 
knowlg-automation/helm_charts/kafka/templates/kafka-provisioning.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/log4j-configmap.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/networkpolicy-egress.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/networkpolicy-ingress.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/poddisruptionbudget.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/prometheusrule.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/role.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/rolebinding.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/scripts-configmap.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/serviceaccount.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/servicemonitor-jmx-metrics.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/servicemonitor-metrics.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/statefulset.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/svc-external-access.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/svc-headless.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/svc.yaml create mode 100644 knowlg-automation/helm_charts/kafka/templates/tls-secrets.yaml create mode 100644 knowlg-automation/helm_charts/kafka/values.yaml create mode 100644 knowlg-automation/helm_charts/learning/Chart.yaml create mode 100644 knowlg-automation/helm_charts/learning/learning-service_application.conf create mode 100644 knowlg-automation/helm_charts/learning/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/learning/templates/learning-configmap.yaml create mode 100644 knowlg-automation/helm_charts/learning/values.yaml create mode 100644 knowlg-automation/helm_charts/neo4j/Chart.yaml create mode 
100644 knowlg-automation/helm_charts/neo4j/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/neo4j/templates/logstash-configmap.yaml create mode 100644 knowlg-automation/helm_charts/neo4j/values.yaml create mode 100644 knowlg-automation/helm_charts/redis/Chart.yaml create mode 100644 knowlg-automation/helm_charts/redis/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/redis/values.yaml create mode 100644 knowlg-automation/helm_charts/search/Chart.yaml create mode 100644 knowlg-automation/helm_charts/search/search-service_application.conf rename ontology-engine/graph-engine_2.11/src/test/resources/logback.xml => knowlg-automation/helm_charts/search/search-service_logback.xml (100%) create mode 100644 knowlg-automation/helm_charts/search/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/search/templates/search-configmap.yaml create mode 100644 knowlg-automation/helm_charts/search/values.yaml create mode 100644 knowlg-automation/helm_charts/taxonomy/Chart.yaml create mode 100644 knowlg-automation/helm_charts/taxonomy/taxonomy-service_application.conf create mode 100644 knowlg-automation/helm_charts/taxonomy/taxonomy-service_logback.xml create mode 100644 knowlg-automation/helm_charts/taxonomy/templates/deployment.yaml create mode 100644 knowlg-automation/helm_charts/taxonomy/templates/taxonomy-configmap.yaml create mode 100644 knowlg-automation/helm_charts/taxonomy/values.yaml create mode 100644 knowlg-automation/terraform/aks.yaml create mode 100644 knowlg-automation/terraform/azure/aks.tf create mode 100644 knowlg-automation/terraform/azure/cassandra-provision.tf create mode 100644 knowlg-automation/terraform/azure/content-provision.tf create mode 100644 knowlg-automation/terraform/azure/data.tf create mode 100644 knowlg-automation/terraform/azure/dial-provision.tf create mode 100644 knowlg-automation/terraform/azure/elasticsearch-provision.tf create mode 100644 
knowlg-automation/terraform/azure/flink-provision.tf create mode 120000 knowlg-automation/terraform/azure/kafka.tf create mode 120000 knowlg-automation/terraform/azure/kafka_vars.tf create mode 100644 knowlg-automation/terraform/azure/learning-provision.tf create mode 100644 knowlg-automation/terraform/azure/local.tf create mode 100644 knowlg-automation/terraform/azure/local_file.tf create mode 100644 knowlg-automation/terraform/azure/neo4j-provision.tf create mode 100644 knowlg-automation/terraform/azure/provider.tf create mode 100644 knowlg-automation/terraform/azure/redis-provision.tf create mode 100644 knowlg-automation/terraform/azure/search-provision.tf create mode 100644 knowlg-automation/terraform/azure/storage_account.tf create mode 100644 knowlg-automation/terraform/azure/storage_container.tf create mode 100644 knowlg-automation/terraform/azure/subnets.tf create mode 100644 knowlg-automation/terraform/azure/taxonomy-provision.tf create mode 100644 knowlg-automation/terraform/azure/variables.tf create mode 100644 knowlg-automation/terraform/azure/virtual_network.tf create mode 100644 knowlg-automation/terraform/create create mode 100644 knowlg-automation/terraform/destroy create mode 100644 knowlg-automation/terraform/helm/kafka.tf create mode 100644 knowlg-automation/terraform/helm/kafka_exporter.tf create mode 100644 knowlg-automation/terraform/helm/kafka_exporter_vars.tf create mode 100644 knowlg-automation/terraform/helm/kafka_vars.tf create mode 100644 knowlg-automation/terraform/local/cassandra-provision.tf create mode 100644 knowlg-automation/terraform/local/content-provision.tf create mode 100644 knowlg-automation/terraform/local/dial-provision.tf create mode 100644 knowlg-automation/terraform/local/elasticsearch-provision.tf create mode 100644 knowlg-automation/terraform/local/flink-provision.tf create mode 100644 knowlg-automation/terraform/local/kafka-provision.tf create mode 100644 knowlg-automation/terraform/local/learning-provision.tf create 
mode 100644 knowlg-automation/terraform/local/main.tf create mode 100644 knowlg-automation/terraform/local/neo4j-provision.tf create mode 100644 knowlg-automation/terraform/local/redis-provision.tf create mode 100644 knowlg-automation/terraform/local/search-provision.tf create mode 100644 knowlg-automation/terraform/local/taxonomy-provision.tf create mode 100644 knowlg-automation/terraform/local/vars.tf create mode 100644 knowlg-automation/terraform/local/versions.tf create mode 100755 knowlg-docker-image.sh create mode 100644 kubernetes/content/Chart.yaml create mode 100644 kubernetes/content/content-service_application.conf create mode 100644 kubernetes/content/content-service_logback.xml create mode 100644 kubernetes/content/templates/deployment.yaml create mode 100644 kubernetes/content/values.yaml create mode 100644 kubernetes/sunbird-dbs/cassandra/Chart.yaml create mode 100644 kubernetes/sunbird-dbs/cassandra/templates/deployment.yaml create mode 100644 kubernetes/sunbird-dbs/cassandra/values.yaml create mode 100644 kubernetes/sunbird-dbs/neo4j/Chart.yaml create mode 100644 kubernetes/sunbird-dbs/neo4j/templates/deployment.yaml create mode 100644 kubernetes/sunbird-dbs/neo4j/values.yaml create mode 100644 kubernetes/sunbird-dbs/redis/Chart.yaml create mode 100644 kubernetes/sunbird-dbs/redis/templates/deployment.yaml create mode 100644 kubernetes/sunbird-dbs/redis/values.yaml create mode 100644 kubernetes/taxonomy/Chart.yaml create mode 100644 kubernetes/taxonomy/taxonomy-service_application.conf create mode 100644 kubernetes/taxonomy/taxonomy-service_logback.xml create mode 100644 kubernetes/taxonomy/templates/deployment.yaml create mode 100644 kubernetes/taxonomy/values.yaml create mode 100755 local-setup.sh delete mode 100644 ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/GraphService.scala rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/pom.xml (98%) create mode 100644 
ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala (100%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/main/scala/org/sunbird/graph/exception/GraphErrorCodes.scala (100%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/main/scala/org/sunbird/graph/external/ExternalPropsManager.scala (100%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala (95%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/main/scala/org/sunbird/graph/external/store/ExternalStoreFactory.scala (100%) create mode 100644 ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala create mode 100644 ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/ScalaJsonUtil.scala rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala (97%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/test/resources/application.conf (96%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/test/resources/cassandra-unit.yaml (100%) create mode 100644 ontology-engine/graph-core_2.12/src/test/resources/logback.xml rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/test/scala/org/sunbird/graph/BaseSpec.scala (100%) rename ontology-engine/{graph-core_2.11 => graph-core_2.12}/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala (86%) create mode 100644 ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/CSPMetaUtilTest.scala create mode 100644 ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/ScalaJsonUtilTest.scala create mode 100644 ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/SubGraph.java delete mode 100644 
ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/pom.xml (97%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/engine/CaseClasses.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala (95%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/health/HealthCheckManager.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/nodes/DataNode.scala (96%) create mode 100644 ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/relations/AbstractRelation.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/relations/AssociationRelation.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/relations/IRelation.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/relations/RelationHandler.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/relations/SequenceMembershipRelation.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/CategoryDefinitionValidator.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/CoreDomainObject.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala (97%) rename ontology-engine/{graph-engine_2.11 => 
graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/DefinitionFactory.scala (100%) create mode 100644 ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/FrameworkMasterCategoryMap.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/IDefinition.scala (88%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinition.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinitionMap.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala (91%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala (99%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala (97%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/RelationValidator.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/VersionKeyValidator.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala (85%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala (90%) rename ontology-engine/{graph-engine_2.11 => 
graph-engine_2.12}/src/main/scala/org/sunbird/graph/utils/ScalaJsonUtils.scala (100%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/resources/application.conf (99%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/resources/cassandra-unit.yaml (100%) create mode 100644 ontology-engine/graph-engine_2.12/src/test/resources/logback.xml rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/BaseSpec.scala (99%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala (97%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala (96%) create mode 100644 ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestDefinitionNode.scala rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala (95%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala (97%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala (97%) rename ontology-engine/{graph-engine_2.11 => graph-engine_2.12}/src/test/scala/org/sunbird/graph/utils/ScalaJsonUtilsTest.scala (100%) create mode 100644 search-api/api-tests/Collections/Composite Search API.postman_collection.json create mode 100644 search-api/api-tests/Reports/Composite Search API.postman_test_run.json create mode 100644 taxonomy-api/api-tests/Collections/Framework API(s).postman_collection.json create mode 100644 taxonomy-api/api-tests/Collections/Object Category API(s).postman_collection.json create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala create mode 100644 
taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/FrameworkActor.scala create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/CategoryManager.scala create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/CategoryCache.scala create mode 100644 taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/FrameworkCache.scala create mode 100644 taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryActorTest.scala create mode 100644 taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryInstanceActorTest.scala create mode 100644 taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/FrameworkActorTest.scala create mode 100644 taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/TermActorTest.scala create mode 100644 taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/utils/RequestUtilTest.scala create mode 100644 taxonomy-api/taxonomy-service/app/controllers/v3/CategoryController.scala create mode 100644 taxonomy-api/taxonomy-service/app/controllers/v3/CategoryInstanceController.scala create mode 100644 taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkTermController.scala create mode 100644 taxonomy-api/taxonomy-service/test/controllers/v3/CategoryInstanceSpec.scala create mode 100644 taxonomy-api/taxonomy-service/test/controllers/v3/CategorySpec.scala create mode 100644 taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkTermSpec.scala create mode 100644 taxonomy-service-sbt/app/controllers/BaseController.scala create mode 100644 taxonomy-service-sbt/app/controllers/HealthController.scala create mode 100644 
taxonomy-service-sbt/app/controllers/v3/FrameworkController.scala create mode 100644 taxonomy-service-sbt/app/controllers/v4/ObjectCategoryController.scala create mode 100644 taxonomy-service-sbt/app/controllers/v4/ObjectCategoryDefinitionController.scala create mode 100644 taxonomy-service-sbt/app/filters/AccessLogFilter.scala create mode 100644 taxonomy-service-sbt/app/handlers/SignalHandler.scala create mode 100644 taxonomy-service-sbt/app/modules/TaxonomyModule.scala create mode 100644 taxonomy-service-sbt/app/utils/ActorNames.scala create mode 100644 taxonomy-service-sbt/app/utils/ApiId.scala create mode 100644 taxonomy-service-sbt/app/utils/ItemSetOperations.scala create mode 100644 taxonomy-service-sbt/app/utils/JavaJsonUtils.scala create mode 100644 taxonomy-service-sbt/build.sbt create mode 100644 taxonomy-service-sbt/conf/application.conf create mode 100644 taxonomy-service-sbt/conf/logback.xml create mode 100644 taxonomy-service-sbt/conf/routes create mode 100644 taxonomy-service-sbt/project/build.properties create mode 100644 taxonomy-service-sbt/project/plugins.sbt create mode 100644 taxonomy-service-sbt/test/controllers/base/BaseSpec.scala create mode 100644 taxonomy-service-sbt/test/controllers/v3/FrameworkSpec.scala create mode 100644 taxonomy-service-sbt/test/controllers/v3/HealthControllerSpec.scala create mode 100644 taxonomy-service-sbt/test/controllers/v4/ObjectCategoryDefinitionSpec.scala create mode 100644 taxonomy-service-sbt/test/controllers/v4/ObjectCategorySpec.scala create mode 100644 taxonomy-service-sbt/test/modules/TestModule.scala diff --git a/.circleci/config.yml b/.circleci/config.yml index 711732d1d..6f9d6f8a9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,7 +11,7 @@ jobs: - run: name: Run build command: | - mvn clean install -DskipTests + mvn clean install -DskipTests -DCLOUD_STORE_GROUP_ID=$CLOUD_STORE_GROUP_ID -DCLOUD_STORE_ARTIFACT_ID=$CLOUD_STORE_ARTIFACT_ID -DCLOUD_STORE_VERSION=$CLOUD_STORE_VERSION - 
save_cache: paths: - ~/.m2 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 920b32a49..f7e45f070 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,7 +17,7 @@ Please describe the tests that you ran to verify your changes in the below check - [ ] Ran Test B **Test Configuration**: -* Software versions: Java 11, scala-2.11, play-2.7.2 +* Software versions: Java 11, scala-2.12, play-2.7.2 * Hardware versions: 2 CPU/ 4GB RAM ### Checklist: diff --git a/.github/pull_request_template.md.yaml b/.github/pull_request_template.md.yaml index f1826648e..64750cf63 100644 --- a/.github/pull_request_template.md.yaml +++ b/.github/pull_request_template.md.yaml @@ -17,7 +17,7 @@ Please describe the tests that you ran to verify your changes in the below check - [ ] Ran Test B **Test Configuration**: -* Software versions: Java 11, scala-2.11, play-2.7.2 +* Software versions: Java 11, scala-2.12, play-2.7.2 * Hardware versions: ### Checklist: diff --git a/KNOWLG-SETUP.md b/KNOWLG-SETUP.md new file mode 100644 index 000000000..1ca3ee9c1 --- /dev/null +++ b/KNOWLG-SETUP.md @@ -0,0 +1,152 @@ + +Below are the steps to set up the Sunbird Knowlg Microservices, DBs with seed data and Jobs. It uses a local Kubernetes cluster deploy the required services. + +### Prerequisites: +* Java 11 +* Maven +* Docker +* Minikube - It implements a local Kubernetes cluster on macOS, Linux, and Windows. +* KubeCtl - The Kubernetes command-line tool + +### Prepare folders for database data and logs + +```shell +mkdir -p ~/sunbird-dbs/neo4j ~/sunbird-dbs/cassandra ~/sunbird-dbs/redis ~/sunbird-dbs/es ~/sunbird-dbs/kafka +export sunbird_dbs_path=~/sunbird-dbs +``` + + + +### Docker Images of Knowlg MicroServices +Start Docker in your machine and create the Docker Images of below microservices using the shell script. +1. taxonomy-service +2. content-service +3. 
search-service + +```shell +sh ./knowlg-docker-image.sh # provide the TAG for the docker image. +``` +**Note:** Please specify the TAG for the Docker Images and update the configuration in helm chart of respective deployment. + +Check the Docker Images +```shell +docker image ls -a +``` +**Output:** +```shell +❯❯ docker image ls -a +REPOSITORY TAG IMAGE ID CREATED SIZE +assessment-service R5.0.0 72a9cc1b2cc4 14 seconds ago 479MB +search-service R5.0.0 24b7d8947a4f 23 seconds ago 465MB +content-service R5.0.0 afcbc9c10fa3 33 seconds ago 556MB +taxonomy-service R5.0.0 a8a24a6241f2 47 seconds ago 480MB +``` + +### Kubernetes Cluster Setup +Please use the minikube to quickly set up the kubernetes cluster in local machine. + +```shell +minikube start +``` + +### Load Docker Images to Minikube Cluster +```shell +minikube image load neo4j:3.3.0 +minikube image load taxonomy-service:R5.0.0 +``` + +### Create Namespace +Create the namespaces to deploy the API microservices, DBs and Jobs. +1. knowlg-api +2. knowlg-db +3. knowlg-job + +```shell +kubectl create namespace knowlg-api +kubectl create namespace knowlg-db +kubectl create namespace knowlg-job +``` + +### Setup Databases +Please run the below `helm` commands to set up the required databases within the kubernets cluster. +It requires the below DBs for Knowlg. +1. Neo4J +2. Cassandra +3. Elasticsearch +4. Kafka +5. 
Redis + +```shell +cd kubernetes +helm install redis sunbird-dbs/redis -n knowlg-db + +minikube mount :/var/lib/neo4j/data // LOCAL_SOURCE_DIR is where neo4j dump is extracted Ex: /Users/abc/sunbird-dbs/neo4j/data +helm install neo4j sunbird-dbs/neo4j -n knowlg-db + +minikube mount :/mnt/backups // LOCAL_SOURCE_DIR is where neo4j dump is extracted Ex: /Users/abc/sunbird-dbs/cassandra/backups +helm install cassandra sunbird-dbs/cassandra -n knowlg-db + +ssh to cassandra pod +run => cqlsh +run => source '/mnt/backups/cassandra_backup/db_schema.cql'; +``` + +**Note:** +- The `helm` charts for Kafka, Elasticsearch will be added soon. + +### Define ConfigMap +We use the configmap to load the configuration for the microservices. + +#### ConfigMap for Taxonomy-Service +Use the below commands to load the configmap of taxonomy-service. +1. `taxonomy-config` - this has the application configuration. Please update the variables with respect to your context and load. +2. `taxonomy-xml-config` - this has the logback configuration to handle the logs. + +We have to update the below configurations in `taxonomy/templates/taxonomy-service_application.conf` specific to your context. + +```shell +cd kubernetes +kubectl create configmap taxonomy-xml-config --from-file=taxonomy/taxonomy-service_logback.xml -n knowlg-api -o=yaml +kubectl create configmap taxonomy-config --from-file=taxonomy/taxonomy-service_application.conf -n knowlg-api -o=yaml +``` + +### Run Taxonomy-Service +Use the `taxonomy` helm chart to run the taxonomy-service in local kubernetes cluster. + +```shell +cd kubernetes +helm install taxonomy taxonomy -n knowlg-api +``` +Use Port Forwarding to access the application in the cluster from local. + +```shell +kubectl port-forward 9000:9000 -n knowlg-api +curl 'localhost:9000/health' +``` + +### Define ConfigMap for Content-Service +Use the below commands to load the configmap of content-Service. +1. `content-config` - this has the application configuration. 
Please update the variables with respect to your context and load. +2. `content-xml-config` - this has the logback configuration to handle the logs. + +We have to update the below configurations in `content/templates/content-service_application` specific to your context. + +```shell +cd kubernetes +kubectl create configmap content-xml-config --from-file=content/content-service_logback.xml -n knowlg-api -o=yaml +kubectl create configmap content-config --from-file=content/content-service_application.conf -n knowlg-api -o=yaml +``` + +### Run Content-Service +Use the `taxonomy` helm chart to run the Content-Service in local kubernetes cluster. + +```shell +cd kubernetes +helm install content content -n knowlg-api +``` +Use Port Forwarding to access the application in the cluster from local. + +```shell +kubectl port-forward 9000:9000 -n knowlg-api +curl 'localhost:9000/health' +``` \ No newline at end of file diff --git a/README.md b/README.md index 773722a36..1e4e3d577 100644 --- a/README.md +++ b/README.md @@ -2,13 +2,32 @@ Repository for Knowledge Platform - 2.0 -## Knowledge-platform local setup +## Knowledge-platform local setup This readme file contains the instruction to set up and run the content-service in local machine. ### System Requirements: ### Prerequisites: * Java 11 +* Docker, Docker Compose + + +## One step installation + +1. Go to Root folder (knowledge-platform) +2. Run "local-setup.sh" file +``` shell +sh ./local-setup.sh +``` + + This will install all the requied dcoker images & local folders for DB mounting. + 3. Follow the below manual setps of running content service + refer: [Running Content Service:](#running-content-service) + + + +## Manual steps to install all the dependents +Please follow the manual steps in [One step installation](#one-step-installation) is failed. 
### Prepare folders for database data and logs @@ -114,7 +133,7 @@ services: - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:2181 kafka: - image: 'wurstmeister/kafka:2.11-1.0.1' + image: 'wurstmeister/kafka:2.12-1.0.1' container_name: kafka ports: - "9092:9092" @@ -184,4 +203,4 @@ mvn play2:run 3. Using the below command we can verify whether the databases(neo4j,redis & cassandra) connection is established or not. If all connections are good, health is shown as 'true' otherwise it will be 'false'. ```shell curl http://localhost:9000/health -``` \ No newline at end of file +``` diff --git a/assessment-api/assessment-actors/pom.xml b/assessment-api/assessment-actors/pom.xml index d56dd0a67..5e5418207 100644 --- a/assessment-api/assessment-actors/pom.xml +++ b/assessment-api/assessment-actors/pom.xml @@ -28,7 +28,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar diff --git a/assessment-api/assessment-actors/src/main/scala/org/sunbird/actors/ItemSetActor.scala b/assessment-api/assessment-actors/src/main/scala/org/sunbird/actors/ItemSetActor.scala index 524a0604c..ba6d39993 100644 --- a/assessment-api/assessment-actors/src/main/scala/org/sunbird/actors/ItemSetActor.scala +++ b/assessment-api/assessment-actors/src/main/scala/org/sunbird/actors/ItemSetActor.scala @@ -13,7 +13,7 @@ import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.NodeUtil import org.sunbird.parseq.Task -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters.seqAsJavaListConverter import scala.concurrent.{ExecutionContext, Future} diff --git a/assessment-api/assessment-actors/src/main/scala/org/sunbird/managers/AssessmentManager.scala b/assessment-api/assessment-actors/src/main/scala/org/sunbird/managers/AssessmentManager.scala index 407d2d534..e81fe6c9d 100644 --- a/assessment-api/assessment-actors/src/main/scala/org/sunbird/managers/AssessmentManager.scala +++ 
b/assessment-api/assessment-actors/src/main/scala/org/sunbird/managers/AssessmentManager.scala @@ -15,7 +15,7 @@ import org.sunbird.telemetry.util.LogTelemetryEventUtil import org.sunbird.utils.RequestUtil import scala.concurrent.{ExecutionContext, Future} -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters import scala.collection.JavaConverters._ diff --git a/assessment-api/assessment-actors/src/main/scala/org/sunbird/utils/RequestUtil.scala b/assessment-api/assessment-actors/src/main/scala/org/sunbird/utils/RequestUtil.scala index 4246a74c8..c96e218b0 100644 --- a/assessment-api/assessment-actors/src/main/scala/org/sunbird/utils/RequestUtil.scala +++ b/assessment-api/assessment-actors/src/main/scala/org/sunbird/utils/RequestUtil.scala @@ -7,7 +7,7 @@ import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.schema.DefinitionNode import scala.concurrent.ExecutionContext -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ object RequestUtil { diff --git a/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionActorTest.scala b/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionActorTest.scala index 349bc916a..a6ded4683 100644 --- a/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionActorTest.scala +++ b/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionActorTest.scala @@ -11,7 +11,7 @@ import org.sunbird.graph.utils.ScalaJsonUtils import org.sunbird.graph.{GraphService, OntologyEngineContext} import org.sunbird.kafka.client.KafkaClient -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global diff --git 
a/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionSetActorTest.scala b/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionSetActorTest.scala index 25591cd35..dbd30c7e5 100644 --- a/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionSetActorTest.scala +++ b/assessment-api/assessment-actors/src/test/scala/org/sunbird/actors/QuestionSetActorTest.scala @@ -15,7 +15,7 @@ import org.sunbird.managers.CopyManager import org.sunbird.utils.{AssessmentConstants, BranchingUtil, JavaJsonUtils} import java.util -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future diff --git a/assessment-api/pom.xml b/assessment-api/pom.xml index b1bb913ed..7c55ee4d4 100644 --- a/assessment-api/pom.xml +++ b/assessment-api/pom.xml @@ -20,7 +20,7 @@ UTF-8 UTF-8 - 2.11 + 2.12 diff --git a/assessment-api/qs-hierarchy-manager/pom.xml b/assessment-api/qs-hierarchy-manager/pom.xml index e4de07f22..8431814a0 100644 --- a/assessment-api/qs-hierarchy-manager/pom.xml +++ b/assessment-api/qs-hierarchy-manager/pom.xml @@ -14,7 +14,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar diff --git a/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala b/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala index 125489648..906d838a7 100644 --- a/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala +++ b/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala @@ -13,7 +13,7 @@ import org.sunbird.graph.dac.model.Node import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.{NodeUtil, ScalaJsonUtils} -import scala.collection.JavaConversions._ +import 
scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ import scala.collection.JavaConverters import scala.concurrent.{ExecutionContext, Future} diff --git a/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala b/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala index 59cdd1810..6ce0dc829 100644 --- a/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala +++ b/assessment-api/qs-hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala @@ -2,7 +2,6 @@ package org.sunbird.managers import java.util import java.util.concurrent.CompletionException - import org.apache.commons.collections4.{CollectionUtils, MapUtils} import org.apache.commons.lang3.StringUtils import org.sunbird.common.dto.{Request, Response, ResponseHandler} @@ -18,8 +17,8 @@ import org.sunbird.graph.utils.{NodeUtil, ScalaJsonUtils} import org.sunbird.telemetry.logger.TelemetryManager import org.sunbird.utils.{HierarchyConstants, HierarchyErrorCodes} -import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.mutable import scala.concurrent.{ExecutionContext, Future} diff --git a/assessment-api/qs-hierarchy-manager/src/test/scala/org/sunbird/managers/HierarchyManagerTest.scala b/assessment-api/qs-hierarchy-manager/src/test/scala/org/sunbird/managers/HierarchyManagerTest.scala index baaaeadb8..b1b6dbef6 100644 --- a/assessment-api/qs-hierarchy-manager/src/test/scala/org/sunbird/managers/HierarchyManagerTest.scala +++ b/assessment-api/qs-hierarchy-manager/src/test/scala/org/sunbird/managers/HierarchyManagerTest.scala @@ -8,7 +8,7 @@ import org.sunbird.common.exception.ClientException import org.sunbird.graph.OntologyEngineContext import org.sunbird.utils.HierarchyConstants -import 
scala.collection.JavaConversions._ + import scala.collection.JavaConverters._ class HierarchyManagerTest extends BaseSpec { diff --git a/build/content-service/Jenkinsfile b/build/content-service/Jenkinsfile index 6909c139c..c7b32fde2 100644 --- a/build/content-service/Jenkinsfile +++ b/build/content-service/Jenkinsfile @@ -21,11 +21,14 @@ node('build-slave') { commit_hash = sh(script: 'git rev-parse --short HEAD', returnStdout: true).trim() build_tag = sh(script: "echo " + params.github_release_tag.split('/')[-1] + "_" + commit_hash + "_" + env.BUILD_NUMBER, returnStdout: true).trim() echo "build_tag: " + build_tag + cloud_store_group_id = params.CLOUD_STORE_GROUP_ID + cloud_store_artifact_id = params.CLOUD_STORE_ARTIFACT_ID + cloud_store_version = params.CLOUD_STORE_VERSION stage('Build') { env.NODE_ENV = "build" print "Environment will be : ${env.NODE_ENV}" - sh 'mvn clean install -DskipTests=true ' + sh 'mvn clean install -DskipTests=true -DCLOUD_STORE_GROUP_ID=' + cloud_store_group_id + ' -DCLOUD_STORE_ARTIFACT_ID=' + cloud_store_artifact_id + ' -DCLOUD_STORE_VERSION=' + cloud_store_version } @@ -47,4 +50,4 @@ node('build-slave') { currentBuild.result = "FAILURE" throw err } -} \ No newline at end of file +} diff --git a/content-api/api-tests/Collections/License API(s).postman_collection.json b/content-api/api-tests/Collections/License API(s).postman_collection.json new file mode 100644 index 000000000..50f8db7ad --- /dev/null +++ b/content-api/api-tests/Collections/License API(s).postman_collection.json @@ -0,0 +1,1017 @@ +{ + "info": { + "_postman_id": "fb7f316f-8f4d-4ebf-a358-e1cc9ba3e8ed", + "name": "Sunbird License API(s)", + "description": "The License API allows you to create licenses which can be tagged to content based on your requirements over the sunbird platform. 
Each endpoint serves a different purpose, so based on your requirement you can do the following: \n- The operations include CRUD (Create, Update, Read and Delete) operations\n- The URL for Sunbird License API(s) is `/license/v3`\n- The backend URL for the same is `/license/v3`. \n\n\nContact Support:\n Email: info@sunbird.org", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "license/v3", + "item": [ + { + "name": "Create License", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"name\": \"CC BY 4.0\",\n \"url\": \"www.url.com\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "create" + ] + }, + "description": "This API is associated with creating License on the Sunbird Platform.\n - The endpoint for **Create License** is `/license/v3/create`\n - It points to content-service - `/license/v3/create`\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. 
The **Create License** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n- application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"name\": \"CC BY 4.0\",\n \"url\": \"www.url.com\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.create\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-18T06:48:10ZZ\",\n \"params\": {\n \"resmsgid\": \"5eb2e0f9-e27c-4d38-8353-e1720b0ed78a\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"cc-by-4.0\",\n \"node_id\": \"cc-by-4.0\"\n }\n}" + }, + { + "name": "BAD REQUEST. The **Create License** operation failed. 
You may have missed input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n- application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"name\": \"CC BY 4.0\",\n \"url\": \"www.url.com\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.create\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-18T08:41:21ZZ\",\n \"params\": {\n \"resmsgid\": \"0ea93a27-b70d-4587-b280-29564175af86\",\n \"msgid\": null,\n \"err\": \"CLIENT_ERROR\",\n \"status\": \"failed\",\n \"errmsg\": \"Validation Errors\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": [\n \"Required Metadata name not set\"\n ]\n }\n}" + }, + { + "name": "'INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. 
If the problem persists contact us at info@sunbird.org.'", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n- application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"name\": \"CC BY 4.0\",\n \"url\": \"www.url.com\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "create" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Update License", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + 
{ + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/update/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "update", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License Id To the Request URL" + } + ] + }, + "description": "This API is associated with updating license on the Sunbird Platform .\n - **_/Update/_** endpoint executes the \"Update License\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Create License** is `/license/v3/update/{License_Id}`\n - It points to content-service - `/license/v3/update/{License_Id}`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation. License update operation was successfuly executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/update/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "update", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.update\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-18T12:10:36ZZ\",\n \"params\": {\n \"resmsgid\": \"18fa41bd-7b8e-4f8b-9028-d17879f60cc6\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"standard-youtube-license\",\n \"node_id\": \"standard-youtube-license\"\n }\n}" + }, + { + "name": "BAD REQUEST. The 'Update License ' operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter. ", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/update/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "update", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2020-12-10T20:27:01ZZ\",\n \"params\": {\n \"resmsgid\": \"87eb99c6-bc10-406a-a6fe-13651f546ce1\",\n \"msgid\": null,\n \"err\": \"ERROR_RESTRICTED_PROP\",\n \"status\": \"failed\",\n \"errmsg\": \"Properties in list [identifier, name, status] are not allowed in request\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**License Update** operation failed !The possible reason for failure is that you may have provided wrong license id.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/update/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "update", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.update\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-18T12:13:57ZZ\",\n \"params\": {\n \"resmsgid\": \"d30ba7f5-916e-422f-8fcc-6a67b80acaea\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: standard-youtube-license1\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"license\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/license/v3/update/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "update", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Read License", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/read/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "read", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License_Id To the Request URL" + } + ] + }, + "description": "This API is 
associated with viewing and reading out the master object category on the Sunbird Platform .\n - The **_/read/{License_Id}_** endpoint executes request for fetching the licenses to be viewed and read.\n - The endpoint for **Read License** is `/license/v3/read/{License_Id}`\n - It points to content-service - `/license/v3/read/{License_Id}`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate id.\n - You need to provide a valid object category id value in {License_Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Read License\" operation was successfully executed. For all possible metadata, please [refer](https://github.com/project-sunbird/knowledge-platform/blob/release-3.5.0/schemas/license/1.0/schema.json)", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/read/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "read", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License_Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-18T12:10:18ZZ\",\n \"params\": {\n \"resmsgid\": \"f7b3b400-cf43-49a3-ab14-a6d1a74e70b7\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"license\": {\n \"identifier\": \"standard-youtube-license\",\n \"lastStatusChangedOn\": \"2019-11-25T13:34:45.334+0000\",\n \"apoc_json\": \"{\\\"batch\\\": true}\",\n \"url\": \"www.url.com\",\n \"description\": \"This is the standard license of any Youtube content\",\n \"languageCode\": [],\n \"createdOn\": \"2019-11-25T13:34:45.334+0000\",\n \"objectType\": \"License\",\n \"apoc_text\": \"APOC\",\n \"versionKey\": \"1582725030824\",\n \"name\": \"Standard YouTube License\",\n \"lastUpdatedOn\": \"2020-02-26T13:50:30.824+0000\",\n \"apoc_num\": 1,\n \"status\": \"Live\"\n }\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**License Read** operation failed !The possible reason for failure is that you may have provided wrong license id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible 
media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/read/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "read", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License_Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-18T12:21:51ZZ\",\n \"params\": {\n \"resmsgid\": \"f5b2202a-f349-4488-a7ba-e061cfc2a88b\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: standard-youtube-license1\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "esse et eu quis ut" + }, + { + "description": "(Required) All license APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/read/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "read", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid License_Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Retire Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "DELETE", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/retire/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "retire", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with retiring license on the Sunbird Platform .\n - The **_/retire/{License_Id}_** endpoint executes \"Retire License\" request based on license Id to delete the specific 
license.'\n - The endpoint for **Retire License** is `/license/v3/retire/{License_Id}`\n - It points to content-service - `/license/v3/retire/{License_Id}`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.**License retire operation was successfully executed.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/retire/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "retire", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.retire\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T12:05:52ZZ\",\n \"params\": {\n \"resmsgid\": \"ebd2655a-a69a-460c-9bcf-0f45f5938fac\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"standard-youtube-license\",\n \"node_id\": \"standard-youtube-license\"\n }\n}" + }, + { + "name": "ERR CONTENT RETIRE! 
License is already retired.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/retire/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "retire", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.retire\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T12:01:24ZZ\",\n \"params\": {\n \"resmsgid\": \"ec20ec05-581c-4559-988a-449f3c5ae698\",\n \"msgid\": null,\n \"err\": \"ERR_CONTENT_RETIRE\",\n \"status\": \"failed\",\n \"errmsg\": \"Content with Identifier standard-youtube-license is already Retired.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**License Retire** operation failed !The possible reason for failure is that you may have provided wrong license id.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/retire/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "retire", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.retire\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T11:56:35ZZ\",\n \"params\": {\n \"resmsgid\": \"d826e808-2866-4603-9b60-0a3130a37128\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: standard-youtube-license1\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "esse et eu quis ut" + } + ], + "url": { + "raw": "{{baseUrl}}/license/v3/retire/:License_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "license", + "v3", + "retire", + ":License_Id" + ], + "variable": [ + { + "key": "License_Id", + "value": "esse et eu quis ut", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.license.retire\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": "https://staging.sunbirded.org/api", + "type": "string" + } + ] +} diff --git a/content-api/api-tests/Collections/Sunbird Asset Management API(s).postman_collection.json b/content-api/api-tests/Collections/Sunbird Asset Management API(s).postman_collection.json new file mode 100644 index 000000000..e1121bc32 --- /dev/null +++ b/content-api/api-tests/Collections/Sunbird Asset Management API(s).postman_collection.json @@ -0,0 +1,1179 @@ +{ + "info": { + "_postman_id": "8ca47646-464a-411a-b9be-7d741055618b", + "name": "Sunbird Asset Management API(s)", + "description": "The Asset Management API allows you to manage asset over the sunbird platform.Apis perform operations related to all the _Asset_ on the Sunbird Platform. 
- Each of the endpoints serve a different purpose, so which one to use depends on what you want to do - The operations include CRUD (Create, Update, Read and Upload) operations and other operations such as upload etc. - The URL for Sunbird Asset API(s) is `asset/v1` - [TestCases](https://www.getpostman.com/collections/fd62845fa723c00658ae)\n\nContact Support:\n Email: info@sunbird.org", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "1419050" + }, + "item": [ + { + "name": "asset", + "item": [ + { + "name": "v1", + "item": [ + { + "name": "Asset Create", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"channel\": \"et qui officia non\",\n \"code\": \"tempor nostrud esse\",\n \"createdBy\": \"conseq\",\n \"creator\": \"in enim\",\n \"keywords\": \"occaecat ea\",\n \"language\": [\n \"proident id aliquip\",\n \"dolor\"\n ],\n \"mediaType\": \"dolore dolore sint dolor\",\n \"mimeType\": \"dolor irure\",\n \"name\": \"consequat in aute irure\",\n \"primaryCategory\": \"dolor pariatur\"\n }\n }\n}", + 
"options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "create" + ] + }, + "description": "This API is associated with creating asset on the Sunbird Platform.\n - The endpoint for **Create Asset** is `/asset/v1/create`\n - It points to knowledge-mw-service - `/v1/asset/create`\n - It is mandatory to provide values for parameters marked with *\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"channel\": \"eiusmod est ullamco dolor\",\n \"code\": \"officia incididunt culpa commodo\",\n \"createdBy\": \"ea sit laboris cupidatat\",\n \"creator\": \"et anim deserunt sint\",\n \"keywords\": \"dolore cill\",\n \"language\": [\n \"magna\",\n \"esse Excepteur cillum\"\n ],\n \"mediaType\": \"Duis proident id elit\",\n \"mimeType\": \"Lorem in aute nisi\",\n \"name\": \"sunt non proident Duis\",\n \"primaryCategory\": \"esse reprehenderit\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, 
+ "url": { + "raw": "{{baseUrl}}/asset/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.create\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-29T09:20:03ZZ\",\n \"params\": {\n \"resmsgid\": \"bc3f02f5-d550-42b0-904e-38eb671248ef\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_113441184591495168151\",\n \"node_id\": \"do_113441184591495168151\",\n \"versionKey\": \"1640769603462\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"channel\": \"eiusmod est ullamco dolor\",\n \"code\": \"officia incididunt culpa commodo\",\n \"createdBy\": \"ea sit laboris cupidatat\",\n \"creator\": \"et anim deserunt sint\",\n \"keywords\": \"dolore cill\",\n \"language\": [\n \"magna\",\n \"esse Excepteur cillum\"\n ],\n \"mediaType\": \"Duis proident id elit\",\n \"mimeType\": 
\"Lorem in aute nisi\",\n \"name\": \"sunt non proident Duis\",\n \"primaryCategory\": \"esse reprehenderit\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.create\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-29T09:36:28ZZ\",\n \"params\": {\n \"resmsgid\": \"c2a8542a-3ed3-4b69-8a76-f2cc782191ed\",\n \"err\": \"CLIENT_ERROR\",\n \"status\": \"failed\",\n \"errmsg\": \"Validation Errors\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": [\n \"Required Metadata code not set\"\n ]\n }\n}" + } + ] + }, + { + "name": "Asset Update", + "request": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"name\": \"test asset\",\n \"versionKey\": \"1640774434453\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/update/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "update", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + }, + "description": "This API is associated with updating Asset on the Sunbird Platform .\n - **_/Update/_** endpoint executes the \"Update Asset\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Asset** is `/asset/v1/update`\n - It points to knowledge-mw-service - `/v1/asset/update`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. 
You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"name\": \"test asset\",\n \"versionKey\": \"1640774434453\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/update/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "update", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-29T09:24:27ZZ\",\n \"params\": {\n \"resmsgid\": \"7302823d-669f-449e-a903-d03b5e3c307d\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_113441184591495168151\",\n \"node_id\": \"do_113441184591495168151\",\n \"versionKey\": \"1640769867533\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. 
You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"name\": \"test asset\",\n \"versionKey\": \"1640774434453\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/update/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "update", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-29T09:38:42ZZ\",\n \"params\": {\n \"resmsgid\": \"63da1ede-b594-473d-abd3-70ca45bce86c\",\n \"err\": \"CLIENT_ERROR\",\n \"status\": \"failed\",\n \"errmsg\": \"Invalid version Key\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Asset Read", + "request": { + "method": "GET", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/asset/v1/read/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "read", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + }, + "description": "This API is associated with viewing and reading out the asset on the Sunbird Platform .\n - The **_/read/{asset Id}_** endpoint executes request for fetching the asset to be viewed and read.\n - The endpoint for **Read Asset** is `/asset/v1/read`\n - It points to knowledge-mw-service - `/v1/asset/read`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid asset Id value in {Asset Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "url": { + "raw": "{{baseUrl}}/asset/v1/read/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "read", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.read\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-29T10:22:50ZZ\",\n \"params\": {\n \"resmsgid\": \"30114c34-e044-4e49-ad5e-89075ae503ce\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content\": {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"code\": \"f2f41233-1c34-cf28-7bf8-0987a1eee420\",\n \"keywords\": \"\",\n \"channel\": 1309282781705830400,\n \"downloadUrl\": \"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_113441146415767552142/file_example_mp4_640_3mg.mp4\",\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"video/mp4\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2021-12-29T08:02:23.340+0000\",\n \"objectType\": \"Asset\",\n \"primaryCategory\": \"asset\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2021-12-29T08:02:24.755+0000\",\n 
\"contentEncoding\": \"identity\",\n \"artifactUrl\": \"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_113441146415767552142/file_example_mp4_640_3mg.mp4\",\n \"contentType\": \"Resource\",\n \"dialcodeRequired\": false,\n \"identifier\": \"do_113441146415767552142\",\n \"lastStatusChangedOn\": \"2021-12-29T08:02:23.340+0000\",\n \"audience\": [\n \"Student\"\n ],\n \"creator\": \"N11\",\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Default\",\n \"consumerId\": \"bfe5883f-ac66-4744-a064-3ed88d986eba\",\n \"mediaType\": \"video\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": \"1640764944755\",\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"NCF\",\n \"size\": 3114374,\n \"createdBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\",\n \"compatibilityLevel\": 1,\n \"name\": \"file_example_MP4_640_3MG\",\n \"status\": \"Processing\"\n }\n }\n}" + } + ] + }, + { + "name": "Asset Upload", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/asset/v1/upload/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "upload", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + }, + "description": "This API is associated with uploading asset on the Sunbird Platform .\n - The**_/upload/{Asset Id}_** endpoint executes \"Upload Asset\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Upload Asset** is `/asset/v1/upload`\n - It points to knowledge-mw-service - `/v1/asset/upload`\n - {...}refers to a section of upload endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid asset Id value in {Asset Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "url": { + "raw": "{{baseUrl}}/asset/v1/upload/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "upload", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.upload\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-28T12:18:32ZZ\",\n \"params\": {\n \"resmsgid\": \"1ee721a1-deb7-4e9c-8e7e-777a376f7844\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1134405639859486721110\",\n \"artifactUrl\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134405639859486721110/image1.jpeg\",\n \"content_url\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134405639859486721110/image1.jpeg\",\n \"node_id\": \"do_1134405639859486721110\",\n \"versionKey\": \"1640693912866\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "url": { + "raw": "{{baseUrl}}/asset/v1/upload/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "upload", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "application/json:" + } + ], + "cookie": [], + "body": "" + }, + { + "name": "Internal Server Error", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "url": { + "raw": "{{baseUrl}}/asset/v1/upload/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "upload", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "application/json:" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Asset Copy", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"publisher\": \"API_DOC_PUBLISHER\",\n \"lastPublishedBy\": \"API_DOC_PUBLISHER\",\n \"publishComment\": \"OK\",\n \"publishChecklist\": []\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/copy/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "copy", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + }, + "description": "This API is associated with copying a asset on the Sunbird Platform .\n - The **_/copy/_**endpoint executes request for copy asset, if asset is found appropriate.\n - The endpoint for **Copy Asset** is `/asset/v1/copy`\n - It points to knowledge-mw-service - `/v1/asset/copy`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty.." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"lastPublishedBy\": \"sit culpa\",\n \"publishChecklist\": [\n \"ullamco\",\n \"consequat do ullamco esse labore\"\n ],\n \"publishComment\": \"anim amet consequat exercitation\",\n \"publisher\": \"mollit veniam est do\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/copy/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "copy", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.copy\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-30T11:42:05ZZ\",\n \"params\": {\n \"resmsgid\": \"ab112c9f-bbb0-4493-a32d-040243b14a09\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": {\n \"do_1134418461689282561147\": \"do_1134419621916180481174\"\n },\n \"versionKey\": \"1640864525352\"\n }\n}" + }, + { + "name": "Not Found", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the 
API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"lastPublishedBy\": \"sit culpa\",\n \"publishChecklist\": [\n \"ullamco\",\n \"consequat do ullamco esse labore\"\n ],\n \"publishComment\": \"anim amet consequat exercitation\",\n \"publisher\": \"mollit veniam est do\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/asset/v1/copy/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "v1", + "copy", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.copy\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-12-30T11:42:58ZZ\",\n \"params\": {\n \"resmsgid\": \"0a0c203d-4d8f-4a78-be67-1365fad3407e\",\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. 
| [Invalid Node Id.]: do_1134418461689282561142\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + } + ] + } + ] + }, + { + "name": "Asset Upload Url", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"fileName\": \"flower6.jpeg\"\n }\n }\n}" + }, + "url": { + "raw": "{{baseUrl}}/asset/upload/url/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "upload", + "url", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + }, + "description": "This API is associated with uploading asset on the Sunbird Platform .\n - The**_/upload/url/{Asset Id}_** endpoint executes \"Upload Asset URL\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Upload Asset URL** is `/asset/v1/upload/url`\n - It points to knowledge-mw-service - `/v1/asset/upload/url`\n - {...}refers to a section of upload endpoint URL path that needs to be replaced by appropriate Id.\n - You 
need to provide a valid asset Id value in {Asset Id} field of API URL.\n - On Success api returns pre-signed-url link with come configured ttl against which file can be uploaded.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"fileName\": \"flower6.jpeg\"\n }\n }\n}" + }, + "url": { + "raw": "{{baseUrl}}/asset/upload/url/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "upload", + "url", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.upload.url\",\n \"ver\": \"3.0\",\n \"ts\": \"2021-12-30T07:57:47ZZ\",\n \"params\": {\n \"resmsgid\": \"df673ca8-7d2c-46c9-b9e5-77e5602f9454\",\n 
\"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1134418461689282561147\",\n \"url_expiry\": \"54000\",\n \"pre_signed_url\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134418461689282561147/flower6.jpeg?sv=2017-04-17&se=2021-12-30T22%3A57%3A47Z&sr=b&sp=w&sig=2pow25YLNLIM0hrMwVx4uy3aDu3BTNNbNdLI8X9Yt4M%3D\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"fileName\": \"flower6.jpeg\"\n }\n }\n}" + }, + "url": { + "raw": "{{baseUrl}}/asset/upload/url/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "upload", + "url", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.upload.url\",\n \"ver\": \"3.0\",\n 
\"ts\": \"2021-12-30T07:47:32ZZ\",\n \"params\": {\n \"resmsgid\": \"9b18aedd-c6d3-43bf-b967-d787e2054b60\",\n \"err\": \"ERR_CONTENT_BLANK_FILE_NAME\",\n \"status\": \"failed\",\n \"errmsg\": \"File name is blank\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "Not Found", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "reprehenderit aliquip" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "reprehenderit aliquip" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "reprehenderit aliquip" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"asset\": {\n \"fileName\": \"flower6.jpeg\"\n }\n }\n}" + }, + "url": { + "raw": "{{baseUrl}}/asset/upload/url/:Asset_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "asset", + "upload", + "url", + ":Asset_Id" + ], + "variable": [ + { + "key": "Asset_Id", + "value": "reprehenderit aliquip", + "description": "(Required) Please append a valid Asset Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.asset.upload.url\",\n \"ver\": \"3.0\",\n \"ts\": \"2021-12-30T07:59:12ZZ\",\n \"params\": {\n \"resmsgid\": \"256bbf94-1ddb-4f44-ab48-ffd04bd8c8a9\",\n \"err\": \"NOT_FOUND\",\n 
\"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_1134418461689282561149\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": "https://staging.sunbirded.org/api", + "type": "string" + } + ] +} \ No newline at end of file diff --git a/content-api/api-tests/Collections/Sunbird Collection Management API(s).postman_collection.json b/content-api/api-tests/Collections/Sunbird Collection Management API(s).postman_collection.json new file mode 100644 index 000000000..d1231f7db --- /dev/null +++ b/content-api/api-tests/Collections/Sunbird Collection Management API(s).postman_collection.json @@ -0,0 +1,2921 @@ +{ + "info": { + "_postman_id": "f3122aff-72d4-4066-9373-9132b61dc0de", + "name": "Sunbird Collection Management API(s)", + "description": "The Collection Management API allows you to manage collection over the sunbird platform.Apis perform operations related to all the _Collection_ on the Sunbird Platform. \n- Each of the endpoints serve a different purpose, so which one to use depends on what you want to do\n- The operations include CRUD (Create, Update, Read) operations and other operations such as copy, flag, link Dialcode, etc. 
\n- The URL for Sunbird Content API(s) is `collection/v1` \n- [TestCases](https://www.getpostman.com/collections/40dde03073826216533a)\n\nContact Support:\n Email: info@sunbird.org", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "1419050" + }, + "item": [ + { + "name": "collection/v1", + "item": [ + { + "name": "flag", + "item": [ + { + "name": "Collection Flag", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"flagReasons\": [\n \"Copyright Violation\"\n ],\n \"flaggedBy\": \"API-DOC-Tester\",\n \"versionKey\": \"{{versionKey}}\",\n \"flags\": [\n \"kp-flags\",\n \"newKp-flags\"\n ]\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/flag/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "flag", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with flag Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Flag Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Collection** is `/collection/v1/flag`\n - It points to knowledge-mw-service - `/v1/collection/flag`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." 
+ }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"flagReasons\": [\n \"fugiat\",\n \"labore voluptate eiusmod\"\n ],\n \"flaggedBy\": \"labore e\",\n \"flags\": [\n \"mollit laborum\",\n \"p\"\n ],\n \"versionKey\": \"Ut nulla officia dolore\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/flag/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "flag", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.flag\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T07:11:52ZZ\",\n \"params\": {\n \"resmsgid\": \"eb628dff-53c6-45f2-9789-0f193988f585\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1134467204859658241168\",\n \"node_id\": 
\"do_1134467204859658241168\",\n \"versionKey\": \"1641453112470\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"flagReasons\": [\n \"fugiat\",\n \"labore voluptate eiusmod\"\n ],\n \"flaggedBy\": \"labore e\",\n \"flags\": [\n \"mollit laborum\",\n \"p\"\n ],\n \"versionKey\": \"Ut nulla officia dolore\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/flag/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "flag", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.flag\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T07:08:12ZZ\",\n \"params\": {\n \"resmsgid\": \"aad90170-6b97-4d65-a980-b3184fe13142\",\n \"err\": \"ERR_CONTENT_NOT_FLAGGABLE\",\n \"status\": \"failed\",\n \"errmsg\": 
\"Unpublished Content do_11334223976733900811 cannot be flagged\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Collection Flag Accept", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"{{versionKey}}\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/flag/accept/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "flag", + "accept", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with Flag Accept Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Flag Accept Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Flag Accept Collection** is `/collection/v1/flag/accept`\n - It points to knowledge-mw-service - `/v1/collection/flag/accept`\n - {...}refers to a section of review endpoint URL path that needs to be replaced 
by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"{{versionKey}}\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/flag/accept/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "flag", + "accept", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.flag.accept\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T07:14:38ZZ\",\n \"params\": {\n \"resmsgid\": \"c7b2c6b0-ace7-4da5-8429-6f9d5f6b6d35\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1134467204859658241168\",\n \"node_id\": 
\"do_1134467204859658241168\",\n \"versionKey\": \"1641453278531\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"{{versionKey}}\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/flag/accept/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "flag", + "accept", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.flag.accept\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T07:06:24ZZ\",\n \"params\": {\n \"resmsgid\": \"40bd05d7-890e-4e3f-9ad3-dcde348e8fb9\",\n \"err\": \"ERR_INVALID_CONTENT\",\n \"status\": \"failed\",\n \"errmsg\": \"Invalid Flagged Content! 
Content Can Not Be Accepted.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + } + ] + }, + { + "name": "hierarchy", + "item": [ + { + "name": "Update Collection Hierarchy", + "request": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"data\": {\n \"nodesModified\": {\n \"do_113444715309432832183\": {\n \"root\": true,\n \"objectType\": \"Content\",\n \"metadata\": {\n \"appIcon\": \"\",\n \"name\": \"Untitled Collection\",\n \"description\": \"Enter description for Collection\",\n \"dialcodeRequired\": false,\n \"primaryCategory\": \"Content Playlist\",\n \"additionalCategories\": [],\n \"audience\": [\n \"Student\"\n ],\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"author\": \"N11\",\n \"copyright\": \"NIT123\",\n \"license\": \"CC BY 4.0\",\n \"contentType\": \"Collection\",\n \"attributions\": [],\n \"keywords\": [\n \"nnn\"\n ],\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"copyrightYear\": 2121\n },\n \"isNew\": false\n }\n },\n 
\"hierarchy\": {\n \"do_113444715309432832183\": {\n \"name\": \"Untitled Collection\",\n \"children\": [],\n \"root\": true\n }\n },\n \"lastUpdatedBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/update", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "update" + ] + }, + "description": "This API is associated with updating collection on the Sunbird Platform.\n - **_/Update/_** endpoint executes the \"Update Collection Hierarchy\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Collection Hierarchy** is `/collection/v1/hierarchy/update`\n - It points to knowledge-mw-service - `/v1/collection/hierarchy/update`\n - {...} refers to a section of the update endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"data\": {\n \"nodesModified\": {\n \"do_113444715309432832183\": {\n \"root\": true,\n \"objectType\": \"Content\",\n \"metadata\": {\n \"appIcon\": \"\",\n \"name\": \"Untitled Collection\",\n \"description\": \"Enter description for Collection\",\n \"dialcodeRequired\": false,\n \"primaryCategory\": \"Content Playlist\",\n \"additionalCategories\": [],\n \"audience\": [\n \"Student\"\n ],\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"author\": \"N11\",\n \"copyright\": \"NIT123\",\n \"license\": \"CC BY 4.0\",\n \"contentType\": \"Collection\",\n \"attributions\": [],\n \"keywords\": [\n \"nnn\"\n ],\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"copyrightYear\": 2121\n },\n \"isNew\": false\n }\n },\n \"hierarchy\": {\n \"do_113444715309432832183\": {\n \"name\": \"Untitled Collection\",\n \"children\": [],\n \"root\": true\n }\n },\n \"lastUpdatedBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/update", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "update" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.hierarchy.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2022-01-03T09:43:27.001Z\",\n \"params\": {\n \"resmsgid\": \"99def090-6c79-11ec-b3c3-d9afeab1a49c\",\n \"msgid\": \"fec10a19-18ca-743d-2594-701f84f47462\",\n \"status\": 
\"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content_id\": \"do_113444715309432832183\",\n \"identifiers\": {}\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"data\": {\n \"nodesModified\": {\n \"do_113444715309432832183\": {\n \"root\": true,\n \"objectType\": \"Content\",\n \"metadata\": {\n \"appIcon\": \"\",\n \"name\": \"Untitled Collection\",\n \"description\": \"Enter description for Collection\",\n \"dialcodeRequired\": false,\n \"primaryCategory\": \"Content Playlist\",\n \"additionalCategories\": [],\n \"audience\": [\n \"Student\"\n ],\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"author\": \"N11\",\n \"copyright\": \"NIT123\",\n \"license\": \"CC BY 4.0\",\n \"contentType\": \"Collection\",\n \"attributions\": [],\n \"keywords\": [\n \"nnn\"\n ],\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"copyrightYear\": 2121\n },\n \"isNew\": false\n }\n },\n \"hierarchy\": {\n \"do_113444715309432832183\": {\n \"name\": \"Untitled Collection\",\n 
\"children\": [],\n \"root\": true\n }\n },\n \"lastUpdatedBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/update", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "update" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.hierarchy.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2022-01-03T09:37:29.231Z\",\n \"params\": {\n \"resmsgid\": \"c49f91f0-6c78-11ec-b3c3-d9afeab1a49c\",\n \"status\": \"failed\",\n \"err\": \"ERR_CONTENT_HIERARCHY_UPDATE_FIELDS_MISSING\",\n \"errmsg\": \"Required fields for update hierarchy are missing\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "Not Found", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"data\": {\n \"nodesModified\": {\n \"do_113444715309432832183\": {\n \"root\": true,\n \"objectType\": \"Content\",\n \"metadata\": {\n \"appIcon\": \"\",\n \"name\": \"Untitled Collection\",\n \"description\": \"Enter description for Collection\",\n \"dialcodeRequired\": false,\n \"primaryCategory\": \"Content Playlist\",\n \"additionalCategories\": [],\n \"audience\": [\n \"Student\"\n ],\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"author\": \"N11\",\n \"copyright\": \"NIT123\",\n \"license\": \"CC BY 4.0\",\n \"contentType\": \"Collection\",\n \"attributions\": [],\n \"keywords\": [\n \"nnn\"\n ],\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"copyrightYear\": 2121\n },\n \"isNew\": false\n }\n },\n \"hierarchy\": {\n \"do_113444715309432832183\": {\n \"name\": \"Untitled Collection\",\n \"children\": [],\n \"root\": true\n }\n },\n \"lastUpdatedBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/update", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "update" + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.hierarchy.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2022-01-03T09:36:03.730Z\",\n \"params\": {\n \"resmsgid\": \"91a92720-6c78-11ec-b3c3-d9afeab1a49c\",\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": 
\"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_113444715309432832182\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "Internal Server Error", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"data\": {\n \"nodesModified\": {\n \"do_113444715309432832183\": {\n \"root\": true,\n \"objectType\": \"Content\",\n \"metadata\": {\n \"appIcon\": \"\",\n \"name\": \"Untitled Collection\",\n \"description\": \"Enter description for Collection\",\n \"dialcodeRequired\": false,\n \"primaryCategory\": \"Content Playlist\",\n \"additionalCategories\": [],\n \"audience\": [\n \"Student\"\n ],\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"author\": \"N11\",\n \"copyright\": \"NIT123\",\n \"license\": \"CC BY 4.0\",\n \"contentType\": \"Collection\",\n \"attributions\": [],\n \"keywords\": [\n \"nnn\"\n ],\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"copyrightYear\": 2121\n },\n \"isNew\": false\n }\n },\n \"hierarchy\": {\n \"do_113444715309432832183\": {\n \"name\": \"Untitled 
Collection\",\n \"children\": [],\n \"root\": true\n }\n },\n \"lastUpdatedBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/update", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "update" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.hierarchy.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2022-01-03T09:42:31.388Z\",\n \"params\": {\n \"resmsgid\": \"78b911c0-6c79-11ec-b3c3-d9afeab1a49c\",\n \"status\": \"failed\",\n \"err\": \"ERR_SYSTEM_EXCEPTION\",\n \"errmsg\": \"Something went wrong in server while processing the request\"\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Read Collection Hierarchy", + "request": { + "method": "GET", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/:Collection_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + ":Collection_Id" + ], + "variable": [ + { + "key": "Collection_Id", + "value": "magna", + "description": "(Required) Please append a valid collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with viewing and reading out the collection on the Sunbird Platform .\n - The **_/hierarchy/{collectionId}_** endpoint executes request for fetching the collection to be viewed and read.\n - The endpoint for **Read Collection** is `/collection/v1/hierarchy`\n - It points to knowledge-mw-service - `/v1/collection/hierarchy`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid collection Id value in {Collection Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/:Collection_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + ":Collection_Id" + ], + "variable": [ + { + "key": "Collection_Id", + "value": "magna", + "description": "(Required) Please append a valid collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "X-Frame-Options", + "value": "SAMEORIGIN", + "description": "" + }, + { + "key": "Strict-Transport-Security", + "value": "max-age=15552000; includeSubDomains", + "description": "" + }, + { + "key": "X-Content-Type-Options", + "value": "nosniff", + "description": "" + }, + { + "key": "request-time", + "value": "19", + "description": "" + }, + { + "key": "Connection", + "value": "keep-alive", + "description": "" + }, + { + "key": "X-Download-Options", + "value": "noopen", + "description": "" + }, + { + "key": "X-DNS-Prefetch-Control", + "value": "false", + "description": "" + }, + { + "key": "Content-Length", + "value": "8967", + "description": "" + }, + { + "key": "X-XSS-Protection", + "value": "1; mode=block", + "description": "" + }, + { + "key": "Date", + "value": "Tue, 04 Jan 2022 12:28:17 GMT", + "description": "" + }, + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.hierarchy.get\",\n \"ver\": 
\"4.0\",\n \"ts\": \"2022-01-04T12:28:17ZZ\",\n \"params\": {\n \"resmsgid\": \"0541c308-4754-4b0c-8e0e-47467c3cc35c\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content\": {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"copyright\": \"NIT123\",\n \"keywords\": [\n \"Collection Test\"\n ],\n \"subject\": [\n \"Mathematics\"\n ],\n \"channel\": \"sunbird\",\n \"organisation\": [\n \"NIT\"\n ],\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/vnd.ekstep.content-collection\",\n \"objectType\": \"Collection\",\n \"chapterCountForContribution\": 1,\n \"gradeLevel\": [\n \"Class 1\"\n ],\n \"appIcon\": \"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_11293020669537484811928/artifact/001_1578394365141.png\",\n \"primaryCategory\": \"Digital Textbook\",\n \"children\": [\n {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"parent\": \"do_1134445959309803521184\",\n \"code\": \"do_1132167447297474561289\",\n \"credentials\": {\n \"enabled\": false\n },\n \"origin\": \"do_1132167447297474561289\",\n \"channel\": \"sunbird\",\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/vnd.ekstep.content-collection\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2022-01-03T05:00:26.831+0000\",\n \"objectType\": \"Content\",\n \"primaryCategory\": \"Content Playlist\",\n \"children\": [\n {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"parent\": \"do_1134445959317995521185\",\n \"code\": \"do_113333485887143936169\",\n \"credentials\": {\n \"enabled\": false\n },\n \"origin\": \"do_113333485887143936169\",\n \"channel\": \"sunbird\",\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/vnd.ekstep.content-collection\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2022-01-03T05:00:26.834+0000\",\n \"objectType\": \"Content\",\n \"primaryCategory\": \"Textbook Unit\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": 
\"2022-01-03T05:00:26.834+0000\",\n \"contentEncoding\": \"gzip\",\n \"originData\": {\n \"channel\": 1309282781705830400\n },\n \"generateDIALCodes\": false,\n \"contentType\": \"TextBookUnit\",\n \"dialcodeRequired\": false,\n \"identifier\": \"do_1134445959318241281187\",\n \"lastStatusChangedOn\": \"2022-01-03T05:00:26.834+0000\",\n \"audience\": [\n \"Student\"\n ],\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Parent\",\n \"discussionForum\": {\n \"enabled\": true\n },\n \"index\": 1,\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.launcher\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": \"1641186026834\",\n \"allowedContentTypes\": [\n \"Demo Practice Question Set\",\n \"Content Playlist\",\n \"Course Assessment\",\n \"eTextbook\",\n \"Explanation Content\",\n \"Learning Resource\",\n \"Practice Question Set\",\n \"Teacher Resource\"\n ],\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"depth\": 2,\n \"compatibilityLevel\": 1,\n \"name\": \"Part 1\",\n \"openForContribution\": true,\n \"timeLimits\": {},\n \"programId\": \"fae8bbf0-6c51-11ec-ad7a-853bd8da76ad\",\n \"status\": \"Draft\",\n \"children\": [\n {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"unitIdentifiers\": [\n \"do_1134445959318241281187\"\n ],\n \"parent\": \"do_1134445959318241281187\",\n \"subject\": [\n \"Mathematics\"\n ],\n \"channel\": 1309282781705830400,\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/pdf\",\n \"objectType\": \"Content\",\n \"gradeLevel\": [\n \"Class 1\"\n ],\n \"primaryCategory\": \"eTextbook\",\n \"contentEncoding\": \"identity\",\n \"contentType\": \"eTextBook\",\n \"identifier\": \"do_1134453165632552961201\",\n \"audience\": [\n \"Student\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"visibility\": \"Default\",\n \"author\": \"anusha\",\n \"index\": 1,\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n 
],\n \"version\": 2,\n \"license\": \"CC BY 4.0\",\n \"name\": \"Untitled\",\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"status\": \"Draft\",\n \"code\": \"b1ef1747-b33a-d91a-93f0-7a0064f1f802\",\n \"interceptionPoints\": {},\n \"credentials\": {\n \"enabled\": false\n },\n \"medium\": [\n \"English\"\n ],\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2022-01-04T05:26:34.539+0000\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2022-01-04T05:26:34.989+0000\",\n \"collectionId\": \"do_1134445959309803521184\",\n \"dialcodeRequired\": false,\n \"lastStatusChangedOn\": \"2022-01-04T05:26:34.539+0000\",\n \"creator\": \"anusha\",\n \"os\": [\n \"All\"\n ],\n \"versionKey\": \"1641273994989\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"ekstep_ncert_k-12\",\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"depth\": 3,\n \"createdBy\": \"19ba0e4e-9285-4335-8dd0-f674bf03fa4d\",\n \"compatibilityLevel\": 1,\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"board\": \"CBSE\",\n \"programId\": \"fae8bbf0-6c51-11ec-ad7a-853bd8da76ad\",\n \"downloadUrl\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134453165632552961201/sample.pdf\",\n \"artifactUrl\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134453165632552961201/sample.pdf\",\n \"size\": 3028\n },\n {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"unitIdentifiers\": [\n \"do_1134445959318241281187\"\n ],\n \"parent\": \"do_1134445959318241281187\",\n \"copyright\": \"2021\",\n \"subject\": [\n \"Mathematics\"\n ],\n \"channel\": 1309282781705830400,\n \"downloadUrl\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134455063866736641416/sample.pdf\",\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/pdf\",\n \"objectType\": \"Content\",\n \"gradeLevel\": [\n \"Class 1\"\n ],\n \"primaryCategory\": 
\"eTextbook\",\n \"contentEncoding\": \"identity\",\n \"artifactUrl\": \"https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/assets/do_1134455063866736641416/sample.pdf\",\n \"contentType\": \"eTextBook\",\n \"identifier\": \"do_1134455063866736641416\",\n \"audience\": [\n \"Student\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"visibility\": \"Default\",\n \"author\": \"anusha\",\n \"index\": 2,\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"license\": \"CC BY 4.0\",\n \"size\": 3028,\n \"name\": \"test content\",\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"status\": \"Draft\",\n \"code\": \"9ab1557b-ea67-c31b-be52-e4073c5d8f1b\",\n \"interceptionPoints\": {},\n \"credentials\": {\n \"enabled\": false\n },\n \"medium\": [\n \"English\"\n ],\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2022-01-04T11:52:46.343+0000\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2022-01-04T11:53:13.077+0000\",\n \"collectionId\": \"do_1134445959309803521184\",\n \"dialcodeRequired\": false,\n \"lastStatusChangedOn\": \"2022-01-04T11:52:46.343+0000\",\n \"creator\": \"anusha\",\n \"os\": [\n \"All\"\n ],\n \"versionKey\": \"1641297193077\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"ekstep_ncert_k-12\",\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"depth\": 3,\n \"createdBy\": \"19ba0e4e-9285-4335-8dd0-f674bf03fa4d\",\n \"compatibilityLevel\": 1,\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"board\": \"CBSE\",\n \"programId\": \"fae8bbf0-6c51-11ec-ad7a-853bd8da76ad\"\n }\n ]\n }\n ],\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2022-01-03T05:00:26.831+0000\",\n \"contentEncoding\": \"gzip\",\n \"originData\": {\n \"channel\": 1309282781705830400\n },\n \"contentType\": \"Collection\",\n \"dialcodeRequired\": false,\n \"trackable\": {\n 
\"enabled\": false,\n \"autoBatch\": false\n },\n \"identifier\": \"do_1134445959317995521185\",\n \"lastStatusChangedOn\": \"2022-01-03T05:00:26.831+0000\",\n \"audience\": [\n \"Student\"\n ],\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Parent\",\n \"index\": 1,\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.launcher\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": \"1641186026831\",\n \"allowedContentTypes\": [\n \"Demo Practice Question Set\",\n \"Content Playlist\",\n \"Course Assessment\",\n \"eTextbook\",\n \"Explanation Content\",\n \"Learning Resource\",\n \"Practice Question Set\",\n \"Teacher Resource\"\n ],\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"depth\": 1,\n \"compatibilityLevel\": 1,\n \"name\": \"Textbook Unit 1\",\n \"openForContribution\": true,\n \"timeLimits\": {},\n \"programId\": \"fae8bbf0-6c51-11ec-ad7a-853bd8da76ad\",\n \"status\": \"Draft\"\n }\n ],\n \"contentEncoding\": \"gzip\",\n \"collaborators\": [\n \"88ffb6eb-33bf-4f96-ad3a-75c15e5a04ff\",\n \"a4b4a783-d686-4f67-b079-512329c77f5e\",\n \"ce7f4172-f13a-47b7-804d-f194a2275538\"\n ],\n \"contentType\": \"Collection\",\n \"trackable\": {\n \"enabled\": false,\n \"autoBatch\": false\n },\n \"identifier\": \"do_1134445959309803521184\",\n \"audience\": [\n \"Student\"\n ],\n \"subjectIds\": [\n \"ekstep_ncert_k-12_subject_mathematics\"\n ],\n \"visibility\": \"Default\",\n \"consumerId\": \"028d6fb1-2d6f-4331-86aa-f7cf491a41e0\",\n \"childNodes\": [\n \"do_1134445959318241281187\",\n \"do_1134445959317995521185\",\n \"do_1134453165632552961201\",\n \"do_1134455063866736641416\"\n ],\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"allowedContentTypes\": [\n \"Demo Practice Question Set\",\n \"Content Playlist\",\n \"Course Assessment\",\n \"eTextbook\",\n \"Explanation Content\",\n \"Learning Resource\",\n \"Practice Question Set\",\n \"Teacher 
Resource\"\n ],\n \"license\": \"CC BY 4.0\",\n \"name\": \"Collection Test\",\n \"mediumIds\": [\n \"ekstep_ncert_k-12_medium_english\"\n ],\n \"attributions\": [\n \"Collection Test\"\n ],\n \"status\": \"Draft\",\n \"code\": \"do_1132167446143959041288\",\n \"credentials\": {\n \"enabled\": false\n },\n \"origin\": \"do_1132167446143959041288\",\n \"description\": \"Enter description for Collection\",\n \"medium\": [\n \"English\"\n ],\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2022-01-03T05:00:26.734+0000\",\n \"copyrightYear\": 2021,\n \"contentDisposition\": \"inline\",\n \"additionalCategories\": [\n \"Textbook\"\n ],\n \"lastUpdatedOn\": \"2022-01-04T11:54:05.627+0000\",\n \"originData\": {\n \"channel\": 1309282781705830400\n },\n \"dialcodeRequired\": false,\n \"lastStatusChangedOn\": \"2022-01-03T05:00:26.734+0000\",\n \"createdFor\": [\n 1309282781705830400\n ],\n \"creator\": \"N11\",\n \"os\": [\n \"All\"\n ],\n \"chapterCount\": 1,\n \"versionKey\": \"1641297245627\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"ekstep_ncert_k-12\",\n \"depth\": 0,\n \"boardIds\": [\n \"ekstep_ncert_k-12_board_cbse\"\n ],\n \"createdBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\",\n \"compatibilityLevel\": 1,\n \"userConsent\": true,\n \"openForContribution\": true,\n \"timeLimits\": \"{}\",\n \"gradeLevelIds\": [\n \"ekstep_ncert_k-12_gradelevel_class1\"\n ],\n \"board\": \"CBSE\",\n \"programId\": \"fae8bbf0-6c51-11ec-ad7a-853bd8da76ad\",\n \"resourceType\": \"Collection\"\n }\n }\n}" + } + ] + }, + { + "name": "Add Collection Hierarchy", + "request": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"rootId\": \"do_1134445959309803521184\",\n \"unitId\": \"do_1134445959318241281187\",\n \"children\": [\n \"do_1134455063866736641416\"\n ]\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/add", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "add" + ] + }, + "description": "This API is associated with creating Collection on the Sunbird Platform.\n - The endpoint for **Create Collection** is `collection/v1/hierarchy/add`\n - It points to knowledge-mw-service - `/v1/collection/hierarchy/add`\n - It is mandatory to provide values for parameters marked with *\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"children\": [\n \"consequat qui esse\",\n \"dolore m\"\n ],\n \"rootId\": \"nostrud tempor\",\n \"unitId\": \"nisi incididunt in do\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/add", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "add" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.hierarchy.add\",\n \"ver\": \"3.0\",\n \"ts\": \"2022-01-04T11:54:05ZZ\",\n \"params\": {\n \"resmsgid\": \"bb899cb6-e81a-4a7d-9e87-710aa1fba0ce\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"rootId\": \"do_1134445959309803521184\",\n \"do_1134445959318241281187\": [\n \"do_1134455063866736641416\"\n ]\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"children\": [\n \"consequat qui esse\",\n \"dolore m\"\n ],\n \"rootId\": \"nostrud tempor\",\n \"unitId\": \"nisi incididunt in do\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/add", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "add" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.hierarchy.add\",\n \"ver\": \"3.0\",\n \"ts\": \"2022-01-04T12:16:21ZZ\",\n \"params\": {\n \"resmsgid\": \"4e3ab83b-e931-46fb-ad83-078c2731891c\",\n \"err\": \"ERR_BAD_REQUEST\",\n \"status\": \"failed\",\n \"errmsg\": \"unitId is mandatory\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "Internal Server Error", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"children\": [\n \"consequat qui esse\",\n \"dolore m\"\n ],\n \"rootId\": \"nostrud tempor\",\n \"unitId\": \"nisi incididunt in do\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/hierarchy/add", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "hierarchy", + "add" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.hierarchy.add\",\n \"ver\": \"3.0\",\n \"ts\": \"2022-01-04T12:22:29ZZ\",\n \"params\": {\n \"resmsgid\": \"112e8d10-9fb1-4b89-a1ad-333e3ee25819\",\n \"err\": \"ERR_SYSTEM_EXCEPTION\",\n \"status\": \"failed\",\n \"errmsg\": \"Something went wrong in server while processing the request\"\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + } + ] + }, + { + "name": "CSV TOC", + "item": [ + { + "name": "CSV TOC Upload", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The 
Content Type entity is the media type of the resource. Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "X-Authenticated-User-Token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "urlencoded", + "urlencoded": [ + { + "key": "fileUrl", + "value": "id enim non est Ut", + "description": "(Required) File which user want to upload" + } + ] + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/import/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "import", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID to upload. It should be unique and same as csv file identifier" + } + ] + }, + "description": "'This API is used to upload TOC with csv format on the Sunbird Platform.\n\nThe endpoint for TOC CSV upload is /collection/v1/import\n\nYou can find sample TOC CSV here:\n\n[Sample-collection-create-TOC](https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/collectioncreatecsvsample/artifact/collectioncreatecsvsample_1624340752201_createtoc.csv)\n\nor use following data with first line as header of csv and the rest is example of TOC data.\n\n\"Level 1 Folder*,Level 2 Folder,Level 3 Folder,Level 4 Folder,Description\n5. Human Body,,,,\n5. 
Human Body,5.1 Parts of Body,,,,\"\n\n\n[Sample-collection-update-TOC](https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/collectionupdatecsvsample/artifact/collectionupdatecsvsample_1624340789653_updatetoc.csv)\n\nor use following data with first line as header of csv and the rest is example of TOC data.\n\n\"Collection Name*,Folder Identifier*,Level 1 Folder*,Level 2 Folder,Level 3 Folder,Level 4 Folder,Description,Mapped Topics,Purpose of Content to be linked,Keywords,QR Code Required?,QR Code,Linked Content 1,Linked Content 2,Linked Content 3,Linked Content 4,Linked Content 5,Linked Content 6,Linked Content 7,Linked Content 8,Linked Content 9,Linked Content 10,Linked Content 11,Linked Content 12,Linked Content 13,Linked Content 14,Linked Content 15,Linked Content 16,Linked Content 17,Linked Content 18,Linked Content 19,Linked Content 20,Linked Content 21,Linked Content 22,Linked Content 23,Linked Content 24,Linked Content 26,Linked Content 27,Linked Content 8,Linked Content 29,Linked Content 30\nScience-10,id1,5. Human Body,,,,This chapter describes about human body,Human Body,\"head, eyes, nose, mouth\",Yes,MC8L1V,do_31260042816321126413012,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\nScience-10,id2,5. Human Body,5.1 Parts of Body,,,\"This section describes about various part of the body such as head, hands, legs etc.\",Parts of Body,,No,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\nScience-10,id3,5. Human Body,5.1 Parts of Body,5.1.1 Key parts in the head,,,Parts of Head,,Yes,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\nScience-10,id4,5. Human Body,5.1 Parts of Body,5.1.2 Other parts,,,,,No,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\nScience-10,id5,5. Human Body,5.2 Organ Systems,,,,Organ Systems,,No,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\nScience-10,id6,5. Human Body,5.2 Organ Systems,5.2.1 Respiratory System,,,Respiratory System,,No,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\"\n\n\nThe fields marked with an asterisk (*) are mandatory. They cannot be null or empty.'" + }, + "response": [ + { + "name": "SUCCESS. 
The TOC was uploaded successfully.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "X-Authenticated-User-Token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "body": { + "mode": "urlencoded", + "urlencoded": [ + { + "key": "fileUrl", + "value": "id enim non est Ut", + "description": "(Required) File which user want to upload" + } + ] + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/import/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "import", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID to upload. 
It should be unique and same as csv file identifier" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.import\",\n \"ver\": \"v1\",\n \"ts\": \"2021-06-08 15:35:56:361+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"b3eb6ee7-108d-48b7-9f9f-0d96c55ac289\",\n \"err\": null,\n \"status\": \"success\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"response\": \"SUCCESS\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "X-Authenticated-User-Token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "body": { + "mode": "urlencoded", + "urlencoded": [ + { + "key": "fileUrl", + "value": "id enim non est Ut", + "description": "(Required) File which user want to upload" + } + ] + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/import/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "import", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID to upload. 
It should be unique and same as csv file identifier" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.import\",\n \"ver\": \"v1\",\n \"ts\": \"2020-11-27 15:20:24:393+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"30527359-4723-4b59-b4c0-0b5b14f9a3e2\",\n \"err\": \"CSV_INVALID_COLLECTION_NAME\",\n \"status\": \"CSV_INVALID_COLLECTION_NAME\",\n \"errmsg\": \"Following rows have invalid “Collection Name”. Please correct and upload again.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "Resource Not Found", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. 
Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "X-Authenticated-User-Token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "body": { + "mode": "urlencoded", + "urlencoded": [ + { + "key": "fileUrl", + "value": "id enim non est Ut", + "description": "(Required) File which user want to upload" + } + ] + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/import/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "import", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID to upload. 
It should be unique and same as csv file identifier" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.import\",\n \"ver\": \"v1\",\n \"ts\": \"2020-11-27 15:27:56:969+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"c148b326-7f46-4899-b0cb-31d563f225bc\",\n \"err\": \"RESOURCE_NOT_FOUND\",\n \"status\": \"RESOURCE_NOT_FOUND\",\n \"errmsg\": \"rootId do_11316025545317580812 does not exist\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "Internal Server Error", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "X-Authenticated-User-Token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "body": { + "mode": "urlencoded", + "urlencoded": [ + { + "key": "fileUrl", + "value": "id enim non est Ut", + "description": "(Required) File which user want to upload" + } + ] + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/import/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "import", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) 
Collection ID to upload. It should be unique and same as csv file identifier" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.import\",\n \"ver\": \"v1\",\n \"ts\": \"2020-11-23 06:48:19:128+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"50f76a53-4710-4333-936f-c043031eec03\",\n \"err\": \"SERVER_ERROR\",\n \"status\": \"SERVER_ERROR\",\n \"errmsg\": \"SERVER_ERROR\"\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "TOC Download", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource. 
Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "x-authenticated-user-token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/export/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "export", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID which user want to download" + } + ] + }, + "description": "This API is used to download collection in csv format.\n\nThe endpoint for TOC download is /export\nThe fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. The TOC was downloaded successfully.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. 
Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "x-authenticated-user-token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/export/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "export", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID which user want to download" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.export\",\n \"ver\": \"v1\",\n \"ts\": \"2020-11-27 15:47:33:106+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"ab202e68-bacd-4eee-a3b3-10d59b09c17f\",\n \"err\": null,\n \"status\": \"success\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"collection\": {\n \"tocUrl\": \"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/collection/toc/do_11316025504283852811_test-27-nov_1606491356306.csv\",\n \"ttl\": \"86400\"\n }\n }\n}" + }, + { + "name": "Resource Not Found", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. 
Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "x-authenticated-user-token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/export/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "export", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID which user want to download" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.export\",\n \"ver\": \"v1\",\n \"ts\": \"2020-11-27 15:49:18:113+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"1f4ed0b6-ab14-4644-8e91-0bed6b0cd756\",\n \"err\": \"RESOURCE_NOT_FOUND\",\n \"status\": \"RESOURCE_NOT_FOUND\",\n \"errmsg\": \"rootId do_11263298042220544900 does not exist\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "Internal Server Error", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. 
Possible media types are:- \n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) Specify authorization key (format: Bearer api-key) received from administrator when performing given API request.", + "key": "x-authenticated-user-token", + "value": "{{authToken}}" + }, + { + "description": "(Required) Access token of registered user performing given API request.", + "key": "Authorization", + "value": "{{api-key}}" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/export/:collection-id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "export", + ":collection-id" + ], + "variable": [ + { + "key": "collection-id", + "value": "consectetur minim incididunt", + "description": "(Required) Collection ID which user want to download" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.export\",\n \"ver\": \"v1\",\n \"ts\": \"2020-11-23 06:48:19:128+0000\",\n \"params\": {\n \"resmsgid\": null,\n \"msgid\": \"50f76a53-4710-4333-936f-c043031eec03\",\n \"err\": \"SERVER_ERROR\",\n \"status\": \"SERVER_ERROR\",\n \"errmsg\": \"SERVER_ERROR\"\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + } + ], + "description": "The Collection CSV TOC API resources perform operations related to download and upload of collection type of contents on the Sunbird Platform in csv format. 
The CSV TOC API(s) operations include uploads and downloads.\n\nThe URL for CSV TOC API(s) is /collection/v1/.\n\n- The backend URL for CSV TOC API(s) is collection/v4/.\n- [TestCases](https://desktop.postman.com/?desktopVersion=10.6.0&userId=1419050&teamId=304931)\n \n\nContact Support: \nEmail: [info@sunbird.org](https://desktop.postman.com/?desktopVersion=10.6.0&userId=1419050&teamId=304931)" + }, + { + "name": "Create Collection", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"contentType\": \"voluptate magna aute laboris mollit\",\n \"createdBy\": \"ea dolor veni\",\n \"createdFor\": [\n \"irure sunt dolor ut\",\n \"nos\"\n ],\n \"creator\": \"reprehenderit dolore labore tempor\",\n \"description\": \"in aute dolore laborum e\",\n \"framework\": \"laborum ex culpa voluptate\",\n \"mimeType\": \"ad minim ullamco pariatur\",\n \"name\": \"commodo\",\n \"organisation\": [\n \"sunt voluptate eiusmod Excepteur dolore\",\n \"nulla\"\n ],\n \"primaryCategory\": \"eu pariatur occaecat sunt\",\n \"resourceType\": \"culpa irure\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + 
"url": { + "raw": "{{baseUrl}}/collection/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "create" + ] + }, + "description": "This API is associated with creating Collection on the Sunbird Platform.\n - The endpoint for **Create Collection** is `/collection/v1/create`\n - It points to knowledge-mw-service - `/v1/collection/create`\n - It is mandatory to provide values for parameters marked with *\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"contentType\": \"velit pariatur ea\",\n \"createdBy\": \"magn\",\n \"createdFor\": [\n \"ea culpa in do\",\n \"incididunt tempor ea\"\n ],\n \"creator\": \"mollit aute\",\n \"description\": \"officia aliquip elit\",\n \"framework\": \"qui incididunt aliquip exercitation\",\n \"mimeType\": \"est velit sunt voluptate\",\n \"name\": \"ad\",\n \"organisation\": [\n \"laborum non\",\n \"enim mollit ex\"\n ],\n \"primaryCategory\": \"sed Excepteur dolore officia qui\",\n \"resourceType\": \"minim\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": 
"{{baseUrl}}/collection/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2022-01-03T09:05:31.875Z\",\n \"params\": {\n \"resmsgid\": \"4dca4330-6c74-11ec-b3c3-d9afeab1a49c\",\n \"msgid\": \"0340ddc8-1b95-30c5-4a1d-b5181ba88c5e\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content_id\": \"do_113444716395110400184\",\n \"versionKey\": \"1641200731833\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "magna" + }, + { + "description": "The Asset Type entity is the media type of the resource.Possible", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API", + "key": "x-authenticated-user-token", + "value": "magna" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs", + "key": "X-Channel-ID", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"contentType\": \"velit pariatur ea\",\n \"createdBy\": \"magn\",\n \"createdFor\": [\n \"ea culpa in do\",\n \"incididunt tempor ea\"\n ],\n \"creator\": \"mollit aute\",\n \"description\": \"officia aliquip elit\",\n \"framework\": \"qui incididunt aliquip exercitation\",\n \"mimeType\": \"est velit sunt voluptate\",\n \"name\": \"ad\",\n \"organisation\": [\n \"laborum non\",\n 
\"enim mollit ex\"\n ],\n \"primaryCategory\": \"sed Excepteur dolore officia qui\",\n \"resourceType\": \"minim\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2022-01-03T09:48:59.425Z\",\n \"params\": {\n \"resmsgid\": \"6002d110-6c7a-11ec-b3c3-d9afeab1a49c\",\n \"status\": \"failed\",\n \"err\": \"CLIENT_ERROR\",\n \"errmsg\": \"Validation Errors\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": [\n \"Metadata resourceType should be one of: [Read, Learn, Teach, Play, Test, Practice, Experiment, Collection, Book, Lesson Plan, Course, Theory, Worksheet, Practical]\"\n ]\n }\n}" + } + ] + }, + { + "name": "Collection Read", + "request": { + "method": "GET", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/read/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "read", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with Read Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Read Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Read Collection** is `/collection/v1/read`\n - It points to knowledge-mw-service - `/v1/collection/read`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/read/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "read", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-09T12:09:04.202Z\",\n \"params\": {\n \"resmsgid\": \"548b82a0-3a17-11eb-be5b-f9d249ba9049\",\n \"msgid\": \"5488ea90-3a17-11eb-be5b-f9d249ba9049\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content\": {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"code\": \"org.sunbird.ccG6ru\",\n \"credentials\": {\n \"enabled\": false\n },\n \"channel\": \"in.ekstep\",\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/pdf\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2020-12-09T12:08:54.913+0000\",\n \"objectType\": \"Content\",\n \"primaryCategory\": \"Explanation Content\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2020-12-09T12:08:54.913+0000\",\n \"contentEncoding\": \"identity\",\n \"contentType\": \"Resource\",\n \"dialcodeRequired\": false,\n \"identifier\": \"do_1131687689003827201864\",\n \"lastStatusChangedOn\": \"2020-12-09T12:08:54.913+0000\",\n \"audience\": [\n \"Student\"\n ],\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Default\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": 
\"1607515734913\",\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"NCF\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\",\n \"compatibilityLevel\": 1,\n \"name\": \"API DOCUMENTATION CONTENT\",\n \"status\": \"Draft\"\n }\n }\n}" + } + ] + }, + { + "name": "Collection Private Read", + "request": { + "method": "GET", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/private/read/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "private", + "read", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with read collection private on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Private Read Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Read Collection** is `/collection/v1/private/read`\n - It points to knowledge-mw-service - `/v1/collection/private/read`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate 
Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/private/read/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "private", + "read", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-09T12:09:04.202Z\",\n \"params\": {\n \"resmsgid\": \"548b82a0-3a17-11eb-be5b-f9d249ba9049\",\n \"msgid\": \"5488ea90-3a17-11eb-be5b-f9d249ba9049\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content\": {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"code\": \"org.sunbird.ccG6ru\",\n \"credentials\": {\n \"enabled\": false\n },\n \"channel\": \"in.ekstep\",\n \"language\": [\n \"English\"\n ],\n 
\"mimeType\": \"application/pdf\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2020-12-09T12:08:54.913+0000\",\n \"objectType\": \"Content\",\n \"primaryCategory\": \"Explanation Content\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2020-12-09T12:08:54.913+0000\",\n \"contentEncoding\": \"identity\",\n \"contentType\": \"Resource\",\n \"dialcodeRequired\": false,\n \"identifier\": \"do_1131687689003827201864\",\n \"lastStatusChangedOn\": \"2020-12-09T12:08:54.913+0000\",\n \"audience\": [\n \"Student\"\n ],\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Default\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": \"1607515734913\",\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"NCF\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\",\n \"compatibilityLevel\": 1,\n \"name\": \"API DOCUMENTATION CONTENT\",\n \"status\": \"Draft\"\n }\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/private/read/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "private", + "read", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.private.read\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T05:44:55ZZ\",\n \"params\": {\n \"resmsgid\": \"6ad2cc51-3878-4625-b2b7-89e5ab6b8cca\",\n \"err\": \"ERR_INVALID_CHANNEL\",\n \"status\": \"failed\",\n \"errmsg\": \"Please Provide Channel!\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Collection Update", + "request": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"versionKey\": \"{{versionKey}}\",\n \"lastUpdatedBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\",\n \"description\": \"dsd\",\n \"name\": \"API DOCUMENTATION CONTENT\",\n \"mimeType\": \"application/vnd.ekstep.content-collection\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/update/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "update", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with update Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Update Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Collection** is `/collection/v1/update`\n - It points to knowledge-mw-service - `/v1/collection/update`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." 
+ }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"versionKey\": \"{{versionKey}}\",\n \"lastUpdatedBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\",\n \"description\": \"dsd\",\n \"name\": \"API DOCUMENTATION CONTENT\",\n \"mimeType\": \"application/vnd.ekstep.content-collection\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/update/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "update", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T07:01:16ZZ\",\n \"params\": {\n \"resmsgid\": \"5d0b07e7-be99-41a1-a702-ef10bdf93c61\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": 
\"do_11334223976733900811\",\n \"node_id\": \"do_11334223976733900811\",\n \"versionKey\": \"1641452476524\"\n }\n}" + } + ] + }, + { + "name": "Collection Copy", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"createdBy\": \"Test\",\n \"createdFor\": [\n \"Sunbird\"\n ],\n \"organisation\": [\n \"Sunbird\"\n ],\n \"framework\": \"test\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/copy/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "copy", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with copy Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Copy Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Collection** is `/collection/v1/copy`\n - It points to knowledge-mw-service - `/v1/collection/copy`\n - {...}refers to a section of 
review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"createdBy\": \"laborum qui eiusmod sunt\",\n \"createdFor\": [\n \"et volup\",\n \"ullamco \"\n ],\n \"framework\": \"exercitation ex fugiat\",\n \"organisation\": [\n \"sunt ea mollit ullamco\",\n \"et\"\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/copy/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "copy", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.copy\",\n \"ver\": \"3.0\",\n \"ts\": \"2022-01-06T07:27:47ZZ\",\n \"params\": {\n 
\"resmsgid\": \"38bc39fd-86ce-4ca0-9509-f11d7f761bf1\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": {\n \"do_1134417529722552321144\": \"do_1134467917040271361183\"\n },\n \"versionKey\": \"1641454067281\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"createdBy\": \"laborum qui eiusmod sunt\",\n \"createdFor\": [\n \"et volup\",\n \"ullamco \"\n ],\n \"framework\": \"exercitation ex fugiat\",\n \"organisation\": [\n \"sunt ea mollit ullamco\",\n \"et\"\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/copy/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "copy", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": 
\"api.collection.copy\",\n \"ver\": \"3.0\",\n \"ts\": \"2022-01-06T07:24:41ZZ\",\n \"params\": {\n \"resmsgid\": \"3b2ee619-4c41-4f3c-a6b1-0b31bba23dfb\",\n \"err\": \"ERR_INVALID_REQUEST\",\n \"status\": \"failed\",\n \"errmsg\": \"Cannot Copy content which is in retired status\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Collection Reject", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"rejectComment\": \"Collection rejected\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/reject/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "reject", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with reject Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Reject Collection\" request based on parameters provided as metadata in the request body.\n - The 
endpoint for **Reject Collection** is `/collection/v1/reject`\n - It points to knowledge-mw-service - `/v1/collection/reject`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"collection\": {\n \"rejectComment\": \"Collection rejected\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/collection/v1/reject/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "reject", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.review.reject\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T07:04:33ZZ\",\n \"params\": 
{\n \"resmsgid\": \"1fdef75e-9854-4dbb-a829-efb03debde84\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_11334223976733900811\",\n \"node_id\": \"do_11334223976733900811\"\n }\n}" + } + ] + }, + { + "name": "Collection Retire", + "request": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/retire/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "retire", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with Retire Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Retire Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Retire Collection** is `/collection/v1/retire`\n - It points to knowledge-mw-service - `/v1/collection/retire`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory 
fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/retire/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "retire", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.retire\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T06:51:00ZZ\",\n \"params\": {\n \"resmsgid\": \"280f83f7-4f69-4c22-bc93-33ebeb8823e8\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_11334223976733900811\",\n \"node_id\": \"do_11334223976733900811\"\n }\n}" + }, + { + "name": "Bad Request", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": 
"magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/retire/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "retire", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.retire\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T06:56:42ZZ\",\n \"params\": {\n \"resmsgid\": \"0c79c484-c75f-4e77-b354-69c36a1caa7e\",\n \"err\": \"ERR_CONTENT_RETIRE\",\n \"status\": \"failed\",\n \"errmsg\": \"Content with Identifier do_11334223976733900811 is already Retired.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Collection Discard", + "request": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": 
"X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/discard/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "discard", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + }, + "description": "This API is associated with Discard Collection on the Sunbird Platform .\n - **_/Collection/_** endpoint executes the \"Discard Collection\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Discard Collection** is `/collection/v1/discard`\n - It points to knowledge-mw-service - `/v1/collection/discard`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "magna" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "magna" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "magna" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "magna" + } + ], + "url": { + "raw": "{{baseUrl}}/collection/v1/discard/:collection_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "collection", + "v1", + "discard", + ":collection_id" + ], + "variable": [ + { + "key": "collection_id", + "value": "magna", + "description": "(Required) Please append a valid Collection Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.collection.discard\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T06:41:51ZZ\",\n \"params\": {\n \"resmsgid\": \"fc899cd5-958b-40ad-9a5c-6f31b65404b4\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1134467341514588161171\",\n \"message\": \"Draft version of the content with id : do_1134467341514588161171 is discarded\",\n \"node_id\": \"do_1134467341514588161171\"\n }\n}" + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": "https://staging.sunbirded.org/api", + "type": "string" + } + ] +} \ No newline at end of file diff --git a/content-api/api-tests/Collections/Sunbird Content Management API(s).postman_collection.json b/content-api/api-tests/Collections/Sunbird Content Management API(s).postman_collection.json new file mode 100644 index 000000000..9a7eddef8 --- /dev/null +++ b/content-api/api-tests/Collections/Sunbird Content Management API(s).postman_collection.json @@ -0,0 +1,5667 @@ +{ + "info": { + "_postman_id": "868e5465-d7df-4e57-9fe0-249b2e15cceb", + "name": "Sunbird Content Management API(s)", + "description": "The Content Management API allows you to manage content over the sunbird platform.Apis perform operations related to all the _Content_ on the Sunbird Platform.\n- Each of the 
endpoints serve a different purpose, so which one to use depends on what you want to do\n- The operations include CRUD (Create, Update, Read and Delete) operations and other operations such as upload, publish, flag, link Dialcode, etc.\n- The URL for Sunbird Content API(s) is `content/v2`\n- [TestCases](https://www.getpostman.com/collections/3c1817fcb4606ae53b9a)\n\nContact Support:\n Email: info@sunbird.org", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", + "_exporter_id": "1419050" + }, + "item": [ + { + "name": "content", + "item": [ + { + "name": "v2", + "item": [ + { + "name": "upload", + "item": [ + { + "name": "Upload Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "formdata", + "formdata": [ + { + "key": "file", + "description": "(Required) Content package file which you intend to upload", + "type": "file", + "src": [] + } + ] + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with uploading content on the Sunbird Platform .\n - The**_/upload/{content Id}_** endpoint executes \"Upload Content\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Upload Content** is `/content/v2/upload`\n - It points to knowledge-mw-service - `/v2/content/upload`\n - {...}refers to a section of upload endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! 
Successful operation.\"Content upload\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "formdata", + "formdata": [ + { + "key": "file", + "description": "(Required) Content package file which you intend to upload", + "type": "file", + "src": [] + } + ] + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.upload\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T21:00:16ZZ\",\n \"params\": {\n \"resmsgid\": \"4505a08c-9d0c-447b-8b3f-692e31f841fd\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1131697376667402241315\",\n \"artifactUrl\": 
\"https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/content/do_112547906582659072139/artifact/pdf_1531726896452.pdf\",\n \"content_url\": \"https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/content/do_112547906582659072139/artifact/pdf_1531726896452.pdf\",\n \"node_id\": \"do_1131697376667402241315\",\n \"versionKey\": \"1607634016927\"\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Review** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "formdata", + "formdata": [ + { + "key": "file", + "description": "(Required) Content package file which you intend to upload", + "type": "file", + "src": [] + } + ] + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.upload\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T21:21:20ZZ\",\n \"params\": {\n \"resmsgid\": \"58139c89-2852-4673-b414-2f867c91bbc1\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_11316973766674022413151\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "formdata", + "formdata": [ + { + "key": "file", + "description": "(Required) Content package file which you intend to upload", + "type": "file", + "src": [] + } + ] + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Upload URL", + "request": { 
+ "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"fileName\": \"test.pdf\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/url/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + "url", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with uploading content on the Sunbird Platform .\n - The**_/upload/url/{content Id}_** endpoint executes \"Upload Content URL\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Upload Content URL** is `/content/v2/upload/url`\n - It points to knowledge-mw-service - `/v2/content/upload/url`\n - {...}refers to a section of upload endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field 
of API URL.\n - On Success api returns pre-signed-url link with come configured ttl against which file can be uploaded.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Content upload\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"fileName\": \"test.pdf\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/url/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + "url", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.upload.url\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T23:52:44ZZ\",\n \"params\": {\n \"resmsgid\": \"81e308fa-3e36-45ff-b295-e28e8f757dc7\",\n \"msgid\": 
null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_11317053041892556811094\",\n \"url_expiry\": \"54000\",\n \"pre_signed_url\": \"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_11317053041892556811094/10-page-pdf.pdf?sv=2017-04-17&se=2020-12-12T14%3A52%3A44Z&sr=b&sp=w&sig=Tlt6uWI/6lMeupkEbsTAmGajmo1qzw8HeUHrVwOm6yk%3D\"\n }\n}" + }, + { + "name": "BAD REQUEST. The 'Upload Url' operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"fileName\": \"test.pdf\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/url/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + "url", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.upload.url\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T23:54:07ZZ\",\n \"params\": {\n \"resmsgid\": \"4edde64d-3116-4271-bcc1-a8ba0ec2d56b\",\n \"msgid\": null,\n \"err\": \"ERR_CONTENT_BLANK_FILE_NAME\",\n \"status\": \"failed\",\n \"errmsg\": \"File name is blank\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Id sent for generating upload url** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All 
content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"fileName\": \"test.pdf\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/url/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + "url", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.upload.url\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T23:48:26.694Z\",\n \"params\": {\n \"resmsgid\": \"5cf72660-3c0b-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_11254577204586086415\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"fileName\": \"test.pdf\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/upload/url/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "upload", + "url", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + } + ] + }, + { + "name": "flag", + "item": [ + { + "name": "Flag Content", + "request": { + "auth": { + 
"type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with setting Flag for content.\n - The **_/flag/{content Id}_** endpoint executes a request to set flagging status for content ,flag can either be set to \"Accepted\" or \"Rejected\".\n - The endpoint for **Flag Content** is `/content/v2/flag`\n - It points to knowledge-mw-service - `/v2/content/flag`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." 
+ }, + "response": [ + { + "name": "OK ! Successful operation.\"Content Flagging\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.flag\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T08:45:49ZZ\",\n \"params\": {\n \"resmsgid\": \"9c935e0c-b504-45e6-bd15-313f46c4613f\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_213143334457982976144\",\n \"node_id\": \"do_213143334457982976144\",\n \"versionKey\": \"1607676349236\"\n }\n}" + }, + { + "name": "ERR_CONTENT_NOT_FLAGGABLE! 
Looks like contentId is not in flaggable status.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.flag\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T08:44:53ZZ\",\n \"params\": {\n \"resmsgid\": \"2ff1945b-a114-44ca-929e-7d19a6f8a745\",\n \"msgid\": null,\n \"err\": \"ERR_CONTENT_NOT_FLAGGABLE\",\n \"status\": \"failed\",\n \"errmsg\": \"Unpublished Content do_213168073757138944196 cannot be flagged\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Flag** operation failed !The possible reason for failure is that you may have provided 
wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.flag\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T09:41:41ZZ\",\n \"params\": {\n \"resmsgid\": \"5b4f0b33-3941-4c18-b8bf-123c2e0348e6\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_2131433344579829761441\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-Userid", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Accept Flag", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": 
"", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"1507102327646\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/accept/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + "accept", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with setting Accept Flag for content under review process on the Sunbird Platform .\n - The **_/flag/accept/_**endpoint executes request for flagging content under review to \"Accept flag\", if content is found appropriate.\n - The endpoint for **Flag Accept Content** is `/content/v2/flag/accept`\n - It points to knowledge-mw-service - `/v2/content/flag/accept`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! 
Successful operation.\"Accept Flag\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"1507102327646\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/accept/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + "accept", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.flag.accept\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T11:40:01.464Z\",\n \"params\": {\n \"resmsgid\": \"9a9e4b80-3ba5-11eb-b0a2-8d5c9f561887\",\n \"msgid\": \"9a49e9f0-3ba5-11eb-b0a2-8d5c9f561887\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_31221753157709004825531\",\n \"node_id\": 
\"do_31221753157709004825531\",\n \"versionKey\": \"1607686801161\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"Accept Flag\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"1507102327646\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/accept/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + "accept", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.flag.accept\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T11:25:34.614Z\",\n \"params\": {\n \"resmsgid\": \"95ef7b60-3ba3-11eb-be5b-f9d249ba9049\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": 
\"ERR_INVALID_CONTENT\",\n \"errmsg\": \"Invalid Flagged Content! Content Can Not Be Accepted.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Flag Accept** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"1507102327646\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/accept/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + "accept", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.flag.accept\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T11:24:42.903Z\",\n \"params\": {\n \"resmsgid\": 
\"771d0270-3ba3-11eb-be5b-f9d249ba9049\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_213143334457982976144\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"versionKey\": \"1507102327646\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/flag/accept/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "flag", + "accept", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + } + ] + }, + { + "name": "Create Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) 
Represents the authorized User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"name\": \"H5P Content\",\n \"code\": \"h5pcontent\",\n \"mimeType\": \"application/vnd.ekstep.h5p-archive\",\n \"primaryCategory\": \"Resource\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "create" + ] + }, + "description": "This API is associated with creating content on the Sunbird Platform.\n - The endpoint for **Create Content** is `/content/v2/create`\n - It points to knowledge-mw-service - `/v2/content/create`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. 
The **Create Content** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"name\": \"H5P Content\",\n \"code\": \"h5pcontent\",\n \"mimeType\": \"application/vnd.ekstep.h5p-archive\",\n \"primaryCategory\": \"Resource\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.create\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T20:24:45ZZ\",\n \"params\": {\n \"resmsgid\": \"3be02c4b-3324-41a3-afd8-60f6be0584d2\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": 
{\n \"identifier\": \"do_1131697204035993601314\",\n \"node_id\": \"do_1131697204035993601314\",\n \"versionKey\": \"1607631885207\"\n }\n}" + }, + { + "name": "'BAD REQUEST. The **Create Content** operation failed. You may have missed input for a mandatory parameter.'", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"name\": \"H5P Content\",\n \"code\": \"h5pcontent\",\n \"mimeType\": \"application/vnd.ekstep.h5p-archive\",\n \"primaryCategory\": \"Resource\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:50:29.786Z\",\n \"params\": {\n \"resmsgid\": \"c169a7a0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_CONTENT_CREATE_FIELDS_MISSING\",\n \"errmsg\": \"Required fields for create content are missing\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "'INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. 
If the problem persists contact us at info@sunbird.org.'", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) Represents the authorized User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"name\": \"H5P Content\",\n \"code\": \"h5pcontent\",\n \"mimeType\": \"application/vnd.ekstep.h5p-archive\",\n \"primaryCategory\": \"Resource\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "create" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n 
\"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Update Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"versionKey\": \"1607631400608\",\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/update/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "update", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with updating content on the Sunbird Platform .\n - **_/Update/_** endpoint executes the \"Update Content\" request based on parameters provided as metadata in the request body.\n - The endpoint for 
**Update Content** is `/content/v2/update`\n - It points to knowledge-mw-service - `/v2/content/update`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.Content update operation was successfuly executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"versionKey\": \"1607631400608\",\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/update/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "update", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2020-12-10T20:26:07ZZ\",\n \"params\": {\n \"resmsgid\": \"80aa9310-b749-411c-a13b-8d9f25af389f\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1131697204035993601314\",\n \"node_id\": \"do_1131697204035993601314\",\n \"versionKey\": \"1607631967842\"\n }\n}" + }, + { + "name": "BAD REQUEST. The 'Update Content' operation failed ! 
The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"versionKey\": \"1607631400608\",\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/update/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "update", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2020-12-10T20:27:01ZZ\",\n \"params\": {\n \"resmsgid\": \"87eb99c6-bc10-406a-a6fe-13651f546ce1\",\n \"msgid\": null,\n \"err\": \"ERR_INVALID_REQUEST\",\n \"status\": 
\"failed\",\n \"errmsg\": \"Please Provide Version Key!\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Update** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"versionKey\": \"1607631400608\",\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/update/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "update", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.update\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-12T00:31:16ZZ\",\n \"params\": {\n \"resmsgid\": \"c29ffd8b-d105-4eeb-a875-deaeae2bf5b8\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_21317015414162227213604\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"versionKey\": \"1607631400608\",\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/update/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "update", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n 
},\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Read Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with viewing and reading out the content on the Sunbird Platform .\n - The **_/read/{contentId}_** endpoint executes request for fetching the contents to be viewed and read.\n - The endpoint for **Read Content** is `/content/v2/read`\n - It points to knowledge-mw-service - `/v2/content/read`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Read content\" operation was successfully executed. 
For all possible metadata, please [refer](https://github.com/project-sunbird/knowledge-platform/blob/master/schemas/content/1.0/schema.json)", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T20:38:32.510Z\",\n \"params\": {\n \"resmsgid\": \"ab16e5e0-3b27-11eb-b0a2-8d5c9f561887\",\n \"msgid\": \"ab131550-3b27-11eb-b0a2-8d5c9f561887\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content\": {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"code\": \"org.sunbird.EHdZir\",\n \"credentials\": {\n \"enabled\": \"No\"\n },\n \"channel\": \"channel-1\",\n \"language\": [\n \"English\"\n ],\n \"mimeType\": \"application/vnd.ekstep.h5p-archive\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2020-12-10T20:38:13.315+0000\",\n \"objectType\": \"Content\",\n \"primaryCategory\": \"Learning Resource\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2020-12-10T20:38:13.315+0000\",\n \"contentEncoding\": \"gzip\",\n \"dialcodeRequired\": \"No\",\n \"trackable\": {\n \"enabled\": \"No\",\n \"autoBatch\": 
\"No\"\n },\n \"identifier\": \"do_21316972702362828813477\",\n \"lastStatusChangedOn\": \"2020-12-10T20:38:13.315+0000\",\n \"audience\": [\n \"Student\"\n ],\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Default\",\n \"consumerId\": \"2eaff3db-cdd1-42e5-a611-bebbf906e6cf\",\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": \"1607632693315\",\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"NCF\",\n \"createdBy\": \"874ed8a5-782e-4f6c-8f36-e0288455901e\",\n \"compatibilityLevel\": 1,\n \"name\": \"Test_h5p\",\n \"status\": \"Draft\"\n }\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Read** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T20:37:58.223Z\",\n \"params\": {\n \"resmsgid\": \"96a71df0-3b27-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. 
| [Invalid Node Id.]: do_1131697204035993601314\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Submit for Review", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type 
entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/review/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "review", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": ">- This API is associated with Submission of content for Review on the Sunbird Platform .\n - The**_/review/{content Id}_** endpoint executes \"Submit for Review\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Review Content** is `/content/v2/review`\n - It points to knowledge-mw-service - `/v2/content/review`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! 
Successful operation.Content sent for review operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/review/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "review", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"ekstep.learning.content.review\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T21:06:26ZZ\",\n \"params\": {\n \"resmsgid\": \"186cf9a9-8094-4cdd-926f-b77a030921d1\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"do_1131697376667402241315\",\n \"versionKey\": \"1607634386533\"\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Review** operation failed !The possible reason for failure is that you may 
have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/review/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "review", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"ekstep.learning.content.review\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T21:06:54ZZ\",\n \"params\": {\n \"resmsgid\": \"944ac944-78d9-43e2-bfd5-337ab52b6552\",\n \"msgid\": null,\n \"err\": \"ERR_TAXONOMY_INVALID_CONTENT\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! While Fetching the Content for Operation | [Content Id: do_11316973766674022413151]\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! 
We track these errors automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/review/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "review", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Publish Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + 
"key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) User Registered ID , which is used for logging In", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) Authorizing user in order to grant access.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"lastPublishedBy\": \"Ekstep\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/publish/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "publish", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Content Id to be published" + } + ] + }, + "description": "This API is associated with publishing content on the Sunbird Platform .\n - **_/publish/_** endpoint executes the \"Publish Content\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Publish Content** is `/content/v2/publish`\n - It points to knowledge-mw-service - `/v2/content/publish`\n - {...}refers to a section of publish endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! 
Successful operation.**Content publish operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) User Registered ID , which is used for logging In", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) Authorizing user in order to grant access.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"lastPublishedBy\": \"Ekstep\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/publish/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "publish", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Content Id to be published" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v3.publish\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T21:41:23.491Z\",\n \"params\": {\n \"resmsgid\": \"72c4ef30-3b30-11eb-b0a2-8d5c9f561887\",\n \"msgid\": \"72979da0-3b30-11eb-b0a2-8d5c9f561887\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content_id\": \"do_21316975736724684813479\",\n \"publishStatus\": \"Publish Operation for Content Id 'do_21316975736724684813479' Started Successfully!\"\n }\n}" + }, + { + "name": "RESOURCE 
NOT FOUND,**Content Publish** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) User Registered ID , which is used for logging In", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) Authorizing user in order to grant access.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"lastPublishedBy\": \"Ekstep\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/publish/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "publish", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Content Id to be published" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v3.publish\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T21:51:39.870Z\",\n \"params\": {\n \"resmsgid\": \"e228e7e0-3b31-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_213169757367246848134791\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! 
Looks like something went wrong! We track these errors automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json\n - Multipart/form-data\n - Application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) User Registered ID , which is used for logging In", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) Authorizing user in order to grant access.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"lastPublishedBy\": \"Ekstep\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/publish/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "publish", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Content Id to be published" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Reject Content Review", + "request": { 
+ "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with rejecting content under review.\n - The **_/reject/{contentId}_** endpoint executes request for rejecting content which is under review.\n - The endpoint for **Reject Content** is `/content/v2/reject`\n - It points to knowledge-mw-service - `/v2/content/reject`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." 
+ }, + "response": [ + { + "name": "OK ! Successful operation.\"Content reject\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v3.reject\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T08:09:46.451Z\",\n \"params\": {\n \"resmsgid\": \"3b7c1230-3b88-11eb-b0a2-8d5c9f561887\",\n \"msgid\": \"3b692670-3b88-11eb-b0a2-8d5c9f561887\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"do_213168073757138944196\",\n \"versionKey\": \"1607674186357\"\n }\n}" + }, + { + "name": "ERROR CONTENT NOT IN REVIEW STATUS ! 
Looks like contentId provided is not in review status.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v3.reject\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T08:03:51.807Z\",\n \"params\": {\n \"resmsgid\": \"6819b0f0-3b87-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_CONTENT_NOT_IN_REVIEW\",\n \"errmsg\": \"Content is not in review state for identifier: do_2131680942619115521243\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "RESOURCE NOT FOUND ! 
Looks like contentId provided is not a valid content.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v3.reject\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T08:08:30.454Z\",\n \"params\": {\n \"resmsgid\": \"0e2fd960-3b88-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_2123462720305561601147\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! 
These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Retire Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { 
+ "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "DELETE", + "header": [ + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/retire/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "retire", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with retiring content on the Sunbird Platform .\n - The **_/retire/{contentId}_** endpoint executes \"Retire Content\" request based on content Id to delete the specific content.'\n - The endpoint for **Retire Content** is `/content/v2/retire`\n - It points to knowledge-mw-service - `/v2/content/retire`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! 
Successful operation.**Content deletion operation was successfully executed.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/retire/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "retire", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.retire\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T12:05:52ZZ\",\n \"params\": {\n \"resmsgid\": \"ebd2655a-a69a-460c-9bcf-0f45f5938fac\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_11283183576849612818\",\n \"node_id\": \"do_11283183576849612818\"\n }\n}" + }, + { + "name": "ERR CONTENT RETIRE! 
Content is already retired.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/retire/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "retire", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.retire\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T12:01:24ZZ\",\n \"params\": {\n \"resmsgid\": \"ec20ec05-581c-4559-988a-449f3c5ae698\",\n \"msgid\": null,\n \"err\": \"ERR_CONTENT_RETIRE\",\n \"status\": \"failed\",\n \"errmsg\": \"Content with Identifier do_11316298360130764811 is already Retired.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Flag Accept** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + 
"value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/retire/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "retire", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.retire\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-11T11:56:35ZZ\",\n \"params\": {\n \"resmsgid\": \"d826e808-2866-4603-9b60-0a3130a37128\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_213143334457982976144\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/retire/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "retire", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Content Discard", + "request": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/discard/:content_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "discard", + ":content_id" + ], + "variable": [ + { + "key": "content_id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with Discard Content on the Sunbird Platform .\n - **_/Content/_** endpoint executes the \"Discard Content\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Discard Content** is `/Content/v2/discard`\n - It points to knowledge-mw-service - `/v2/Content/discard`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID used to execute the API.", + "key": "X-Authenticated-User-Token", + "value": "ea laborum" + }, + { + "description": "The unique ID provided to the State to which the user belongs.", + "key": "X-Channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v2/discard/:content_id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "discard", + ":content_id" + ], + "variable": [ + { + "key": "content_id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.discard\",\n \"ver\": \"4.0\",\n \"ts\": \"2022-01-06T06:41:51ZZ\",\n \"params\": {\n \"resmsgid\": \"fc899cd5-958b-40ad-9a5c-6f31b65404b4\",\n \"status\": \"successful\"\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"do_1134467341514588161171\",\n \"message\": \"Draft version of the content with id : do_1134467341514588161171 is discarded\",\n \"node_id\": \"do_1134467341514588161171\"\n }\n}" + } + ] + }, + { + "name": "Copy Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"createdBy\": \"user1\",\n \"createdFor\": [\n \"user2\"\n ],\n \"organisation\": [\n \"sunbird\"\n ],\n \"framework\": \"NCF\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/copy/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "copy", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with copying a content on the Sunbird Platform .\n - The **_/copy/_**endpoint executes request for copy content, if content is found appropriate.\n - The endpoint for **Copy Content** is `/content/v2/copy`\n - It points to knowledge-mw-service - `/v2/content/copy`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Copy\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"createdBy\": \"user1\",\n \"createdFor\": [\n \"user2\"\n ],\n \"organisation\": [\n \"sunbird\"\n ],\n \"framework\": \"NCF\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/copy/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "copy", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.copy\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T23:28:25.343Z\",\n \"params\": {\n \"resmsgid\": \"90e788f0-3c08-11eb-b0a2-8d5c9f561887\",\n \"msgid\": \"90b3f5d0-3c08-11eb-b0a2-8d5c9f561887\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": {\n \"do_21316884557433241613378\": \"do_21317051846683852813814\"\n },\n \"versionKey\": \"1607729305036\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"Copy Content\" operation failed ! 
The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"createdBy\": \"user1\",\n \"createdFor\": [\n \"user2\"\n ],\n \"organisation\": [\n \"sunbird\"\n ],\n \"framework\": \"NCF\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/copy/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "copy", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.copy\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T23:27:31.708Z\",\n \"params\": {\n \"resmsgid\": \"70ef7bc0-3c08-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_REQUEST\",\n \"errmsg\": \"Please provide valid value for List(createdBy, createdFor, organisation, 
framework)\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Copy** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"createdBy\": \"user1\",\n \"createdFor\": [\n \"user2\"\n ],\n \"organisation\": [\n \"sunbird\"\n ],\n \"framework\": \"NCF\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/copy/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "copy", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.copy\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-11T23:28:36.863Z\",\n \"params\": {\n \"resmsgid\": \"97c558f0-3c08-11eb-b0a2-8d5c9f561887\",\n 
\"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_213168845574332416133781\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"createdBy\": \"user1\",\n \"createdFor\": [\n \"user2\"\n ],\n \"organisation\": [\n \"sunbird\"\n ],\n \"framework\": \"NCF\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/copy/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "copy", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Import Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": [\n {\n \"source\": \"https://vdn.diksha.gov.in/api/content/v2/read/do_4313124780355543040126730\",\n \"metadata\": {},\n \"collection\": [\n {\n \"identifier\": \"do_3131112587361648641337\",\n \"unitId\": \"do_3131112587369922561425\"\n }\n ]\n }\n ]\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/import", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "import" + ] + }, + "description": "This API is associated with importing content on the Sunbird Platform from a different environment.\n - The **_/import/_**endpoint executes request for importing content, if content is found appropriate.\n - The endpoint for **Import Content** is `/content/v2/import`\n - It points to content-service - `/content/v3/import`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Import\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": [\n {\n \"source\": \"https://vdn.diksha.gov.in/api/content/v2/read/do_4313124780355543040126730\",\n \"metadata\": {},\n \"collection\": [\n {\n \"identifier\": \"do_3131112587361648641337\",\n \"unitId\": \"do_3131112587369922561425\"\n }\n ]\n }\n ]\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/import", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "import" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.import\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T16:29:11ZZ\",\n \"params\": {\n \"resmsgid\": \"7cce8934-0a2f-46bf-a0ee-be5f49874cae\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"processId\": \"80a765c7-2202-4b3f-b473-448b5c2cd20e\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"Import Content\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": [\n {\n \"source\": \"https://vdn.diksha.gov.in/api/content/v2/read/do_4313124780355543040126730\",\n \"metadata\": {},\n \"collection\": [\n {\n \"identifier\": \"do_3131112587361648641337\",\n \"unitId\": \"do_3131112587369922561425\"\n }\n ]\n }\n ]\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/import", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "import" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.import\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T16:30:16ZZ\",\n \"params\": {\n \"resmsgid\": \"31f88a39-2514-4d86-b424-240166e315be\",\n \"msgid\": null,\n \"err\": \"ERR_REQUIRED_PROPS_VALIDATION\",\n \"status\": \"failed\",\n \"errmsg\": \"Validation Failed! Mandatory Properties Are [name, code, mimeType, primaryCategory, artifactUrl, framework]\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": [\n {\n \"source\": \"https://vdn.diksha.gov.in/api/content/v2/read/do_4313124780355543040126730\",\n \"metadata\": {},\n \"collection\": [\n {\n \"identifier\": \"do_3131112587361648641337\",\n \"unitId\": \"do_3131112587369922561425\"\n }\n ]\n }\n ]\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v2/import", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v2", + "import" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + } + ] + }, + { + "name": "v1", + "item": [ + { + "name": "lock", + "item": [ + { + "name": "lock list", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the 
resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"resourceId\": [\n \"do_11319479631000371211\",\n \"do_11319851826161254418\",\n \"do_113107812356022272164\",\n \"do_1131983093687828481353\",\n \"do_113197939102253056140\",\n \"do_113197917149233152139\",\n \"do_113197913444737024138\",\n \"do_113197844509425664130\",\n \"do_113197903995772928137\"\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "list" + ] + }, + "description": "This API searches for a list of Locked resource by a user or system or service.\n - The **_lock/v1/list_** endpoint searches the list of Locked resource.\n - All fields marked with an * are mandatory\n - Required fields cannot be null or empty\n - Backend Route: http://knowledge-mw-service:5000/v1/lock/list" + }, + "response": [ + { + "name": "This represents the list response success", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"resourceId\": [\n \"do_11319479631000371211\",\n \"do_11319851826161254418\",\n \"do_113107812356022272164\",\n \"do_1131983093687828481353\",\n \"do_113197939102253056140\",\n \"do_113197917149233152139\",\n \"do_113197913444737024138\",\n \"do_113197844509425664130\",\n \"do_113197903995772928137\"\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "list" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.lock.list\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T09:44:03.248Z\",\n \"params\": {\n \"resmsgid\": \"32228b00-5bcd-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"3247615a-0095-9236-9b6b-357d7b130b6e\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"count\": 1,\n \"data\": [\n {\n \"lockId\": \"1d767439-a517-4433-bf46-fb0cca148d31\",\n \"resourceId\": \"do_11319479631000371211\",\n \"resourceType\": \"Content\",\n \"resourceInfo\": \"{\\\"contentType\\\":\\\"TextBook\\\",\\\"framework\\\":\\\"ekstep_ncert_k-12\\\",\\\"identifier\\\":\\\"do_11319479631000371211\\\",\\\"mimeType\\\":\\\"application/vnd.ekstep.content-collection\\\"}\",\n \"createdBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\",\n \"creatorInfo\": \"{\\\"name\\\":\\\"N11\\\",\\\"id\\\":\\\"5a587cc1-e018-4859-a0a8-e842650b9d64\\\"}\",\n \"createdOn\": \"2021-01-21T09:22:12.577Z\",\n \"deviceId\": \"ad8ed16beb09e7c91534c2af80a1a41e\",\n \"expiresAt\": \"2021-01-21T10:26:27.591Z\"\n }\n ]\n }\n}" + }, + { + "name": "This represents the Internal Server 
Error", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"resourceId\": [\n \"do_11319479631000371211\",\n \"do_11319851826161254418\",\n \"do_113107812356022272164\",\n \"do_1131983093687828481353\",\n \"do_113197939102253056140\",\n \"do_113197917149233152139\",\n \"do_113197913444737024138\",\n \"do_113197844509425664130\",\n \"do_113197903995772928137\"\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "list" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.lock.list\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T12:59:12.456Z\",\n \"params\": {\n \"resmsgid\": \"755de480-5be8-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_LISTING_LOCK_FAILED\",\n \"errmsg\": \"Listing lock failed\"\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Lock retire", + "request": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "url": { + "raw": "{{baseUrl}}/lock/v1/retire", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "retire" + ] + }, + "description": "This API used to retire the Locked resource by a user or system or service.\n - The **_lock/v1/retire_** endpoint used to retire the Locked resource by a user or system or service.\n - All fields marked with an * are mandatory\n - Required fields cannot be null or empty\n - Backend Route: http://knowledge-mw-service:5000/v1/lock/retire" + }, + "response": [ + { + "name": "This represents the retire success response", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "url": { + "raw": "{{baseUrl}}/lock/v1/retire", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "retire" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.lock.retire\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T11:07:24.864Z\",\n \"params\": {\n \"resmsgid\": \"d7549400-5bd8-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"eb729276-769e-8db2-477a-91548152908b\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {}\n}" + }, + { + "name": "This represents the retire Bad Request", + "originalRequest": { + "method": "DELETE", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "url": { + "raw": "{{baseUrl}}/lock/v1/retire", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "retire" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.lock.retire\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T11:08:43.618Z\",\n \"params\": {\n \"resmsgid\": \"0645ca40-5bd9-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_LOCK_RETIRING_FAILED\",\n \"errmsg\": \"Resource is not locked\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Lock create", + "request": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"resourceId\": \"do_11319479631000371211\",\n \"resourceType\": \"Content\",\n \"resourceInfo\": \"{\\\"contentType\\\":\\\"TextBook\\\",\\\"framework\\\":\\\"ekstep_ncert_k-12\\\",\\\"identifier\\\":\\\"do_11319479631000371211\\\",\\\"mimeType\\\":\\\"application/vnd.ekstep.content-collection\\\"}\",\n \"creatorInfo\": \"{\\\"name\\\":\\\"N11\\\",\\\"id\\\":\\\"5a587cc1-e018-4859-a0a8-e842650b9d64\\\"}\",\n \"createdBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\",\n \"isRootOrgAdmin\": true\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "create" + ] + }, + "description": "This API used to create the Lock resource by a user or system or service.\n - The **_lock/v1/create_** endpoint search the bulk job approve of the content.\n - All fields marked with an * are mandatory\n - Required fields cannot be null or empty\n - Backend Route: http://knowledge-mw-service:5000/v1/lock/create" + }, + "response": [ + { + "name": "This represents the create response body 200", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"resourceId\": \"do_11319479631000371211\",\n \"resourceType\": \"Content\",\n \"resourceInfo\": \"{\\\"contentType\\\":\\\"TextBook\\\",\\\"framework\\\":\\\"ekstep_ncert_k-12\\\",\\\"identifier\\\":\\\"do_11319479631000371211\\\",\\\"mimeType\\\":\\\"application/vnd.ekstep.content-collection\\\"}\",\n \"creatorInfo\": \"{\\\"name\\\":\\\"N11\\\",\\\"id\\\":\\\"5a587cc1-e018-4859-a0a8-e842650b9d64\\\"}\",\n \"createdBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\",\n \"isRootOrgAdmin\": true\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.lock.create\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T11:14:46.643Z\",\n \"params\": {\n \"resmsgid\": \"dea6b430-5bd9-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"33e7b5d4-7662-bab6-1c06-72e71b8b8221\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"lockKey\": \"bad55f24-8081-4d03-baab-364296062b47\",\n \"expiresAt\": \"2021-01-21T12:14:46.594Z\",\n \"expiresIn\": 60\n }\n}" + }, + { + "name": "This represents the create response Bad Request", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"resourceId\": \"do_11319479631000371211\",\n \"resourceType\": \"Content\",\n \"resourceInfo\": \"{\\\"contentType\\\":\\\"TextBook\\\",\\\"framework\\\":\\\"ekstep_ncert_k-12\\\",\\\"identifier\\\":\\\"do_11319479631000371211\\\",\\\"mimeType\\\":\\\"application/vnd.ekstep.content-collection\\\"}\",\n \"creatorInfo\": \"{\\\"name\\\":\\\"N11\\\",\\\"id\\\":\\\"5a587cc1-e018-4859-a0a8-e842650b9d64\\\"}\",\n \"createdBy\": \"5a587cc1-e018-4859-a0a8-e842650b9d64\",\n \"isRootOrgAdmin\": true\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.lock.create\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T13:09:44.935Z\",\n \"params\": {\n \"resmsgid\": \"ee5a8770-5be9-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_LOCK_CREATION_FIELDS_MISSING\",\n \"errmsg\": \"\\\"resourceInfo\\\" is required\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Lock Refresh", + "request": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + }, + { + "key": "Accept", + "value": "application/json; charset=utf-8" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"resourceId\": \"do_11316513466772684811649\",\n \"resourceType\": \"Content\",\n \"lockId\": \"0e6b1631-93e6-4726-9dcf-0debdba37aa1\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/refresh", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "refresh" + ] + }, + "description": "This API used to refresh the Locked resource by a user or system or service.\n - The **_lock/v1/refresh_** endpoint refresh the Locked resource by a user or system or service.\n - All fields marked with an * are mandatory\n - Required fields cannot be null or empty\n - Backend Route: http://knowledge-mw-service:5000/v1/lock/refresh" + }, + "response": [ + { + "name": "This represents the refresh response 200", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"resourceId\": \"do_11316513466772684811649\",\n \"resourceType\": \"Content\",\n \"lockId\": \"0e6b1631-93e6-4726-9dcf-0debdba37aa1\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/refresh", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "refresh" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json; charset=utf-8" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v1.refresh\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T11:24:31.981Z\",\n \"params\": {\n \"resmsgid\": \"3b8a31d0-5bdb-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"3a33a280-5bdb-11eb-9b0c-abcfbdf41bc3\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"lockKey\": \"0e6b1631-93e6-4726-9dcf-0debdba37aa1\",\n \"expiresAt\": \"2021-01-21T12:24:29.738Z\",\n \"expiresIn\": 60\n }\n}" + }, + { + "name": "This represents the refresh response for Bad Request", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource", + "key": "Content-Type", + "value": "application/json" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ipsum voluptate qui" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"resourceId\": \"do_11316513466772684811649\",\n \"resourceType\": \"Content\",\n \"lockId\": \"0e6b1631-93e6-4726-9dcf-0debdba37aa1\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/lock/v1/refresh", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "lock", + "v1", + "refresh" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.v1.refresh\",\n \"ver\": 1,\n \"ts\": \"2021-01-21T11:27:46.193Z\",\n \"params\": {\n \"resmsgid\": \"af4c9810-5bdb-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_LOCK_REFRESHING_FIELDS_MISSING\",\n \"errmsg\": \"\\\"resourceId\\\" is required\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + } + ] + } + ], + "description": "This services explains the design of lock service and types of locks that are possible with mechanisms to lock a resource and unlock it with different possibilities.\n\nContact Support: \nEmail: [info@sunbird.org](https://desktop.postman.com/?desktopVersion=10.6.0&userId=1419050&teamId=304931)" + }, + { + "name": "Read Private Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: 
state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v1/private/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "private", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with viewing and reading out the private and public contents on the Sunbird Platform.\n - The **_/private/read/{contentId}_** endpoint executes request for fetching the private and public contents to be viewed and read.\n - The endpoint for **Read Private Content** is `/content/v1/private/read`\n - It points to content-service - `/content/v4/private/read`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid content Id value in {Content Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Read Private content\" operation was successfully executed. 
For all possible metadata, please [refer](https://github.com/project-sunbird/knowledge-platform/blob/master/schemas/content/1.0/schema.json)", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v1/private/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "private", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.private.read\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-09-20T20:12:52Z+05:30\",\n \"params\": {\n \"resmsgid\": \"a31dbba1-c3bf-4c3d-a028-f3e5b7de4b57\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"content\": {\n \"ownershipType\": [\n \"createdBy\"\n ],\n \"code\": \"name\",\n \"interceptionPoints\": {},\n \"credentials\": {\n \"enabled\": \"No\"\n },\n \"channel\": \"abc-123\",\n \"language\": [\n 
\"English\"\n ],\n \"mimeType\": \"application/pdf\",\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2021-08-30T13:11:08.485+0530\",\n \"objectType\": \"Content\",\n \"primaryCategory\": \"Learning Resource\",\n \"contentDisposition\": \"inline\",\n \"lastUpdatedOn\": \"2021-08-30T13:11:08.485+0530\",\n \"contentEncoding\": \"identity\",\n \"dialcodeRequired\": \"No\",\n \"trackable\": {\n \"enabled\": \"No\",\n \"autoBatch\": \"No\"\n },\n \"identifier\": \"do_11335549352263680012\",\n \"lastStatusChangedOn\": \"2021-08-30T13:11:08.485+0530\",\n \"audience\": [\n \"Student\"\n ],\n \"os\": [\n \"All\"\n ],\n \"visibility\": \"Private\",\n \"discussionForum\": {\n \"enabled\": \"No\"\n },\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"languageCode\": [\n \"en\"\n ],\n \"version\": 2,\n \"versionKey\": \"1630309268485\",\n \"license\": \"CC BY 4.0\",\n \"idealScreenDensity\": \"hdpi\",\n \"framework\": \"NCF\",\n \"compatibilityLevel\": 1,\n \"name\": \"Untitled Resource\",\n \"status\": \"Draft\"\n }\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Private Read** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v1/private/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "private", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.private.read\",\n \"ver\": \"4.0\",\n \"ts\": \"2021-09-20T21:07:50Z+05:30\",\n \"params\": {\n \"resmsgid\": \"a8bb809b-c2e1-4749-b05b-b2cd7dc1ca49\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_11335549352263680013\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) Represents a physical location (e.g: state) uniquely to which the user belongs.", + "key": "x-channel-id", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "url": { + "raw": "{{baseUrl}}/content/v1/private/read/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "private", + "read", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "ea laborum", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"cupidatat reprehenderit dolor\",\n \"ver\": \"minim anim commodo\",\n \"ts\": \"commodo\",\n \"params\": {\n \"resmsgid\": \"pariatur repre\",\n \"msgid\": \"irure\",\n \"err\": \"consequat esse ea\",\n \"status\": \"esse\",\n \"errmsg\": \"aliqua laboris velit\"\n },\n \"responseCode\": \"reprehenderit dolor elit sunt dolore\"\n}" + } + ] + }, + { + "name": "Search Content", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"objectType\": \"Content\",\n \"status\": []\n },\n \"limit\": 1\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/search", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "search" + ] + }, + "description": "This API is associated with 
Searching Object created as part of sourcing object creation on the Sunbird Platform.\n - The endpoint for **Content Search** is `/content/v1/search`\n - It points to knowledge-mw-service - `/v1/search`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. The **Content Search** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"objectType\": \"Content\",\n \"status\": []\n },\n \"limit\": 1\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/search", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "search" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-22T06:24:45.025Z\",\n \"params\": {\n \"resmsgid\": \"62160510-441e-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"620ae180-441e-11eb-9b0c-abcfbdf41bc3\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"count\": 480465,\n \"content\": [\n {\n \"code\": \"org.ekstep.jun16.story.test05\",\n \"subject\": [\n \"literacy\"\n ],\n \"flags\": [\n \"Test case\"\n ],\n \"channel\": \"in.ekstep\",\n \"downloadUrl\": 
\"https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/ecar_files/org.ekstep.jun16.story.test05/vrgaatiil-upkrmclassroom-activities_1498725323304_org.ekstep.jun16.story.test05_1.0.ecar\",\n \"description\": \"शेर का साथी हाथी\",\n \"lastFlaggedOn\": \"2017-12-27T13:30:48.942+0000\",\n \"language\": [\n \"English\"\n ],\n \"variants\": {\n \"spine\": {\n \"ecarUrl\": \"https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/ecar_files/org.ekstep.jun16.story.test05/vrgaatiil-upkrmclassroom-activities_1498725324677_org.ekstep.jun16.story.test05_1.0_spine.ecar\",\n \"size\": 851\n }\n },\n \"mimeType\": \"application/vnd.ekstep.ecml-archive\",\n \"flaggedBy\": [\n \"Test case\"\n ],\n \"idealScreenSize\": \"normal\",\n \"createdOn\": \"2017-06-29T07:44:15.875+0000\",\n \"objectType\": \"Content\",\n \"collections\": [\n \"do_11228062262625075214\"\n ],\n \"appId\": \"ekstep_portal\",\n \"contentDisposition\": \"inline\",\n \"contentEncoding\": \"gzip\",\n \"artifactUrl\": \"https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/content/org.ekstep.jun16.story.test05/artifact/1485166711340_do_30102464_1498725273215.zip\",\n \"lastUpdatedOn\": \"2017-12-27T13:30:48.968+0000\",\n \"SYS_INTERNAL_LAST_UPDATED_ON\": \"2018-01-09T18:41:32.368+0000\",\n \"primaryCategory\": \"Story\",\n \"owner\": \"EkStep\",\n \"lastUpdatedBy\": \"Test case\",\n \"identifier\": \"org.ekstep.jun16.story.test05\",\n \"audience\": [\n \"Learner\"\n ],\n \"flagReasons\": [\n \"Copyright Violation\"\n ],\n \"visibility\": \"default\",\n \"os\": [\n \"All\"\n ],\n \"consumerId\": \"72e54829-6402-4cf0-888e-9b30733c1b88\",\n \"mediaType\": \"content\",\n \"osId\": \"org.ekstep.quiz.app\",\n \"graph_id\": \"domain\",\n \"nodeType\": \"DATA_NODE\",\n \"pkgVersion\": 1,\n \"versionKey\": \"1515523292368\",\n \"prevState\": \"Draft\",\n \"idealScreenDensity\": \"hdpi\",\n \"dialcodes\": [\n \"DAKDF\",\n \"FSDFDSA\"\n ],\n \"s3Key\": 
\"ecar_files/org.ekstep.jun16.story.test05/vrgaatiil-upkrmclassroom-activities_1498725323304_org.ekstep.jun16.story.test05_1.0.ecar\",\n \"size\": 9983654,\n \"lastPublishedOn\": \"2017-06-29T08:35:23.302+0000\",\n \"compatibilityLevel\": 1,\n \"name\": \"\\tवर्गातील उपक्रम(Classroom Activities)\",\n \"resourceType\": \"Story\",\n \"status\": \"Flagged\",\n \"node_id\": 105761\n }\n ]\n }\n}" + }, + { + "name": "BAD REQUEST. The **Content Search** operation failed. You may have missed input for a mandatory parameter - Content-Type in header", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"objectType\": \"Content\",\n \"status\": []\n },\n \"limit\": 1\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/search", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "search" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-21T11:46:50.400Z\",\n \"params\": {\n \"resmsgid\": \"367e7a00-4382-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_CONTENT_SEARCH_FIELDS_MISSING\",\n \"errmsg\": \"Required fields for search content are missing\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "'INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. 
If the problem persists contact us at info@sunbird.org.'", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"filters\": {\n \"objectType\": \"Content\",\n \"status\": []\n },\n \"limit\": 1\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/search", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "search" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Url Fetch Metadata", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. 
You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"url\": \"https://github.com/project-sunbird/sunbird.org-docs\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/fetchmeta", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "fetchmeta" + ] + }, + "description": "This API is associated with fetching url metadata details on the Sunbird Platform.\n - The endpoint for **Create Content** is `/content/v1/fetchmeta`\n - It points to knowledge-mw-service - `/v1/url/fetchmeta`\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. The **URL Fetch Metadata** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"url\": \"https://github.com/project-sunbird/sunbird.org-docs\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/fetchmeta", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "fetchmeta" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"ts\": \"2021-02-02T08:38:20.329Z\",\n \"params\": {\n \"resmsgid\": \"00edd990-6532-11eb-9dac-65b700f4c009\",\n \"msgid\": null,\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"url\": \"https://github.com/project-sunbird/sunbird.org-docs\",\n \"canonical\": \"https://github.com/project-sunbird/sunbird.org-docs\",\n \"title\": \"project-sunbird/sunbird.org-docs\",\n \"image\": \"https://avatars.githubusercontent.com/u/26705730?s=400&v=4\",\n \"author\": \"\",\n \"description\": \"Repository for documentation on sunbird.org. Contribute to project-sunbird/sunbird.org-docs development by creating an account on GitHub.\",\n \"keywords\": \"\",\n \"source\": \"github.com\",\n \"og:url\": \"https://github.com/project-sunbird/sunbird.org-docs\",\n \"og:locale\": \"\",\n \"og:locale:alternate\": \"\",\n \"og:title\": \"project-sunbird/sunbird.org-docs\",\n \"og:type\": \"object\",\n \"og:description\": \"Repository for documentation on sunbird.org. 
Contribute to project-sunbird/sunbird.org-docs development by creating an account on GitHub.\",\n \"og:determiner\": \"\",\n \"og:site_name\": \"GitHub\",\n \"og:image\": \"https://avatars.githubusercontent.com/u/26705730?s=400&v=4\",\n \"og:image:secure_url\": \"\",\n \"og:image:type\": \"\",\n \"og:image:width\": \"\",\n \"og:image:height\": \"\",\n \"price\": \"\",\n \"priceCurrency\": \"\",\n \"availability\": \"\"\n }\n}" + }, + { + "name": "'BAD REQUEST. The **URL Fetch Metadata** operation failed. You may have missed input for a mandatory parameter.'", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"url\": \"https://github.com/project-sunbird/sunbird.org-docs\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/fetchmeta", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "fetchmeta" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"ts\": \"2021-02-02T08:53:31.808Z\",\n \"params\": {\n \"resmsgid\": \"20368200-6534-11eb-9dac-65b700f4c009\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_FETCH_URLMETA_MISSING\",\n \"errmsg\": \"Required fields for fetching url meta data are missing\"\n },\n \"responseCode\": \"CLIENT_ERROR\"\n}" + }, + { + "name": "'INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. If the problem persists contact us at info@sunbird.org.'", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:-\n - Application/json", + "key": "Content-Type", + "value": "ea laborum" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "ea laborum" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"url\": \"https://github.com/project-sunbird/sunbird.org-docs\"\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/fetchmeta", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "fetchmeta" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"ts\": \"2021-02-02T08:57:49.620Z\",\n \"params\": {\n \"resmsgid\": \"b9e17f40-6534-11eb-9dac-65b700f4c009\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_FETCH_URLMETA_FAILED\",\n \"errmsg\": \"Unable to load the url metadata\"\n }\n}" + } + ] + }, + { + "name": "Reject Flag", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "nisi amet " + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "nisi amet " + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "nisi amet " + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/flag/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "flag", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "nisi amet ", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + }, + "description": "This API is associated with setting Reject Flag for content under review process on the Sunbird Platform .\n - The **_/flag/reject/_**endpoint executes request for flagging content under review to \"Reject flag\", if content is found in-appropriate.\n - The endpoint for **Flag Reject Content** is `/content/v1/flag/reject`\n - It points to knowledge-mw-service - `/v1/content/flag/reject`\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Reject Flag\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "nisi amet " + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "nisi amet " + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "nisi amet " + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/flag/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "flag", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "nisi amet ", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {\n \"node_id\": \"ex reprehenderit magna eiusmod\",\n \"content_id\": \"adipisicing velit ullamco\",\n \"versionKey\": \"pari\"\n },\n \"id\": \"cupidatat dolore elit\",\n \"ver\": \"nostrud exercitation dolore labore minim\",\n \"ts\": \"nisi\",\n \"params\": {\n \"resmsgid\": \"ea officia voluptate\",\n \"msgid\": \"fugiat consequat\",\n \"err\": \"do non minim\",\n \"status\": \"in enim\",\n \"errmsg\": \"mollit eu quis\"\n },\n \"responseCode\": \"consectetur voluptate\"\n}" + }, + { + "name": "ERR CONTENT NOT FLAGGED! Looks like contentId is not in flagged status.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "nisi amet " + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "nisi amet " + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "nisi amet " + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/flag/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "flag", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "nisi amet ", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"voluptate culpa Excepteur minim\",\n \"ver\": \"Duis\",\n \"ts\": \"exercitation cupidatat aute repr\",\n \"params\": {\n \"resmsgid\": \"do Duis cupidatat nostrud\",\n \"msgid\": \"laboris veniam \",\n \"err\": \"ut ad pariatur adipisicing enim\",\n \"status\": \"sit sunt aliqua\",\n \"errmsg\": \"elit dolore irure\"\n },\n \"responseCode\": \"occaecat\"\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Content Send for Flag Reject** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "nisi amet " + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "nisi amet " + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "nisi amet " + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/flag/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "flag", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "nisi amet ", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"voluptate culpa Excepteur minim\",\n \"ver\": \"Duis\",\n \"ts\": \"exercitation cupidatat aute repr\",\n \"params\": {\n \"resmsgid\": \"do Duis cupidatat nostrud\",\n \"msgid\": \"laboris veniam \",\n \"err\": \"ut ad pariatur adipisicing enim\",\n \"status\": \"sit sunt aliqua\",\n \"errmsg\": \"elit dolore irure\"\n },\n \"responseCode\": \"occaecat\"\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "nisi amet " + }, + { + "description": "(Required) The Registered User ID to execute the API.", + "key": "X-Authenticated-User-token", + "value": "nisi amet " + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "nisi amet " + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/content/v1/flag/reject/:Content_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "content", + "v1", + "flag", + "reject", + ":Content_Id" + ], + "variable": [ + { + "key": "Content_Id", + "value": "nisi amet ", + "description": "(Required) Please append a valid Content Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"result\": {},\n \"id\": \"voluptate culpa Excepteur minim\",\n \"ver\": \"Duis\",\n \"ts\": \"exercitation cupidatat aute repr\",\n \"params\": {\n \"resmsgid\": \"do Duis cupidatat nostrud\",\n \"msgid\": \"laboris veniam \",\n \"err\": \"ut ad pariatur adipisicing enim\",\n \"status\": \"sit sunt aliqua\",\n \"errmsg\": \"elit dolore irure\"\n },\n \"responseCode\": \"occaecat\"\n}" + } + ] + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": "https://staging.sunbirded.org/api", + "type": "string" + } + ] +} \ No newline at end of file diff --git a/content-api/collection-csv-actors/pom.xml b/content-api/collection-csv-actors/pom.xml index 45e1060f9..e225b3f7a 100644 --- a/content-api/collection-csv-actors/pom.xml +++ b/content-api/collection-csv-actors/pom.xml @@ -28,7 +28,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar diff --git a/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/manager/CollectionCSVManager.scala b/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/manager/CollectionCSVManager.scala index 61c4fd97a..3fe0c06e9 100644 --- 
a/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/manager/CollectionCSVManager.scala +++ b/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/manager/CollectionCSVManager.scala @@ -23,16 +23,21 @@ import java.nio.charset.StandardCharsets import java.util import java.util.logging.Logger import scala.collection.immutable.{ListMap, Map} -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters.{asJavaIterableConverter, mapAsScalaMapConverter} import scala.collection.mutable import scala.collection.mutable.ListBuffer import scala.concurrent.{ExecutionContext, Future} +import scala.collection.JavaConverters._ + object CollectionCSVManager extends CollectionInputFileReader { private val CONTENT_FOLDER = "cloud_storage.content.folder" val logger = Logger.getLogger("CollectionCSVManager") + val categoryMap: java.util.Map[String, AnyRef] = Platform.getAnyRef("contentTypeToPrimaryCategory", + new util.HashMap[String, AnyRef]()).asInstanceOf[java.util.Map[String, AnyRef]] + def getCode(code: String): String = {DigestUtils.md5Hex(code)} def validateInputData(inputFileExtension: String, csvRecords: util.List[CSVRecord], mode: String, collectionHierarchy: Map[String, AnyRef])(implicit oec: OntologyEngineContext, ec: ExecutionContext): List[Map[String, AnyRef]] = { @@ -323,9 +328,9 @@ object CollectionCSVManager extends CollectionInputFileReader { else try { s""""${nodeInfo(CollectionTOCConstants.IDENTIFIER).toString}": {"isNew": false,"root": false, "metadata": {"mimeType": "application/vnd.ekstep.content-collection", - |"contentType": "$collectionUnitType","name": ${JsonUtils.serialize(nodeInfo("name").toString.trim)}, + |"contentType": "$collectionUnitType","name": ${JsonUtils.serialize(nodeInfo("name").toString.trim)}, "primaryCategory": "${getPrimaryCategory(collectionUnitType)}", |"description": 
${if(nodeInfo.contains(CollectionTOCConstants.DESCRIPTION)) JsonUtils.serialize(nodeInfo(CollectionTOCConstants.DESCRIPTION).toString) else JsonUtils.serialize("")}, - |"dialcodeRequired": "${nodeInfo(CollectionTOCConstants.DIAL_CODE_REQUIRED).toString}","dialcodes": "${nodeInfo(CollectionTOCConstants.DIAL_CODES).toString}", + |"dialcodeRequired": "${nodeInfo(CollectionTOCConstants.DIAL_CODE_REQUIRED).toString}","dialcodes": ["${nodeInfo(CollectionTOCConstants.DIAL_CODES).toString}"], |"code": "${nodeInfo(CollectionTOCConstants.IDENTIFIER).toString}","framework": "$frameworkID", |"keywords": ${if(nodeInfo.contains(CollectionTOCConstants.KEYWORDS) && nodeInfo(CollectionTOCConstants.KEYWORDS).asInstanceOf[List[String]].nonEmpty) nodeInfo(CollectionTOCConstants.KEYWORDS).asInstanceOf[List[String]].map(keyword=>JsonUtils.serialize(keyword)).mkString("[",",","]") else "[]"}, @@ -500,5 +505,13 @@ object CollectionCSVManager extends CollectionInputFileReader { }) } + private def getPrimaryCategory(contentType: String): String ={ + val primaryCategory = categoryMap.get(contentType) + if(primaryCategory.isInstanceOf[String]) + primaryCategory.asInstanceOf[String] + else + primaryCategory.asInstanceOf[util.List[String]].asScala.headOption.getOrElse("Learning Resource") + + } } diff --git a/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/validator/CollectionCSVValidator.scala b/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/validator/CollectionCSVValidator.scala index e9678f7e5..da0bee6ed 100644 --- a/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/validator/CollectionCSVValidator.scala +++ b/content-api/collection-csv-actors/src/main/scala/org.sunbird/collectioncsv/validator/CollectionCSVValidator.scala @@ -16,7 +16,7 @@ import java.io.{File, FileInputStream, IOException, InputStreamReader} import java.nio.charset.StandardCharsets import java.text.MessageFormat import java.util -import 
scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters.{asScalaBufferConverter, mapAsJavaMapConverter, mapAsScalaMapConverter} import scala.collection.immutable.{HashMap, ListMap, Map} import scala.concurrent.ExecutionContext diff --git a/content-api/collection-csv-actors/src/test/resources/application.conf b/content-api/collection-csv-actors/src/test/resources/application.conf index 3bab0fda5..e49fdc833 100644 --- a/content-api/collection-csv-actors/src/test/resources/application.conf +++ b/content-api/collection-csv-actors/src/test/resources/application.conf @@ -1,4 +1,4 @@ -schema.base_path = "../../schemas/" +schema.base_path = "../../schemas" # Configuration graph.dir=/data/testingGraphDB diff --git a/content-api/collection-csv-actors/src/test/scala/org/sunbird/collectioncsv/TestCollectionCSVActor.scala b/content-api/collection-csv-actors/src/test/scala/org/sunbird/collectioncsv/TestCollectionCSVActor.scala index 15ff789b0..39cc30e96 100644 --- a/content-api/collection-csv-actors/src/test/scala/org/sunbird/collectioncsv/TestCollectionCSVActor.scala +++ b/content-api/collection-csv-actors/src/test/scala/org/sunbird/collectioncsv/TestCollectionCSVActor.scala @@ -33,7 +33,6 @@ class TestCollectionCSVActor extends FlatSpec with Matchers with MockFactory { "CollectionCSVActor" should "return failed response for 'unknown' operation" in { testUnknownOperation( Props(new CollectionCSVActor()), getCollectionRequest()) } - it should "return client error on giving content Id with no children" in { (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() diff --git a/content-api/content-actors/pom.xml b/content-api/content-actors/pom.xml index a429dc8f9..39b7c1bbd 100644 --- a/content-api/content-actors/pom.xml +++ b/content-api/content-actors/pom.xml @@ -28,7 +28,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar @@ -81,7 +81,7 @@ net.alchim31.maven 
scala-maven-plugin - 4.4.0 + 3.2.2 ${scala.version} false diff --git a/content-api/content-actors/src/main/scala/org/sunbird/channel/managers/ChannelManager.scala b/content-api/content-actors/src/main/scala/org/sunbird/channel/managers/ChannelManager.scala index ae9d69050..c0cabc2fe 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/channel/managers/ChannelManager.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/channel/managers/ChannelManager.scala @@ -14,7 +14,7 @@ import org.apache.commons.lang3.StringUtils import org.sunbird.common.JsonUtils import scala.collection.JavaConverters._ -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.mutable.ListBuffer object ChannelManager { diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/dial/DIALManager.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/dial/DIALManager.scala index 2bffec273..fa98c7912 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/dial/DIALManager.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/dial/DIALManager.scala @@ -1,7 +1,7 @@ package org.sunbird.content.dial import org.apache.commons.lang3.StringUtils -import org.sunbird.common.Platform +import org.sunbird.common.{JsonUtils, Platform} import org.sunbird.common.dto.{Request, Response, ResponseHandler} import org.sunbird.common.exception._ import org.sunbird.content.util.ContentConstants @@ -476,7 +476,9 @@ object DIALManager { def getDIALReserveUpdateResponse(response: Response, count: Integer, contentId: String, node: Node): Response = { response.getResult.put(DIALConstants.COUNT, count) response.getResult.put(ContentConstants.NODE_ID, contentId) - response.getResult.put(DIALConstants.RESERVED_DIALCODES, node.getMetadata.get(DIALConstants.RESERVED_DIALCODES)) + val reservDialCodes: String = 
node.getMetadata.get(DIALConstants.RESERVED_DIALCODES).asInstanceOf[String] + if(StringUtils.isNotBlank(reservDialCodes)) + response.getResult.put(DIALConstants.RESERVED_DIALCODES, JsonUtils.deserialize(reservDialCodes, classOf[util.Map[String, Integer]])) response } diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/publish/mgr/PublishManager.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/publish/mgr/PublishManager.scala index 16a4e2f71..f08ac0c3d 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/publish/mgr/PublishManager.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/publish/mgr/PublishManager.scala @@ -78,7 +78,7 @@ object PublishManager { } //objectData - objectData.put(ContentConstants.ID, identifier) + objectData.put(ContentConstants.ID, identifier.replace(".img","")) objectData.put(ContentConstants.VER, metadata.get(ContentConstants.VERSION_KEY)) //edata @@ -91,7 +91,7 @@ object PublishManager { instructionEventMetadata.put(ContentConstants.PACKAGE_VERSION, metadata.getOrDefault(ContentConstants.PACKAGE_VERSION,0.asInstanceOf[AnyRef])) instructionEventMetadata.put(ContentConstants.MIME_TYPE, metadata.get(ContentConstants.MIME_TYPE)) instructionEventMetadata.put(ContentConstants.LAST_PUBLISHED_BY, metadata.get(ContentConstants.LAST_PUBLISHED_BY)) - instructionEventMetadata.put(ContentConstants.IDENTIFIER, identifier) + instructionEventMetadata.put(ContentConstants.IDENTIFIER, identifier.replace(".img","")) instructionEventMetadata.put(ContentConstants.OBJECT_TYPE, objectType) edata.put(ContentConstants.METADATA, instructionEventMetadata) edata.put(ContentConstants.ACTION, ContentConstants.PUBLISH) diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/util/DiscardManager.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/util/DiscardManager.scala index 6a2aa5ed6..92d762e57 100644 --- 
a/content-api/content-actors/src/main/scala/org/sunbird/content/util/DiscardManager.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/util/DiscardManager.scala @@ -16,7 +16,7 @@ import org.sunbird.managers.UpdateHierarchyManager.{fetchHierarchy, shouldImageB import org.sunbird.telemetry.logger.TelemetryManager import org.sunbird.utils.{HierarchyConstants, HierarchyErrorCodes} -import scala.collection.JavaConversions._ + import scala.concurrent.{ExecutionContext, Future} object DiscardManager { diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/util/FlagManager.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/util/FlagManager.scala index c42d4e48d..3de36faaf 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/util/FlagManager.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/util/FlagManager.scala @@ -16,7 +16,7 @@ import org.sunbird.graph.utils.NodeUtil import org.sunbird.telemetry.logger.TelemetryManager import org.sunbird.utils.HierarchyConstants import scala.concurrent.{ExecutionContext, Future} -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ object FlagManager { diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/util/RetireManager.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/util/RetireManager.scala index cbd8e4eba..58ed4a138 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/util/RetireManager.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/util/RetireManager.scala @@ -19,7 +19,8 @@ import org.sunbird.parseq.Task import org.sunbird.telemetry.logger.TelemetryManager import org.sunbird.utils.HierarchyConstants -import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import 
scala.collection.mutable.ListBuffer import scala.concurrent.{ExecutionContext, Future} diff --git a/content-api/content-actors/src/test/resources/application.conf b/content-api/content-actors/src/test/resources/application.conf index bacdabc13..10807e335 100644 --- a/content-api/content-actors/src/test/resources/application.conf +++ b/content-api/content-actors/src/test/resources/application.conf @@ -463,8 +463,9 @@ framework.cache.read=false # Max size(width/height) of thumbnail in pixels max.thumbnail.size.pixels=150 -schema.base_path="../../schemas/" +schema.base_path = "../../schemas" content.hierarchy.removed_props_for_leafNodes=["collections","children","usedByContent","item_sets","methods","libraries","editorState"] +learning_content_youtube_apikey="" collection.keyspace = "hierarchy_store" content.keyspace = "content_store" @@ -489,7 +490,7 @@ content.copy.props_to_remove=["downloadUrl", "artifactUrl", "variants", "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", "leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl"] content.copy.origin_data=["name", "author", "license", "organisation"] -content.h5p.library.path="https://s3.ap-south-1.amazonaws.com/ekstep-public-dev/content/templates/h5p-library-latest.zip" +content.h5p.library.path="https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/templates/h5p-library-latest.zip" # DIAL Link dial_service { diff --git a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestCategoryActor.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestCategoryActor.scala index 86f237f6a..59cd97fdc 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestCategoryActor.scala +++ b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestCategoryActor.scala @@ -33,7 +33,11 @@ class TestCategoryActor extends BaseSpec with MockFactory{ (graphDB.getNodeByUniqueIds(_: String, _: 
SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() val request = getCategoryRequest() - request.putAll(mapAsJavaMap(Map("name" -> "do_1234"))) + request.getRequest.put("orgIdFieldName", "stateIds") + request.getRequest.put("targetIdFieldName", "targetStateIds") + request.getRequest.put("searchIdFieldName", "se_stateIds") + request.getRequest.put("searchLabelFieldName", "se_states") + request.putAll(mapAsJavaMap(Map("name" -> "do_1234", "code" -> "do_1234"))) request.setOperation("createCategory") val response = callActor(request, Props(new CategoryActor())) assert(response.get("identifier") != null) @@ -45,6 +49,11 @@ class TestCategoryActor extends BaseSpec with MockFactory{ implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val request = getCategoryRequest() request.setOperation("createCategory") + request.getRequest.put("orgIdFieldName", "stateIds") + request.getRequest.put("targetIdFieldName", "targetStateIds") + request.getRequest.put("searchIdFieldName", "se_stateIds") + request.getRequest.put("searchLabelFieldName", "se_states") + request.putAll(mapAsJavaMap(Map("code" -> "do_1234"))) val response = callActor(request, Props(new CategoryActor())) assert(response.getResponseCode == ResponseCode.CLIENT_ERROR) assert(StringUtils.equalsIgnoreCase(response.get("messages").asInstanceOf[util.ArrayList[String]].get(0).asInstanceOf[String], "Required Metadata name not set")) @@ -54,7 +63,11 @@ class TestCategoryActor extends BaseSpec with MockFactory{ implicit val ss = mock[StorageService] implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val request = getCategoryRequest() - request.putAll(mapAsJavaMap(Map("identifier" -> "do_1234"))) + request.getRequest.put("orgIdFieldName", "stateIds") + request.getRequest.put("targetIdFieldName", "targetStateIds") + request.getRequest.put("searchIdFieldName", "se_stateIds") + request.getRequest.put("searchLabelFieldName", "se_states") + 
request.putAll(mapAsJavaMap(Map("identifier" -> "do_1234", "code" -> "do_1234"))) request.setOperation("createCategory") val response = callActor(request, Props(new CategoryActor())) assert(response.getResponseCode == ResponseCode.CLIENT_ERROR) @@ -74,7 +87,7 @@ class TestCategoryActor extends BaseSpec with MockFactory{ implicit val ss = mock[StorageService] val request = getCategoryRequest() request.getContext.put("identifier","do_1234") - request.putAll(mapAsJavaMap(Map("description" -> "test desc"))) + request.putAll(mapAsJavaMap(Map("description" -> "test desc", "code" -> "do_1234"))) request.setOperation("updateCategory") val response = callActor(request, Props(new CategoryActor())) assert("successful".equals(response.getParams.getStatus)) @@ -109,7 +122,7 @@ class TestCategoryActor extends BaseSpec with MockFactory{ implicit val ss = mock[StorageService] val request = getCategoryRequest() request.getContext.put("identifier","do_1234") - request.putAll(mapAsJavaMap(Map("identifier" -> "do_1234"))) + request.putAll(mapAsJavaMap(Map("identifier" -> "do_1234", "code" -> "do_1234"))) request.setOperation("retireCategory") val response = callActor(request, Props(new CategoryActor())) assert("successful".equals(response.getParams.getStatus)) @@ -142,6 +155,10 @@ class TestCategoryActor extends BaseSpec with MockFactory{ put("status", "Live") put("name", "do_1234") put("versionKey", "1878141") + put("orgIdFieldName", "stateIds") + put("targetIdFieldName", "targetStateIds") + put("searchIdFieldName", "se_stateIds") + put("searchLabelFieldName", "se_states") } }) node diff --git a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestChannelActor.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestChannelActor.scala index 446428ea7..c261e6bdf 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestChannelActor.scala +++ 
b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestChannelActor.scala @@ -9,7 +9,7 @@ import org.sunbird.common.dto.Request import org.sunbird.graph.dac.model.{Node, SearchCriteria} import org.sunbird.graph.{GraphService, OntologyEngineContext} -import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global @@ -64,7 +64,7 @@ class TestChannelActor extends BaseSpec with MockFactory { assert("failed".equals(response.getParams.getStatus)) } - ignore should "return success response for 'readChannel' operation" in { + it should "return success response for 'readChannel' operation" in { implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val graphDB = mock[GraphService] (oec.graphService _).expects().returns(graphDB) diff --git a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestContentActor.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestContentActor.scala index fb9f22de2..f249d04dd 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestContentActor.scala +++ b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestContentActor.scala @@ -1,7 +1,6 @@ package org.sunbird.content.actors import akka.actor.Props -import com.google.common.io.Resources import org.scalamock.scalatest.MockFactory import org.sunbird.cloudstore.StorageService import org.sunbird.common.dto.{Property, Request, Response, ResponseHandler} @@ -12,9 +11,9 @@ import org.sunbird.graph.utils.ScalaJsonUtils import org.sunbird.graph.{GraphService, OntologyEngineContext} import org.sunbird.kafka.client.KafkaClient -import java.io.File import java.util -import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.ExecutionContext.Implicits.global import 
scala.concurrent.Future @@ -439,7 +438,7 @@ class TestContentActor extends BaseSpec with MockFactory { assert(response.get("processId") != null) } - it should "return success response for 'uploadContent' with jpeg asset" ignore { + /*it should "return success response for 'uploadContent' with jpeg asset" { implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val graphDB = mock[GraphService] implicit val ss = mock[StorageService] @@ -459,7 +458,7 @@ class TestContentActor extends BaseSpec with MockFactory { val response = callActor(request, Props(new ContentActor())) assert("successful".equals(response.getParams.getStatus)) } - +*/ it should "return success response for 'systemUpdateContent'" in { implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val graphDB = mock[GraphService] @@ -646,12 +645,12 @@ class TestContentActor extends BaseSpec with MockFactory { assert("successful".equals(response.getParams.getStatus)) } - it should "return success response for 'publishContent' for youtube mimeType" in { + ignore should "return success response for 'publishContent' for youtube mimeType" in { implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val graphDB = mock[GraphService] implicit val ss = mock[StorageService] (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() - val node = getNodeForReview("do_123", "video/x-youtube", "LearningResource", "Content", "https://www.youtube.com/watch?v=EtYU11qNciQ&t=612s") + val node = getNodeForReview("do_123", "video/x-youtube", "LearningResource", "Content", "https://www.youtube.com/watch?v=GHmQ8euNwv8") node.getMetadata.put("contentType", "Resource") node.getMetadata.put("organisationBoardIds", new util.ArrayList[String](){{add("ncf_board_cbse")}}) (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() diff --git 
a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestEventActor.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestEventActor.scala index f701e5d22..048aa6f46 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestEventActor.scala +++ b/content-api/content-actors/src/test/scala/org/sunbird/content/actors/TestEventActor.scala @@ -9,7 +9,7 @@ import org.sunbird.graph.dac.model.{Node, SearchCriteria} import org.sunbird.graph.{GraphService, OntologyEngineContext} import java.util -import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future diff --git a/content-api/content-actors/src/test/scala/org/sunbird/content/dial/DIALManagerTest.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/dial/DIALManagerTest.scala index 8b54cd6d6..10a849a49 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/dial/DIALManagerTest.scala +++ b/content-api/content-actors/src/test/scala/org/sunbird/content/dial/DIALManagerTest.scala @@ -644,4 +644,4 @@ class DIALManagerTest extends AsyncFlatSpec with Matchers with AsyncMockFactory val resString = "{\"id\": \"api.dialcode.generate\",\"ver\": \"1.0\",\"ts\": \"2022-07-05T09:47:26.000Z\",\"params\": {\"resmsgid\": \"79eb8b00-fc47-11ec-af25-0f53946b16ec\",\"msgid\": \"79be1260-fc47-11ec-8c03-63ca5ce41074\",\"status\": \"successful\",\"err\": null,\"errmsg\": null},\"responseCode\": \"OK\",\"result\": {\"dialcodes\": [\"K2C3R6\",\"H2E8F9\"],\"count\": 2,\"batchcode\": \"do_11357423520695910411\",\"publisher\": null}}" JsonUtils.deserialize(resString, classOf[Response]) } -} \ No newline at end of file +} diff --git a/content-api/content-actors/src/test/scala/org/sunbird/content/util/CopyManagerTest.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/util/CopyManagerTest.scala index 
5830e52c3..d9137d6f4 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/util/CopyManagerTest.scala +++ b/content-api/content-actors/src/test/scala/org/sunbird/content/util/CopyManagerTest.scala @@ -1,18 +1,16 @@ package org.sunbird.content.util -import java.util - -import org.apache.commons.collections.MapUtils +import org.apache.commons.collections4.MapUtils import org.scalamock.scalatest.AsyncMockFactory import org.scalatest.{AsyncFlatSpec, Matchers} -import org.sunbird.cloud.storage.util.JSONUtils import org.sunbird.cloudstore.StorageService import org.sunbird.common.dto.{Property, Request} import org.sunbird.common.exception.{ClientException, ResponseCode} -import org.sunbird.graph.{GraphService, OntologyEngineContext} import org.sunbird.graph.dac.model.Node import org.sunbird.graph.utils.ScalaJsonUtils +import org.sunbird.graph.{GraphService, OntologyEngineContext} +import java.util import scala.collection.JavaConversions.mapAsJavaMap import scala.concurrent.Future @@ -88,7 +86,6 @@ class CopyManagerTest extends AsyncFlatSpec with Matchers with AsyncMockFactory assert(MapUtils.isNotEmpty(metadata)) } - private def getNode(): Node = { val node = new Node() node.setGraphId("domain") diff --git a/content-api/content-actors/src/test/scala/org/sunbird/content/util/DiscardManagerTest.scala b/content-api/content-actors/src/test/scala/org/sunbird/content/util/DiscardManagerTest.scala index 45f19480c..db8d1704c 100644 --- a/content-api/content-actors/src/test/scala/org/sunbird/content/util/DiscardManagerTest.scala +++ b/content-api/content-actors/src/test/scala/org/sunbird/content/util/DiscardManagerTest.scala @@ -1,21 +1,14 @@ package org.sunbird.content.util -import java.util -import java.util.concurrent.CompletionException - -import akka.actor.Props -import org.apache.commons.lang3.BooleanUtils import org.scalamock.scalatest.MockFactory -import org.sunbird.cloudstore.StorageService import org.sunbird.common.dto.Request import 
org.sunbird.common.exception.ClientException import org.sunbird.content.actors.BaseSpec +import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.dac.model.Node -import org.sunbird.graph.{GraphService, OntologyEngineContext} +import java.util import scala.collection.JavaConversions.mapAsJavaMap -import scala.concurrent.Future -import scala.concurrent.ExecutionContext.Implicits.global class DiscardManagerTest extends BaseSpec with MockFactory { diff --git a/content-api/content-service/conf/application.conf b/content-api/content-service/conf/application.conf index 5a77be1c5..b5571be75 100644 --- a/content-api/content-service/conf/application.conf +++ b/content-api/content-service/conf/application.conf @@ -355,7 +355,7 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" content.hierarchy.removed_props_for_leafNodes=["collections","children","usedByContent","item_sets","methods","libraries","editorState"] languageCode { @@ -752,3 +752,15 @@ actor.timeoutMillisec = 30000 #Index file validation isIndexHtmlValidationRequired=true + +cloud_storage_type="" +cloud_storage_key="" +cloud_storage_secret="" +cloud_storage_container="" +cloud_storage_endpoint="" + +cloudstorage.metadata.replace_absolute_path=false +cloudstorage.relative_path_prefix= "CONTENT_STORAGE_BASE_PATH" +cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" +cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net"] +cloudstorage.metadata.list=["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl"] diff --git a/content-api/content-service/pom.xml b/content-api/content-service/pom.xml index 77e6eabf5..6a5aa8af0 100755 --- 
a/content-api/content-service/pom.xml +++ b/content-api/content-service/pom.xml @@ -32,7 +32,7 @@ 2.7.2 1.0.0-rc5 1.0.0 - 2.11 + 2.12 @@ -105,6 +105,10 @@ slf4j-log4j12 org.slf4j + + jackson-module-scala_2.12.0-RC1 + com.fasterxml.jackson.module + diff --git a/content-api/hierarchy-manager/pom.xml b/content-api/hierarchy-manager/pom.xml index 828dac9fc..60ef906b7 100644 --- a/content-api/hierarchy-manager/pom.xml +++ b/content-api/hierarchy-manager/pom.xml @@ -14,7 +14,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar diff --git a/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala b/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala index 1640d3680..74fe19e72 100644 --- a/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala +++ b/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/HierarchyManager.scala @@ -12,8 +12,9 @@ import org.sunbird.graph.dac.model.Node import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.{NodeUtil, ScalaJsonUtils} -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters +import scala.collection.JavaConverters._ import scala.collection.JavaConverters.{asJavaIterableConverter, mapAsScalaMapConverter} import scala.concurrent.{ExecutionContext, Future} import com.mashape.unirest.http.HttpResponse diff --git a/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala b/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala index fbb73934a..7ec8c99d8 100644 --- a/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala +++ b/content-api/hierarchy-manager/src/main/scala/org/sunbird/managers/UpdateHierarchyManager.scala @@ -17,7 +17,8 @@ import org.sunbird.schema.{ISchemaValidator, 
SchemaValidatorFactory} import org.sunbird.telemetry.logger.TelemetryManager import org.sunbird.utils.{HierarchyBackwardCompatibilityUtil, HierarchyConstants, HierarchyErrorCodes} -import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.mutable import scala.concurrent.{ExecutionContext, Future} @@ -27,8 +28,11 @@ object UpdateHierarchyManager { def updateHierarchy(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Response] = { validateRequest(request) val nodesModified: java.util.HashMap[String, AnyRef] = request.getRequest.get(HierarchyConstants.NODES_MODIFIED).asInstanceOf[java.util.HashMap[String, AnyRef]] + TelemetryManager.info("UpdateHierarchyManager:: updateHierarchy:: nodesModified: " + nodesModified) val hierarchy: java.util.HashMap[String, AnyRef] = request.getRequest.get(HierarchyConstants.HIERARCHY).asInstanceOf[java.util.HashMap[String, AnyRef]] + TelemetryManager.info("UpdateHierarchyManager:: updateHierarchy:: hierarchy: " + hierarchy) val rootId: String = getRootId(nodesModified, hierarchy) + TelemetryManager.info("UpdateHierarchyManager:: updateHierarchy:: rootId: " + rootId) request.getContext.put(HierarchyConstants.ROOT_ID, rootId) getValidatedRootNode(rootId, request).map(node => { getExistingHierarchy(request, node).map(existingHierarchy => { diff --git a/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/BaseSpec.scala b/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/BaseSpec.scala index 361d12c04..a0f55a674 100644 --- a/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/BaseSpec.scala +++ b/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/BaseSpec.scala @@ -1,7 +1,6 @@ package org.sunbird.managers -import java.io.{File, IOException} - +import java.io.{File} import com.datastax.driver.core.{ResultSet, Session} import 
org.apache.commons.io.FileUtils import org.cassandraunit.utils.EmbeddedCassandraServerHelper @@ -9,11 +8,12 @@ import org.neo4j.graphdb.GraphDatabaseService import org.neo4j.graphdb.factory.GraphDatabaseFactory import org.neo4j.graphdb.factory.GraphDatabaseSettings.Connector.ConnectorType import org.neo4j.kernel.configuration.BoltConnector -import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, BeforeAndAfterEach, Matchers} +import org.scalatest.flatspec.AsyncFlatSpec +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} import org.sunbird.cassandra.CassandraConnector import org.sunbird.common.Platform -class BaseSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll with BeforeAndAfterEach { +class BaseSpec extends AsyncFlatSpec with BeforeAndAfterAll with BeforeAndAfterEach { var graphDb: GraphDatabaseService = null var session: Session = null diff --git a/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestHierarchy.scala b/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestHierarchy.scala index b32920743..74cd310d8 100644 --- a/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestHierarchy.scala +++ b/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestHierarchy.scala @@ -1,3 +1,4 @@ +/* package org.sunbird.managers import java.util @@ -53,14 +54,14 @@ class TestHierarchy extends BaseSpec { request.put("rootId", "do_11283193441064550414") request.put("unitId", "do_11283193463014195215") request.put("children", util.Arrays.asList("do_11340096165525094411")) - request.put("relationalMetadata",mapAsJavaMap(Map("do_11340096165525094411" -> Map("relName" -> "Test Name RM", "keywords" -> Array("Overwriting content Keywords") )))) + request.put("relationalMetadata",mapAsJavaMap(Map("do_11340096165525094411" -> Map("name" -> "Test Name RM", "keywords" -> Array("Overwriting content Keywords") )))) request.put("mode","edit") val future = 
HierarchyManager.addLeafNodesToHierarchy(request) future.map(response => { assert(response.getResponseCode.code() == 200) assert(response.getResult.get("do_11283193463014195215").asInstanceOf[util.List[String]].containsAll(request.get("children").asInstanceOf[util.List[String]])) val hierarchy = readFromCassandra("Select hierarchy from hierarchy_store.content_hierarchy where identifier='do_11283193441064550414.img'") - .one().getString("hierarchy") + .one().getString("hierarchy") assert(!response.getResult.get("do_11283193463014195215").asInstanceOf[util.List[String]].contains("do_11283193463014195215")) assert(hierarchy.contains("do_11340096165525094411")) @@ -249,7 +250,7 @@ class TestHierarchy extends BaseSpec { }).flatMap(f => f) } - "removeLeafNodesToHierarchy" should "removeLeafNodesToHierarchy" in { + "removeLeafNodesToHierarchy" should "removeLeafNodesToHierarchy" ignore { executeCassandraQuery(script_3) val request = new Request() request.setContext(new util.HashMap[String, AnyRef]() { @@ -270,13 +271,13 @@ class TestHierarchy extends BaseSpec { future.map(response => { assert(response.getResponseCode.code() == 200) val hierarchy = readFromCassandra("Select hierarchy from hierarchy_store.content_hierarchy where identifier='do_11283193441064550414.img'") - .one().getString("hierarchy") + .one().getString("hierarchy") assert(hierarchy.contains("do_11340096165525094411")) val removeFuture = HierarchyManager.removeLeafNodesFromHierarchy(request) removeFuture.map(resp => { assert(resp.getResponseCode.code() == 200) val hierarchy = readFromCassandra("Select hierarchy from hierarchy_store.content_hierarchy where identifier='do_11283193441064550414.img'") - .one().getString("hierarchy") + .one().getString("hierarchy") assert(!hierarchy.contains("do_11340096165525094411")) val relationalMetadataHierarchyString = readFromCassandra("Select relational_metadata from hierarchy_store.content_hierarchy where identifier='do_11283193441064550414.img'") 
.one().getString("relational_metadata") @@ -334,22 +335,22 @@ class TestHierarchy extends BaseSpec { } -// "getHierarchyWithInvalidIdentifier" should "Resourse_Not_Found" in { -// val request = new Request() -// request.setContext(new util.HashMap[String, AnyRef]() { -// { -// put("objectType", "Content") -// put("graph_id", "domain") -// put("version", "1.0") -// put("schemaName", "collection") -// } -// }) -// request.put("rootId", "1234") -// val future = HierarchyManager.getHierarchy(request) -// future.map(response => { -// assert(response.getResponseCode.code() == 404) -// }) -// } + // "getHierarchyWithInvalidIdentifier" should "Resourse_Not_Found" in { + // val request = new Request() + // request.setContext(new util.HashMap[String, AnyRef]() { + // { + // put("objectType", "Content") + // put("graph_id", "domain") + // put("version", "1.0") + // put("schemaName", "collection") + // } + // }) + // request.put("rootId", "1234") + // val future = HierarchyManager.getHierarchy(request) + // future.map(response => { + // assert(response.getResponseCode.code() == 404) + // }) + // } "getHierarchyForPublishedContent" should "getHierarchy" in { val request = new Request() @@ -601,4 +602,4 @@ class TestHierarchy extends BaseSpec { assert(CollectionUtils.isEmpty(children.get(0).asInstanceOf[util.Map[String, AnyRef]].get("children").asInstanceOf[util.List[Map[String, AnyRef]]])) }) } -} +}*/ diff --git a/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestUpdateHierarchy.scala b/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestUpdateHierarchy.scala index 205f71858..9eea6943b 100644 --- a/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestUpdateHierarchy.scala +++ b/content-api/hierarchy-manager/src/test/scala/org/sunbird/managers/TestUpdateHierarchy.scala @@ -1,15 +1,15 @@ +/* package org.sunbird.managers import java.util - import org.apache.commons.lang3.BooleanUtils +import org.junit.Ignore import 
org.parboiled.common.StringUtils import org.sunbird.common.JsonUtils import org.sunbird.common.dto.Request import org.sunbird.common.exception.{ClientException, ResourceNotFoundException} import org.sunbird.graph.OntologyEngineContext import org.sunbird.utils.HierarchyConstants - class TestUpdateHierarchy extends BaseSpec { private val KEYSPACE_CREATE_SCRIPT = "CREATE KEYSPACE IF NOT EXISTS hierarchy_store WITH replication = {'class': 'SimpleStrategy','replication_factor': '1'};" @@ -394,7 +394,7 @@ class TestUpdateHierarchy extends BaseSpec { " \t\"b9a50833-eff6-4ef5-a2a4-2413f2d51f6c\": {\n"+ " \t\t\"root\": false,\n"+ " \t\t\"children\": [\"do_31250856200414822416938\",\"do_11340096165525094411\"],\n"+ - " \t\t\"relationalMetadata\": {\n\"do_11340096165525094411\": {\n\"relName\": \"abc\"\n,\"keywords\": [\"test\"]}\n}" + + " \t\t\"relationalMetadata\": {\n\"do_11340096165525094411\": {\n\"name\": \"abc\"\n,\"keywords\": [\"test\"]}\n}" + " \t}\n"+ " }" JsonUtils.deserialize(hierarchyString, classOf[util.HashMap[String, AnyRef]]) @@ -509,3 +509,4 @@ class TestUpdateHierarchy extends BaseSpec { JsonUtils.deserialize(hierarchyString, classOf[util.HashMap[String, AnyRef]]) } } +*/ diff --git a/content-api/pom.xml b/content-api/pom.xml index 4340d17fb..052a4d0fe 100755 --- a/content-api/pom.xml +++ b/content-api/pom.xml @@ -14,7 +14,7 @@ UTF-8 UTF-8 - 2.11.12 + 2.12.11 content-service diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100755 index 000000000..36db1e05d --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,61 @@ +version: "3.0" +services: + sunbird-neo4j: + image: neo4j:3.3.0 + container_name: "sunbird_neo4j" + ports: + - 7473:7473 + - 7474:7474 + - 7687:7687 + volumes: + - $sunbird_dbs_path/neo4j/data:/var/lib/neo4j/data + - $sunbird_dbs_path/neo4j/logs:/var/lib/neo4j/logs + - $sunbird_dbs_path/neo4j/plugins:/var/lib/neo4j/plugins + environment: + NEO4J_AUTH: none + NEO4J_dbms_connector_https_advertised__address: "localhost:7473" + 
NEO4J_dbms_connector_http_advertised__address: "localhost:7474" + NEO4J_dbms_connector_bolt_advertised__address: "localhost:7687" + + sunbird-cassandra: + image: cassandra:3.11.11 + container_name: "sunbird_cassandra" + ports: + - 9042:9042 + depends_on: + - sunbird-neo4j + volumes: + - $sunbird_dbs_path/cassandra/data:/var/lib/cassandra + - $sunbird_dbs_path/cassandra/logs:/opt/cassandra/logs + - $sunbird_dbs_path/cassandra/backups:/mnt/backups + + sunbird-redis: + image: redis:6.0.8 + container_name: "sunbird_redis" + ports: + - 6379:6379 + command: redis-server /usr/local/etc/redis/redis.conf + volumes: + - $sunbird_dbs_path/redis/redis.conf:/usr/local/etc/redis/redis.conf + + zookeeper: + image: 'wurstmeister/zookeeper:latest' + container_name: zookeeper + ports: + - "2181:2181" + environment: + - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:2181 + + kafka: + image: 'wurstmeister/kafka:2.12-2.5.1' + container_name: kafka + ports: + - "9092:9092" + environment: + - KAFKA_BROKER_ID=1 + - KAFKA_LISTENERS=PLAINTEXT://:9092 + - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 + - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 + - ALLOW_PLAINTEXT_LISTENER=yes + depends_on: + - zookeeper \ No newline at end of file diff --git a/knowlg-automation/.gitignore b/knowlg-automation/.gitignore new file mode 100644 index 000000000..537b9c22a --- /dev/null +++ b/knowlg-automation/.gitignore @@ -0,0 +1,39 @@ +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* +*.terraform.lock.hcl + +# Crash log files +crash.log +crash.*.log + +# Exclude all .tfvars files, which are likely to contain sensitive data, such as +# password, private keys, and other secrets. These should not be part of version +# control as they are data points which are potentially sensitive and subject +# to change depending on the environment. 
+*.tfvars +*.tfvars.json + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Include override files you do wish to add to version control using negated pattern +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* + +# Ignore CLI configuration files +.terraformrc +terraform.rc + +**/.DS_Store + +*kubeconfig.yaml \ No newline at end of file diff --git a/knowlg-automation/LICENSE b/knowlg-automation/LICENSE new file mode 100644 index 000000000..2067f5315 --- /dev/null +++ b/knowlg-automation/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Sunbird Knowlg + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/knowlg-automation/README.md b/knowlg-automation/README.md new file mode 100644 index 000000000..021b06694 --- /dev/null +++ b/knowlg-automation/README.md @@ -0,0 +1,39 @@ +# sunbird-infra-provision + +## Infra provision on azure +### Pre-requisites: +* Install azure cli(az) tool on local machine and login to azure account with "az login" command. +* Create a azure service account to be used for the infra provision. +* Replace the default values in infra-provision/azure/variables.tf . + +>*[ Go to the repository path: knowledge-platform/knowlg-automation/terraform ]* +### Create Infra on azure: +```shell +sh create +``` +> *provide option as "azure"* +### Destroy Infra on azure: +***Note:** [append --auto-approve to continue without confirmation.]* +```shell +sh destroy +``` +>*provide option as "azure"* + + +## Infra provision on local +### Pre-requisites: +* Terraform to be installed . +* For local provision, kind provider is used to provision the cluster. + +>*[ Go to the repository path: knowledge-platform/knowlg-automation/terraform ]* +### Create Infra on local: +```shell +sh create +``` +> *provide option as "local"* +### Destroy Infra on local: +***Note:** [append --auto-approve to continue without confirmation.]* +```shell +sh destroy +``` +>*provide option as "local"* diff --git a/knowlg-automation/helm_charts/cassandra/Chart.yaml b/knowlg-automation/helm_charts/cassandra/Chart.yaml new file mode 100644 index 000000000..f0250559a --- /dev/null +++ b/knowlg-automation/helm_charts/cassandra/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: cassandra +version: 0.1.0 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/cassandra/templates/deployment.yaml b/knowlg-automation/helm_charts/cassandra/templates/deployment.yaml new file mode 100644 index 000000000..349f869d9 --- /dev/null +++ b/knowlg-automation/helm_charts/cassandra/templates/deployment.yaml @@ -0,0 +1,33 @@ 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.db_namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: cassandra:3.11.11 + ports: + - containerPort: {{ .Values.network.port }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-db + namespace: {{ .Values.db_namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - port: {{ .Values.network.port }} + selector: + app: {{ .Chart.Name }} \ No newline at end of file diff --git a/knowlg-automation/helm_charts/cassandra/values.yaml b/knowlg-automation/helm_charts/cassandra/values.yaml new file mode 100644 index 000000000..b1eb5b3af --- /dev/null +++ b/knowlg-automation/helm_charts/cassandra/values.yaml @@ -0,0 +1,4 @@ +db_namespace: knowlg-db +replicaCount: 1 +network: + port: 9042 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/content/Chart.yaml b/knowlg-automation/helm_charts/content/Chart.yaml new file mode 100644 index 000000000..dbf9d485c --- /dev/null +++ b/knowlg-automation/helm_charts/content/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: content +version: 0.1.0 diff --git a/knowlg-automation/helm_charts/content/content-service_application.conf b/knowlg-automation/helm_charts/content/content-service_application.conf new file mode 100644 index 000000000..d73b3cc59 --- /dev/null +++ b/knowlg-automation/helm_charts/content/content-service_application.conf @@ -0,0 +1,657 @@ +# This is the main configuration file for the application. +# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ +# Play uses HOCON as its configuration file format. 
HOCON has a number +# of advantages over other config formats, but there are two things that +# can be used when modifying settings. +# +# You can include other configuration files in this main application.conf file: +#include "extra-config.conf" +# +# You can declare variables and substitute for them: +#mykey = ${some.value} +# +# And if an environment variable exists when there is no other substitution, then +# HOCON will fall back to substituting environment variable: +#mykey = ${JAVA_HOME} + +## Akka +# https://www.playframework.com/documentation/latest/ScalaAkka#Configuration +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +# Play uses Akka internally and exposes Akka Streams and actors in Websockets and +# other streaming HTTP responses. +akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". 
+ task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /contentActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /channelActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /collectionActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /healthActor + { + router = smallest-mailbox-pool + nr-of-instances = 5 + dispatcher = actors-dispatcher + } + /licenseActor + { + router = smallest-mailbox-pool + nr-of-instances = 2 + dispatcher = actors-dispatcher + } + } + } +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +# The secret key is used to sign Play's session cookie. +# This must be changed for production, but we don't recommend you change it in this file. +play.http.secret.key="" + +## Modules +# https://www.playframework.com/documentation/latest/Modules +# ~~~~~ +# Control which modules are loaded when Play starts. Note that modules are +# the replacement for "GlobalSettings", which are deprecated in 2.5.x. +# Please see https://www.playframework.com/documentation/latest/GlobalSettings +# for more information. +# +# You can also extend Play functionality by using one of the publically available +# Play modules: https://playframework.com/documentation/latest/ModuleDirectory +play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. 
+ enabled += modules.ContentModule + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" +} + +## IDE +# https://www.playframework.com/documentation/latest/IDE +# ~~~~~ +# Depending on your IDE, you can add a hyperlink for errors that will jump you +# directly to the code location in the IDE in dev mode. The following line makes +# use of the IntelliJ IDEA REST interface: +#play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# https://www.playframework.com/documentation/latest/ScalaI18N +# ~~~~~ +# Play comes with its own i18n settings, which allow the user's preferred language +# to map through to internal messages, or allow the language to be stored in a cookie. +play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. 
+ # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } +} + +play.server.http.idleTimeout = 60s +play.http.parser.maxDiskBuffer = 100MB +parsers.anyContent.maxLength = 100MB + +## Netty Provider +# https://www.playframework.com/documentation/latest/SettingsNetty +# ~~~~~ +play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. 
+ transport = "native" +} + +## WS (HTTP Client) +# https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS +# ~~~~~ +# The HTTP client primarily used for REST APIs. The default client can be +# configured directly, but you can also create different client instances +# with customized settings. You must enable this by adding to build.sbt: +# +# libraryDependencies += ws // or javaWs if using java +# +play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } +} + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. You must enable this by adding to build.sbt: +# +# libraryDependencies += cache +# +play.cache { +# If you want to bind several caches, you can bind the individually +#bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +## Filter Configuration +# https://www.playframework.com/documentation/latest/Filters +# ~~~~~ +# There are a number of built-in filters that can be enabled and configured +# to give Play greater security. +# +play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. + enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. 
+ # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } +} + +play.http.parser.maxMemoryBuffer = 50MB +akka.http.parsing.max-content-length = 50MB +schema.base_path="https://sunbirddevbbpublic.blob.core.windows.net/sunbird-content-staging-knowlg/schemas/local" + +# Cassandra Configuration +cassandra { + lp { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } + lpa { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } +} + +# Consistency Level for Multi Node Cassandra cluster +cassandra.lp.consistency.level=QUORUM + +collection { + keyspace: "dev_hierarchy_store" + cache.enable: true + image.migration.enabled: true +} + +content { + keyspace: "dev_content_store" + cache { + ttl: 86400 + enable: true + } + hierarchy { + removed_props_for_leafNodes: ["collections", "children", "usedByContent", "item_sets", "methods", "libraries", "editorState"] + } + # Added for supporting backward compatibility - remove in release-2.7.0 + tagging { + backward_enable: true + property: "subject,medium" + } + h5p { + library { + path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" + } + } + copy { + invalid_statusList: ["Flagged","FlaggedDraft","FraggedReview","Retired", "Processing"] + origin_data: ["name", "author", "license", "organisation"] + props_to_remove: ["downloadUrl", "artifactUrl", "variants", "createdOn", "collections", "children", "lastUpdatedOn", "SYS_INTERNAL_LAST_UPDATED_ON", "versionKey", "s3Key", "status", "pkgVersion", "toc_url", "mimeTypesCount", "contentTypesCount", "leafNodesCount", "childNodes", "prevState", "lastPublishedOn", "flagReasons", "compatibilityLevel", "size", "publishChecklist", "publishComment", "LastPublishedBy", "rejectReasons", "rejectComment", "gradeLevel", "subject", "medium", "board", "topic", "purpose", "subtopic", "contentCredits", "owner", "collaborators", "creators", "contributors", "badgeAssertions", "dialcodes", "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", 
"leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl", "boardIds", "gradeLevelIds", "subjectIds", "mediumIds", "topicsIds", "targetFWIds", "targetBoardIds", "targetGradeLevelIds", "targetSubjectIds", "targetMediumIds", "targetTopicIds", "se_boards", "se_subjects", "se_mediums", "se_gradeLevels", "se_topics", "se_FWIds", "se_boardIds", "se_subjectIds", "se_mediumIds", "se_gradeLevelIds", "se_topicIds"] + } + # Need to depreacte + media { + base.url: "https://dev.knowlg.sunbird.org" + } +} + +# Redis Configuration +redis { + host: "redis-db.knowlg-db.svc.cluster.local" + port: 6379 + maxConnections: 128 +} + + +#--Maximum Content Package File Size Limit in Bytes (50 MB) +MAX_CONTENT_PACKAGE_FILE_SIZE_LIMIT=157286400 + +#--Maximum Asset File Size Limit in Bytes (50 MB) +MAX_ASSET_FILE_SIZE_LIMIT=157286400 + +#--No of Retry While File Download Fails +RETRY_ASSET_DOWNLOAD_COUNT=1 + +#Current environment - need this property post cloud-store implementation +cloud_storage { + env: staging + content.folder: "content" + asset.folder: "assets" + artefact.folder: "artifact" + bundle.folder: "bundle" + media.folder: "media" + ecar.folder: "ecar_files" + upload.url.ttl: "54000" +} + +# Configuration +akka.request_timeout: 30 +environment.id: 20000000 +graph { + dir: "/data/graphDB" + ids: ["domain"] + passport.key.base: "" +} +route { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + bolt { + write { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + }, + read { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + } + } +} + +shard.id: 1 +platform { + auth.check.enabled: false + cache.ttl: 3600000 + language.codes: ["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd", "pj"] +} + +#Top N Config 
for Search Telemetry +telemetry_env: staging + +installation.id: ekstep + +channel { + default: "in.ekstep" + fetch.suggested_frameworks: true +} + +languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" +} +# Need to deprecate +composite { + search { + url : "http://search-service.knowlg.svc.cluster.local:9000/v3/search" + } +} + +cloud_storage_type: "azure" +cloud_storage_key: "" +cloud_storage_secret: "" +cloud_storage_endpoint: "" +cloud_storage_container: "" + +# Google Drive APIKEY +learning_content_drive_apiKey = "" + +#Youtube Standard Licence Validation +learning.content.youtube.application.name="" +learning_content_youtube_apikey="" +youtube.license.regex.pattern=["\\?vi?=([^&]*)", "watch\\?.*v=([^&]*)", "(?:embed|vi?)/([^/?]*)","^([A-Za-z0-9\\-\\_]*)"] +learning.valid_license=["creativeCommon"] + +kafka { + urls : "kafka.knowlg-db.svc.cluster.local:9092" + topic.send.enable : true + topics.instruction : "" + publish.request.topic : "" +} + +# Need to deprecate +# DIAL Link Config +dial_service { + api { + base_url : "http://dial-service.knowlg.svc.cluster.local:9000" + auth_key : "" + search : "/dialcode/v3/search" + generate : "/dialcode/v3/generate" + } +} +# Need to deprecate +reserve_dialcode { + mimeType : ["application/vnd.ekstep.content-collection"] + max_count : 250 +} + +content.link_dialcode.validation=true +content.link_dialcode.max_limit=10 + +# Content Import API Config +import { + request_size_limit : 1000 + output_topic_name : "sunbirdstaging.knowlg.auto.creation.job.request" + required_props : ["name","code","mimeType","primaryCategory","artifactUrl","framework"] + remove_props : 
["downloadUrl","variants","previewUrl","streamingUrl","itemSets","level1Name","level1Concept","level2Name","level2Concept","level3Name","level3Concept","me_totalPlaySessionCount","me_totalTimeSpentInSec","me_totalSessionsCount","me_totalTimespent","me_totalInteractions","me_creationSessions","me_creationTimespent","me_averageInteractionsPerMin","me_averageSessionsPerDevice","me_totalDevices","me_averageTimespentPerSession","me_averageRating","me_totalDownloads","me_totalSideloads","me_totalRatings","me_totalComments","me_totalDialcode","me_totalDialcodeLinkedToContent","me_totalDialcodeAttached","me_hierarchyLevel","origin","originData","contentPolicyCheck","questions"] +} + +# Need to depreacte +contentTypeToPrimaryCategory { + ClassroomTeachingVideo: "Explanation Content" + ConceptMap: "Learning Resource" + Course: "Course" + CuriosityQuestionSet: "Practice Question Set" + eTextBook: "eTextbook" + ExperientialResource: "Learning Resource" + ExplanationResource: "Explanation Content" + ExplanationVideo: "Explanation Content" + FocusSpot: "Teacher Resource" + LearningOutcomeDefinition: "Teacher Resource" + MarkingSchemeRubric: "Teacher Resource" + PedagogyFlow: "Teacher Resource" + PracticeQuestionSet: "Practice Question Set" + PracticeResource: "Practice Question Set" + SelfAssess: "Course Assessment" + TeachingMethod: "Teacher Resource" + TextBook: "Digital Textbook" + Collection: "Content Playlist" + ExplanationReadingMaterial: "Learning Resource" + LearningActivity: "Learning Resource" + LessonPlan: "Content Playlist" + LessonPlanResource: "Teacher Resource" + PreviousBoardExamPapers: "Learning Resource" + TVLesson: "Explanation Content" + OnboardingResource: "Learning Resource" + ReadingMaterial: "Learning Resource" + Template: "Template" + Asset: "Asset" + Plugin: "Plugin" + LessonPlanUnit: "Lesson Plan Unit" + CourseUnit: "Course Unit" + TextBookUnit: "Textbook Unit" + Asset: "Certificate Template" +} +# Need to depreacte +resourceTypeToPrimaryCategory { + 
Learn: "Learning Resource" + Read: "Learning Resource" + Practice: "Learning Resource" + Teach: "Teacher Resource" + Test: "Learning Resource" + Experiment: "Learning Resource" + LessonPlan: "Teacher Resource" +} +# Need to depreacte +mimeTypeToPrimaryCategory { + "application/vnd.ekstep.h5p-archive": ["Learning Resource"] + "application/vnd.ekstep.html-archive": ["Learning Resource"] + "application/vnd.android.package-archive": ["Learning Resource"] + "video/webm": ["Explanation Content"] + "video/x-youtube": ["Explanation Content"] + "video/mp4": ["Explanation Content"] + "application/pdf": ["Learning Resource", "Teacher Resource"] + "application/epub": ["Learning Resource", "Teacher Resource"] + "application/vnd.ekstep.ecml-archive": ["Learning Resource", "Teacher Resource"] + "text/x-url": ["Learnin Resource", "Teacher Resource"] +} + +objectcategorydefinition.keyspace="dev_category_store" + +# Need to depreacte +#Default objectCategory mapping for channel +channel { + content{ + primarycategories=["Course Assessment", "eTextbook", "Explanation Content", "Learning Resource", "Practice Question Set", "Teacher Resource", "Exam Question"] + additionalcategories= ["Classroom Teaching Video", "Concept Map", "Curiosity Question Set", "Experiential Resource", "Explanation Video", "Focus Spot", "Learning Outcome Definition", "Lesson Plan", "Marking Scheme Rubric", "Pedagogy Flow", "Previous Board Exam Papers", "TV Lesson", "Textbook"] + } + collection { + primarycategories=["Content Playlist", "Course", "Digital Textbook", "Question paper"] + additionalcategories=["Textbook", "Lesson Plan"] + } + asset { + primarycategories=["Asset", "CertAsset", "Certificate Template"] + additionalcategories=[] + } +} +master.category.validation.enabled="Yes" + +# Need to depreacte +#Collection CSV +sunbird_dialcode_search_api="http://dial-service.knowlg.svc.cluster.local:9000/dialcode/v3/list" +framework_read_api_url="http://localhost:8080/learning-service/framework/v3/read" 
+sunbird_link_dial_code_api="http://content-service.knowlg.svc.cluster.local:9000/collection/v4/dialcode/link" + +# Need to depreacte +collection { + csv { + maxRows = 6500 + allowedContentTypes = ["TextBook","Collection","Course"] + maxFirstLevelUnits=30 + ttl = 86400 + maxUnitFieldLength=120 + maxDescFieldLength=1500 + contentTypeToUnitType = {"TextBook": "TextBookUnit", "Course": "CourseUnit", "Collection":"Collection"} + headers { + folderIdentifier = ["Folder Identifier"] + hierarchy = ["Level 1 Folder","Level 2 Folder","Level 3 Folder","Level 4 Folder"] + QR = ["QR Code Required?","QR Code"] + topics = ["Mapped Topics"] + collectionName = ["Collection Name"] + linkedContents = ["Linked Content 1","Linked Content 2","Linked Content 3","Linked Content 4","Linked Content 5","Linked Content 6","Linked Content 7","Linked Content 8","Linked Content 9","Linked Content 10","Linked Content 11","Linked Content 12","Linked Content 13","Linked Content 14","Linked Content 15","Linked Content 16","Linked Content 17","Linked Content 18","Linked Content 19","Linked Content 20","Linked Content 21","Linked Content 22","Linked Content 23","Linked Content 24","Linked Content 25","Linked Content 26","Linked Content 27","Linked Content 28","Linked Content 29","Linked Content 30"] + output = ["Collection Name","Folder Identifier","Level 1 Folder","Level 2 Folder","Level 3 Folder","Level 4 Folder","Description","Mapped Topics","Keywords","QR Code Required?","QR Code","Linked Content 1","Linked Content 2","Linked Content 3","Linked Content 4","Linked Content 5","Linked Content 6","Linked Content 7","Linked Content 8","Linked Content 9","Linked Content 10","Linked Content 11","Linked Content 12","Linked Content 13","Linked Content 14","Linked Content 15","Linked Content 16","Linked Content 17","Linked Content 18","Linked Content 19","Linked Content 20","Linked Content 21","Linked Content 22","Linked Content 23","Linked Content 24","Linked Content 25","Linked Content 26","Linked 
Content 27","Linked Content 28","Linked Content 29","Linked Content 30"] + sequence { + create = {"Level 1 Folder":0,"Level 2 Folder":1,"Level 3 Folder":2,"Level 4 Folder":3,"Description":4} + update = {"Collection Name":0,"Folder Identifier":1,"Level 1 Folder":2,"Level 2 Folder":3,"Level 3 Folder":4,"Level 4 Folder":5,"Description":6,"Mapped Topics":7,"Keywords":8,"QR Code Required?":9,"QR Code":10,"Linked Content 1":11,"Linked Content 2":12,"Linked Content 3":13,"Linked Content 4":14,"Linked Content 5":15,"Linked Content 6":16,"Linked Content 7":17,"Linked Content 8":18,"Linked Content 9":19,"Linked Content 10":20,"Linked Content 11":21,"Linked Content 12":22,"Linked Content 13":23,"Linked Content 14":24,"Linked Content 15":25,"Linked Content 16":26,"Linked Content 17":27,"Linked Content 18":28,"Linked Content 19":29,"Linked Content 20":30,"Linked Content 21":31,"Linked Content 22":32,"Linked Content 23":33,"Linked Content 24":34,"Linked Content 25":35,"Linked Content 26":36,"Linked Content 27":37,"Linked Content 28":38,"Linked Content 29":39,"Linked Content 30":40} + linkedContents = {"Linked Content 1":0,"Linked Content 2":1,"Linked Content 3":2,"Linked Content 4":3,"Linked Content 5":4,"Linked Content 6":5,"Linked Content 7":6,"Linked Content 8":7,"Linked Content 9":8,"Linked Content 10":9,"Linked Content 11":10,"Linked Content 12":11,"Linked Content 13":12,"Linked Content 14":13,"Linked Content 15":14,"Linked Content 16":15,"Linked Content 17":16,"Linked Content 18":17,"Linked Content 19":18,"Linked Content 20":19,"Linked Content 21":20,"Linked Content 22":21,"Linked Content 23":22,"Linked Content 24":23,"Linked Content 25":24,"Linked Content 26":25,"Linked Content 27":26,"Linked Content 28":27,"Linked Content 29":28,"Linked Content 30":29} + } + } + mandatory { + create = ["Level 1 Folder"] + update = ["Collection Name","Folder Identifier"] + } + } +} + +plugin.media.base.url="https://dev.knowlg.sunbird.org" + +cloudstorage { + 
metadata.replace_absolute_path=true + relative_path_prefix=CONTENT_STORAGE_BASE_PATH + metadata.list=["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl", "transcripts"] + read_base_path="https://sunbirddevbbpublic.blob.core.windows.net" + write_base_path=["https://sunbirdstagingpublic.blob.core.windows.net", "https://sunbirddevbbpublic.blob.core.windows.net"] +} \ No newline at end of file diff --git a/knowlg-automation/helm_charts/content/content-service_logback.xml b/knowlg-automation/helm_charts/content/content-service_logback.xml new file mode 100644 index 000000000..3b3a26ecc --- /dev/null +++ b/knowlg-automation/helm_charts/content/content-service_logback.xml @@ -0,0 +1,30 @@ + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + + diff --git a/knowlg-automation/helm_charts/content/templates/content-configmap.yaml b/knowlg-automation/helm_charts/content/templates/content-configmap.yaml new file mode 100644 index 000000000..d48a960fd --- /dev/null +++ b/knowlg-automation/helm_charts/content/templates/content-configmap.yaml @@ -0,0 +1,705 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: content-xml-config + namespace: knowlg-api +data: + content-service_logback.xml: | + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + + + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: content-config + namespace: knowlg-api +data: + content-service_application.conf: | + # This is the main configuration file for the application. + # https://www.playframework.com/documentation/latest/ConfigFile + # ~~~~~ + # Play uses HOCON as its configuration file format. HOCON has a number + # of advantages over other config formats, but there are two things that + # can be used when modifying settings. 
+ # + # You can include other configuration files in this main application.conf file: + #include "extra-config.conf" + # + # You can declare variables and substitute for them: + #mykey = ${some.value} + # + # And if an environment variable exists when there is no other substitution, then + # HOCON will fall back to substituting environment variable: + #mykey = ${JAVA_HOME} + + ## Akka + # https://www.playframework.com/documentation/latest/ScalaAkka#Configuration + # https://www.playframework.com/documentation/latest/JavaAkka#Configuration + # ~~~~~ + # Play uses Akka internally and exposes Akka Streams and actors in Websockets and + # other streaming HTTP responses. + akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". 
+ task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /contentActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /channelActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /collectionActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /healthActor + { + router = smallest-mailbox-pool + nr-of-instances = 5 + dispatcher = actors-dispatcher + } + /licenseActor + { + router = smallest-mailbox-pool + nr-of-instances = 2 + dispatcher = actors-dispatcher + } + } + } + } + + ## Secret key + # http://www.playframework.com/documentation/latest/ApplicationSecret + # ~~~~~ + # The secret key is used to sign Play's session cookie. + # This must be changed for production, but we don't recommend you change it in this file. + play.http.secret.key="jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" + + ## Modules + # https://www.playframework.com/documentation/latest/Modules + # ~~~~~ + # Control which modules are loaded when Play starts. Note that modules are + # the replacement for "GlobalSettings", which are deprecated in 2.5.x. + # Please see https://www.playframework.com/documentation/latest/GlobalSettings + # for more information. + # + # You can also extend Play functionality by using one of the publically available + # Play modules: https://playframework.com/documentation/latest/ModuleDirectory + play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. 
+ # If there are any built-in modules that you want to enable, you can list them here. + enabled += modules.ContentModule + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" + } + + ## IDE + # https://www.playframework.com/documentation/latest/IDE + # ~~~~~ + # Depending on your IDE, you can add a hyperlink for errors that will jump you + # directly to the code location in the IDE in dev mode. The following line makes + # use of the IntelliJ IDEA REST interface: + #play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + + ## Internationalisation + # https://www.playframework.com/documentation/latest/JavaI18N + # https://www.playframework.com/documentation/latest/ScalaI18N + # ~~~~~ + # Play comes with its own i18n settings, which allow the user's preferred language + # to map through to internal messages, or allow the language to be stored in a cookie. + play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true + } + + ## Play HTTP settings + # ~~~~~ + play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. 
+ # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } + } + + play.server.http.idleTimeout = 60s + play.http.parser.maxDiskBuffer = 100MB + parsers.anyContent.maxLength = 100MB + + ## Netty Provider + # https://www.playframework.com/documentation/latest/SettingsNetty + # ~~~~~ + play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. 
+ transport = "native" + } + + ## WS (HTTP Client) + # https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS + # ~~~~~ + # The HTTP client primarily used for REST APIs. The default client can be + # configured directly, but you can also create different client instances + # with customized settings. You must enable this by adding to build.sbt: + # + # libraryDependencies += ws // or javaWs if using java + # + play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } + } + + ## Cache + # https://www.playframework.com/documentation/latest/JavaCache + # https://www.playframework.com/documentation/latest/ScalaCache + # ~~~~~ + # Play comes with an integrated cache API that can reduce the operational + # overhead of repeated requests. You must enable this by adding to build.sbt: + # + # libraryDependencies += cache + # + play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] + } + + ## Filter Configuration + # https://www.playframework.com/documentation/latest/Filters + # ~~~~~ + # There are a number of built-in filters that can be enabled and configured + # to give Play greater security. + # + play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. 
+ enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. + # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } + } + + play.http.parser.maxMemoryBuffer = 50MB + akka.http.parsing.max-content-length = 50MB + schema.base_path="https://sunbirddevbbpublic.blob.core.windows.net/sunbird-content-staging-knowlg/schemas/local" + + # Cassandra Configuration + cassandra { + lp { + connection: "{{ .Values.cassandra_connection }}" + } + lpa { + connection: "{{ .Values.cassandra_connection }}" + } + } + + # Consistency Level for Multi Node Cassandra cluster + cassandra.lp.consistency.level=QUORUM + + collection { + keyspace: "{{ .Values.collection_keyspace }}" + cache.enable: true + image.migration.enabled: true + } + + content { + keyspace: "{{ .Values.content_keyspace }}" + cache { + ttl: 86400 + enable: true + } + hierarchy { + removed_props_for_leafNodes: ["collections", "children", "usedByContent", "item_sets", "methods", "libraries", "editorState"] + } + # Added for supporting backward compatibility - remove in release-2.7.0 + tagging { + backward_enable: true + property: "subject,medium" + } + h5p { + library { + path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" + } + } + copy { + invalid_statusList: ["Flagged","FlaggedDraft","FraggedReview","Retired", "Processing"] + origin_data: ["name", "author", "license", "organisation"] + props_to_remove: ["downloadUrl", "artifactUrl", "variants", "createdOn", "collections", "children", "lastUpdatedOn", "SYS_INTERNAL_LAST_UPDATED_ON", "versionKey", "s3Key", "status", "pkgVersion", "toc_url", "mimeTypesCount", "contentTypesCount", "leafNodesCount", "childNodes", "prevState", "lastPublishedOn", "flagReasons", "compatibilityLevel", "size", "publishChecklist", "publishComment", "LastPublishedBy", "rejectReasons", "rejectComment", "gradeLevel", "subject", "medium", "board", "topic", "purpose", "subtopic", "contentCredits", "owner", "collaborators", "creators", "contributors", "badgeAssertions", "dialcodes", "concepts", "keywords", "reservedDialcodes", 
"dialcodeRequired", "leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl", "boardIds", "gradeLevelIds", "subjectIds", "mediumIds", "topicsIds", "targetFWIds", "targetBoardIds", "targetGradeLevelIds", "targetSubjectIds", "targetMediumIds", "targetTopicIds", "se_boards", "se_subjects", "se_mediums", "se_gradeLevels", "se_topics", "se_FWIds", "se_boardIds", "se_subjectIds", "se_mediumIds", "se_gradeLevelIds", "se_topicIds"] + } + # Need to deprecate + media { + base.url: "https://dev.knowlg.sunbird.org" + } + } + + # Redis Configuration + redis { + host: "{{ .Values.redis_connection }}" + port: 6379 + maxConnections: 128 + } + + + #--Maximum Content Package File Size Limit in Bytes (150 MB) + MAX_CONTENT_PACKAGE_FILE_SIZE_LIMIT=157286400 + + #--Maximum Asset File Size Limit in Bytes (150 MB) + MAX_ASSET_FILE_SIZE_LIMIT=157286400 + + #--No of Retry While File Download Fails + RETRY_ASSET_DOWNLOAD_COUNT=1 + + #Current environment - need this property post cloud-store implementation + cloud_storage { + env: {{ .Values.cloud_storage_env }} + content.folder: "{{ .Values.cloud_storage_content_folder }}" + asset.folder: "{{ .Values.cloud_storage_asset_folder }}" + artefact.folder: "{{ .Values.cloud_storage_artefact_folder }}" + bundle.folder: "{{ .Values.cloud_storage_bundle_folder }}" + media.folder: "{{ .Values.cloud_storage_media_folder }}" + ecar.folder: "{{ .Values.cloud_storage_ecar_folder }}" + upload.url.ttl: "{{ .Values.cloud_storage_upload_url_ttl }}" + } + + # Configuration + akka.request_timeout: 30 + environment.id: 20000000 + graph { + dir: "/data/graphDB" + ids: ["domain"] + passport.key.base: "jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" + } + route { + domain: "bolt://{{ .Values.neo4j_domain_connection }}" + all: "bolt://{{ .Values.neo4j_all_connection }}" + bolt { + write { + domain: "bolt://{{ .Values.neo4j_domain_connection }}" + all: "bolt://{{ .Values.neo4j_all_connection }}" + }, + read { + 
domain: "bolt://{{ .Values.neo4j_domain_connection }}" + all: "bolt://{{ .Values.neo4j_all_connection }}" + } + } + } + + shard.id: 1 + platform { + auth.check.enabled: false + cache.ttl: 3600000 + language.codes: ["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd", "pj"] + } + + #Top N Config for Search Telemetry + telemetry_env: staging + + installation.id: ekstep + + channel { + default: "in.ekstep" + fetch.suggested_frameworks: true + } + + languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" + } + # Need to deprecate + composite { + search { + url : "http://search-service.knowlg.svc.cluster.local:9000/v3/search" + } + } + + cloud_storage_type: "azure" + cloud_storage_key: "" + cloud_storage_secret: "" + cloud_storage_endpoint: "" + cloud_storage_container: "" + + # Google Drive APIKEY + learning_content_drive_apiKey = "" + + #Youtube Standard Licence Validation + learning.content.youtube.application.name="" + learning_content_youtube_apikey="" + youtube.license.regex.pattern=["\\?vi?=([^&]*)", "watch\\?.*v=([^&]*)", "(?:embed|vi?)/([^/?]*)","^([A-Za-z0-9\\-\\_]*)"] + learning.valid_license=["creativeCommon"] + + kafka { + urls : "{{ .Values.kafka_connection }}" + topic.send.enable : true + topics.instruction : "" + publish.request.topic : "" + } + + # Need to deprecate + # DIAL Link Config + dial_service { + api { + base_url : "http://dial-service.knowlg.svc.cluster.local:9000" + auth_key : "" + search : "/dialcode/v3/search" + generate : "/dialcode/v3/generate" + } + } + # Need to deprecate + reserve_dialcode { + mimeType : ["application/vnd.ekstep.content-collection"] + max_count : 250 + } + + content.link_dialcode.validation=true + content.link_dialcode.max_limit=10 + + # Content Import API Config + import { + request_size_limit : 1000 + output_topic_name : 
"sunbirdstaging.knowlg.auto.creation.job.request" + required_props : ["name","code","mimeType","primaryCategory","artifactUrl","framework"] + remove_props : ["downloadUrl","variants","previewUrl","streamingUrl","itemSets","level1Name","level1Concept","level2Name","level2Concept","level3Name","level3Concept","me_totalPlaySessionCount","me_totalTimeSpentInSec","me_totalSessionsCount","me_totalTimespent","me_totalInteractions","me_creationSessions","me_creationTimespent","me_averageInteractionsPerMin","me_averageSessionsPerDevice","me_totalDevices","me_averageTimespentPerSession","me_averageRating","me_totalDownloads","me_totalSideloads","me_totalRatings","me_totalComments","me_totalDialcode","me_totalDialcodeLinkedToContent","me_totalDialcodeAttached","me_hierarchyLevel","origin","originData","contentPolicyCheck","questions"] + } + + # Need to depreacte + contentTypeToPrimaryCategory { + ClassroomTeachingVideo: "Explanation Content" + ConceptMap: "Learning Resource" + Course: "Course" + CuriosityQuestionSet: "Practice Question Set" + eTextBook: "eTextbook" + ExperientialResource: "Learning Resource" + ExplanationResource: "Explanation Content" + ExplanationVideo: "Explanation Content" + FocusSpot: "Teacher Resource" + LearningOutcomeDefinition: "Teacher Resource" + MarkingSchemeRubric: "Teacher Resource" + PedagogyFlow: "Teacher Resource" + PracticeQuestionSet: "Practice Question Set" + PracticeResource: "Practice Question Set" + SelfAssess: "Course Assessment" + TeachingMethod: "Teacher Resource" + TextBook: "Digital Textbook" + Collection: "Content Playlist" + ExplanationReadingMaterial: "Learning Resource" + LearningActivity: "Learning Resource" + LessonPlan: "Content Playlist" + LessonPlanResource: "Teacher Resource" + PreviousBoardExamPapers: "Learning Resource" + TVLesson: "Explanation Content" + OnboardingResource: "Learning Resource" + ReadingMaterial: "Learning Resource" + Template: "Template" + Asset: "Asset" + Plugin: "Plugin" + LessonPlanUnit: "Lesson 
Plan Unit" + CourseUnit: "Course Unit" + TextBookUnit: "Textbook Unit" + Asset: "Certificate Template" + } + # Need to deprecate + resourceTypeToPrimaryCategory { + Learn: "Learning Resource" + Read: "Learning Resource" + Practice: "Learning Resource" + Teach: "Teacher Resource" + Test: "Learning Resource" + Experiment: "Learning Resource" + LessonPlan: "Teacher Resource" + } + # Need to deprecate + mimeTypeToPrimaryCategory { + "application/vnd.ekstep.h5p-archive": ["Learning Resource"] + "application/vnd.ekstep.html-archive": ["Learning Resource"] + "application/vnd.android.package-archive": ["Learning Resource"] + "video/webm": ["Explanation Content"] + "video/x-youtube": ["Explanation Content"] + "video/mp4": ["Explanation Content"] + "application/pdf": ["Learning Resource", "Teacher Resource"] + "application/epub": ["Learning Resource", "Teacher Resource"] + "application/vnd.ekstep.ecml-archive": ["Learning Resource", "Teacher Resource"] + "text/x-url": ["Learnin Resource", "Teacher Resource"] + } + + objectcategorydefinition.keyspace="dev_category_store" + + # Need to deprecate + #Default objectCategory mapping for channel + channel { + content{ + primarycategories=["Course Assessment", "eTextbook", "Explanation Content", "Learning Resource", "Practice Question Set", "Teacher Resource", "Exam Question"] + additionalcategories= ["Classroom Teaching Video", "Concept Map", "Curiosity Question Set", "Experiential Resource", "Explanation Video", "Focus Spot", "Learning Outcome Definition", "Lesson Plan", "Marking Scheme Rubric", "Pedagogy Flow", "Previous Board Exam Papers", "TV Lesson", "Textbook"] + } + collection { + primarycategories=["Content Playlist", "Course", "Digital Textbook", "Question paper"] + additionalcategories=["Textbook", "Lesson Plan"] + } + asset { + primarycategories=["Asset", "CertAsset", "Certificate Template"] + additionalcategories=[] + } + } + master.category.validation.enabled="Yes" + + # Need to deprecate + #Collection CSV + 
sunbird_dialcode_search_api="http://dial-service.knowlg.svc.cluster.local:9000/dialcode/v3/list" + framework_read_api_url="http://localhost:8080/learning-service/framework/v3/read" + sunbird_link_dial_code_api="http://content-service.knowlg.svc.cluster.local:9000/collection/v4/dialcode/link" + + # Need to depreacte + collection { + csv { + maxRows = 6500 + allowedContentTypes = ["TextBook","Collection","Course"] + maxFirstLevelUnits=30 + ttl = 86400 + maxUnitFieldLength=120 + maxDescFieldLength=1500 + contentTypeToUnitType = {"TextBook": "TextBookUnit", "Course": "CourseUnit", "Collection":"Collection"} + headers { + folderIdentifier = ["Folder Identifier"] + hierarchy = ["Level 1 Folder","Level 2 Folder","Level 3 Folder","Level 4 Folder"] + QR = ["QR Code Required?","QR Code"] + topics = ["Mapped Topics"] + collectionName = ["Collection Name"] + linkedContents = ["Linked Content 1","Linked Content 2","Linked Content 3","Linked Content 4","Linked Content 5","Linked Content 6","Linked Content 7","Linked Content 8","Linked Content 9","Linked Content 10","Linked Content 11","Linked Content 12","Linked Content 13","Linked Content 14","Linked Content 15","Linked Content 16","Linked Content 17","Linked Content 18","Linked Content 19","Linked Content 20","Linked Content 21","Linked Content 22","Linked Content 23","Linked Content 24","Linked Content 25","Linked Content 26","Linked Content 27","Linked Content 28","Linked Content 29","Linked Content 30"] + output = ["Collection Name","Folder Identifier","Level 1 Folder","Level 2 Folder","Level 3 Folder","Level 4 Folder","Description","Mapped Topics","Keywords","QR Code Required?","QR Code","Linked Content 1","Linked Content 2","Linked Content 3","Linked Content 4","Linked Content 5","Linked Content 6","Linked Content 7","Linked Content 8","Linked Content 9","Linked Content 10","Linked Content 11","Linked Content 12","Linked Content 13","Linked Content 14","Linked Content 15","Linked Content 16","Linked Content 17","Linked 
Content 18","Linked Content 19","Linked Content 20","Linked Content 21","Linked Content 22","Linked Content 23","Linked Content 24","Linked Content 25","Linked Content 26","Linked Content 27","Linked Content 28","Linked Content 29","Linked Content 30"] + sequence { + create = {"Level 1 Folder":0,"Level 2 Folder":1,"Level 3 Folder":2,"Level 4 Folder":3,"Description":4} + update = {"Collection Name":0,"Folder Identifier":1,"Level 1 Folder":2,"Level 2 Folder":3,"Level 3 Folder":4,"Level 4 Folder":5,"Description":6,"Mapped Topics":7,"Keywords":8,"QR Code Required?":9,"QR Code":10,"Linked Content 1":11,"Linked Content 2":12,"Linked Content 3":13,"Linked Content 4":14,"Linked Content 5":15,"Linked Content 6":16,"Linked Content 7":17,"Linked Content 8":18,"Linked Content 9":19,"Linked Content 10":20,"Linked Content 11":21,"Linked Content 12":22,"Linked Content 13":23,"Linked Content 14":24,"Linked Content 15":25,"Linked Content 16":26,"Linked Content 17":27,"Linked Content 18":28,"Linked Content 19":29,"Linked Content 20":30,"Linked Content 21":31,"Linked Content 22":32,"Linked Content 23":33,"Linked Content 24":34,"Linked Content 25":35,"Linked Content 26":36,"Linked Content 27":37,"Linked Content 28":38,"Linked Content 29":39,"Linked Content 30":40} + linkedContents = {"Linked Content 1":0,"Linked Content 2":1,"Linked Content 3":2,"Linked Content 4":3,"Linked Content 5":4,"Linked Content 6":5,"Linked Content 7":6,"Linked Content 8":7,"Linked Content 9":8,"Linked Content 10":9,"Linked Content 11":10,"Linked Content 12":11,"Linked Content 13":12,"Linked Content 14":13,"Linked Content 15":14,"Linked Content 16":15,"Linked Content 17":16,"Linked Content 18":17,"Linked Content 19":18,"Linked Content 20":19,"Linked Content 21":20,"Linked Content 22":21,"Linked Content 23":22,"Linked Content 24":23,"Linked Content 25":24,"Linked Content 26":25,"Linked Content 27":26,"Linked Content 28":27,"Linked Content 29":28,"Linked Content 30":29} + } + } + mandatory { + create = ["Level 1 
Folder"] + update = ["Collection Name","Folder Identifier"] + } + } + } + + plugin.media.base.url="https://dev.knowlg.sunbird.org" + + cloudstorage { + metadata.replace_absolute_path=true + relative_path_prefix="{{ .Values.cloudstorage_relative_path_prefix_content }}" + metadata.list="{{ .Values.cloudstorage_metadata_list }}" + read_base_path="{{ .Values.cloudstorage_read_base_path }}" + write_base_path="{{ .Values.cloudstorage_write_base_path }}" + } diff --git a/knowlg-automation/helm_charts/content/templates/deployment.yaml b/knowlg-automation/helm_charts/content/templates/deployment.yaml new file mode 100644 index 000000000..e9bc81077 --- /dev/null +++ b/knowlg-automation/helm_charts/content/templates/deployment.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.api_namespace }} + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: {{ .Values.replicaCount }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.strategy.maxsurge }} + maxUnavailable: {{ .Values.strategy.maxunavailable }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + volumes: + - name: {{ .Chart.Name }}-config + configMap: + name: {{ .Chart.Name }}-config + - name: {{ .Chart.Name }}-xml-config + configMap: + name: {{ .Chart.Name }}-xml-config + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Always + env: + - name: JAVA_OPTIONS + value: {{ .Values.env.javaoptions | quote }} + - name: _JAVA_OPTIONS + value: -Dlog4j2.formatMsgNoLookups=true + envFrom: + - configMapRef: + name: {{ .Chart.Name }}-config + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: + - containerPort: {{ .Values.network.port }} + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 
10 }} + {{- end }} + volumeMounts: + - name: {{ .Chart.Name }}-config + mountPath: /home/sunbird/content-service-1.0-SNAPSHOT/config/application.conf + subPath: content-service_application.conf + - name: {{ .Chart.Name }}-xml-config + mountPath: /home/sunbird/content-service-1.0-SNAPSHOT/config/logback.xml + subPath: content-service_logback.xml + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-service + namespace: {{ .Values.api_namespace }} + labels: + app: {{ .Chart.Name }} +spec: + type: {{ .Values.serviceType }} + ports: + - name: http-{{ .Chart.Name }} + protocol: TCP + port: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} diff --git a/knowlg-automation/helm_charts/content/values.yaml b/knowlg-automation/helm_charts/content/values.yaml new file mode 100644 index 000000000..6b7f5c0ab --- /dev/null +++ b/knowlg-automation/helm_charts/content/values.yaml @@ -0,0 +1,42 @@ +api_namespace: knowlg-api + +env: + javaoptions: -Xmx600m + +replicaCount: 1 +repository: docker.io/aimansharief/content-service-modified +image_tag: R5.2.0 +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1 + memory: 1024Mi +network: + port: 9000 + targetport: 9000 +strategy: + type: RollingUpdate + maxsurge: 25% + maxunavailable: 25% +serviceType: ClusterIP +cassandra_connection: cassandra-db.knowlg-db.svc.cluster.local:9042 +redis_connection: redis-db.knowlg-db.svc.cluster.local +neo4j_domain_connection: neo4j-db.knowlg-db.svc.cluster.local:7687 +neo4j_all_connection: neo4j-db.knowlg-db.svc.cluster.local:8687 +kafka_connection: kafka.knowlg-db.svc.cluster.local:9092 +collection_keyspace: dev_hierarchy_store +content_keyspace: dev_content_store +cloud_storage_env: staging +cloud_storage_content_folder: "content" +cloud_storage_asset_folder: "assets" +cloud_storage_artefact_folder: "artifact" +cloud_storage_bundle_folder: "bundle" +cloud_storage_media_folder: "media" +cloud_storage_ecar_folder: "ecar_files" 
+cloud_storage_upload_url_ttl: "54000" +cloudstorage_relative_path_prefix_content: CONTENT_STORAGE_BASE_PATH +cloudstorage_metadata_list: ["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl", "transcripts"] +cloudstorage_read_base_path: "https://sunbirddevbbpublic.blob.core.windows.net" +cloudstorage_write_base_path: ["https://sunbirdstagingpublic.blob.core.windows.net", "https://sunbirddevbbpublic.blob.core.windows.net"] \ No newline at end of file diff --git a/knowlg-automation/helm_charts/dial/Chart.yaml b/knowlg-automation/helm_charts/dial/Chart.yaml new file mode 100644 index 000000000..57154c0f8 --- /dev/null +++ b/knowlg-automation/helm_charts/dial/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: dial +version: 0.1.0 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/dial/dial-service_application.conf b/knowlg-automation/helm_charts/dial/dial-service_application.conf new file mode 100644 index 000000000..a82686311 --- /dev/null +++ b/knowlg-automation/helm_charts/dial/dial-service_application.conf @@ -0,0 +1,189 @@ +# This is the main configuration file for the application. 
+# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ + +## Akka +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +akka { + #loggers =["akka.event.Logging$DefaultLogger"] + #log-config-on-start = true +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +#play.crypto.secret = "changeme" + + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# ~~~~~ +play.i18n { + # The application languages + langs = [ "en" ] +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. + # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + #TO allow more data in request body + parser.maxDiskBuffer=50MB + parser.maxMemoryBuffer=50MB +} + +## Netty Provider +# ~~~~~ +play.server.netty { + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. + #transport = "native" + maxChunkSize = 30000000 +} + +## WS (HTTP Client) +# ~~~~~ +libraryDependencies += javaWs + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. 
You must enable this by adding to build.sbt: +# +libraryDependencies += cache +# +play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +//play.http.filters= filters.HealthCheckFilter + +# Logger +# ~~~~~ +# You can also configure logback (http://logback.qos.ch/), +# by providing an application-logger.xml file in the conf directory. + +# Root logger: +#logger.root=ERROR + +# Logger used by the framework: +#logger.play=INFO + +# Logger provided to your application: +#logger.application=DEBUG + +# APP Specific config +# ~~~~~ +# Application specific configurations can be provided here +application.global=Global + +play.http.parser.maxMemoryBuffer = 900000K +play.http.parser.maxDiskBuffer = 900000K + +play.server.http.port = 9000 +# Logger +# ~~~~~ +# You can also configure logback (http://logback.qos.ch/), +# by providing an application-logger.xml file in the conf directory. + +# Root logger: +logger.root=ERROR + +# Logger used by the framework: +logger.play=INFO + +# Logger provided to your application: +logger.application=DEBUG + +# APP Specific config +# ~~~~~ +# Application specific configurations can be provided here + +# Cache-Manager Configuration +cache.type="redis" + + +search.es_conn_info="{{ .Values.es_connection }}" +search.fields.query=["name^100","title^100","lemma^100","code^100","tags^100","question^100","domain","subject","description^10","keywords^25","ageGroup^10","filter^10","theme^10","genre^10","objects^25","contentType^100","language^200","teachingMode^25","skills^10","learningObjective^10","curriculum^100","gradeLevel^100","developer^100","attributions^10","owner^50","text","words","releaseNotes","body"] +search.fields.date=["lastUpdatedOn","createdOn","versionDate","lastSubmittedOn","lastPublishedOn"] +search.batch.size=500 +search.connection.timeout=30 + +#Top N Config for Search Telemetry +telemetry.search.topn=5 +telemetry_env=dev 
+installation.id=ekstep + +# Configuration for default channel ID +channel.default="in.ekstep" + +# Redis Configuration +redis.host="{{ .Values.redis_connection}}" +redis.port=6379 +redis.maxConnections=128 + +# DIAL Code Configuration +dialcode.keyspace.name="{{ .Values.dialcode_keyspace_name }}" +dialcode.keyspace.table="{{ .Values.dialcode_keyspace_table }}" +dialcode.max_count=1000 + +# System Configuration +system.config.keyspace.name="{{ .Values.system_config_keyspace_name}}" +system.config.table="{{ .Values.system_config_table}}" + +#Publisher Configuration +publisher.keyspace.name="{{ .Values.system_config_table}}" +publisher.keyspace.table="{{ .Values.publisher_keyspace_table}}" + +#DIAL Code Generator Configuration +dialcode.strip.chars="0" +dialcode.length=6.0 +dialcode.large.prime_number=1679979167 + +#DIAL Code ElasticSearch Configuration +dialcode.index=true +dialcode.object_type="{{ .Values.dialocode_object_type}}" + +# Cassandra Configuration +cassandra.lp.connection="{{ .Values.cassandra_connection }}" +cassandra.lpa.connection="{{ .Values.cassandra_connection }}" + + +# Consistency Level for Multi Node Cassandra cluster +#cassandra.lp.consistency.level=QUORUM + +#Kafka Configuration +kafka { + urls = "{{ .Values.kafka_connection}}" + topic { + send_enable = false + graph_event = "{{ .Values.graph_event}}" + } +} + +jsonld { + basePath = "https://sunbirdstagingpublic.blob.core.windows.net/sunbird-dial-staging/jsonld-schema/local" + type = "sbed" + localPath = "/tmp" + ttl = 43200 + sb_schema = ["http://store.knowlg.sunbird.org/dial/specs/sbed/schema.jsonld"] +} + +dial_id = "https://dev.sunbirded.org/dial/{dialcode}" +dial_type = "https://dev.sunbirded.org/ns/" + diff --git a/ontology-engine/graph-core_2.11/src/test/resources/logback.xml b/knowlg-automation/helm_charts/dial/dial-service_logback.xml similarity index 100% rename from ontology-engine/graph-core_2.11/src/test/resources/logback.xml rename to 
knowlg-automation/helm_charts/dial/dial-service_logback.xml diff --git a/knowlg-automation/helm_charts/dial/templates/deployment.yaml b/knowlg-automation/helm_charts/dial/templates/deployment.yaml new file mode 100644 index 000000000..f64a4e2a8 --- /dev/null +++ b/knowlg-automation/helm_charts/dial/templates/deployment.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.api_namespace }} + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: {{ .Values.replicaCount }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.strategy.maxsurge }} + maxUnavailable: {{ .Values.strategy.maxunavailable }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + volumes: + - name: {{ .Chart.Name }}-config + configMap: + name: {{ .Chart.Name }}-config + - name: {{ .Chart.Name }}-xml-config + configMap: + name: {{ .Chart.Name }}-xml-config + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Always + env: + - name: JAVA_OPTIONS + value: {{ .Values.env.javaoptions | quote }} + - name: _JAVA_OPTIONS + value: -Dlog4j2.formatMsgNoLookups=true + envFrom: + - configMapRef: + name: {{ .Chart.Name }}-config + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: + - containerPort: {{ .Values.network.port }} + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + {{- end }} + volumeMounts: + - name: {{ .Chart.Name }}-config + mountPath: /home/sunbird/sunbird-dial-service-1.0-SNAPSHOT/config/application.conf + subPath: dial-service_application.conf + - name: {{ .Chart.Name }}-xml-config + mountPath: /home/sunbird/sunbird-dial-service-1.0-SNAPSHOT/config/logback.xml + subPath: dial-service_logback.xml + +--- +apiVersion: v1 +kind: Service +metadata: + 
name: {{ .Chart.Name }}-service + namespace: {{ .Values.api_namespace }} + labels: + app: {{ .Chart.Name }} +spec: + type: {{ .Values.serviceType }} + ports: + - name: http-{{ .Chart.Name }} + protocol: TCP + port: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} diff --git a/knowlg-automation/helm_charts/dial/templates/dial-configmap b/knowlg-automation/helm_charts/dial/templates/dial-configmap new file mode 100644 index 000000000..646e5f5fe --- /dev/null +++ b/knowlg-automation/helm_charts/dial/templates/dial-configmap @@ -0,0 +1,234 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: dial-xml-config + namespace: knowlg-api +data: + dial-service_logback.xml: | + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: dial-config + namespace: knowlg-api +data: + dial-service_application.conf: |- + # This is the main configuration file for the application. + # https://www.playframework.com/documentation/latest/ConfigFile + # ~~~~~ + + ## Akka + # https://www.playframework.com/documentation/latest/JavaAkka#Configuration + # ~~~~~ + akka { + #loggers =["akka.event.Logging$DefaultLogger"] + #log-config-on-start = true + } + + ## Secret key + # http://www.playframework.com/documentation/latest/ApplicationSecret + # ~~~~~ + #play.crypto.secret = "changeme" + + + ## Internationalisation + # https://www.playframework.com/documentation/latest/JavaI18N + # ~~~~~ + play.i18n { + # The application languages + langs = [ "en" ] + } + + ## Play HTTP settings + # ~~~~~ + play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. 
+ # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. + # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + #TO allow more data in request body + parser.maxDiskBuffer=50MB + parser.maxMemoryBuffer=50MB + } + + ## Netty Provider + # ~~~~~ + play.server.netty { + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. + #transport = "native" + maxChunkSize = 30000000 + } + + ## WS (HTTP Client) + # ~~~~~ + libraryDependencies += javaWs + + ## Cache + # https://www.playframework.com/documentation/latest/JavaCache + # https://www.playframework.com/documentation/latest/ScalaCache + # ~~~~~ + # Play comes with an integrated cache API that can reduce the operational + # overhead of repeated requests. You must enable this by adding to build.sbt: + # + libraryDependencies += cache + # + play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] + } + + //play.http.filters= filters.HealthCheckFilter + + # Logger + # ~~~~~ + # You can also configure logback (http://logback.qos.ch/), + # by providing an application-logger.xml file in the conf directory. + + # Root logger: + #logger.root=ERROR + + # Logger used by the framework: + #logger.play=INFO + + # Logger provided to your application: + #logger.application=DEBUG + + # APP Specific config + # ~~~~~ + # Application specific configurations can be provided here + application.global=Global + + play.http.parser.maxMemoryBuffer = 900000K + play.http.parser.maxDiskBuffer = 900000K + + play.server.http.port = 9000 + # Logger + # ~~~~~ + # You can also configure logback (http://logback.qos.ch/), + # by providing an application-logger.xml file in the conf directory. 
+ + # Root logger: + logger.root=ERROR + + # Logger used by the framework: + logger.play=INFO + + # Logger provided to your application: + logger.application=DEBUG + + # APP Specific config + # ~~~~~ + # Application specific configurations can be provided here + + # Cache-Manager Configuration + cache.type="redis" + + + search.es_conn_info="{{ .Values.es_connection }}" + search.fields.query=["name^100","title^100","lemma^100","code^100","tags^100","question^100","domain","subject","description^10","keywords^25","ageGroup^10","filter^10","theme^10","genre^10","objects^25","contentType^100","language^200","teachingMode^25","skills^10","learningObjective^10","curriculum^100","gradeLevel^100","developer^100","attributions^10","owner^50","text","words","releaseNotes","body"] + search.fields.date=["lastUpdatedOn","createdOn","versionDate","lastSubmittedOn","lastPublishedOn"] + search.batch.size=500 + search.connection.timeout=30 + + #Top N Config for Search Telemetry + telemetry.search.topn=5 + telemetry_env=dev + installation.id=ekstep + + # Configuration for default channel ID + channel.default="in.ekstep" + + # Redis Configuration + redis.host="{{ .Values.redis_connection}}" + redis.port=6379 + redis.maxConnections=128 + + # DIAL Code Configuration + dialcode.keyspace.name="{{ .Values.dialcode_keyspace_name }}" + dialcode.keyspace.table="{{ .Values.dialcode_keyspace_table }}" + dialcode.max_count=1000 + + # System Configuration + system.config.keyspace.name="{{ .Values.systemconfig_keyspace_name}}" + system.config.table="{{ .Values.systemconfig_table}}" + + #Publisher Configuration + publisher.keyspace.name="{{ .Values.publisher_keyspace_name}}" + publisher.keyspace.table="{{ .Values.publisher_keyspace_table}}" + + #DIAL Code Generator Configuration + dialcode.strip.chars="0" + dialcode.length=6.0 + dialcode.large.prime_number=1679979167 + + #DIAL Code ElasticSearch Configuration + dialcode.index=true + dialcode.object_type="{{ .Values.dialcode_object_type}}" + + # 
Cassandra Configuration + cassandra.lp.connection="{{ .Values.cassandra_connection }}" + cassandra.lpa.connection="{{ .Values.cassandra_connection }}" + + + # Consistency Level for Multi Node Cassandra cluster + #cassandra.lp.consistency.level=QUORUM + + #Kafka Configuration + kafka { + urls = "{{ .Values.kafka_connection}}" + topic { + send_enable = false + graph_event = "{{ .Values.graph_event}}" + } + } + + jsonld { + basePath = "https://sunbirdstagingpublic.blob.core.windows.net/sunbird-dial-staging/jsonld-schema/local" + type = "sbed" + localPath = "/tmp" + ttl = 43200 + sb_schema = ["http://store.knowlg.sunbird.org/dial/specs/sbed/schema.jsonld"] + } + + dial_id = "https://dev.sunbirded.org/dial/{dialcode}" + dial_type = "https://dev.sunbirded.org/ns/" + diff --git a/knowlg-automation/helm_charts/dial/values.yaml b/knowlg-automation/helm_charts/dial/values.yaml new file mode 100644 index 000000000..9c8ec61ad --- /dev/null +++ b/knowlg-automation/helm_charts/dial/values.yaml @@ -0,0 +1,34 @@ +api_namespace: knowlg-api + +env: + javaoptions: -Xmx600m + +replicaCount: 1 +repository: docker.io/aimansharief/dial-service +image_tag: R5.2.0 +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1 + memory: 1024Mi +network: + port: 9000 + targetport: 9000 +strategy: + type: RollingUpdate + maxsurge: 25% + maxunavailable: 25% +serviceType: ClusterIP +cassandra_connection: cassandra-db.knowlg-db.svc.cluster.local:9042 +redis_connection: redis-db.knowlg-db.svc.cluster.local +es_connection: elasticsearch.knowlg-db.svc.cluster.local:9200 +dialcode_keyspace_name: dialcode_store +dialcode_keyspace_table: dial_code +systemconfig_keyspace_name: dialcode_store +systemconfig_table: system_config +publisher_keyspace_name: dialcode_store +publisher_keyspace_table: publisher +dialcode_object_type: DialCode +graph_event: local.learning.graph.events \ No newline at end of file diff --git a/knowlg-automation/helm_charts/elasticsearch/.helmignore 
b/knowlg-automation/helm_charts/elasticsearch/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/knowlg-automation/helm_charts/elasticsearch/Chart.lock b/knowlg-automation/helm_charts/elasticsearch/Chart.lock new file mode 100644 index 000000000..6f6b4a208 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: kibana + repository: https://charts.bitnami.com/bitnami + version: 10.2.16 +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.4 +digest: sha256:4d5edc18f6f4c69d66d3e2c1723fcd7f722141237052c3852562232c141bfcf3 +generated: "2023-03-18T21:45:26.026217282Z" diff --git a/knowlg-automation/helm_charts/elasticsearch/Chart.yaml b/knowlg-automation/helm_charts/elasticsearch/Chart.yaml new file mode 100644 index 000000000..5fb0a720b --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + category: Analytics +apiVersion: v2 +appVersion: 8.6.0 +dependencies: +- condition: global.kibanaEnabled + name: kibana + repository: https://charts.bitnami.com/bitnami + version: 10.x.x +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Elasticsearch is a distributed search and analytics engine. It is used + for web search, log monitoring, and real-time analytics. Ideal for Big Data applications. 
+home: https://github.com/bitnami/charts/tree/main/bitnami/elasticsearch +icon: https://bitnami.com/assets/stacks/elasticsearch/img/elasticsearch-stack-220x234.png +keywords: +- elasticsearch +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: elasticsearch +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch +- https://www.elastic.co/products/elasticsearch +version: 19.5.8 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/elasticsearch/README.md b/knowlg-automation/helm_charts/elasticsearch/README.md new file mode 100644 index 000000000..b95192856 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/README.md @@ -0,0 +1,969 @@ + + +# Bitnami Elasticsearch Stack + +Elasticsearch is a distributed search and analytics engine. It is used for web search, log monitoring, and real-time analytics. Ideal for Big Data applications. + +[Overview of Elasticsearch](https://www.elastic.co/products/elasticsearch) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/elasticsearch +``` + +## Introduction + +This chart bootstraps a [Elasticsearch](https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/elasticsearch +``` + +These commands deploy Elasticsearch on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` release: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. Remove also the chart using `--purge` option: + +```console +helm delete --purge my-release +``` + +## Parameters + +### Global parameters + +| Name | Description | Value | +| -------------------------------------------- | ----------------------------------------------------------------------------------------------------- | --------------- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.elasticsearch.service.name` | Elasticsearch service name to be used in the Kibana subchart (ignored if kibanaEnabled=false) | `elasticsearch` | +| `global.elasticsearch.service.ports.restAPI` | Elasticsearch service restAPI port to be used in the Kibana subchart (ignored if kibanaEnabled=false) | `9200` | +| `global.kibanaEnabled` | Whether or not to enable Kibana | `false` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override 
Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### Elasticsearch cluster Parameters + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ | +| `clusterName` | Elasticsearch cluster name | `elastic` | +| `containerPorts.restAPI` | Elasticsearch REST API port | `9200` | +| `containerPorts.transport` | Elasticsearch Transport port | `9300` | +| `plugins` | Comma, semi-colon or space separated list of plugins to install at initialization | `""` | +| `snapshotRepoPath` | File System snapshot repository path | `""` | +| `config` | Override elasticsearch configuration | `{}` | +| `extraConfig` | Append extra configuration to the elasticsearch node configuration | `{}` | +| `extraHosts` | A list of external hosts which are part of this cluster | `[]` | +| `extraVolumes` | A list of volumes to be added to the pod | `[]` | +| `extraVolumeMounts` | A list of volume mounts to be added to the pod | `[]` | +| `initScripts` | Dictionary of init 
scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` | +| `extraEnvVars` | Array containing extra env vars to be added to all pods (evaluated as a template) | `[]` | +| `extraEnvVarsCM` | ConfigMap containing extra env vars to be added to all pods (evaluated as a template) | `""` | +| `extraEnvVarsSecret` | Secret containing extra env vars to be added to all pods (evaluated as a template) | `""` | +| `sidecars` | Add additional sidecar containers to the all elasticsearch node pod(s) | `[]` | +| `initContainers` | Add additional init containers to the all elasticsearch node pod(s) | `[]` | +| `useIstioLabels` | Use this variable to add Istio labels to all pods | `true` | +| `image.registry` | Elasticsearch image registry | `docker.io` | +| `image.repository` | Elasticsearch image repository | `bitnami/elasticsearch` | +| `image.tag` | Elasticsearch image tag (immutable tags are recommended) | `8.6.2-debian-11-r10` | +| `image.digest` | Elasticsearch image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Elasticsearch image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Elasticsearch image pull secrets | `[]` | +| `image.debug` | Enable Elasticsearch image debug mode | `false` | +| `security.enabled` | Enable X-Pack Security settings | `false` | +| `security.elasticPassword` | Password for 'elastic' user | `""` | +| `security.existingSecret` | Name of the existing secret containing the Elasticsearch password and | `""` | +| `security.fipsMode` | Configure elasticsearch with FIPS 140 compliant mode | `false` | +| `security.tls.restEncryption` | Enable SSL/TLS encryption for Elasticsearch REST API. 
| `true` | +| `security.tls.autoGenerated` | Create self-signed TLS certificates. | `false` | +| `security.tls.verificationMode` | Verification mode for SSL communications. | `full` | +| `security.tls.master.existingSecret` | Existing secret containing the certificates for the master nodes | `""` | +| `security.tls.data.existingSecret` | Existing secret containing the certificates for the data nodes | `""` | +| `security.tls.ingest.existingSecret` | Existing secret containing the certificates for the ingest nodes | `""` | +| `security.tls.coordinating.existingSecret` | Existing secret containing the certificates for the coordinating nodes | `""` | +| `security.tls.keystoreFilename` | Name of the keystore file | `elasticsearch.keystore.jks` | +| `security.tls.truststoreFilename` | Name of the truststore | `elasticsearch.truststore.jks` | +| `security.tls.usePemCerts` | Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 | `false` | +| `security.tls.passwordsSecret` | Existing secret containing the Keystore and Truststore passwords, or key password if PEM certs are used | `""` | +| `security.tls.keystorePassword` | Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. | `""` | +| `security.tls.truststorePassword` | Password to access the JKS/PKCS12 truststore when they are password-protected. | `""` | +| `security.tls.keyPassword` | Password to access the PEM key when they are password-protected. 
| `""` | +| `security.tls.secretKeystoreKey` | Name of the secret key containing the Keystore password | `""` | +| `security.tls.secretTruststoreKey` | Name of the secret key containing the Truststore password | `""` | +| `security.tls.secretKey` | Name of the secret key containing the PEM key password | `""` | + +### Traffic Exposure Parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Elasticsearch service type | `ClusterIP` | +| `service.ports.restAPI` | Elasticsearch service REST API port | `9200` | +| `service.ports.transport` | Elasticsearch service transport port | `9300` | +| `service.nodePorts.restAPI` | Node port for REST API | `""` | +| `service.nodePorts.transport` | Node port for REST API | `""` | +| `service.clusterIP` | Elasticsearch service Cluster IP | `""` | +| `service.loadBalancerIP` | Elasticsearch service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Elasticsearch service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Elasticsearch service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Elasticsearch service | `{}` | +| `service.extraPorts` | Extra ports to expose in Elasticsearch service (normally used with the `sidecars` value) | `[]` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingress.enabled` | Enable ingress record generation for Elasticsearch | `false` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress record | 
`elasticsearch.local` | +| `ingress.path` | Default path for the ingress record | `/` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | + +### Master-elegible nodes parameters + +| Name | Description | Value | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `master.masterOnly` | Deploy the Elasticsearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. 
| `true` | +| `master.replicaCount` | Number of master-elegible replicas to deploy | `2` | +| `master.nameOverride` | String to partially override elasticsearch.master.fullname | `""` | +| `master.fullnameOverride` | String to fully override elasticsearch.master.fullname | `""` | +| `master.servicenameOverride` | String to fully override elasticsearch.master.servicename | `""` | +| `master.annotations` | Annotations for the master statefulset | `{}` | +| `master.updateStrategy.type` | Master-elegible nodes statefulset stategy type | `RollingUpdate` | +| `master.resources.limits` | The resources limits for elasticsearch containers | `{}` | +| `master.resources.requests` | The requested resources for elasticsearch containers | `{}` | +| `master.heapSize` | Elasticsearch master-eligible node heap size. | `128m` | +| `master.podSecurityContext.enabled` | Enabled master-elegible pods' Security Context | `true` | +| `master.podSecurityContext.fsGroup` | Set master-elegible pod's Security Context fsGroup | `1001` | +| `master.containerSecurityContext.enabled` | Enabled master-elegible containers' Security Context | `true` | +| `master.containerSecurityContext.runAsUser` | Set master-elegible containers' Security Context runAsUser | `1001` | +| `master.containerSecurityContext.runAsNonRoot` | Set master-elegible containers' Security Context runAsNonRoot | `true` | +| `master.hostAliases` | master-elegible pods host aliases | `[]` | +| `master.podLabels` | Extra labels for master-elegible pods | `{}` | +| `master.podAnnotations` | Annotations for master-elegible pods | `{}` | +| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` | +| `master.nodeAffinityPreset.values` | Node label values to match. Ignored if `master.affinity` is set | `[]` | +| `master.affinity` | Affinity for master-elegible pods assignment | `{}` | +| `master.nodeSelector` | Node labels for master-elegible pods assignment | `{}` | +| `master.tolerations` | Tolerations for master-elegible pods assignment | `[]` | +| `master.priorityClassName` | master-elegible pods' priorityClassName | `""` | +| `master.schedulerName` | Name of the k8s scheduler (other than default) for master-elegible pods | `""` | +| `master.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch Master pod needs to terminate gracefully | `""` | +| `master.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `master.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch master pods | `Parallel` | +| `master.startupProbe.enabled` | Enable/disable the startup probe (master nodes pod) | `false` | +| `master.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (master nodes pod) | `90` | +| `master.startupProbe.periodSeconds` | How often to perform the probe (master nodes pod) | `10` | +| `master.startupProbe.timeoutSeconds` | When the probe times out (master nodes pod) | `5` | +| `master.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) | `1` | +| `master.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.livenessProbe.enabled` | Enable/disable the liveness probe (master-eligible nodes pod) | `true` | +| 
`master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (master-eligible nodes pod) | `90` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.readinessProbe.enabled` | Enable/disable the readiness probe (master-eligible nodes pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (master-eligible nodes pod) | `90` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (master-eligible nodes pod) | `10` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (master-eligible nodes pod) | `5` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `master.customStartupProbe` | Override default startup probe | `{}` | +| `master.customLivenessProbe` | Override default liveness probe | `{}` | +| `master.customReadinessProbe` | Override default readiness probe | `{}` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.lifecycleHooks` | for the master-elegible container(s) to automate configuration before or after startup | `{}` | +| `master.extraEnvVars` | 
Array with extra environment variables to add to master-elegible nodes | `[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for master-elegible nodes | `""` | +| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for master-elegible nodes | `""` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the master-elegible pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the master-elegible container(s) | `[]` | +| `master.sidecars` | Add additional sidecar containers to the master-elegible pod(s) | `[]` | +| `master.initContainers` | Add additional init containers to the master-elegible pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `master.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `master.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `master.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. | `""` | +| `master.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` | `{}` | +| `master.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume Size | `8Gi` | +| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `master.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. 
| `""` | +| `master.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | +| `master.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `master.autoscaling.enabled` | Whether enable horizontal pod autoscale | `false` | +| `master.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` | +| `master.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` | +| `master.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` | +| `master.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` | + +### Data-only nodes parameters + +| Name | Description | Value | +| -------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | +| `data.replicaCount` | Number of data-only replicas to deploy | `2` | +| `data.nameOverride` | String to partially override elasticsearch.data.fullname | `""` | +| `data.fullnameOverride` | String to fully override elasticsearch.data.fullname | `""` | +| `data.servicenameOverride` | String to fully override elasticsearch.data.servicename | `""` | +| `data.annotations` | Annotations for the data statefulset | `{}` | +| `data.updateStrategy.type` | Data-only nodes statefulset stategy type | `RollingUpdate` | +| `data.resources.limits` | The resources limits for the data containers | `{}` | +| `data.resources.requests` | The requested resources for the data containers | `{}` | +| `data.heapSize` | Elasticsearch data node heap size. 
| `1024m` | +| `data.podSecurityContext.enabled` | Enabled data pods' Security Context | `true` | +| `data.podSecurityContext.fsGroup` | Set data pod's Security Context fsGroup | `1001` | +| `data.containerSecurityContext.enabled` | Enabled data containers' Security Context | `true` | +| `data.containerSecurityContext.runAsUser` | Set data containers' Security Context runAsUser | `1001` | +| `data.containerSecurityContext.runAsNonRoot` | Set data containers' Security Context runAsNonRoot | `true` | +| `data.hostAliases` | data pods host aliases | `[]` | +| `data.podLabels` | Extra labels for data pods | `{}` | +| `data.podAnnotations` | Annotations for data pods | `{}` | +| `data.podAffinityPreset` | Pod affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `data.nodeAffinityPreset.key` | Node label key to match. Ignored if `data.affinity` is set | `""` | +| `data.nodeAffinityPreset.values` | Node label values to match. Ignored if `data.affinity` is set | `[]` | +| `data.affinity` | Affinity for data pods assignment | `{}` | +| `data.nodeSelector` | Node labels for data pods assignment | `{}` | +| `data.tolerations` | Tolerations for data pods assignment | `[]` | +| `data.priorityClassName` | data pods' priorityClassName | `""` | +| `data.schedulerName` | Name of the k8s scheduler (other than default) for data pods | `""` | +| `data.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch data pod needs to terminate gracefully | `""` | +| `data.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `data.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch data pods | `Parallel` | +| `data.startupProbe.enabled` | Enable/disable the startup probe (data nodes pod) | `false` | +| `data.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (data nodes pod) | `90` | +| `data.startupProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.startupProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.livenessProbe.enabled` | Enable/disable the liveness probe (data nodes pod) | `true` | +| `data.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (data nodes pod) | `90` | +| `data.livenessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.livenessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.readinessProbe.enabled` | Enable/disable the readiness probe (data nodes pod) | `true` | +| `data.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (data nodes pod) | `90` | +| `data.readinessProbe.periodSeconds` | How often to perform the probe (data nodes pod) | `10` | +| `data.readinessProbe.timeoutSeconds` | When the probe times out (data nodes pod) | `5` | +| `data.readinessProbe.successThreshold` | Minimum 
consecutive successes for the probe to be considered successful after having failed (data nodes pod) | `1` | +| `data.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `data.customStartupProbe` | Override default startup probe | `{}` | +| `data.customLivenessProbe` | Override default liveness probe | `{}` | +| `data.customReadinessProbe` | Override default readiness probe | `{}` | +| `data.command` | Override default container command (useful when using custom images) | `[]` | +| `data.args` | Override default container args (useful when using custom images) | `[]` | +| `data.lifecycleHooks` | for the data container(s) to automate configuration before or after startup | `{}` | +| `data.extraEnvVars` | Array with extra environment variables to add to data nodes | `[]` | +| `data.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data nodes | `""` | +| `data.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data nodes | `""` | +| `data.extraVolumes` | Optionally specify extra list of additional volumes for the data pod(s) | `[]` | +| `data.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the data container(s) | `[]` | +| `data.sidecars` | Add additional sidecar containers to the data pod(s) | `[]` | +| `data.initContainers` | Add additional init containers to the data pod(s) | `[]` | +| `data.persistence.enabled` | Enable persistence using a `PersistentVolumeClaim` | `true` | +| `data.persistence.storageClass` | Persistent Volume Storage Class | `""` | +| `data.persistence.existingClaim` | Existing Persistent Volume Claim | `""` | +| `data.persistence.existingVolume` | Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set. 
| `""` | +| `data.persistence.selector` | Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` | `{}` | +| `data.persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `data.persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `data.persistence.size` | Persistent Volume Size | `8Gi` | +| `data.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `data.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `data.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | +| `data.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `data.autoscaling.enabled` | Whether enable horizontal pod autoscale | `false` | +| `data.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` | +| `data.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` | +| `data.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` | +| `data.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` | + +### Coordinating-only nodes parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `coordinating.replicaCount` | Number of coordinating-only replicas to deploy | `2` | +| `coordinating.nameOverride` | String to partially override elasticsearch.coordinating.fullname | `""` | +| `coordinating.fullnameOverride` | String to fully override elasticsearch.coordinating.fullname | `""` | +| 
`coordinating.servicenameOverride` | String to fully override elasticsearch.coordinating.servicename | `""` | +| `coordinating.annotations` | Annotations for the coordinating-only statefulset | `{}` | +| `coordinating.updateStrategy.type` | Coordinating-only nodes statefulset strategy type | `RollingUpdate` | +| `coordinating.resources.limits` | The resources limits for the coordinating-only containers | `{}` | +| `coordinating.resources.requests` | The requested resources for the coordinating-only containers | `{}` | +| `coordinating.heapSize` | Elasticsearch coordinating node heap size. | `128m` | +| `coordinating.podSecurityContext.enabled` | Enabled coordinating-only pods' Security Context | `true` | +| `coordinating.podSecurityContext.fsGroup` | Set coordinating-only pod's Security Context fsGroup | `1001` | +| `coordinating.containerSecurityContext.enabled` | Enabled coordinating-only containers' Security Context | `true` | +| `coordinating.containerSecurityContext.runAsUser` | Set coordinating-only containers' Security Context runAsUser | `1001` | +| `coordinating.containerSecurityContext.runAsNonRoot` | Set coordinating-only containers' Security Context runAsNonRoot | `true` | +| `coordinating.hostAliases` | coordinating-only pods host aliases | `[]` | +| `coordinating.podLabels` | Extra labels for coordinating-only pods | `{}` | +| `coordinating.podAnnotations` | Annotations for coordinating-only pods | `{}` | +| `coordinating.podAffinityPreset` | Pod affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `coordinating.nodeAffinityPreset.key` | Node label key to match. 
Ignored if `coordinating.affinity` is set | `""` | +| `coordinating.nodeAffinityPreset.values` | Node label values to match. Ignored if `coordinating.affinity` is set | `[]` | +| `coordinating.affinity` | Affinity for coordinating-only pods assignment | `{}` | +| `coordinating.nodeSelector` | Node labels for coordinating-only pods assignment | `{}` | +| `coordinating.tolerations` | Tolerations for coordinating-only pods assignment | `[]` | +| `coordinating.priorityClassName` | coordinating-only pods' priorityClassName | `""` | +| `coordinating.schedulerName` | Name of the k8s scheduler (other than default) for coordinating-only pods | `""` | +| `coordinating.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch coordinating pod needs to terminate gracefully | `""` | +| `coordinating.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `coordinating.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch coordinating pods | `Parallel` | +| `coordinating.startupProbe.enabled` | Enable/disable the startup probe (coordinating-only nodes pod) | `false` | +| `coordinating.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.startupProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.startupProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.livenessProbe.enabled` | Enable/disable the liveness probe 
(coordinating-only nodes pod) | `true` | +| `coordinating.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.livenessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.livenessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.readinessProbe.enabled` | Enable/disable the readiness probe (coordinating-only nodes pod) | `true` | +| `coordinating.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (coordinating-only nodes pod) | `90` | +| `coordinating.readinessProbe.periodSeconds` | How often to perform the probe (coordinating-only nodes pod) | `10` | +| `coordinating.readinessProbe.timeoutSeconds` | When the probe times out (coordinating-only nodes pod) | `5` | +| `coordinating.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) | `1` | +| `coordinating.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `coordinating.customStartupProbe` | Override default startup probe | `{}` | +| `coordinating.customLivenessProbe` | Override default liveness probe | `{}` | +| `coordinating.customReadinessProbe` | Override default readiness probe | `{}` | +| `coordinating.command` | Override default container command (useful when using custom images) | `[]` | +| `coordinating.args` | Override default container args (useful when using custom images) | 
`[]` | +| `coordinating.lifecycleHooks` | for the coordinating-only container(s) to automate configuration before or after startup | `{}` | +| `coordinating.extraEnvVars` | Array with extra environment variables to add to coordinating-only nodes | `[]` | +| `coordinating.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for coordinating-only nodes | `""` | +| `coordinating.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for coordinating-only nodes | `""` | +| `coordinating.extraVolumes` | Optionally specify extra list of additional volumes for the coordinating-only pod(s) | `[]` | +| `coordinating.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the coordinating-only container(s) | `[]` | +| `coordinating.sidecars` | Add additional sidecar containers to the coordinating-only pod(s) | `[]` | +| `coordinating.initContainers` | Add additional init containers to the coordinating-only pod(s) | `[]` | +| `coordinating.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `coordinating.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `coordinating.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | +| `coordinating.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
| `{}` | +| `coordinating.autoscaling.enabled` | Whether to enable horizontal pod autoscale | `false` | +| `coordinating.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` | +| `coordinating.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` | +| `coordinating.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` | +| `coordinating.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` | + +### Ingest-only nodes parameters + +| Name | Description | Value | +| ---------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- | +| `ingest.enabled` | Enable ingest nodes | `true` | +| `ingest.replicaCount` | Number of ingest-only replicas to deploy | `2` | +| `ingest.nameOverride` | String to partially override elasticsearch.ingest.fullname | `""` | +| `ingest.fullnameOverride` | String to fully override elasticsearch.ingest.fullname | `""` | +| `ingest.servicenameOverride` | String to fully override elasticsearch.ingest.servicename | `""` | +| `ingest.annotations` | Annotations for the ingest statefulset | `{}` | +| `ingest.containerPorts.restAPI` | Elasticsearch REST API port | `9200` | +| `ingest.containerPorts.transport` | Elasticsearch Transport port | `9300` | +| `ingest.updateStrategy.type` | Ingest-only nodes statefulset strategy type | `RollingUpdate` | +| `ingest.resources.limits` | The resources limits for the ingest-only containers | `{}` | +| `ingest.resources.requests` | The requested resources for the ingest-only containers | `{}` | +| `ingest.heapSize` | Elasticsearch ingest-only node heap size. 
| `128m` | +| `ingest.podSecurityContext.enabled` | Enabled ingest-only pods' Security Context | `true` | +| `ingest.podSecurityContext.fsGroup` | Set ingest-only pod's Security Context fsGroup | `1001` | +| `ingest.containerSecurityContext.enabled` | Enabled ingest-only containers' Security Context | `true` | +| `ingest.containerSecurityContext.runAsUser` | Set ingest-only containers' Security Context runAsUser | `1001` | +| `ingest.containerSecurityContext.runAsNonRoot` | Set ingest-only containers' Security Context runAsNonRoot | `true` | +| `ingest.hostAliases` | ingest-only pods host aliases | `[]` | +| `ingest.podLabels` | Extra labels for ingest-only pods | `{}` | +| `ingest.podAnnotations` | Annotations for ingest-only pods | `{}` | +| `ingest.podAffinityPreset` | Pod affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `ingest.nodeAffinityPreset.key` | Node label key to match. Ignored if `ingest.affinity` is set | `""` | +| `ingest.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `ingest.affinity` is set | `[]` | +| `ingest.affinity` | Affinity for ingest-only pods assignment | `{}` | +| `ingest.nodeSelector` | Node labels for ingest-only pods assignment | `{}` | +| `ingest.tolerations` | Tolerations for ingest-only pods assignment | `[]` | +| `ingest.priorityClassName` | ingest-only pods' priorityClassName | `""` | +| `ingest.schedulerName` | Name of the k8s scheduler (other than default) for ingest-only pods | `""` | +| `ingest.terminationGracePeriodSeconds` | In seconds, time the given to the Elasticsearch ingest pod needs to terminate gracefully | `""` | +| `ingest.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `ingest.podManagementPolicy` | podManagementPolicy to manage scaling operation of Elasticsearch ingest pods | `Parallel` | +| `ingest.startupProbe.enabled` | Enable/disable the startup probe (ingest-only nodes pod) | `false` | +| `ingest.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (ingest-only nodes pod) | `90` | +| `ingest.startupProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` | +| `ingest.startupProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` | +| `ingest.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` | +| `ingest.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.livenessProbe.enabled` | Enable/disable the liveness probe (ingest-only nodes pod) | `true` | +| `ingest.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (ingest-only nodes pod) | `90` | +| `ingest.livenessProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` | +| 
`ingest.livenessProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` | +| `ingest.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` | +| `ingest.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.readinessProbe.enabled` | Enable/disable the readiness probe (ingest-only nodes pod) | `true` | +| `ingest.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (ingest-only nodes pod) | `90` | +| `ingest.readinessProbe.periodSeconds` | How often to perform the probe (ingest-only nodes pod) | `10` | +| `ingest.readinessProbe.timeoutSeconds` | When the probe times out (ingest-only nodes pod) | `5` | +| `ingest.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) | `1` | +| `ingest.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `ingest.customStartupProbe` | Override default startup probe | `{}` | +| `ingest.customLivenessProbe` | Override default liveness probe | `{}` | +| `ingest.customReadinessProbe` | Override default readiness probe | `{}` | +| `ingest.command` | Override default container command (useful when using custom images) | `[]` | +| `ingest.args` | Override default container args (useful when using custom images) | `[]` | +| `ingest.lifecycleHooks` | for the ingest-only container(s) to automate configuration before or after startup | `{}` | +| `ingest.extraEnvVars` | Array with extra environment variables to add to ingest-only nodes | `[]` | +| `ingest.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ingest-only nodes | `""` | +| `ingest.extraEnvVarsSecret` | Name of existing Secret containing extra env 
vars for ingest-only nodes | `""` | +| `ingest.extraVolumes` | Optionally specify extra list of additional volumes for the ingest-only pod(s) | `[]` | +| `ingest.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ingest-only container(s) | `[]` | +| `ingest.sidecars` | Add additional sidecar containers to the ingest-only pod(s) | `[]` | +| `ingest.initContainers` | Add additional init containers to the ingest-only pod(s) | `[]` | +| `ingest.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `ingest.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` | +| `ingest.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | +| `ingest.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `ingest.autoscaling.enabled` | Whether to enable horizontal pod autoscale | `false` | +| `ingest.autoscaling.minReplicas` | Configure a minimum amount of pods | `3` | +| `ingest.autoscaling.maxReplicas` | Configure a maximum amount of pods | `11` | +| `ingest.autoscaling.targetCPU` | Define the CPU target to trigger the scaling actions (utilization percentage) | `""` | +| `ingest.autoscaling.targetMemory` | Define the memory target to trigger the scaling actions (utilization percentage) | `""` | +| `ingest.service.enabled` | Enable Ingest-only service | `false` | +| `ingest.service.type` | Elasticsearch ingest-only service type | `ClusterIP` | +| `ingest.service.ports.restAPI` | Elasticsearch service REST API port | `9200` | +| `ingest.service.ports.transport` | Elasticsearch service transport port | `9300` | +| `ingest.service.nodePorts.restAPI` | Node port for REST API | `""` | +| `ingest.service.nodePorts.transport` | Node port for transport | `""` | +| `ingest.service.clusterIP` 
| Elasticsearch ingest-only service Cluster IP | `""` | +| `ingest.service.loadBalancerIP` | Elasticsearch ingest-only service Load Balancer IP | `""` | +| `ingest.service.loadBalancerSourceRanges` | Elasticsearch ingest-only service Load Balancer sources | `[]` | +| `ingest.service.externalTrafficPolicy` | Elasticsearch ingest-only service external traffic policy | `Cluster` | +| `ingest.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `ingest.service.annotations` | Additional custom annotations for Elasticsearch ingest-only service | `{}` | +| `ingest.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `ingest.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingest.ingress.enabled` | Enable ingress record generation for Elasticsearch | `false` | +| `ingest.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingest.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingest.ingress.hostname` | Default host for the ingress record | `elasticsearch-ingest.local` | +| `ingest.ingress.path` | Default path for the ingress record | `/` | +| `ingest.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | +| `ingest.ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingest.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingest.ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingest.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingest.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingest.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingest.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingest.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | +| `metrics.enabled` | Enable prometheus exporter | `false` | +| `metrics.nameOverride` | Metrics pod name | `""` | +| `metrics.fullnameOverride` | String to fully override common.names.fullname | `""` | +| `metrics.image.registry` | Metrics exporter image registry | `docker.io` | +| `metrics.image.repository` | Metrics exporter image repository | `bitnami/elasticsearch-exporter` | +| `metrics.image.tag` | Metrics exporter image tag | `1.5.0-debian-11-r80` | +| `metrics.image.digest` | Metrics exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Metrics exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Metrics exporter image pull secrets | `[]` | +| `metrics.annotations` | Annotations for metrics | `{}` | +| `metrics.extraArgs` | Extra arguments to add to the default exporter command | `[]` | +| `metrics.hostAliases` | Add deployment host aliases | `[]` | +| `metrics.schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `metrics.priorityClassName` | Elasticsearch metrics exporter pods' priorityClassName | `""` | +| `metrics.service.type` | Metrics exporter endpoint service type | `ClusterIP` | +| `metrics.service.port` | Metrics exporter endpoint service port | `9114` | +| `metrics.service.annotations` | Provide any additional annotations which may be required. | `{}` | +| `metrics.podAffinityPreset` | Metrics Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.podAntiAffinityPreset` | Metrics Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.type` | Metrics Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.nodeAffinityPreset.key` | Metrics Node label key to match Ignored if `affinity` is set. | `""` | +| `metrics.nodeAffinityPreset.values` | Metrics Node label values to match. Ignored if `affinity` is set. | `[]` | +| `metrics.affinity` | Metrics Affinity for pod assignment | `{}` | +| `metrics.nodeSelector` | Metrics Node labels for pod assignment | `{}` | +| `metrics.tolerations` | Metrics Tolerations for pod assignment | `[]` | +| `metrics.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` | +| `metrics.resources.limits` | The resources limits for the container | `{}` | +| `metrics.resources.requests` | The requested resources for the container | `{}` | +| `metrics.livenessProbe.enabled` | Enable/disable the liveness probe (metrics pod) | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (metrics pod) | `60` | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `5` | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.readinessProbe.enabled` | Enable/disable the readiness probe (metrics pod) | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (metrics pod) | `5` | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.startupProbe.enabled` | Enable/disable the startup probe (metrics pod) | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated (metrics pod) | `5` | +| `metrics.startupProbe.periodSeconds` | How often to perform the probe (metrics pod) | `10` | +| `metrics.startupProbe.timeoutSeconds` | When the probe times out (metrics pod) | `1` | 
+| `metrics.startupProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | `5` | +| `metrics.startupProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) | `1` | +| `metrics.customStartupProbe` | Custom liveness probe for the Web component | `{}` | +| `metrics.customLivenessProbe` | Custom liveness probe for the Web component | `{}` | +| `metrics.customReadinessProbe` | Custom readiness probe for the Web component | `{}` | +| `metrics.podAnnotations` | Metrics exporter pod Annotation and Labels | `{}` | +| `metrics.podLabels` | Extra labels to add to Pod | `{}` | +| `metrics.podSecurityContext.enabled` | Enabled Elasticsearch metrics exporter pods' Security Context | `true` | +| `metrics.podSecurityContext.fsGroup` | Set Elasticsearch metrics exporter pod's Security Context fsGroup | `1001` | +| `metrics.containerSecurityContext.enabled` | Enabled Elasticsearch metrics exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set Elasticsearch metrics exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set Elasticsearch metrics exporter container's Security Context runAsNonRoot | `true` | +| `metrics.command` | Override default container command (useful when using custom images) | `[]` | +| `metrics.args` | Override default container args (useful when using custom images) | `[]` | +| `metrics.extraEnvVars` | Array with extra environment variables to add to Elasticsearch metrics exporter nodes | `[]` | +| `metrics.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Elasticsearch metrics exporter nodes | `""` | +| `metrics.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Elasticsearch metrics exporter nodes | `""` | +| `metrics.extraVolumes` | Optionally specify extra list of 
additional volumes for the Elasticsearch metrics exporter pod(s) | `[]` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Elasticsearch metrics exporter container(s) | `[]` | +| `metrics.sidecars` | Add additional sidecar containers to the Elasticsearch metrics exporter pod(s) | `[]` | +| `metrics.initContainers` | Add additional init containers to the Elasticsearch metrics exporter pod(s) | `[]` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.prometheusRule.enabled` | Creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions 
| `[]` | + +### Init Container Parameters + +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `11-debian-11-r97` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the container | `{}` | +| `sysctlImage.enabled` | Enable kernel settings modifier image | `true` | +| `sysctlImage.registry` | Kernel settings modifier image registry | `docker.io` | +| `sysctlImage.repository` | Kernel settings modifier image repository | `bitnami/bitnami-shell` | +| `sysctlImage.tag` | Kernel settings modifier image tag | `11-debian-11-r97` | +| `sysctlImage.digest` | Kernel settings modifier image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `sysctlImage.pullPolicy` | Kernel settings modifier image pull policy | `IfNotPresent` | +| `sysctlImage.pullSecrets` | Kernel settings modifier image pull secrets | `[]` | +| `sysctlImage.resources.limits` | The resources limits for the container | `{}` | +| `sysctlImage.resources.requests` | The requested resources for the container | `{}` | + +### Kibana Parameters + +| Name | Description | Value | +| ---------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------- | +| `kibana.elasticsearch.hosts` | Array containing hostnames for the ES instances. Used to generate the URL | `[]` | +| `kibana.elasticsearch.port` | Port to connect Kibana and ES instance. Used to generate the URL | `{{ include "elasticsearch.service.ports.restAPI" . }}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set name=my-elastic,client.service.port=8080 \ + my-repo/elasticsearch +``` + +The above command sets the Elasticsearch cluster name to `my-elastic` and REST port number to `8080`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml my-repo/elasticsearch +``` + +> **Tip**: You can use the default [values.yaml](values.yaml). + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. 
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Change ElasticSearch version
+
+To modify the ElasticSearch version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/elasticsearch/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
+
+### Default kernel settings
+
+Currently, Elasticsearch requires some changes in the kernel of the host machine to work as expected. If those values are not set in the underlying operating system, the ES containers fail to boot with ERROR messages. More information about these requirements can be found in the links below:
+
+- [File Descriptor requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html)
+- [Virtual memory requirements](https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html)
+
+This chart uses a **privileged** initContainer to change those settings in the Kernel by running: `sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536`.
+You can disable the initContainer using the `sysctlImage.enabled=false` parameter.
+
+### Enable bundled Kibana
+
+This Elasticsearch chart contains Kibana as a subchart; you can enable it just by setting the `global.kibanaEnabled=true` parameter.
+To see the notes with some operational instructions from the Kibana chart, please use the `--render-subchart-notes` flag as part of your `helm install` command; this way you can see the Kibana and ES notes in your terminal.
+
+When enabling the bundled kibana subchart, there are a few gotchas that you should be aware of, listed below.
+ +#### Elasticsearch rest Encryption + +When enabling elasticsearch' rest endpoint encryption you will also need to set `kibana.elasticsearch.security.tls.enabled` to the SAME value along with some additional values shown below for an "out of the box experience": + +```yaml +security: + enabled: true + # PASSWORD must be the same value passed to elasticsearch to get an "out of the box" experience + elasticPassword: "" + tls: + # AutoGenerate TLS certs for elastic + autoGenerated: true + +kibana: + elasticsearch: + security: + auth: + enabled: true + # default in the elasticsearch chart is elastic + kibanaUsername: "" + kibanaPassword: "" + tls: + # Instruct kibana to connect to elastic over https + enabled: true + # Bit of a catch 22, as you will need to know the name upfront of your release + existingSecret: RELEASENAME-elasticsearch-coordinating-crt # or just 'elasticsearch-coordinating-crt' if the release name happens to be 'elasticsearch' + # As the certs are auto-generated, they are pemCerts so set to true + usePemCerts: true +``` + +At a bare-minimum, when working with kibana and elasticsearch together the following values MUST be the same, otherwise things will fail: + +```yaml +security: + tls: + restEncryption: true + +# assumes global.kibanaEnabled=true +kibana: + elasticsearch: + security: + tls: + enabled: true +``` + +### Adding extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: ELASTICSEARCH_VERSION + value: 7.0 +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. + +### Using custom init scripts + +For advanced operations, the Bitnami Elasticsearch charts allows using custom init scripts that will be mounted inside `/docker-entrypoint.init-db`. 
You can include the file directly in your `values.yaml` with `initScripts`, or use a ConfigMap or a Secret (in case of sensitive data) for mounting these extra scripts. In this case, use the `initScriptsCM` and `initScriptsSecret` values.
+
+```console
+initScriptsCM=special-scripts
+initScriptsSecret=special-scripts-sensitive
+```
+
+### Snapshot and restore operations
+
+As it's described in the [official documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository), it's necessary to register a snapshot repository before you can perform snapshot and restore operations.
+
+This chart allows you to configure Elasticsearch to use a shared file system to store snapshots. To do so, you need to mount a RWX volume on every Elasticsearch node, and set the parameter `snapshotRepoPath` with the path where the volume is mounted. In the example below, you can find the values to set when using an NFS Persistent Volume:
+
+```yaml
+extraVolumes:
+  - name: snapshot-repository
+    nfs:
+      server: nfs.example.com # Please change this to your NFS server
+      path: /share1
+extraVolumeMounts:
+  - name: snapshot-repository
+    mountPath: /snapshots
+snapshotRepoPath: "/snapshots"
+```
+
+### Sidecars and Init Containers
+
+If you have a need for additional containers to run within the same pod as Elasticsearch components (e.g. an additional metrics or logging exporter), you can do so via the `XXX.sidecars` parameter(s), where XXX is a placeholder you need to replace with the actual component(s). Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+Similarly, you can add extra init containers using the `initContainers` parameter.
+ +```yaml +initContainers: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname +``` + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters. + +## Persistence + +The [Bitnami Elasticsearch](https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch) image stores the Elasticsearch data at the `/bitnami/elasticsearch/data` path of the container. + +By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. See the [Parameters](#parameters) section to configure the PVC. + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. 
+ +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 19.0.0 + +The new version of this chart no longer supports elasticsearch-curator, this repository has been deprecated. + +### To 18.0.0 + +This major release refactors the bitnami/elasticsearch chart, adding some organization and functional changes. + +- Each role has now the same structure: its Statefulset, a headless service (for FQDN, it gives each node an individual Advertised name, required for TLS verification), its own ServiceAccount (BWC), and HorizontalPodAutoscaling. +- Previously, the chart would alternate between a Coordinating service and an All-nodes service for traffic exposure. This logic has been replaced with a single Traffic exposure service, that will have coordinating-only nodes as backend pods, or master pods if no coordinating nodes are enabled. +- Master-eligible nodes can now be deployed as multi-role nodes using the setting masterOnly. This allows the creation of different topologies, smaller clusters with HA (3 multi-role master-eligible nodes), and single-node deployments. +- Renamed several values to be in line with the rest of the catalog. + +This major release also upgrades Elasticsearch to its version 8.x.x and the updates Kibana subchart. + +- Upgrade to Elasticsearch 8 +- Upgrade Kibana subchart. + +In addition, several modifications have been performed adding missing features and renaming values, in order to get aligned with the rest of the assets in the Bitnami charts repository. + +The following values have been modified: + +- `coordinating.service.*` have been renamed as `service.*`. This service will be backed by coordinating nodes if enabled, or master nodes if not. +- `master.service.*` has been removed. +- `data.service.*` has been removed. 
+- `master.ingress.*` has been renamed as `ingress.*`. This ingress will be backed by the coordinating/master service previously mentioned.
+- In addition, an Ingest-only service and ingress have been added, for use cases where separated ingestion and search channels are needed.
+- `global.coordinating.name` has been renamed as `global.elasticsearch.service.name`.
+- `name` has been renamed as `clusterName`.
+- `extraEnvVarsConfigMap` has been renamed as `extraEnvVarsCM`.
+- `{master/data/ingest/coordinating}.replicas` has been renamed as `{master/data/ingest/coordinating}.replicaCount`.
+- `{master/data/ingest/coordinating}.securityContext` has been separated in two different values: `podSecurityContext` and `containerSecurityContext`.
+- `{master/data/ingest/coordinating}.updateStrategy` is now interpreted as an object. `rollingUpdatePartition` has been removed and has to be configured inside the updateStrategy object when needed.
+- Default values for `kibana.elasticsearch.hosts` and `kibana.elasticsearch.port` have been modified to use the new helpers.
+- `{master/data/ingest/coordinating/curator/metrics}.name` has been renamed as `{master/data/ingest/coordinating/curator}.nameOverride`.
+
+### To 17.0.0
+
+This version bumps the major version of the Kibana Helm Chart bundled as a dependency; [here](https://github.com/bitnami/charts/tree/main/bitnami/kibana#to-900) you can see the changes implemented in this Kibana major version.
+
+### To 16.0.0
+
+This version replaces the Ingest and Coordinating Deployments with Statefulsets. This change is required so Coordinating and Ingest nodes have their services associated, required for TLS hostname verification.
+
+We haven't encountered any issues during our upgrade test, but we recommend creating volume backups before upgrading this major version, especially for users with additional volumes and custom configurations.
+ +Additionally, this version adds support for X-Pack Security features such as TLS/SSL encryption and basic authentication. + +### To 15.0.0 + +From this version onwards, Elasticsearch container components are now licensed under the [Elastic License](https://www.elastic.co/licensing/elastic-license) that is not currently accepted as an Open Source license by the Open Source Initiative (OSI). + +Also, from now on, the Helm Chart will include the X-Pack plugin installed by default. + +Regular upgrade is compatible from previous versions. + +### To 14.0.0 + +This version standardizes the way of defining Ingress rules in the Kibana subchart. When configuring a single hostname for the Ingress rule, set the `kibana.ingress.hostname` value. When defining more than one, set the `kibana.ingress.extraHosts` array. Apart from this case, no issues are expected to appear when upgrading. + +### To 13.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+- 
+- 
+- 
+
+### To 12.0.0
+
+Several changes were introduced that break backwards compatibility:
+
+- Port names were prefixed with the protocol to comply with Istio (see ).
+- Labels are adapted to follow the Helm charts best practices.
+- Elasticsearch data pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+### To 11.0.0
+
+Elasticsearch master pods are now deployed in parallel in order to bootstrap the cluster and be discovered.
+
+The field `podManagementPolicy` can't be updated in a StatefulSet, so you need to destroy it before you upgrade the chart to this version.
+
+```console
+kubectl delete statefulset elasticsearch-master
+helm upgrade my-repo/elasticsearch
+```
+
+### To 10.0.0
+
+In this version, Kibana was added as a dependent chart. More info about how to enable and work with this bundled Kibana in the ["Enable bundled Kibana"](#enable-bundled-kibana) section.
+
+### To 9.0.0
+
+Elasticsearch master nodes store the cluster status at `/bitnami/elasticsearch/data`.
Among other things this includes the UUID of the elasticsearch cluster. Without a persistent data store for this data, the UUID of a cluster could change if k8s node(s) hosting the es master nodes go down and are scheduled on some other master node. In the event that this happens, the data nodes will no longer be able to join a cluster as the uuid changed resulting in a broken cluster. + +To resolve such issues, PVC's are now attached for master node data persistence. + +--- + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In [4dfac075aacf74405e31ae5b27df4369e84eb0b0](https://github.com/bitnami/charts/commit/4dfac075aacf74405e31ae5b27df4369e84eb0b0) the `apiVersion` of the deployment resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +### To 7.4.0 + +This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 7.0.0 + +This version enabled by default the initContainer that modify some kernel settings to meet the Elasticsearch requirements. More info in the ["Default kernel settings"](#default-kernel-settings) section. +You can disable the initContainer using the `sysctlImage.enabled=false` parameter. + +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. 
+Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is elasticsearch: + +```console +kubectl patch deployment elasticsearch-coordinating --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployment elasticsearch-ingest --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployment elasticsearch-master --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployment elasticsearch-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl delete statefulset elasticsearch-data --cascade=false +``` + +## License + +Copyright © 2023 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/knowlg-automation/helm_charts/elasticsearch/charts/common-2.2.4.tgz b/knowlg-automation/helm_charts/elasticsearch/charts/common-2.2.4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..9016f32e2cfd76371466da567554893ec977e7c3 GIT binary patch literal 12364 zcmV-SFtg7eiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMa0ciT3yDBhp>SImb_Z>(%cNp|Aw^{)4G-6Y-hX`AHPX>#^8 z_q7lSNvJ7;1wc94#P4r^2j58Wo4(kxiSPHK_BU@({&%nSxVL}xRa_McA?6X62Q zl5e*E3KJ|`BZk-l{(+MjD5irkWiVkh&qz7} z&5n@J35j5Y#RbMGjEG3lj6fvI^Uwo$J0)EI1i~Su0%Dvcbb&?*hS36u;5ZqF-jvSp zAxwqH_+h_4A!3@3!idiL24r7@>{SbzoP-I)t1}o(M+1_x!CjU>LJ{jK2|7|>G5DIhLWiYfEgnh$KJ3OM z65yZXg%AGsuV4G%>v_nB-Gp!f{&&ZxzWn!p`@J3nmzSQx=F_J@H@LC|StVn5j2Xl6 zi=2^kau!W-oF^omoJ>+$yngu+N4c_6J%9joC7ecB{+7=SiD>%rW5zJ&)>_t20Oxpd z2%kPdc#aovd0Bh`pc!UJP$unZ=e6$Uq4Nen_KKl&f+5tOtB_X2(&tAgkEvX#HW75YBLdBSG0)%w~q*!PhjF zOGfV+16Pch6m}ncEiE@Pf_m_DpU4>S9}vn$ZJVa&>g#%1BXj`bgySky%`clk)io+m z4ZWApUeJstbh7vXZ$NsdQaJ$bJ4NNS$y=$a@dfuKe3q3_o=DluN0_BpU>*|MpHePO zKJ2gRrc(47jV+t?<=?3wo3mX;{31^y^}c(67N9keVd9u$bd;ZaFJAHwoOQMQ4YQ$_W&2TkO!J}Z?YA@!NnIf(>o z6a}X(M#B!* zsmj+8cDF=t^J#l~QdybA?~YWKGym;L<>od<&@O%3lf6H?EV;JeU0>{4gzkzWx3<95 zn!HhATOwXtldRjZvUE#ZFtVJvaC1 zDyII?h2a)lQYvtYDXr%Gw&p?j+{N>UvUX^p*akWwyQ@G2w6@obLSb2`fq!{GmQ814;&f#0-y zy7?gN6#$j(0={mtKOc^;KtsPu)~>Qmr}nC}O0l@0?A&yGJt~>vuo1|e-t=X z(X938We_FlBs~AKG7Gm!x`@&AcRE^=Xtwli^43TitLu@<7Tq3sbhg3_r|SxzQ(AQYw6 zC}RDIFT=3y4gcr08*WplThYSzRJ-lD>-4+h=fJQ@m|9b+*a$@%=$q@$_qxGcOerJ( zQVDz;k#Je{?UC~N>5Fv<7_oQ*`dy#(P)16&--vV_HMd8#<2NT~n9WIq*Q6cFh^??b z2?yICA;S~G1>2neI_qwajQ^0VO;dpqctc}2&IRQWN-$fKs?!W@V3F=`O3otI>&~!} za(fM08O%vK>Cg96krl*kROlk>%F62x2M57&HC4!Q$vKX7mahp60O7D7?*A$Dj^P3= zKv0OLI6ChreSvgdFG&TXaUS6q@>GzdD6k*yhXc4EVp{X4B^X2H$8a$v(G(=95lK0q z6i^(i_$^An|8#lj0~ATlp=a`nC0c~LZ6gi{7@pvdD##5aB8b1whYYmyn_Ty1?Y zxltgdNPq;X6xaysa-rw_%+UlN>P6c*N=U4XftRv@!2k}A8Y@d>A#O6OLfLSFLj|%} zaZ}&)Ds+a#G!!(nSPcd~_#8(Ji{g)0DjCKf6P(1ViY^)q27Wm|H!vLD6o$hMFdP;b 
zHc&B=tOp4f;CT}hw_C+(WXp&&Nzkh<@JpNE)aH%b1|dTX>Fm1z>a9TlW-184)(qiC zi2xltrR6kD(g~M_gPQN8&GA@HK15}!Yt|zIE@=8tfS59RVJAu1uE7!lcwYQfw)X}x zoC;g0+dy1J(sG5fPAZjz@H-``N?K@A!Ck*aY1~rZT8!Z-PB7v)JmEiUJSyuC4-}4u zriVx1e@NdPpPjvbefr`IUY?%5K0Sng(p<}9#^{{H7#1{V5amo!p_Jd6kW0IQna2I@ zMvsgsS$XMyNTL3`q5^B>faf>`G!~cv!_v4yNuv7x7-$kh#BhvLK~TbBh8B_mQjBA5 zn29M~*th36lQtt{(n#S#_GV6#e1;)P5Sj66o&a=FSMEvVA&I+qPmr zqS!(e(}-90P)%@(8A|$7dJzcP7mVhk1oJ7SBADW2rZ-6hg!3Hp{!ZEXK(k+PF(nBO zig7XJ&B*X8bn)9-C6hZ+Db{4j&oD+LxnI@)nxCfqM`mcUy}cjbthWE24u((b_TRz% z{@>eqHn9Jks7uCZCKaG$6}3CH&HONor2gl_E@*2*{XC^qlzD{K{t;8GOM|ah@BMm0 zfHE+;=H_a9h1F)lObcm2$@wrWFbM4@w{S)|5wxSY{a9AuSQ9xac??J_7&0YIBX- z=c5)tExoQaUhPS9O7g$u{cpMa&##rOkDY^Q1+PZztJ#7Ezd6Kp0+vQ}UeM?qGdNM~ zUX0M0vh_i>#$h+c8N(40I6edwKC19J!)c7!n>!I`;ZOEbxcgB;)zePLmwUWR zTD7axT2)OE{{b(c0@|+=B1&>DFw;w6Ose^{s;wwq-3=`g9-wH3{hq7d{#}xhn)bih zn0|ZfKf~d0ud)8~bno8&zl~>u^&iKU=K`fMVzEB_fOo0`!%Mcf)&b1}2?`shuEgXA z9y)W;6eqKgPxU#aORA{vrcSdxm0*xY#;&sI6xL8{me;B!4n~V=4U=@;9`iH@Aqc8s z_cS$|zCxbiDCFk62>1L4mG;5sQMJS>+ExybxGs4&CDs2xh4^pwH1)q`iY==DmGPga z&+6wtK6$!-zyJSMo(=T>%a57VP|~7JO@KrtnyaJZJJpn{V}i1X*$S`fDs5uTPg(Ny z95Y5@44w;GHVCF*((Th#$G zp;WvJwN@N_D{W`DBl>R}i1-W)X9Gr%*w-ah@a#_#;ON858N4WSQ!0 z=)K1px+*VZ8xnI~Y(e03hTW)K={0J%B_T6nf>2^gIP4lyy?Avd%c@9NhNE8Sos4ZR zpaxQGvT$fm$^e(5yV^z}kvG^br@1WHa3n3C*pCIT1hp-wjRI@Y-E|=!q$USsZGLs| zEXbjuRHx1Y*UHA{vD=l_+*o$cVN82!nry~| zqRVn7FH@J)W!{|zg}CE zTA^-Rq>ht>UbtD+iWPY#XA7O?PBnRAthY4I@EI7}NuqV9kG6i+hPx4ktWQ8;ud9p$JnU^m ziZ5;Ysi$fG>-{FTxBfdk*xPTM|2}wjAOE?PX9N4cSpL-^OiUJW<@$1z=yEkh0_nZr}cw7*_ zr>cxlbbf(YTrR+qgos6`F!&Qm$MiZkAnn-R>vk1Q8^hu5;VS8P^p{{k#@nS8dbM71kdlj9d zPiZ~JD)rmHS1h-oSkCdHeLLtYC*C9rS$(0Zt`_g}@`U_ApGAClq%U}=h;571RF7P4 z$W66IHR-bnQMULK$}+qC!O2UNxEujVWBhShtp^3t-KnK3h0TIkEo;RFUdn)B!3dsX z$ysWTYQivz7Z52^K3+>E%k8pyMa4<&s%&<)E^}1|!8f~A<9F3llk5AZhb?=m_xv9C z2~HceoL#*oS}R5=7y55DsE(&RBC`VEeSpu(~b^+-M}PL+a-DP~yb!&@2?4qQ;l z?#pDM2DwET^z}6^p&(HEssE~h{jCbEu9WO9Pn&h$qLdy34kw4w1mk}s1%>?(-jgJO 
zj6~-!CX5Tc?;8uuOhTZM&sYP~sOm9+K;;*e+EU`IznGp*3~$E3P#V)&K{yRY4<`ih z9i<6IX%*D_DONjn1(l8^@WpII_1fGTpv&k*i}`@}hrDX2D$SC4hjMbz0$j{O4KCFRYWHLe zsD*`PTQpB&$Ww_$u`FPD&r9R!z%?8sNvFtE$noQPzp;_gf~cloYp3@^I8~yNEF5X6k<0Y>7*c#; zC8%JO5{lx3EouQ9E#{OT4j?q$FA5tTu$=XSHc{&HXf8;?!wD8Y;Klch&ZJg%uixT6 zgf-RgOkkz6($(xcd!IvR!>WXA+P!H(P1CJ?SlYTCo)<_UP@bYB`4EZE1FY@W16B ze26L5=bjn0S4{Hy`-jxG-L$PQJA%Pv26v3Jvjwf}Wk*?O?0ILW36hP|Ht`6{1bMj{yYU^maQW~tBSY@J{IF8NjEc|=GYUVd455-%A4juggHA*n>cpnG;YHh zo~E8mHuWz}JMYZKPNTMF3#|jcQeV*RR0otV?E`M=`>Znf?C1y1$!*%O@tv0y!WXy-)oeD@?8*N0eD zS3523n05B%Ua{Z3r>XzT^KP~^1FX>hgM-1dy8eH%|Lk7>-^R0%{!e4fiWLRDU{fs5 zSPoj2NZoRj5>(GpYMBb!o$sz$pxH!CgE!Xj=VdUo)O;dK>=*agt+v}E-734`yzqSf ziDpT!z4jeCd8sM?I@W)i`@aTzgU0@^`}<#S=h;C1^&USQxoq?sD5h@6B6PC$KNq)C(OAI}svgv;Nv~R_(o|FVp!u#O!6(_+v(y zFt?g(0GF3X4|dP<5spNnh+~5SPTKy`t++L3+6l~>Ls^U)4FQGUIZdUmJn}!O_)jQH z=rcerFE4%g3?szx!4nA7si5_E%6BJE4U`jHQeRX~WR?&Wpy2Wkbwz+Qe@%y-8ft~$IpX;HMpd=Y5Y!J7theFL8J*C| z^8>p|?1Ema2%*u_$InvuLT2!soyVTGnVYy$groIl+2k4o(GO zCOS^X@bFvy(1a9$q+qY_f3SQpwiYGbwc*tgnJR}kAC9^rqLskuT#jFvj-ab3h_ED# zssr?vhE5_#B}5ODnUW@WsYFO+049*qG%%&tVY%+Sg<2}#IbIB_e5exmHs!=hd-ViR z(|whbQ+#81jv2>4$d_tOK>akOs$Aw4tq=0mawx*qHRoO>Yr$cMTJ~tzTsB`CX*WU# zcf%09f4<1mwEr&^%l!W2D91NBTfdogR}cfMMB9oSGC`G^ZqPQrp}OnW+mZRdOg!=Y-KzZ=6!wGrDge zGPJ>r+e?U460bU^DKM=BvX#Io={d2B`#Ix($kWjOPPSVA*9dWr(wL!ZO$ICU|I@+n zpr-$y9Xz{V|GSlE1O0EZ{xCeZTv4XXlvl<6DP>D`+brT` zUCJa`LwKRxyhGjJ&N*^j;yO-eZTnk`3sXWcAR8MiEAULZ?^=L#*&2j+^fSlo4Z~yd zaV;?CcoFJKHApr)*v%U*ouM*+t=48Wg;kjqlxf4^L4EI^I)S>ri&U+#_UuPP5Pc|Z zb+=$6iicYMfV-B7ungqdfVMU8MsmxpY&P~*yR{Gc`h#KNtT761Y_NMoY{!<+ zz7k}Vny+J9M_Y*dx^%Z`&3e2yqPaU2SPP=C70j{VcV7D3POKR7x|FpIXZ6c{%H{pD z!vC8gMo>JuW+-rl|M%=*uYUjQv;8Oc{@<-USMmRTl3elPyRCbFmfP-(?`J#O>G`SM zFW2+>Y#TS@^Rs0W!7Ww47r9E`gkNX27k* za<*s;tb_Lzf;kp!3%siYa@GX1TWQ2 zbA#em>B4p7%$<=o5v-1STMo=g`Q2b-OEdec9Muy26(j0u$+i)5-+$RK+=}=QO(*n* z<3Gc}lg9bq&-M=n_wk?Gc&-%xkzDcOyZ>N#@|W!gfeg#togk3g+wMq^M*EtvAh)R- z3J5jHyIDZU)Za=#s0yniAhbM;Qw7lx##tFus&%S6sC3r?LNAa&BgE_VWApm90!nAt 
zZtQ4=lJvKgkdAXPeOdB~1E=7NjxdyZ{j!_QHi?(GSgi0xZy@@mV-PM1cQPzyRNcQp zSWH)Nh3q=57I0Pvt-Ev*X1nUZTTy)GYcEP~=# z`7K7^3?lcjw{?w;?Zw`d@ip3t+qeeaSBVK2uwQrV&2(aealdNptpmz+V{ZoR9gn@O z9ii#6pl>olvt>W0vk$AnG>$-BAxv|lA<<d&WeyWoi z`Bd$#g!o)YSIqdTAamt>FBi&vw%2{OSJT5+68&!w)cVr7UKP!Gr=m(l{Jk^*|iV@qoXF}h8+n+##x zjCfr;a-rqwE6?X@H>1D0(Of@7SZKq$oyT?6XL6gq*f+CNR`a;Ekd~4v1?W1JeSjHAGZ+ zmqmHk{aOuGYJ2;Zmw}EIZ8*NtlI)`P)>;EAVRf?kg3**_u~y1 znI;z1@QR_3Gmsl`p*xD@=vCtgElt$NV+pZ04Ja&S&b3D1X2T21dv=!s4OcUtV;qqf z7x&L$iI|)b&1FFb&t-F7li6~PLwmzfCQGb1-#`@mD9xyIp%6X zt0jQ08M$%?>u!gyY_OwY5^gvI9cJYX21B;%9<1G?8_^zR(VvS2(bXD6*~(4PT*9~D z%2?Jg8LM|XwN+}COgmY-F$=Y~cmtZmHCC^tN~-4WSIot&{n?&&a|yknrmI6;0cy+o z&TTEby2@+yZNFl6>Fm=Amg!)LtS06zKP)TvW#Y%n#$N`hZ*k((N%ysRl>QhAGlr>hSCal*<9T|DQp&eYKL)6&rAcolOZQPT+u4NK- zjm-dmJA&bp-r6J4YIaIm*-j!I%-YbOhOZ!h>`?K8tcjvOJM*&v&|4$C~2lf1~y{CKk{{L+}S8@N1J1om6 zM}LHyzP`5IRe2@Y&uKay45V_|t($zsEx+oO-{tEU$qdFs3I+pXCU>+p+-_^bqP2BQ zzAj5o-*enbo3<`A?>w#z17yx*Z+qDuR2e@0?m@jZpYHu z>xie6W<_T_rM+Da6P#j(1djXbZ?iPlzOQOu7c+e`$izsX_9nXX9@eny%c)9V5giuM zv5LdAntM1&Ck=O@ZNX*sj`^k1zPeAHeevs86s#1<*&=Yu zme#QI=e^L}uF%w6jRtfGs@=0i)djdt|C~Uw>|v%nka4()P*!E99<#ja&FrPbb#1QH zCbjLD6QlBdH|`YrrSsFXn=qX>&DHLCW1aiK92Gl8r8t(O5 zP#PDS!)T0J{KU4@MKx9FEq$5J-y!BENp_%7`Y`aebeAJtM6>qchx9{gvk|}LAJP_2 zM>no9mm_B=5cWC}y{Zua4gTS`{9#kA6&Ez4IEIHoAk8A_ZXG?4#f=t_SfDfxr59GR zMmhSAcwf+rM7*EqqQ9!CkoR|FnVyP&PZ-TJsZuo8K|&`af)N%M zSSlQYh^`lU-p&rZ{qZj^PCf6p-+q&pwHM}w=; zcHkrxjK;ZIhwwb5rL`rJF*+wP=722GZKw~XILR>Ma4{v(6e5&L8%V|f)lP>wPG|-x zzAysWn&kp`G{rNdzU;us3{CJUrJ`g4h9`szwvgi`78yPS zuKXYmAjUjmL^(r;@PbC?YRyF#cf33zfl9|Gp^uB z<5WD<_~-?AULu4TLkkap2&}^b%Q3ZHZL$Dvkxn%b$(05ArXAQVx85LL2q5o;o)_a$ z?vQ(=*XSCg(S1?tA!G~-n!^RnlNb_mj^!7QG2oAaAt33PvYEEAw2I~KAnDaR$=k;% zkdzCQM%Vxw;X`kyU`veR0sA4U5?XAro<{ zbau8d(9iMWCzMGu>aY&Wye}*&4Ku@$)I8^Sp)|jm2;`U4X<2?GX%`Y2e>zf5&oW|b z+onKu2V7)#0E*zAEM#+Ce!z=@l#-UJ!xFJvYO43r7riivm%wq#iSjO$EULq3ToZw$ zfDGb~fzufVL1vgs02e4#LOGGO1(nd5&Jf5b=VU|@A{M$?*?Td?gz-8xq-kMCcc;|k 
zeM2ktJ_v$#d(cqj>c${If<`zoGZf0Q^{eV3e4U{`a$Fdd#mzQ3p({YxRJr2Xr#QktOKP+|=TMPZo@!8q?*QYOv51@m6-!nQpbY6lXw8{V* zSHCPFmmMA5V-vY;981u?<*&Y2P;K%YFP64=?u3fhZP09hDl-i)FBSc~9d{i;k+mjK zRpDPHP4LcLQ8WVf>h;@~XQB8gDo)NdR%{1@+g-CA46m?iyY3;ht1|Vjy?(34=4322 z(wMSH&YYANYwBjFwptXBI*TNUvh9>ALkVVrH+@s3ijxGS^$W9wS%guM>)Jpa`TpY? zEsuO(as0zt(KSQk^#&eTLH|{PsD;bBL;hYNb@*dOnE(gO(Su#-7e*pcmF*H*p!8gZ zB3x|lEqMU~3cqujO7xEWPwGOX(BZ;MU#d66@xc=a;-1$=g5i}&Fmy>U+zttbH$Z}6 zodhDKIjo90Wn7QlKoO3<+(-#$-IkYFNe?iJVDL zag5bssP5{%wxARgDTl`7RWXgCDboWWg5zW?ohrm)V^6Prz3*cu-1#JeI2<2}LoaXys1+6jqqrYQH2MW2su=xsXWP@+3 z+$8bfkZJeV8rzQ)Vbyg1NUcD?6mb|yuW{oj3iLh#wC%4kB1%(uxB)RLvgc^+HtjpqhNfooF;P|Nc8s9zxzQ(xd=*B^ho#r=0aSjH?0*$ z37zzH863!-s5XQ7ULdIXbHD#LHLd+fBAiNx@;S{GIz9hCBAPA!BB!x^D>a^H%EgeU zu}LmJ&QLVP7Q$n&`+)Ys0qn{GzWL<${^CitnW2T^K&g1u@GHs2A0wP8$&ctPONjQK z?W|!ygx)_506G##3A@D7PIIOdAmMq68i)P<#l=O46pA5blYXL$@cxe{&tJYedl~G7 zgTHy+&uM}=FXze8LQj#AQq~E&0I5SJ3~Sv^Qn+A5sFhnz$KnDp?8Sr&Mn<`)kjLUg zIJgC*N>0H)K7*4pAHF+2J2`voy+3*TkJmrHh4;s&r^m0}p1eGR*QfCO^{W>rZ%qQ54|&tD~;PImyXAh2ojV|ax}p(p>xbq ywUCY3jBq_oqBQmrG9$t$!$x<)uV@kX{<(kdpZn(u&;JVm0RR64N9#oZLID6;h-UHt literal 0 HcmV?d00001 diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/NOTES.txt b/knowlg-automation/helm_charts/elasticsearch/templates/NOTES.txt new file mode 100644 index 000000000..6b7e75c27 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/NOTES.txt @@ -0,0 +1,121 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if contains .Values.service.type "LoadBalancer" }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "service.type=LoadBalancer" you have most likely + exposed the Elasticsearch service externally. + + Please note that Elasticsearch does not implement a authentication + mechanism to secure your cluster. For security reasons, we strongly + suggest that you switch to "ClusterIP" or "NodePort". 
+-------------------------------------------------------------------------------
+{{- end }}
+{{- if not .Values.sysctlImage.enabled }}
+
+-------------------------------------------------------------------------------
+    WARNING
+
+    Elasticsearch requires some changes in the kernel of the host machine to
+    work as expected. If those values are not set in the underlying operating
+    system, the ES containers fail to boot with ERROR messages.
+
+    To check whether the host machine meets the requirements, run the command
+    below:
+
+    kubectl logs --namespace {{ include "common.names.namespace" . }} $(kubectl get --namespace {{ include "common.names.namespace" . }} \
+    pods -l app={{ template "common.names.name" . }},role=master -o jsonpath='{.items[0].metadata.name}') \
+    elasticsearch
+
+    You can adapt the Kernel parameters on your cluster as described in the
+    official documentation:
+
+    https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster
+
+    As an alternative, you can specify "sysctlImage.enabled=true" to use a
+    privileged initContainer to change those settings in the Kernel:
+
+    helm upgrade --namespace {{ include "common.names.namespace" . }} {{ .Release.Name }} my-repo/elasticsearch --set sysctlImage.enabled=true
+
+    Note that this requires the ability to run privileged containers, which is likely not
+    the case on many secure clusters.
To cover this use case, you can also set some parameters + in the config file to customize the default settings: + + https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-store.html + https://www.elastic.co/guide/en/cloud-on-k8s/master/k8s-virtual-memory.html + + For that, you can place the desired parameters by using the "config" block present in the values.yaml + +{{- else if .Values.sysctlImage.enabled }} + +------------------------------------------------------------------------------- + WARNING + + Elasticsearch requires some changes in the kernel of the host machine to + work as expected. If those values are not set in the underlying operating + system, the ES containers fail to boot with ERROR messages. + + More information about these requirements can be found in the links below: + + https://www.elastic.co/guide/en/elasticsearch/reference/current/file-descriptors.html + https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html + + This chart uses a privileged initContainer to change those settings in the Kernel + by running: sysctl -w vm.max_map_count=262144 && sysctl -w fs.file-max=65536 + +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ include "common.names.namespace" . 
}} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/elasticsearch/entrypoint.sh /opt/bitnami/scripts/elasticsearch/run.sh + +{{- else }} + + Elasticsearch can be accessed within the cluster on port {{ include "elasticsearch.service.ports.restAPI" . }} at {{ template "elasticsearch.service.name" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} + + To access from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "elasticsearch.service.name" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + curl http://$NODE_IP:$NODE_PORT/ +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ include "common.names.namespace" . }} -w {{ template "elasticsearch.service.name" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "elasticsearch.service.name" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + curl http://$SERVICE_IP:{{ include "elasticsearch.service.ports.restAPI" . }}/ +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ template "elasticsearch.service.name" . }} {{ include "elasticsearch.service.ports.restAPI" . 
}}:9200 & + curl http://127.0.0.1:9200/ +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.sysctlImage }} + +{{- end }} +{{ include "elasticsearch.validateValues" . }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/_helpers.tpl b/knowlg-automation/helm_charts/elasticsearch/templates/_helpers.tpl new file mode 100644 index 000000000..d0c27d458 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/_helpers.tpl @@ -0,0 +1,585 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ES image name +*/}} +{{- define "elasticsearch.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "elasticsearch.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.sysctlImage .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper ES exporter image name +*/}} +{{- define "elasticsearch.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper sysctl image name +*/}} +{{- define "elasticsearch.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctlImage "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "elasticsearch.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + + +{{/* +Name for the Elasticsearch service +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS 
naming spec). +Required for the Kibana subchart to find Elasticsearch service. +*/}} +{{- define "elasticsearch.service.name" -}} +{{- if .Values.global.kibanaEnabled -}} + {{- $name := .Values.global.elasticsearch.service.name -}} + {{- if contains $name .Release.Name -}} + {{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{- else -}} + {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- end -}} +{{- else -}} + {{- printf "%s" ( include "common.names.fullname" . ) | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Port number for the Elasticsearch service REST API port +Required for the Kibana subchart to find Elasticsearch service. +*/}} +{{- define "elasticsearch.service.ports.restAPI" -}} +{{- if .Values.global.kibanaEnabled -}} +{{- printf "%d" (int .Values.global.elasticsearch.service.ports.restAPI) -}} +{{- else -}} +{{- printf "%d" (int .Values.service.ports.restAPI) -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified master name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.master.fullname" -}} +{{- $name := default "master" .Values.master.nameOverride -}} +{{- if .Values.master.fullnameOverride -}} +{{- .Values.master.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default master service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.master.servicename" -}} +{{- if .Values.master.servicenameOverride -}} +{{- .Values.master.servicenameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-hl" (include "elasticsearch.master.fullname" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified coordinating name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.coordinating.fullname" -}} +{{- $name := default "coordinating" .Values.coordinating.nameOverride -}} +{{- if .Values.coordinating.fullnameOverride -}} +{{- .Values.coordinating.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default coordinating service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.coordinating.servicename" -}} +{{- if .Values.coordinating.servicenameOverride -}} +{{- .Values.coordinating.servicenameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-hl" (include "elasticsearch.coordinating.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified data name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.data.fullname" -}} +{{- $name := default "data" .Values.data.nameOverride -}} +{{- if .Values.data.fullnameOverride -}} +{{- .Values.data.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default data service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "elasticsearch.data.servicename" -}} +{{- if .Values.data.servicenameOverride -}} +{{- .Values.data.servicenameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-hl" (include "elasticsearch.data.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified ingest name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.ingest.fullname" -}} +{{- $name := default "ingest" .Values.ingest.nameOverride -}} +{{- if .Values.ingest.fullnameOverride -}} +{{- .Values.ingest.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default ingest service name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.ingest.servicename" -}} +{{- if .Values.ingest.servicenameOverride -}} +{{- .Values.ingest.servicenameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-hl" (include "elasticsearch.ingest.fullname" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified metrics name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.metrics.fullname" -}} +{{- $name := default "metrics" .Values.metrics.nameOverride -}} +{{- if .Values.metrics.fullnameOverride -}} +{{- .Values.metrics.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" (include "common.names.fullname" .) $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one master-elegible node replica has been configured. 
+*/}} +{{- define "elasticsearch.master.enabled" -}} +{{- if or .Values.master.autoscaling.enabled (gt (int .Values.master.replicaCount) 0) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one coordinating-only node replica has been configured. +*/}} +{{- define "elasticsearch.coordinating.enabled" -}} +{{- if or .Values.coordinating.autoscaling.enabled (gt (int .Values.coordinating.replicaCount) 0) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one data-only node replica has been configured. +*/}} +{{- define "elasticsearch.data.enabled" -}} +{{- if or .Values.data.autoscaling.enabled (gt (int .Values.data.replicaCount) 0) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if at least one ingest-only node replica has been configured. +*/}} +{{- define "elasticsearch.ingest.enabled" -}} +{{- if and .Values.ingest.enabled (or .Values.ingest.autoscaling.enabled (gt (int .Values.ingest.replicaCount) 0)) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the hostname of every ElasticSearch seed node +*/}} +{{- define "elasticsearch.hosts" -}} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- if (include "elasticsearch.master.enabled" .) -}} +{{- $masterFullname := include "elasticsearch.master.servicename" .}} +{{- $masterFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if (include "elasticsearch.coordinating.enabled" .) -}} +{{- $coordinatingFullname := include "elasticsearch.coordinating.servicename" .}} +{{- $coordinatingFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if (include "elasticsearch.data.enabled" .) -}} +{{- $dataFullname := include "elasticsearch.data.servicename" .}} +{{- $dataFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- if (include "elasticsearch.ingest.enabled" .) 
-}} +{{- $ingestFullname := include "elasticsearch.ingest.servicename" .}} +{{- $ingestFullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}, +{{- end -}} +{{- range .Values.extraHosts }} +{{- . }}, +{{- end }} +{{- end -}} + +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "elasticsearch.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "elasticsearch.initScriptsCM" -}} +{{- printf "%s" .Values.initScriptsCM -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "elasticsearch.initScriptsSecret" -}} +{{- printf "%s" .Values.initScriptsSecret -}} +{{- end -}} + +{{/* + Create the name of the master service account to use + */}} +{{- define "elasticsearch.master.serviceAccountName" -}} +{{- if .Values.master.serviceAccount.create -}} + {{ default (include "elasticsearch.master.fullname" .) .Values.master.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.master.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the coordinating-only service account to use + */}} +{{- define "elasticsearch.coordinating.serviceAccountName" -}} +{{- if .Values.coordinating.serviceAccount.create -}} + {{ default (include "elasticsearch.coordinating.fullname" .) .Values.coordinating.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.coordinating.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the data service account to use + */}} +{{- define "elasticsearch.data.serviceAccountName" -}} +{{- if .Values.data.serviceAccount.create -}} + {{ default (include "elasticsearch.data.fullname" .) 
.Values.data.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.data.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the ingest service account to use + */}} +{{- define "elasticsearch.ingest.serviceAccountName" -}} +{{- if .Values.ingest.serviceAccount.create -}} + {{ default (include "elasticsearch.ingest.fullname" .) .Values.ingest.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.ingest.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for master nodes. +*/}} +{{- define "elasticsearch.master.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.master.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.master.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for data nodes. +*/}} +{{- define "elasticsearch.data.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.data.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.data.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for ingest nodes. +*/}} +{{- define "elasticsearch.ingest.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.ingest.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.ingest.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the elasticsearch TLS credentials secret for coordinating-only nodes. 
+*/}} +{{- define "elasticsearch.coordinating.tlsSecretName" -}} +{{- $secretName := .Values.security.tls.coordinating.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "elasticsearch.coordinating.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "elasticsearch.createTlsSecret" -}} +{{- if and .Values.security.enabled .Values.security.tls.autoGenerated (not (include "elasticsearch.security.tlsSecretsProvided" .)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if an authentication credentials secret object should be created +*/}} +{{- define "elasticsearch.createSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Elasticsearch authentication credentials secret name +*/}} +{{- define "elasticsearch.secretName" -}} +{{- default (include "common.names.fullname" .) 
.Values.security.existingSecret -}} +{{- end -}} + +{{/* +Return true if a TLS password secret object should be created +*/}} +{{- define "elasticsearch.createTlsPasswordsSecret" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.passwordsSecret) (or .Values.security.tls.keystorePassword .Values.security.tls.truststorePassword .Values.security.tls.keyPassword ) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Elasticsearch TLS password secret name +*/}} +{{- define "elasticsearch.tlsPasswordsSecret" -}} +{{- default (printf "%s-tls-pass" (include "common.names.fullname" .)) .Values.security.tls.passwordsSecret -}} +{{- end -}} + +{{/* +Returns the name of the secret key containing the Keystore password +*/}} +{{- define "elasticsearch.keystorePasswordKey" -}} +{{- if .Values.security.tls.secretKeystoreKey -}} +{{- printf "%s" .Values.security.tls.secretKeystoreKey -}} +{{- else -}} +{{- print "keystore-password"}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the name of the secret key containing the Truststore password +*/}} +{{- define "elasticsearch.truststorePasswordKey" -}} +{{- if .Values.security.tls.secretTruststoreKey -}} +{{- printf "%s" .Values.security.tls.secretTruststoreKey -}} +{{- else -}} +{{- print "truststore-password"}} +{{- end -}} +{{- end -}} + +{{/* +Returns the name of the secret key containing the PEM key password +*/}} +{{- define "elasticsearch.keyPasswordKey" -}} +{{- if .Values.security.tls.secretKey -}} +{{- printf "%s" .Values.security.tls.secretKey -}} +{{- else -}} +{{- print "key-password"}} +{{- end -}} +{{- end -}} + +{{/* +Add environment variables to configure database values +*/}} +{{- define "elasticsearch.configure.security" -}} +- name: ELASTICSEARCH_ENABLE_SECURITY + value: "true" +- name: ELASTICSEARCH_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.secretName" . 
}} + key: elasticsearch-password +- name: ELASTICSEARCH_ENABLE_FIPS_MODE + value: {{ .Values.security.fipsMode | quote }} +- name: ELASTICSEARCH_TLS_VERIFICATION_MODE + value: {{ .Values.security.tls.verificationMode | quote }} +- name: ELASTICSEARCH_ENABLE_REST_TLS + value: {{ ternary "true" "false" .Values.security.tls.restEncryption | quote }} +{{- if or (include "elasticsearch.createTlsSecret" .) .Values.security.tls.usePemCerts }} +- name: ELASTICSEARCH_TLS_USE_PEM + value: "true" +{{- else }} +- name: ELASTICSEARCH_KEYSTORE_LOCATION + value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.keystoreFilename }}" +- name: ELASTICSEARCH_TRUSTSTORE_LOCATION + value: "/opt/bitnami/elasticsearch/config/certs/{{ .Values.security.tls.truststoreFilename }}" +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.keystorePassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . }} + key: {{ include "elasticsearch.keystorePasswordKey" . | quote }} +{{- end }} +{{- if and (not .Values.security.tls.usePemCerts) (or .Values.security.tls.truststorePassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . }} + key: {{ include "elasticsearch.truststorePasswordKey" . | quote }} +{{- end }} +{{- if and .Values.security.tls.usePemCerts (or .Values.security.tls.keyPassword .Values.security.tls.passwordsSecret) }} +- name: ELASTICSEARCH_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.tlsPasswordsSecret" . }} + key: {{ include "elasticsearch.keyPasswordKey" . 
| quote }} +{{- end }} +{{- end -}} + +{{/* +Returns true if at least 1 existing secret was provided +*/}} +{{- define "elasticsearch.security.tlsSecretsProvided" -}} +{{- $masterSecret := (and (include "elasticsearch.master.enabled" .) .Values.security.tls.master.existingSecret) -}} +{{- $coordinatingSecret := (and (include "elasticsearch.coordinating.enabled" .) .Values.security.tls.coordinating.existingSecret) -}} +{{- $dataSecret := (and (include "elasticsearch.data.enabled" .) .Values.security.tls.data.existingSecret) -}} +{{- $ingestSecret := (and (include "elasticsearch.ingest.enabled" .) .Values.security.tls.ingest.existingSecret) -}} +{{- if or $masterSecret $coordinatingSecret $dataSecret $ingestSecret }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for master nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.master" -}} +{{- $masterSecret := (and (include "elasticsearch.master.enabled" .) (not .Values.security.tls.master.existingSecret)) -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $masterSecret -}} +elasticsearch: security.tls.master.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch master nodes. + Provide the certificates using --set .Values.security.tls.master.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for coordinating-only nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" -}} +{{- $coordinatingSecret := (and (include "elasticsearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret)) -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) 
$coordinatingSecret -}} +elasticsearch: security.tls.coordinating.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch coordinating-only nodes. + Provide the certificates using --set .Values.security.tls.coordinating.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for data nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.data" -}} +{{- $dataSecret := (and (include "elasticsearch.data.enabled" .) (not .Values.security.tls.data.existingSecret)) -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $dataSecret -}} +elasticsearch: security.tls.data.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch data nodes. + Provide the certificates using --set .Values.security.tls.data.existingSecret="my-secret". +{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - Existing secret not provided for ingest nodes */}} +{{- define "elasticsearch.validateValues.security.missingTlsSecrets.ingest" -}} +{{- $ingestSecret := (and (include "elasticsearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret)) -}} +{{- if and .Values.security.enabled (include "elasticsearch.security.tlsSecretsProvided" .) $ingestSecret -}} +elasticsearch: security.tls.ingest.existingSecret + Missing secret containing the TLS certificates for the Elasticsearch ingest nodes. + Provide the certificates using --set .Values.security.tls.ingest.existingSecret="my-secret". 
+{{- end -}} +{{- end -}} + +{{/* Validate values of Elasticsearch - TLS enabled but no certificates provided */}} +{{- define "elasticsearch.validateValues.security.tls" -}} +{{- if and .Values.security.enabled (not .Values.security.tls.autoGenerated) (not (include "elasticsearch.security.tlsSecretsProvided" .)) -}} +elasticsearch: security.tls + In order to enable X-Pack Security, it is necessary to configure TLS. + Three different mechanisms can be used: + - Provide an existing secret containing the Keystore and Truststore for each role + - Provide an existing secret containing the PEM certificates for each role and enable `security.tls.usePemCerts=true` + - Enable using auto-generated certificates with `security.tls.autoGenerated=true` + Existing secrets containing either JKS/PKCS12 or PEM certificates can be provided using --set Values.security.tls.master.existingSecret=master-certs, + --set Values.security.tls.data.existingSecret=data-certs, --set Values.security.tls.coordinating.existingSecret=coordinating-certs, --set Values.security.tls.ingest.existingSecret=ingest-certs +{{- end -}} +{{- end -}} + +{{/* Validate at least Elasticsearch one master node is configured */}} +{{- define "elasticsearch.validateValues.master.replicas" -}} +{{- if not (include "elasticsearch.master.enabled" .) -}} +elasticsearch: master.replicas + Elasticsearch needs at least one master-elegible node to form a cluster. +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "elasticsearch.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.master.replicas" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.tls" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.master" .) 
-}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.data" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.coordinating" .) -}} +{{- $messages := append $messages (include "elasticsearch.validateValues.security.missingTlsSecrets.ingest" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Sysctl set if less then +*/}} +{{- define "elasticsearch.sysctlIfLess" -}} +CURRENT=`sysctl -n {{ .key }}`; +DESIRED="{{ .value }}"; +if [ "$DESIRED" -gt "$CURRENT" ]; then + sysctl -w {{ .key }}={{ .value }}; +fi; +{{- end -}} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/configmap.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/configmap.yaml new file mode 100644 index 000000000..3efba80cc --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/configmap.yaml @@ -0,0 +1,23 @@ +{{- if or .Values.config .Values.extraConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- if .Values.config }} + elasticsearch.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.config "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.extraConfig }} + my_elasticsearch.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.extraConfig "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/hpa.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/hpa.yaml new file mode 100644 index 000000000..9de3b8efc --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/hpa.yaml @@ -0,0 +1,47 @@ +{{- if and (include "elasticsearch.coordinating.enabled" .) .Values.coordinating.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.coordinating.fullname" . 
}} + minReplicas: {{ .Values.coordinating.autoscaling.minReplicas }} + maxReplicas: {{ .Values.coordinating.autoscaling.maxReplicas }} + metrics: + {{- if .Values.coordinating.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.coordinating.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.coordinating.autoscaling.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/serviceaccount.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/serviceaccount.yaml new file mode 100644 index 000000000..ba0861a32 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if and (include "elasticsearch.coordinating.enabled" .) .Values.coordinating.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "elasticsearch.coordinating.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.coordinating.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.coordinating.serviceAccount.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.serviceAccount.annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.coordinating.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/statefulset.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/statefulset.yaml new file mode 100644 index 000000000..692dc890f --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/statefulset.yaml @@ -0,0 +1,313 @@ +{{- if (include "elasticsearch.coordinating.enabled" .) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.coordinating.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.coordinating.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.coordinating.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if not .Values.coordinating.autoscaling.enabled }} + replicas: {{ .Values.coordinating.replicaCount }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.coordinating.updateStrategy }} + updateStrategy: {{- toYaml .Values.coordinating.updateStrategy | nindent 4 }} + {{- end }} + serviceName: {{ include "elasticsearch.coordinating.servicename" . }} + podManagementPolicy: {{ .Values.coordinating.podManagementPolicy }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: coordinating-only + {{- end }} + {{- if .Values.coordinating.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.coordinating.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.coordinating.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "elasticsearch.coordinating.serviceAccountName" . }} + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.coordinating.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.coordinating.podAntiAffinityPreset "component" "coordinating-only" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.coordinating.nodeAffinityPreset.type "key" .Values.coordinating.nodeAffinityPreset.key "values" .Values.coordinating.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.coordinating.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.schedulerName }} + schedulerName: {{ .Values.coordinating.schedulerName }} + {{- end }} + {{- if .Values.coordinating.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" 
.Values.coordinating.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.coordinating.priorityClassName }} + priorityClassName: {{ .Values.coordinating.priorityClassName | quote }} + {{- end }} + {{- if .Values.coordinating.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.coordinating.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.coordinating.podSecurityContext.enabled }} + securityContext: {{- omit .Values.coordinating.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.coordinating.initContainers .Values.sysctlImage.enabled }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.coordinating.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.coordinating.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.coordinating.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.coordinating.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.coordinating.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_ROLES + value: "" + - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: ELASTICSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . 
| quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "elasticsearch.coordinating.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.coordinating.heapSize }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.coordinating.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.coordinating.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.coordinating.extraEnvVarsCM .Values.coordinating.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.coordinating.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.coordinating.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.coordinating.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.coordinating.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.coordinating.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.coordinating.customLivenessProbe }} + livenessProbe: {{- include 
"common.tplvalues.render" (dict "value" .Values.coordinating.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.coordinating.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- if .Values.coordinating.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.coordinating.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.coordinating.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.coordinating.resources }} + resources: {{- toYaml .Values.coordinating.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + 
{{- if .Values.coordinating.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.coordinating.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.coordinating.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: "data" + emptyDir: {} + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ include "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ include "elasticsearch.coordinating.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . 
}} + defaultMode: 0755 + {{- end }} + {{- if .Values.coordinating.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.coordinating.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/svc-headless.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/svc-headless.yaml new file mode 100644 index 000000000..a62484fe8 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/coordinating/svc-headless.yaml @@ -0,0 +1,28 @@ +{{- if (include "elasticsearch.coordinating.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.coordinating.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: coordinating-only +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/data/hpa.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/data/hpa.yaml new file mode 100644 index 000000000..62c247822 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/data/hpa.yaml @@ -0,0 +1,47 @@ +{{- if and (include "elasticsearch.data.enabled" .) .Values.data.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.data.fullname" . }} + minReplicas: {{ .Values.data.autoscaling.minReplicas }} + maxReplicas: {{ .Values.data.autoscaling.maxReplicas }} + metrics: + {{- if .Values.data.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.data.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.data.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.data.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.data.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.data.autoscaling.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/data/serviceaccount.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/data/serviceaccount.yaml new file mode 100644 index 000000000..bd71bd2d9 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/data/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if and (include "elasticsearch.data.enabled" .) .Values.data.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "elasticsearch.data.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.data.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.data.serviceAccount.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.serviceAccount.annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.data.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/data/statefulset.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/data/statefulset.yaml new file mode 100644 index 000000000..164275641 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/data/statefulset.yaml @@ -0,0 +1,373 @@ +{{- if (include 
"elasticsearch.data.enabled" .) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.data.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.data.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.data.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if not .Values.data.autoscaling.enabled }} + replicas: {{ .Values.data.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.data.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: data + serviceName: {{ include "elasticsearch.data.servicename" . }} + {{- if .Values.data.updateStrategy }} + updateStrategy: {{- toYaml .Values.data.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: data + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: data + {{- end }} + {{- if .Values.data.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.data.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.data.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "elasticsearch.data.serviceAccountName" . }} + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.data.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.data.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.data.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAffinityPreset "component" "data" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.data.podAntiAffinityPreset "component" "data" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.data.nodeAffinityPreset.type "key" .Values.data.nodeAffinityPreset.key "values" .Values.data.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.data.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.data.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.tolerations }} + tolerations: {{- include 
"common.tplvalues.render" (dict "value" .Values.data.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.schedulerName }} + schedulerName: {{ .Values.data.schedulerName }} + {{- end }} + {{- if .Values.data.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.data.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.data.priorityClassName }} + priorityClassName: {{ .Values.data.priorityClassName | quote }} + {{- end }} + {{- if .Values.data.podSecurityContext.enabled }} + securityContext: {{- omit .Values.data.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.data.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.data.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.data.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.data.persistence.enabled) }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.data.persistence.enabled }} + - name: volume-permissions + image: {{ include "elasticsearch.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + mkdir -p /bitnami/elasticsearch/data + chown {{ .Values.data.containerSecurityContext.runAsUser }}:{{ .Values.data.podSecurityContext.fsGroup }} /bitnami/elasticsearch/data + find /bitnami/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.data.containerSecurityContext.runAsUser }}:{{ .Values.data.podSecurityContext.fsGroup }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/elasticsearch/data + {{- end }} + {{- if .Values.data.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.data.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.data.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.data.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.data.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.data.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.data.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.data.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_ROLES + value: "data" + - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: ELASTICSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . 
| quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "elasticsearch.data.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + {{- if .Values.data.heapSize }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.data.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.data.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.data.extraEnvVarsCM .Values.data.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.data.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.data.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.data.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.data.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.data.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.data.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customLivenessProbe "context" $) | nindent 12 }} + {{- else if 
.Values.data.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- if .Values.data.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.data.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.data.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.data.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.data.resources }} + resources: {{- toYaml .Values.data.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.data.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if 
.Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.data.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.data.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ template "elasticsearch.data.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . 
}} + defaultMode: 0755 + {{- end }} + {{- if .Values.data.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.data.persistence.enabled }} + - name: "data" + emptyDir: {} + {{- else if .Values.data.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.data.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: "data" + annotations: + {{- if .Values.data.persistence.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.data.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.data.persistence.size | quote }} + {{- if .Values.data.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.data.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.data.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.data.persistence.existingVolume }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.data.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/data/svc-headless.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/data/svc-headless.yaml new file mode 100644 index 000000000..3d37d8e42 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/data/svc-headless.yaml @@ -0,0 +1,28 @@ +{{- if (include "elasticsearch.data.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.data.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: data + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: data +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/extra-list.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingest/hpa.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/hpa.yaml new file mode 100644 index 000000000..ef57de1bc --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/hpa.yaml @@ -0,0 +1,47 @@ +{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.ingest.fullname" . 
}} + minReplicas: {{ .Values.ingest.autoscaling.minReplicas }} + maxReplicas: {{ .Values.ingest.autoscaling.maxReplicas }} + metrics: + {{- if .Values.ingest.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.ingest.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.ingest.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.ingest.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.ingest.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.ingest.autoscaling.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingest/ingress.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/ingress.yaml new file mode 100644 index 000000000..e4ad0a6d5 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/ingress.yaml @@ -0,0 +1,65 @@ +{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.service.enabled .Values.ingest.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.ingest.ingress.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.ingest.ingress.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.ingress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and .Values.ingest.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingest.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingest.ingress.hostname }} + - http: + paths: + {{- if .Values.ingest.ingress.extraPaths }} + {{- toYaml .Values.ingest.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingest.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingest.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "elasticsearch.ingest.fullname" .) 
"servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- if ne .Values.ingest.ingress.hostname "*" }} + host: {{ .Values.ingest.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.ingest.ingress.extraHosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "elasticsearch.ingest.fullname" $) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingest.ingress.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.ingress.extraRules "context" $ ) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingest.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingest.ingress.annotations )) .Values.ingest.ingress.selfSigned)) .Values.ingest.ingress.extraTls }} + tls: + {{- if and .Values.ingest.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingest.ingress.annotations )) .Values.ingest.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingest.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingest.ingress.hostname }} + {{- end }} + {{- if .Values.ingest.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingest/service.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/service.yaml new file mode 100644 index 000000000..d18b67758 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/service.yaml @@ -0,0 +1,60 @@ +{{- if and (include "elasticsearch.ingest.enabled" .) 
.Values.ingest.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.ingest.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.ingest.service.type }} + {{- if and .Values.ingest.service.clusterIP (eq .Values.ingest.service.type "ClusterIP") }} + clusterIP: {{ .Values.ingest.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.ingest.service.type "LoadBalancer") (eq .Values.ingest.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.ingest.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.ingest.service.type "LoadBalancer") .Values.ingest.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.ingest.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if (and (eq .Values.ingest.service.type "LoadBalancer") (not (empty .Values.ingest.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.ingest.service.loadBalancerIP }} + {{- end }} + {{- if .Values.ingest.service.sessionAffinity }} + sessionAffinity: {{ .Values.ingest.service.sessionAffinity }} + {{- end }} + {{- if .Values.ingest.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: 
tcp-rest-api + port: {{ .Values.ingest.service.ports.restAPI }} + targetPort: rest-api + {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePorts.restAPI)) }} + nodePort: {{ .Values.ingest.service.nodePorts.restAPI }} + {{- else if eq .Values.ingest.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-transport + port: {{ .Values.ingest.service.ports.transport }} + {{- if and (or (eq .Values.ingest.service.type "NodePort") (eq .Values.ingest.service.type "LoadBalancer")) (not (empty .Values.ingest.service.nodePorts.transport)) }} + nodePort: {{ .Values.ingest.service.nodePorts.transport }} + {{- else if eq .Values.ingest.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.ingest.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingest/serviceaccount.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/serviceaccount.yaml new file mode 100644 index 000000000..a3bff3816 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if and (include "elasticsearch.ingest.enabled" .) .Values.ingest.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "elasticsearch.ingest.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.ingest.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.ingest.serviceAccount.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.serviceAccount.annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.ingest.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingest/statefulset.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/statefulset.yaml new file mode 100644 index 000000000..d39b85ad7 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/statefulset.yaml @@ -0,0 +1,313 @@ +{{- if (include "elasticsearch.ingest.enabled" . ) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.ingest.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.ingest.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.ingest.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if not .Values.ingest.autoscaling.enabled }} + replicas: {{ .Values.ingest.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.ingest.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: ingest + serviceName: {{ include "elasticsearch.ingest.servicename" . }} + {{- if .Values.ingest.updateStrategy }} + updateStrategy: {{- toYaml .Values.ingest.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: ingest + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: ingest + {{- end }} + {{- if .Values.ingest.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.ingest.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.ingest.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "elasticsearch.ingest.serviceAccountName" . }} + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.ingest.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAffinityPreset "component" "ingest" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.ingest.podAntiAffinityPreset "component" "ingest" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.ingest.nodeAffinityPreset.type "key" .Values.ingest.nodeAffinityPreset.key "values" .Values.ingest.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.ingest.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.schedulerName }} + schedulerName: {{ .Values.ingest.schedulerName }} + {{- end }} + {{- if .Values.ingest.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.ingest.priorityClassName }} + priorityClassName: {{ 
.Values.ingest.priorityClassName | quote }} + {{- end }} + {{- if .Values.ingest.podSecurityContext.enabled }} + securityContext: {{- omit .Values.ingest.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.ingest.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.ingest.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.ingest.initContainers .Values.sysctlImage.enabled }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.ingest.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.ingest.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.ingest.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.ingest.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.ingest.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: "yes" + - name: ELASTICSEARCH_NODE_ROLES + value: "ingest" + - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: ELASTICSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . 
| quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "elasticsearch.ingest.servicename" .)}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.ingest.heapSize }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.ingest.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.ingest.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.ingest.extraEnvVarsCM .Values.ingest.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.ingest.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.ingest.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.ingest.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.ingest.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.ingest.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.ingest.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customLivenessProbe "context" $) | nindent 12 
}} + {{- else if .Values.ingest.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- if .Values.ingest.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.ingest.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.ingest.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.ingest.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.ingest.resources }} + resources: {{- toYaml .Values.ingest.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.ingest.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.extraVolumeMounts "context" $) | nindent 12 
}} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.ingest.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingest.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: "data" + emptyDir: {} + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ template "elasticsearch.ingest.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . 
}} + defaultMode: 0755 + {{- end }} + {{- if .Values.ingest.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingest.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingest/svc-headless.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/svc-headless.yaml new file mode 100644 index 000000000..c0e0de401 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingest/svc-headless.yaml @@ -0,0 +1,28 @@ +{{- if (include "elasticsearch.ingest.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.ingest.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: ingest +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingress-tls-secrets.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingress-tls-secrets.yaml new file mode 100644 index 000000000..50ae8c9ca --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingress-tls-secrets.yaml @@ -0,0 +1,94 @@ +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ template "common.names.namespace" $ }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "elasticsearch-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "common.names.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- end }} +{{- if .Values.ingest.ingress.enabled }} +{{- if .Values.ingest.ingress.secrets }} +{{- range .Values.ingest.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ printf "%s-ingest-ingress" (include "common.names.namespace" $ ) }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingest.ingress.tls .Values.ingest.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingest.ingress.hostname }} +{{- $ca := genCA "elasticsearch-ingest-ca" 365 }} +{{- $cert := genSignedCert .Values.ingest.ingress.hostname nil (list .Values.ingest.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ 
$secretName }} + namespace: {{ template "common.names.namespace" $ }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: ingest + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/ingress.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/ingress.yaml new file mode 100644 index 000000000..c39dec2ea --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/ingress.yaml @@ -0,0 +1,65 @@ +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: elasticsearch + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.ingress.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- if ne .Values.ingress.hostname "*" }} + host: {{ .Values.ingress.hostname }} + {{- end }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "tcp-rest-api" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.ingress.extraRules "context" $ ) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/initialization-configmap.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/initialization-configmap.yaml new file mode 100644 index 000000000..d84a353d6 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/initialization-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/master/hpa.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/master/hpa.yaml new file mode 100644 index 000000000..b41d43364 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/master/hpa.yaml @@ -0,0 +1,47 @@ +{{- if and (include "elasticsearch.master.enabled" .) .Values.master.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} + kind: StatefulSet + name: {{ include "elasticsearch.master.fullname" . 
}} + minReplicas: {{ .Values.master.autoscaling.minReplicas }} + maxReplicas: {{ .Values.master.autoscaling.maxReplicas }} + metrics: + {{- if .Values.master.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.master.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.master.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.master.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.master.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.master.autoscaling.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/master/serviceaccount.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/master/serviceaccount.yaml new file mode 100644 index 000000000..a6fefec01 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/master/serviceaccount.yaml @@ -0,0 +1,22 @@ +{{- if and (include "elasticsearch.master.enabled" .) .Values.master.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "elasticsearch.master.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if or .Values.master.serviceAccount.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.master.serviceAccount.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.serviceAccount.annotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/master/statefulset.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/master/statefulset.yaml new file mode 100644 index 000000000..88c6e6671 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/master/statefulset.yaml @@ -0,0 +1,373 @@ +{{- if (include "elasticsearch.master.enabled" .) }} +apiVersion: {{ template "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "elasticsearch.master.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.master.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.master.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if not .Values.master.autoscaling.enabled }} + replicas: {{ .Values.master.replicaCount }} + {{- end }} + podManagementPolicy: {{ .Values.master.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: master + serviceName: {{ include "elasticsearch.master.servicename" . }} + {{- if .Values.master.updateStrategy }} + updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: master + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: master + {{- end }} + {{- if .Values.master.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if and (include "elasticsearch.createTlsSecret" .) (not .Values.security.tls.master.existingSecret) }} + checksum/tls: {{ include (print $.Template.BasePath "/tls-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "elasticsearch.master.serviceAccountName" . }} + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ 
.Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.podSecurityContext.enabled }} + securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.master.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.master.initContainers .Values.sysctlImage.enabled (and .Values.volumePermissions.enabled .Values.master.persistence.enabled) }} + initContainers: + {{- if .Values.sysctlImage.enabled }} + ## Image that performs the sysctl operation to modify Kernel settings (needed sometimes to avoid boot errors) + - name: sysctl + image: {{ include "elasticsearch.sysctl.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- include "elasticsearch.sysctlIfLess" (dict "key" "vm.max_map_count" "value" "262144") | nindent 14 }} + {{- include "elasticsearch.sysctlIfLess" (dict "key" "fs.file-max" "value" "65536") | nindent 14 }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctlImage.resources }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.master.persistence.enabled }} + - name: volume-permissions + image: {{ include "elasticsearch.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + mkdir -p /bitnami/elasticsearch/data + chown {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} /bitnami/elasticsearch/data + find /bitnami/elasticsearch/data -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/elasticsearch/data + {{- end }} + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: elasticsearch + image: {{ include "elasticsearch.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.master.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.master.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.master.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ELASTICSEARCH_IS_DEDICATED_NODE + value: {{ ternary "yes" "no" .Values.master.masterOnly | quote }} + - name: ELASTICSEARCH_NODE_ROLES + value: "master" + - name: ELASTICSEARCH_TRANSPORT_PORT_NUMBER + value: {{ .Values.containerPorts.transport | quote }} + - name: ELASTICSEARCH_HTTP_PORT_NUMBER + value: {{ .Values.containerPorts.restAPI | quote }} + - name: ELASTICSEARCH_CLUSTER_NAME + value: {{ .Values.clusterName | quote }} + - name: ELASTICSEARCH_CLUSTER_HOSTS + value: {{ include "elasticsearch.hosts" . 
| quote }} + - name: ELASTICSEARCH_TOTAL_NODES + value: {{ add (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) (ternary .Values.data.autoscaling.minReplicas .Values.data.replicaCount .Values.data.autoscaling.enabled) | quote }} + - name: ELASTICSEARCH_CLUSTER_MASTER_HOSTS + {{- $elasticsearchMasterFullname := include "elasticsearch.master.fullname" . }} + {{- $replicas := int (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) }} + value: {{ range $i, $e := until $replicas }}{{ printf "%s-%d" $elasticsearchMasterFullname $e }} {{ end }} + - name: ELASTICSEARCH_MINIMUM_MASTER_NODES + value: {{ add (div (ternary .Values.master.autoscaling.minReplicas .Values.master.replicaCount .Values.master.autoscaling.enabled) 2) 1 | quote }} + - name: ELASTICSEARCH_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).{{ (include "elasticsearch.master.servicename" .) | trunc 63 | trimSuffix "-" }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}" + {{- if .Values.plugins }} + - name: ELASTICSEARCH_PLUGINS + value: {{ .Values.plugins | quote }} + {{- end }} + {{- if .Values.snapshotRepoPath }} + - name: ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH + value: {{ .Values.snapshotRepoPath | quote }} + {{- end }} + {{- if .Values.master.heapSize }} + - name: ELASTICSEARCH_HEAP_SIZE + value: {{ .Values.master.heapSize | quote }} + {{- end }} + {{- if .Values.security.enabled }} + {{- include "elasticsearch.configure.security" . 
| nindent 12 }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVarsCM "context" $ ) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" ( dict "value" .Values.master.extraEnvVarsSecret "context" $ ) }} + {{- end }} + {{- end }} + ports: + - name: rest-api + containerPort: {{ .Values.containerPorts.restAPI }} + - name: transport + containerPort: {{ .Values.containerPorts.transport }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.master.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: rest-api + {{- end }} + {{- if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 
}} + {{- else if .Values.master.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /opt/bitnami/scripts/elasticsearch/healthcheck.sh + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/elasticsearch/data + {{- if .Values.config }} + - mountPath: /opt/bitnami/elasticsearch/config/elasticsearch.yml + name: config + subPath: elasticsearch.yml + {{- end }} + {{- if .Values.extraConfig }} + - mountPath: /opt/bitnami/elasticsearch/config/my_elasticsearch.yml + name: config + subPath: my_elasticsearch.yml + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + mountPath: /opt/bitnami/elasticsearch/config/certs + readOnly: true + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraVolumeMounts "context" $) | nindent 12 
}} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.extraConfig }} + - name: config + configMap: + name: {{ template "common.names.fullname" . }} + {{- end }} + {{- if .Values.security.enabled }} + - name: elasticsearch-certificates + secret: + secretName: {{ template "elasticsearch.master.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "elasticsearch.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ template "elasticsearch.initScriptsCM" . }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "elasticsearch.initScriptsSecret" . 
}} + defaultMode: 0755 + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: "data" + emptyDir: {} + {{- else if .Values.master.persistence.existingClaim }} + - name: "data" + persistentVolumeClaim: + claimName: {{ .Values.master.persistence.existingClaim }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: "data" + annotations: + {{- if .Values.master.persistence.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- else if .Values.master.persistence.existingVolume }} + selector: + matchLabels: + volume: {{ .Values.master.persistence.existingVolume }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/master/svc-headless.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/master/svc-headless.yaml new file mode 100644 index 000000000..7b73ad59a --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/master/svc-headless.yaml @@ -0,0 +1,28 @@ +{{- if (include "elasticsearch.master.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.master.servicename" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{ include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: tcp-rest-api + port: {{ .Values.containerPorts.restAPI }} + targetPort: rest-api + - name: tcp-transport + port: {{ .Values.containerPorts.transport }} + targetPort: transport + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: master +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/metrics/deployment.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/deployment.yaml new file mode 100644 index 000000000..7ffa1e3b2 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/deployment.yaml @@ -0,0 +1,182 @@ +{{- if .Values.metrics.enabled }} +apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: metrics + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.metrics.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: metrics + {{- if .Values.useIstioLabels }} + ## Istio Labels: https://istio.io/docs/ops/deployment/requirements/ + app: metrics + {{- end }} + {{- if .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "elasticsearch.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.nodeAffinityPreset.type "key" .Values.metrics.nodeAffinityPreset.key "values" .Values.metrics.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.priorityClassName }} + priorityClassName: {{ .Values.metrics.priorityClassName | quote }} + {{- end }} + {{- if 
.Values.metrics.schedulerName }} + schedulerName: {{ .Values.metrics.schedulerName | quote }} + {{- end }} + {{- if .Values.metrics.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.metrics.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: metrics + image: {{ include "elasticsearch.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + {{- $protocol := (ternary "https" "http" (and .Values.security.enabled .Values.security.tls.restEncryption)) }} + - --es.uri={{$protocol}}://{{ include "common.names.fullname" . }}:{{ include "elasticsearch.service.ports.restAPI" . 
}} + - --es.all + {{- if .Values.security.tls.restEncryption }} + - --es.ssl-skip-verify + {{- end }} + {{- if .Values.metrics.extraArgs }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraArgs "context" $) | nindent 12 }} + {{- end }} + {{- end }} + env: + {{- if .Values.security.enabled }} + - name: ES_USERNAME + value: "elastic" + - name: ES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "elasticsearch.secretName" . }} + key: elasticsearch-password + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.metrics.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.metrics.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: metrics + containerPort: 9114 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} 
+ {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: + initialDelaySeconds: {{ .Values.metrics.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.startupProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.startupProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.startupProbe.failureThreshold }} + httpGet: + path: /metrics + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + volumeMounts: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + volumes: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/metrics/prometheusrule.yaml 
b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/prometheusrule.yaml new file mode 100644 index 000000000..7c25edd7c --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/prometheusrule.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.db_namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 6 }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/metrics/service.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/service.yaml new file mode 100644 index 000000000..bf3d5208c --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/metrics/servicemonitor.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/servicemonitor.yaml new file mode 100644 index 000000000..e64de5e34 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/metrics/servicemonitor.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "elasticsearch.metrics.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.db_namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/secrets.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/secrets.yaml new file mode 100644 index 000000000..8fda48827 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/secrets.yaml @@ -0,0 +1,43 @@ +{{- if (include "elasticsearch.createSecret" . ) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + elasticsearch-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) "key" "elasticsearch-password" "length" 16 "providedValues" (list "security.elasticPassword") "context" $) }} +{{- end }} +{{- if (include "elasticsearch.createTlsPasswordsSecret" . ) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-tls-pass" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if .Values.security.tls.keystorePassword }} + keystore-password: {{ default "" .Values.security.tls.keystorePassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.truststorePassword }} + truststore-password: {{ default "" .Values.security.tls.truststorePassword | b64enc | quote }} + {{- end }} + {{- if .Values.security.tls.keyPassword }} + key-password: {{ default "" .Values.security.tls.keyPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/service.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/service.yaml new file mode 100644 index 000000000..cb3e67938 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/service.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "elasticsearch.service.name" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if (include "elasticsearch.coordinating.enabled" .) 
}} + app.kubernetes.io/component: coordinating-only + {{- else }} + app.kubernetes.io/component: master + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-rest-api + port: {{ include "elasticsearch.service.ports.restAPI" . 
}} + targetPort: rest-api + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.restAPI)) }} + nodePort: {{ .Values.service.nodePorts.restAPI }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-transport + port: {{ .Values.service.ports.transport }} + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.transport)) }} + nodePort: {{ .Values.service.nodePorts.transport }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + {{- if (include "elasticsearch.coordinating.enabled" .) }} + app.kubernetes.io/component: coordinating-only + {{- else }} + app.kubernetes.io/component: master + {{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/templates/tls-secret.yaml b/knowlg-automation/helm_charts/elasticsearch/templates/tls-secret.yaml new file mode 100644 index 000000000..985f4ce68 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/templates/tls-secret.yaml @@ -0,0 +1,117 @@ +{{- if (include "elasticsearch.createTlsSecret" .) }} +{{- $ca := genCA "elasticsearch-ca" 365 }} +{{- $releaseNamespace := include "common.names.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} + +{{- if and (include "elasticsearch.master.enabled" .) (not .Values.security.tls.master.existingSecret) }} +{{- $fullname := include "elasticsearch.master.fullname" . }} +{{- $serviceName := include "elasticsearch.master.servicename" . 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- if not (include "elasticsearch.coordinating.enabled" .) }} +{{- $altNames = append $altNames (include "elasticsearch.service.name" .) }} +{{- $altNames = append $altNames (printf "%s.%s.svc.%s" (include "elasticsearch.service.name" .) $releaseNamespace $clusterDomain) }} +{{- end }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +{{- $secretName := printf "%s-crt" (include "elasticsearch.master.fullname" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: master + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if and (include "elasticsearch.data.enabled" .) (not .Values.security.tls.data.existingSecret) }} +{{- $fullname := include "elasticsearch.data.fullname" . }} +{{- $serviceName := include "elasticsearch.data.servicename" . 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +{{- $secretName := printf "%s-crt" (include "elasticsearch.data.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: data + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if and (include "elasticsearch.coordinating.enabled" .) (not .Values.security.tls.coordinating.existingSecret) }} +{{- $fullname := include "elasticsearch.coordinating.fullname" . }} +{{- $serviceName := include "elasticsearch.coordinating.servicename" . }} +{{- $altNames := list (include "elasticsearch.service.name" .) (printf "%s.%s.svc.%s" (include "elasticsearch.service.name" .) 
$releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +{{- $secretName := printf "%s-crt" (include "elasticsearch.coordinating.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: coordinating-only + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- if and (include "elasticsearch.ingest.enabled" .) (not .Values.security.tls.ingest.existingSecret) }} +{{- $fullname := include "elasticsearch.ingest.fullname" . }} +{{- $serviceName := include "elasticsearch.ingest.servicename" . }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname "127.0.0.1" "localhost" }} +{{- if .Values.ingest.service.enabled }} +{{- $altNames = append $altNames (include "elasticsearch.ingest.fullname" .) 
}} +{{- $altNames = append $altNames (printf "%s.%s.svc.%s" (include "elasticsearch.ingest.fullname" .) $releaseNamespace $clusterDomain) }} +{{- end }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +{{- $secretName := printf "%s-crt" (include "elasticsearch.ingest.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + app.kubernetes.io/component: ingest + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/elasticsearch/values.yaml b/knowlg-automation/helm_charts/elasticsearch/values.yaml new file mode 100644 index 000000000..55ec460d5 --- /dev/null +++ b/knowlg-automation/helm_charts/elasticsearch/values.yaml @@ -0,0 +1,2145 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names 
as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.elasticsearch.service.name Elasticsearch service name to be used in the Kibana subchart (ignored if kibanaEnabled=false) +## @param global.elasticsearch.service.ports.restAPI Elasticsearch service restAPI port to be used in the Kibana subchart (ignored if kibanaEnabled=false) +## @param global.kibanaEnabled Whether or not to enable Kibana +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + elasticsearch: + service: + name: elasticsearch + ports: + restAPI: 9200 + kibanaEnabled: false + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Elasticsearch cluster Parameters + +## @param clusterName Elasticsearch cluster name 
+## +clusterName: elastic +## @param containerPorts.restAPI Elasticsearch REST API port +## @param containerPorts.transport Elasticsearch Transport port +## +containerPorts: + restAPI: 9200 + transport: 9300 +## @param plugins Comma, semi-colon or space separated list of plugins to install at initialization +## ref: https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch#environment-variables +## +plugins: "" +## @param snapshotRepoPath File System snapshot repository path +## ref: https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch#environment-variables +## +snapshotRepoPath: "" +## @param config Override elasticsearch configuration +## +config: {} +## @param extraConfig Append extra configuration to the elasticsearch node configuration +## Use this instead of `config` to add more configuration +## See below example: +## extraConfig: +## node: +## store: +## allow_mmap: false +## ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html +## +extraConfig: {} +## @param extraHosts A list of external hosts which are part of this cluster +## Example Use Case: When you have a cluster with nodes spanned across multiple K8s or namespaces +## extraHosts: +## - datacenter2-elasticsearch-master-hl.namespace2.svc +## - datacenter2-elasticsearch-data-hl.namespace2.svc +extraHosts: [] +## @param extraVolumes A list of volumes to be added to the pod +## Example Use Case: mount ssl certificates when elasticsearch has tls enabled +## extraVolumes: +## - name: es-certs +## secret: +## defaultMode: 420 +## secretName: es-certs +extraVolumes: [] +## @param extraVolumeMounts A list of volume mounts to be added to the pod +## extraVolumeMounts: +## - name: es-certs +## mountPath: /certs +## readOnly: true +extraVolumeMounts: [] +## @param initScripts Dictionary of init scripts. Evaluated as a template. 
+## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraEnvVars Array containing extra env vars to be added to all pods (evaluated as a template) +## For example: +## extraEnvVars: +## - name: MY_ENV_VAR +## value: env_var_value +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret containing extra env vars to be added to all pods (evaluated as a template) +## +extraEnvVarsSecret: "" +## @param sidecars Add additional sidecar containers to the all elasticsearch node pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the all elasticsearch node pod(s) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## command: ['sh', '-c', 'echo "hello world"'] +## +initContainers: [] +## @param useIstioLabels Use this variable to add Istio labels to all pods +## +useIstioLabels: true + +## Bitnami Elasticsearch image +## @param image.registry Elasticsearch image registry +## @param image.repository Elasticsearch image repository +## @param image.tag 
Elasticsearch image tag (immutable tags are recommended) +## @param image.digest Elasticsearch image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Elasticsearch image pull policy +## @param image.pullSecrets Elasticsearch image pull secrets +## @param image.debug Enable Elasticsearch image debug mode +## +image: + registry: docker.io + repository: bitnami/elasticsearch + tag: 6.8.22 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## X-Pack security parameters +## Note: TLS configuration is required in order to configure password authentication +## +security: + ## @param security.enabled Enable X-Pack Security settings + ## + enabled: false + ## @param security.elasticPassword Password for 'elastic' user + ## Ref: https://github.com/bitnami/containers/tree/main/bitnami/elasticsearch#security + ## + elasticPassword: "" + ## @param security.existingSecret Name of the existing secret containing the Elasticsearch password and + ## + existingSecret: "" + ## FIPS mode + ## @param security.fipsMode Configure elasticsearch with FIPS 140 compliant mode + ## Ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/fips-140-compliance.html + ## + fipsMode: false + ## TLS configuration + ## + tls: + ## @param security.tls.restEncryption Enable SSL/TLS encryption for Elasticsearch REST API. 
+ ## + restEncryption: true + ## @param security.tls.autoGenerated Create self-signed TLS certificates. + ## NOTE: If autoGenerated certs are enabled and a new node type is enabled using helm upgrade, make sure you remove previously existing Elasticsearch TLS secrets. + ## Otherwise, the new node certs won't match the existing certs. + ## + autoGenerated: false + ## @param security.tls.verificationMode Verification mode for SSL communications. + ## Supported values: full, certificate, none. + ## Ref: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html + ## + verificationMode: "full" + ## @param security.tls.master.existingSecret Existing secret containing the certificates for the master nodes + ## @param security.tls.data.existingSecret Existing secret containing the certificates for the data nodes + ## @param security.tls.ingest.existingSecret Existing secret containing the certificates for the ingest nodes + ## @param security.tls.coordinating.existingSecret Existing secret containing the certificates for the coordinating nodes + ## + master: + existingSecret: "" + data: + existingSecret: "" + ingest: + existingSecret: "" + coordinating: + existingSecret: "" + ## @param security.tls.keystoreFilename Name of the keystore file + ## + keystoreFilename: elasticsearch.keystore.jks + ## @param security.tls.truststoreFilename Name of the truststore + ## + truststoreFilename: elasticsearch.truststore.jks + ## @param security.tls.usePemCerts Use this variable if your secrets contain PEM certificates instead of JKS/PKCS12 + ## Ignored when using autoGenerated certs. + ## + usePemCerts: false + ## @param security.tls.passwordsSecret Existing secret containing the Keystore and Truststore passwords, or key password if PEM certs are used + ## + passwordsSecret: "" + ## @param security.tls.keystorePassword Password to access the JKS/PKCS12 keystore or PEM key when they are password-protected. 
+ ## Ignored if security.tls.passwordsSecret is provided. + ## + keystorePassword: "" + ## @param security.tls.truststorePassword Password to access the JKS/PKCS12 truststore when they are password-protected. + ## Ignored if security.tls.passwordsSecret is provided. + ## + truststorePassword: "" + ## @param security.tls.keyPassword Password to access the PEM key when they are password-protected. + ## Ignored if security.tls.passwordsSecret is provided. + ## + keyPassword: "" + ## @param security.tls.secretKeystoreKey Name of the secret key containing the Keystore password + ## + secretKeystoreKey: "" + ## @param security.tls.secretTruststoreKey Name of the secret key containing the Truststore password + ## + secretTruststoreKey: "" + ## @param security.tls.secretKey Name of the secret key containing the PEM key password + ## + secretKey: "" + +## @section Traffic Exposure Parameters +## + +## Elasticsearch service parameters +## +service: + ## @param service.type Elasticsearch service type + ## + type: ClusterIP + ## @param service.ports.restAPI Elasticsearch service REST API port + ## @param service.ports.transport Elasticsearch service transport port + ## + ports: + restAPI: 9200 + transport: 9300 + ## Node ports to expose + ## @param service.nodePorts.restAPI Node port for REST API + ## @param service.nodePorts.transport Node port for transport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + restAPI: "" + transport: "" + ## @param service.clusterIP Elasticsearch service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Elasticsearch service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Elasticsearch service Load Balancer sources + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Elasticsearch service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Elasticsearch service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in Elasticsearch service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## Elasticsearch ingress parameters +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress record generation for Elasticsearch + ## + enabled: false + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.hostname Default host for the ingress record + ## + hostname: elasticsearch.local + ## @param ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param ingress.annotations Additional annotations for the 
Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: elasticsearch.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - elasticsearch.local + ## secretName: elasticsearch.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: elasticsearch.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... 
+ ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + +## @section Master-elegible nodes parameters + +master: + ## @param master.masterOnly Deploy the Elasticsearch master-elegible nodes as master-only nodes. Recommended for high-demand deployments. + ## If you are + masterOnly: true + ## @param master.replicaCount Number of master-elegible replicas to deploy + ## + replicaCount: 1 + ## @param master.nameOverride String to partially override elasticsearch.master.fullname + ## + nameOverride: "" + ## @param master.fullnameOverride String to fully override elasticsearch.master.fullname + ## + fullnameOverride: "" + ## @param master.servicenameOverride String to fully override elasticsearch.master.servicename + ## + servicenameOverride: "" + ## @param master.annotations [object] Annotations for the master statefulset + ## + annotations: {} + ## @param master.updateStrategy.type Master-elegible nodes statefulset stategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## Elasticsearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## @param master.resources.limits The resources limits for elasticsearch containers + ## @param master.resources.requests The requested resources for elasticsearch containers + ## + resources: + ## Example: + ## limits: + ## cpu: 500m + ## memory: 1Gi + limits: {} + ## Examples: + ## requests: + ## cpu: 250m + ## memory: 256Mi + requests: {} + ## @param master.heapSize Elasticsearch master-eligible node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 128m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled master-elegible pods' Security Context + ## @param master.podSecurityContext.fsGroup Set master-elegible pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled master-elegible containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set master-elegible containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsNonRoot Set master-elegible containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param master.hostAliases master-elegible pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for master-elegible pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations 
Annotations for master-elegible pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for master-elegible pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for master-elegible pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for master-elegible pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.priorityClassName master-elegible pods' priorityClassName + ## + priorityClassName: "" + ## @param master.schedulerName Name of the k8s scheduler (other than default) for master-elegible pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.terminationGracePeriodSeconds In seconds, the time given to the Elasticsearch Master pod to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param master.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param master.podManagementPolicy podManagementPolicy to manage scaling operation of Elasticsearch master pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for Elasticsearch master-elegible containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable/disable the startup probe (master nodes pod) + ## @param master.startupProbe.initialDelaySeconds Delay before startup probe is initiated (master nodes pod) + ## @param master.startupProbe.periodSeconds How often to perform the probe (master nodes pod) + ## @param master.startupProbe.timeoutSeconds When the probe times out (master nodes pod) + ## @param master.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master nodes pod) + ## @param master.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable/disable the liveness probe (master-eligible nodes pod) + ## @param master.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (master-eligible nodes pod) + ## @param master.livenessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.livenessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.livenessProbe.successThreshold 
Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable/disable the readiness probe (master-eligible nodes pod) + ## @param master.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (master-eligible nodes pod) + ## @param master.readinessProbe.periodSeconds How often to perform the probe (master-eligible nodes pod) + ## @param master.readinessProbe.timeoutSeconds When the probe times out (master-eligible nodes pod) + ## @param master.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (master-eligible nodes pod) + ## @param master.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.lifecycleHooks for the master-elegible container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param 
master.extraEnvVars Array with extra environment variables to add to master-elegible nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for master-elegible nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for master-elegible nodes + ## + extraEnvVarsSecret: "" + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the master-elegible pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the master-elegible container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the master-elegible pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the master-elegible pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param master.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param master.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param master.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `master.persistence.selector` is set. + ## + existingVolume: "" + ## @param master.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `master.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param master.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param master.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param master.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. + ## @param master.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param master.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
+ ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: true + annotations: {} + ## Enable HorizontalPodAutoscaler for Elasticsearch Master pods + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## @param master.autoscaling.enabled Whether to enable horizontal pod autoscaling + ## @param master.autoscaling.minReplicas Configure a minimum amount of pods + ## @param master.autoscaling.maxReplicas Configure a maximum amount of pods + ## @param master.autoscaling.targetCPU Define the CPU target to trigger the scaling actions (utilization percentage) + ## @param master.autoscaling.targetMemory Define the memory target to trigger the scaling actions (utilization percentage) + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Data-only nodes parameters + +data: + ## @param data.replicaCount Number of data-only replicas to deploy + ## + replicaCount: 1 + ## @param data.nameOverride String to partially override elasticsearch.data.fullname + ## + nameOverride: "" + ## @param data.fullnameOverride String to fully override elasticsearch.data.fullname + ## + fullnameOverride: "" + ## @param data.servicenameOverride String to fully override elasticsearch.data.servicename + ## + servicenameOverride: "" + ## @param data.annotations [object] Annotations for the data statefulset + ## + annotations: {} + ## @param data.updateStrategy.type Data-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## Elasticsearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param data.resources.limits The resources limits for the data containers + ## @param data.resources.requests [object] The requested resources for the data containers + ## + resources: + limits: {} + requests: + cpu: 25m + 
memory: 2048Mi + ## @param data.heapSize Elasticsearch data node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 1024m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param data.podSecurityContext.enabled Enabled data pods' Security Context + ## @param data.podSecurityContext.fsGroup Set data pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param data.containerSecurityContext.enabled Enabled data containers' Security Context + ## @param data.containerSecurityContext.runAsUser Set data containers' Security Context runAsUser + ## @param data.containerSecurityContext.runAsNonRoot Set data containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param data.hostAliases data pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param data.podLabels Extra labels for data pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param data.podAnnotations Annotations for data pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param data.podAffinityPreset Pod affinity preset. Ignored if `data.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param data.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node data.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param data.nodeAffinityPreset.type Node affinity preset type. Ignored if `data.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param data.nodeAffinityPreset.key Node label key to match. Ignored if `data.affinity` is set + ## + key: "" + ## @param data.nodeAffinityPreset.values Node label values to match. Ignored if `data.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param data.affinity Affinity for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `data.podAffinityPreset`, `data.podAntiAffinityPreset`, and `data.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param data.nodeSelector Node labels for data pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param data.tolerations Tolerations for data pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param data.priorityClassName data pods' priorityClassName + ## + priorityClassName: "" + ## @param data.schedulerName Name of the k8s scheduler (other than default) for data pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param data.terminationGracePeriodSeconds In seconds, time the given to the Elasticsearch data pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param data.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param data.podManagementPolicy podManagementPolicy to manage scaling operation of Elasticsearch data pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for Elasticsearch data containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param data.startupProbe.enabled Enable/disable the startup probe (data nodes pod) + ## @param data.startupProbe.initialDelaySeconds Delay before startup probe is initiated (data nodes pod) + ## @param data.startupProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.startupProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param data.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.livenessProbe.enabled Enable/disable the liveness probe (data nodes pod) + ## @param data.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (data nodes pod) + ## @param data.livenessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.livenessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed 
(data nodes pod) + ## @param data.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.readinessProbe.enabled Enable/disable the readiness probe (data nodes pod) + ## @param data.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (data nodes pod) + ## @param data.readinessProbe.periodSeconds How often to perform the probe (data nodes pod) + ## @param data.readinessProbe.timeoutSeconds When the probe times out (data nodes pod) + ## @param data.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (data nodes pod) + ## @param data.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param data.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param data.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param data.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param data.command Override default container command (useful when using custom images) + ## + command: [] + ## @param data.args Override default container args (useful when using custom images) + ## + args: [] + ## @param data.lifecycleHooks for the data container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param data.extraEnvVars Array with extra environment variables to add to data nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param data.extraEnvVarsCM 
Name of existing ConfigMap containing extra env vars for data nodes + ## + extraEnvVarsCM: "" + ## @param data.extraEnvVarsSecret Name of existing Secret containing extra env vars for data nodes + ## + extraEnvVarsSecret: "" + ## @param data.extraVolumes Optionally specify extra list of additional volumes for the data pod(s) + ## + extraVolumes: [] + ## @param data.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the data container(s) + ## + extraVolumeMounts: [] + ## @param data.sidecars Add additional sidecar containers to the data pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param data.initContainers Add additional init containers to the data pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param data.persistence.enabled Enable persistence using a `PersistentVolumeClaim` + ## + enabled: true + ## @param data.persistence.storageClass Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param data.persistence.existingClaim Existing Persistent Volume Claim + ## then accept the value as an existing Persistent Volume Claim to which + ## the container should be bound + ## + existingClaim: "" + ## @param data.persistence.existingVolume Existing Persistent Volume for use as volume match label selector to the `volumeClaimTemplate`. Ignored when `data.persistence.selector` is set. + ## + existingVolume: "" + ## @param data.persistence.selector Configure custom selector for existing Persistent Volume. Overwrites `data.persistence.existingVolume` + ## selector: + ## matchLabels: + ## volume: + ## + selector: {} + ## @param data.persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param data.persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param data.persistence.size Persistent Volume Size + ## + size: 8Gi + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param data.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param data.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. + ## @param data.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param data.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
+ ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: true + annotations: {} + ## Enable HorizontalPodAutoscaler for Elasticsearch data pods + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## @param data.autoscaling.enabled Whether to enable horizontal pod autoscale + ## @param data.autoscaling.minReplicas Configure a minimum amount of pods + ## @param data.autoscaling.maxReplicas Configure a maximum amount of pods + ## @param data.autoscaling.targetCPU Define the CPU target to trigger the scaling actions (utilization percentage) + ## @param data.autoscaling.targetMemory Define the memory target to trigger the scaling actions (utilization percentage) + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Coordinating-only nodes parameters + +coordinating: + ## @param coordinating.replicaCount Number of coordinating-only replicas to deploy + ## + replicaCount: 1 + ## @param coordinating.nameOverride String to partially override elasticsearch.coordinating.fullname + ## + nameOverride: "" + ## @param coordinating.fullnameOverride String to fully override elasticsearch.coordinating.fullname + ## + fullnameOverride: "" + ## @param coordinating.servicenameOverride String to fully override elasticsearch.coordinating.servicename + ## + servicenameOverride: "" + ## @param coordinating.annotations [object] Annotations for the coordinating-only statefulset + ## + annotations: {} + ## @param coordinating.updateStrategy.type Coordinating-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## Elasticsearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param coordinating.resources.limits The resources limits for the coordinating-only containers + ## @param 
coordinating.resources.requests [object] The requested resources for the coordinating-only containers + ## + resources: + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## @param coordinating.heapSize Elasticsearch coordinating node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 128m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param coordinating.podSecurityContext.enabled Enabled coordinating-only pods' Security Context + ## @param coordinating.podSecurityContext.fsGroup Set coordinating-only pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param coordinating.containerSecurityContext.enabled Enabled coordinating-only containers' Security Context + ## @param coordinating.containerSecurityContext.runAsUser Set coordinating-only containers' Security Context runAsUser + ## @param coordinating.containerSecurityContext.runAsNonRoot Set coordinating-only containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param coordinating.hostAliases coordinating-only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param coordinating.podLabels Extra labels for coordinating-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param coordinating.podAnnotations Annotations for coordinating-only pods + ## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param coordinating.podAffinityPreset Pod affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param coordinating.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node coordinating.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param coordinating.nodeAffinityPreset.type Node affinity preset type. Ignored if `coordinating.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param coordinating.nodeAffinityPreset.key Node label key to match. Ignored if `coordinating.affinity` is set + ## + key: "" + ## @param coordinating.nodeAffinityPreset.values Node label values to match. Ignored if `coordinating.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param coordinating.affinity Affinity for coordinating-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `coordinating.podAffinityPreset`, `coordinating.podAntiAffinityPreset`, and `coordinating.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param coordinating.nodeSelector Node labels for coordinating-only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param coordinating.tolerations Tolerations for coordinating-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param coordinating.priorityClassName coordinating-only pods' priorityClassName + ## + priorityClassName: "" + ## @param coordinating.schedulerName Name of the k8s scheduler (other than default) for coordinating-only pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param coordinating.terminationGracePeriodSeconds In seconds, the time given to the Elasticsearch coordinating pod to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param coordinating.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param coordinating.podManagementPolicy podManagementPolicy to manage scaling operation of Elasticsearch coordinating pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for Elasticsearch coordinating-only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param coordinating.startupProbe.enabled Enable/disable the startup probe (coordinating-only nodes pod) + ## @param coordinating.startupProbe.initialDelaySeconds Delay before startup probe is initiated (coordinating-only nodes pod) + ## @param coordinating.startupProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.startupProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## @param coordinating.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.livenessProbe.enabled Enable/disable the liveness probe (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param 
coordinating.livenessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## @param coordinating.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.readinessProbe.enabled Enable/disable the readiness probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.periodSeconds How often to perform the probe (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.timeoutSeconds When the probe times out (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (coordinating-only nodes pod) + ## @param coordinating.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param coordinating.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param coordinating.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param coordinating.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param coordinating.command Override default container command (useful when using custom images) + ## + command: [] + ## @param coordinating.args Override 
default container args (useful when using custom images) + ## + args: [] + ## @param coordinating.lifecycleHooks for the coordinating-only container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param coordinating.extraEnvVars Array with extra environment variables to add to coordinating-only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param coordinating.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for coordinating-only nodes + ## + extraEnvVarsCM: "" + ## @param coordinating.extraEnvVarsSecret Name of existing Secret containing extra env vars for coordinating-only nodes + ## + extraEnvVarsSecret: "" + ## @param coordinating.extraVolumes Optionally specify extra list of additional volumes for the coordinating-only pod(s) + ## + extraVolumes: [] + ## @param coordinating.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the coordinating-only container(s) + ## + extraVolumeMounts: [] + ## @param coordinating.sidecars Add additional sidecar containers to the coordinating-only pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param coordinating.initContainers Add additional init containers to the coordinating-only pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param coordinating.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param coordinating.serviceAccount.name Name of the service account to use. 
If not set and create is true, a name is generated using the fullname template. + ## @param coordinating.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param coordinating.serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: true + annotations: {} + ## Enable HorizontalPodAutoscaler for Elasticsearch coordinating pods + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## @param coordinating.autoscaling.enabled Whether to enable horizontal pod autoscale + ## @param coordinating.autoscaling.minReplicas Configure a minimum amount of pods + ## @param coordinating.autoscaling.maxReplicas Configure a maximum amount of pods + ## @param coordinating.autoscaling.targetCPU Define the CPU target to trigger the scaling actions (utilization percentage) + ## @param coordinating.autoscaling.targetMemory Define the memory target to trigger the scaling actions (utilization percentage) + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + +## @section Ingest-only nodes parameters + +ingest: + ## @param ingest.enabled Enable ingest nodes + ## + enabled: true + ## @param ingest.replicaCount Number of ingest-only replicas to deploy + ## + replicaCount: 1 + ## @param ingest.nameOverride String to partially override elasticsearch.ingest.fullname + ## + nameOverride: "" + ## @param ingest.fullnameOverride String to fully override elasticsearch.ingest.fullname + ## + fullnameOverride: "" + ## @param ingest.servicenameOverride String to fully override elasticsearch.ingest.servicename + ## + servicenameOverride: "" + ## @param ingest.annotations [object] Annotations for the ingest statefulset + ## + annotations: {} + ## @param ingest.containerPorts.restAPI Elasticsearch REST API port + ## @param 
ingest.containerPorts.transport Elasticsearch Transport port + ## + containerPorts: + restAPI: 9200 + transport: 9300 + ## @param ingest.updateStrategy.type Ingest-only nodes statefulset strategy type + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + ## Elasticsearch resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param ingest.resources.limits The resources limits for the ingest-only containers + ## @param ingest.resources.requests [object] The requested resources for the ingest-only containers + ## + resources: + limits: {} + requests: + cpu: 25m + memory: 256Mi + ## @param ingest.heapSize Elasticsearch ingest-only node heap size. + ## Note: The recommended heapSize is half of the container's memory. + ## If omitted, it will be automatically set. + ## Example: + ## heapSize: 128m + ## + heapSize: 128m + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param ingest.podSecurityContext.enabled Enabled ingest-only pods' Security Context + ## @param ingest.podSecurityContext.fsGroup Set ingest-only pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param ingest.containerSecurityContext.enabled Enabled ingest-only containers' Security Context + ## @param ingest.containerSecurityContext.runAsUser Set ingest-only containers' Security Context runAsUser + ## @param ingest.containerSecurityContext.runAsNonRoot Set ingest-only containers' Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param ingest.hostAliases ingest-only pods host aliases + 
## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param ingest.podLabels Extra labels for ingest-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param ingest.podAnnotations Annotations for ingest-only pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param ingest.podAffinityPreset Pod affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param ingest.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node ingest.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param ingest.nodeAffinityPreset.type Node affinity preset type. Ignored if `ingest.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param ingest.nodeAffinityPreset.key Node label key to match. Ignored if `ingest.affinity` is set + ## + key: "" + ## @param ingest.nodeAffinityPreset.values Node label values to match. Ignored if `ingest.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param ingest.affinity Affinity for ingest-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `ingest.podAffinityPreset`, `ingest.podAntiAffinityPreset`, and `ingest.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param ingest.nodeSelector Node labels for ingest-only pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param ingest.tolerations Tolerations for ingest-only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param ingest.priorityClassName ingest-only pods' priorityClassName + ## + priorityClassName: "" + ## @param ingest.schedulerName Name of the k8s scheduler (other than default) for ingest-only pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param ingest.terminationGracePeriodSeconds In seconds, the time given to the Elasticsearch ingest pod to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## @param ingest.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param ingest.podManagementPolicy podManagementPolicy to manage scaling operation of Elasticsearch ingest pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "Parallel" + ## Configure extra options for Elasticsearch ingest-only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param ingest.startupProbe.enabled Enable/disable the startup probe (ingest-only nodes pod) + ## @param ingest.startupProbe.initialDelaySeconds Delay before startup probe is initiated (ingest-only nodes pod) + ## @param ingest.startupProbe.periodSeconds How often to perform the probe (ingest-only nodes pod) + ## @param ingest.startupProbe.timeoutSeconds When the probe times out (ingest-only nodes pod) + ## @param ingest.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) + ## @param ingest.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + startupProbe: + enabled: false + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.livenessProbe.enabled Enable/disable the liveness probe (ingest-only nodes pod) + ## @param ingest.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (ingest-only nodes pod) + ## @param ingest.livenessProbe.periodSeconds How often to perform the probe (ingest-only nodes pod) + ## @param ingest.livenessProbe.timeoutSeconds When the probe times out (ingest-only nodes pod) + ## @param 
ingest.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) + ## @param ingest.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + livenessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.readinessProbe.enabled Enable/disable the readiness probe (ingest-only nodes pod) + ## @param ingest.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (ingest-only nodes pod) + ## @param ingest.readinessProbe.periodSeconds How often to perform the probe (ingest-only nodes pod) + ## @param ingest.readinessProbe.timeoutSeconds When the probe times out (ingest-only nodes pod) + ## @param ingest.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (ingest-only nodes pod) + ## @param ingest.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## + readinessProbe: + enabled: true + initialDelaySeconds: 90 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param ingest.customStartupProbe Override default startup probe + ## + customStartupProbe: {} + ## @param ingest.customLivenessProbe Override default liveness probe + ## + customLivenessProbe: {} + ## @param ingest.customReadinessProbe Override default readiness probe + ## + customReadinessProbe: {} + ## @param ingest.command Override default container command (useful when using custom images) + ## + command: [] + ## @param ingest.args Override default container args (useful when using custom images) + ## + args: [] + ## @param ingest.lifecycleHooks for the ingest-only container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## 
@param ingest.extraEnvVars Array with extra environment variables to add to ingest-only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param ingest.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ingest-only nodes + ## + extraEnvVarsCM: "" + ## @param ingest.extraEnvVarsSecret Name of existing Secret containing extra env vars for ingest-only nodes + ## + extraEnvVarsSecret: "" + ## @param ingest.extraVolumes Optionally specify extra list of additional volumes for the ingest-only pod(s) + ## + extraVolumes: [] + ## @param ingest.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ingest-only container(s) + ## + extraVolumeMounts: [] + ## @param ingest.sidecars Add additional sidecar containers to the ingest-only pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param ingest.initContainers Add additional init containers to the ingest-only pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Pods Service Account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param ingest.serviceAccount.create Specifies whether a ServiceAccount should be created + ## @param ingest.serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. + ## @param ingest.serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## @param ingest.serviceAccount.annotations Annotations for service account. Evaluated as a template. 
Only used if `create` is `true`. + ## + serviceAccount: + create: false + name: "" + automountServiceAccountToken: true + annotations: {} + ## Enable HorizontalPodAutoscaler for Elasticsearch ingest-only pods + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## @param ingest.autoscaling.enabled Whether enable horizontal pod autoscale + ## @param ingest.autoscaling.minReplicas Configure a minimum amount of pods + ## @param ingest.autoscaling.maxReplicas Configure a maximum amount of pods + ## @param ingest.autoscaling.targetCPU Define the CPU target to trigger the scaling actions (utilization percentage) + ## @param ingest.autoscaling.targetMemory Define the memory target to trigger the scaling actions (utilization percentage) + ## + autoscaling: + enabled: false + minReplicas: 3 + maxReplicas: 11 + targetCPU: "" + targetMemory: "" + + ## Elasticsearch Ingest-only Service + ## Recommended for heavy ingestion, improves performance by sending ingest traffic directly into the ingest nodes. + ## NOTE: Ingest nodes will only accept index requests with an associated pipeline, any other request won't be rerouted. 
+ ## + service: + ## @param ingest.service.enabled Enable Ingest-only service + ## + enabled: false + ## @param ingest.service.type Elasticsearch ingest-only service type + ## + type: ClusterIP + ## @param ingest.service.ports.restAPI Elasticsearch service REST API port + ## @param ingest.service.ports.transport Elasticsearch service transport port + ## + ports: + restAPI: 9200 + transport: 9300 + ## Node ports to expose + ## @param ingest.service.nodePorts.restAPI Node port for REST API + ## @param ingest.service.nodePorts.transport Node port for transport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + restAPI: "" + transport: "" + ## @param ingest.service.clusterIP Elasticsearch ingest-only service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param ingest.service.loadBalancerIP Elasticsearch ingest-only service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param ingest.service.loadBalancerSourceRanges Elasticsearch ingest-only service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param ingest.service.externalTrafficPolicy Elasticsearch ingest-only service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param ingest.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param ingest.service.annotations Additional custom annotations for Elasticsearch ingest-only service + ## + annotations: {} + ## @param ingest.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + 
## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param ingest.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Elasticsearch Ingest-only ingress parameters + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + ## @param ingest.ingress.enabled Enable ingress record generation for Elasticsearch + ## + enabled: false + ## @param ingest.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingest.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingest.ingress.hostname Default host for the ingress record + ## + hostname: elasticsearch-ingest.local + ## @param ingest.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param ingest.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingest.ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingest.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingest.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. 
+ ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingest.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: elasticsearch.local + ## path: / + ## + extraHosts: [] + ## @param ingest.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingest.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - elasticsearch.local + ## secretName: elasticsearch.local-tls + ## + extraTls: [] + ## @param ingest.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: elasticsearch.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... 
+ ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingest.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + + +## @section Metrics parameters + +## Elasticsearch Prometheus exporter configuration +## ref: https://hub.docker.com/r/bitnami/elasticsearch-exporter/tags/ +## +metrics: + ## @param metrics.enabled Enable prometheus exporter + ## + enabled: false + ## @param metrics.nameOverride Metrics pod name + ## + nameOverride: "" + ## @param metrics.fullnameOverride String to fully override common.names.fullname + ## + fullnameOverride: "" + ## @param metrics.image.registry Metrics exporter image registry + ## @param metrics.image.repository Metrics exporter image repository + ## @param metrics.image.tag Metrics exporter image tag + ## @param metrics.image.digest Metrics exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Metrics exporter image pull policy + ## @param metrics.image.pullSecrets Metrics exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/elasticsearch-exporter + tag: 1.5.0-debian-11-r80 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param metrics.annotations [object] Annotations for metrics + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: + helm.sh/hook: "post-install,post-upgrade" + helm.sh/hook-weight: "5" + ## @param metrics.extraArgs Extra arguments to add to the default exporter command + ## ref: https://github.com/justwatchcom/elasticsearch_exporter + ## e.g + ## extraArgs: + ## - --es.snapshots + ## - --es.indices + ## + extraArgs: [] + ## @param metrics.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param metrics.schedulerName Name of the k8s scheduler (other than default) + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.priorityClassName Elasticsearch metrics exporter pods' priorityClassName + ## + priorityClassName: "" + ## Elasticsearch Prometheus exporter service type + ## + service: + ## @param metrics.service.type Metrics exporter endpoint service type + ## + type: ClusterIP + ## @param metrics.service.port Metrics exporter endpoint service port + ## + port: 9114 + ## @param metrics.service.annotations [object] Provide any additional annotations which may be required. + ## This can be used to set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9114" + ## @param metrics.podAffinityPreset Metrics Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param metrics.podAntiAffinityPreset Metrics Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: "" + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param metrics.nodeAffinityPreset.type Metrics Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## @param metrics.nodeAffinityPreset.key Metrics Node label key to match Ignored if `affinity` is set. + ## @param metrics.nodeAffinityPreset.values Metrics Node label values to match. Ignored if `affinity` is set. + ## + nodeAffinityPreset: + type: "" + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.affinity Metrics Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.nodeSelector Metrics Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param metrics.tolerations Metrics Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## Elasticsearch Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param metrics.resources.limits The resources limits for the container + ## @param metrics.resources.requests The requested resources for the container + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + requests: {} + ## Elasticsearch metrics container's liveness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.livenessProbe.enabled Enable/disable the liveness probe (metrics pod) + ## @param metrics.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated (metrics pod) + ## @param metrics.livenessProbe.periodSeconds How often to perform the probe (metrics pod) + ## @param metrics.livenessProbe.timeoutSeconds When the probe times out (metrics pod) + ## @param metrics.livenessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param metrics.livenessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## 
Elasticsearch metrics container's readiness probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.readinessProbe.enabled Enable/disable the readiness probe (metrics pod) + ## @param metrics.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated (metrics pod) + ## @param metrics.readinessProbe.periodSeconds How often to perform the probe (metrics pod) + ## @param metrics.readinessProbe.timeoutSeconds When the probe times out (metrics pod) + ## @param metrics.readinessProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param metrics.readinessProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## Elasticsearch metrics container's startup probe + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param metrics.startupProbe.enabled Enable/disable the startup probe (metrics pod) + ## @param metrics.startupProbe.initialDelaySeconds Delay before startup probe is initiated (metrics pod) + ## @param metrics.startupProbe.periodSeconds How often to perform the probe (metrics pod) + ## @param metrics.startupProbe.timeoutSeconds When the probe times out (metrics pod) + ## @param metrics.startupProbe.failureThreshold Minimum consecutive failures for the probe to be considered failed after having succeeded + ## @param metrics.startupProbe.successThreshold Minimum consecutive successes for the probe to be considered successful after having failed (metrics pod) + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.customStartupProbe Custom startup 
probe for the Web component + ## + customStartupProbe: {} + ## @param metrics.customLivenessProbe Custom liveness probe for the Web component + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readiness probe for the Web component + ## + customReadinessProbe: {} + ## @param metrics.podAnnotations [object] Metrics exporter pod Annotation and Labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9114" + ## @param metrics.podLabels Extra labels to add to Pod + ## + podLabels: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.podSecurityContext.enabled Enabled Elasticsearch metrics exporter pods' Security Context + ## @param metrics.podSecurityContext.fsGroup Set Elasticsearch metrics exporter pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.containerSecurityContext.enabled Enabled Elasticsearch metrics exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Elasticsearch metrics exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsNonRoot Set Elasticsearch metrics exporter container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.command Override default container command (useful when using custom images) + ## + command: [] + ## @param metrics.args Override default container args (useful when using custom images) + ## + args: [] + ## @param metrics.extraEnvVars Array with extra environment variables to 
add to Elasticsearch metrics exporter nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param metrics.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Elasticsearch metrics exporter nodes + ## + extraEnvVarsCM: "" + ## @param metrics.extraEnvVarsSecret Name of existing Secret containing extra env vars for Elasticsearch metrics exporter nodes + ## + extraEnvVarsSecret: "" + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Elasticsearch metrics exporter pod(s) + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Elasticsearch metrics exporter container(s) + ## + extraVolumeMounts: [] + ## @param metrics.sidecars Add additional sidecar containers to the Elasticsearch metrics exporter pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param metrics.initContainers Add additional init containers to the Elasticsearch metrics exporter pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.db-namespace db-namespace which Prometheus is running in + ## e.g: + ## db-namespace: monitoring + ## + db_namespace: knowlg-db + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) + ## + enabled: false + ## @param metrics.prometheusRule.db-namespace db-namespace for the PrometheusRule Resource (defaults to the Release db-namespace) + ## + db_namespace: knowlg-db + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + 
## @param metrics.prometheusRule.rules Prometheus Rule definitions + # - alert: es cluster error + # annotations: + # summary: "es cluster error" + # description: "es cluster error, cluster state {{`{{`}} $labels.color {{`}}`}}" + # expr: elasticsearch_cluster_health_status{color="red"} ==1 or elasticsearch_cluster_health_status{color="yellow"} ==1 + # for: 1m + # labels: + # severity: critical + # group: PaaS + ## + rules: [] + +## @section Init Container Parameters + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image name + ## @param volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r97 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container' resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits The resources limits for the container + ## @param volumePermissions.resources.requests The requested resources for the container + ## + resources: + limits: {} + requests: {} +sysctlImage: + enabled: true + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r97 + digest: "" + pullPolicy: IfNotPresent + pullSecrets: [] + resources: + limits: {} + requests: {} +kibana: + elasticsearch: + hosts: + - '{{ include "elasticsearch.service.name" . }}' + port: '{{ include "elasticsearch.service.ports.restAPI" . }}' diff --git a/knowlg-automation/helm_charts/flink/.helmignore b/knowlg-automation/helm_charts/flink/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/knowlg-automation/helm_charts/flink/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/knowlg-automation/helm_charts/flink/Chart.yaml b/knowlg-automation/helm_charts/flink/Chart.yaml new file mode 100644 index 000000000..31dcba4d2 --- /dev/null +++ b/knowlg-automation/helm_charts/flink/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: flink +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.16.0 diff --git a/knowlg-automation/helm_charts/flink/templates/_helpers.tpl b/knowlg-automation/helm_charts/flink/templates/_helpers.tpl new file mode 100644 index 000000000..18df42085 --- /dev/null +++ b/knowlg-automation/helm_charts/flink/templates/_helpers.tpl @@ -0,0 +1,67 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "datapipeline_jobs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "name" -}} +{{- default .Release.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "datapipeline_jobs.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "datapipeline_jobs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "datapipeline_jobs.labels" -}} +helm.sh/chart: {{ include "datapipeline_jobs.chart" . }} +{{ include "datapipeline_jobs.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "datapipeline_jobs.selectorLabels" -}} +app.kubernetes.io/name: {{ include "datapipeline_jobs.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "datapipeline_jobs.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "datapipeline_jobs.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/flink/templates/flink_job_configmap.yaml b/knowlg-automation/helm_charts/flink/templates/flink_job_configmap.yaml new file mode 100644 index 000000000..6f3356860 --- /dev/null +++ b/knowlg-automation/helm_charts/flink/templates/flink_job_configmap.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-config + namespace: {{ .Values.namespace }} + labels: + app: flink +data: + base-config: |+ +{{ .Values.base_config | indent 4 }} +{{- $name := .Release.Name }} +{{ index .Values $name | toYaml | indent 2 }} + log4j_console_properties: |+ +{{ .Values.log4j_console_properties | indent 4 }} \ No newline at end of file diff --git a/knowlg-automation/helm_charts/flink/templates/flink_job_deployment.yaml b/knowlg-automation/helm_charts/flink/templates/flink_job_deployment.yaml new file mode 100644 index 000000000..d1ec58710 --- /dev/null +++ b/knowlg-automation/helm_charts/flink/templates/flink_job_deployment.yaml @@ -0,0 +1,202 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-jobmanager + namespace: {{ .Values.namespace }} + labels: + app: flink + component: {{ .Release.Name }}-jobmanager +spec: + type: ClusterIP + ports: + - name: rpc + port: {{ .Values.jobmanager.rpc_port }} + - name: blob + port: {{ .Values.jobmanager.blob_port }} + - name: query + port: {{ .Values.jobmanager.query_port }} + - name: ui + port: {{ .Values.jobmanager.ui_port }} + - name: prom + port: {{ .Values.jobmanager.prom_port }} + selector: + app: flink + component: {{ .Release.Name }}-jobmanager +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-jobmanager-webui + namespace: {{ .Values.namespace }} + {{- if .Values.service.annotations }} + {{- with .Values.service.annotations }} +annotations: + {{ toYaml . 
| indent 4 }} + {{- end }} + {{- end }} +spec: + {{- if eq .Values.service.type "ClusterIP" }} + type: ClusterIP + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + type: LoadBalancer + {{- end }} + ports: + - name: rest + port: {{ .Values.rest_port }} + protocol: TCP + targetPort: {{ .Values.resttcp_port }} + selector: + app: flink + component: {{ .Release.Name }}-jobmanager +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-jobmanager + namespace: {{ .Values.namespace }} +spec: + template: + metadata: + labels: + app: flink + component: {{ .Release.Name }}-jobmanager + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: "{{ .Values.jobmanager.prom_port }}" + spec: + volumes: + - name: flink-config-volume + configMap: + name: {{ .Release.Name }}-config + items: + - key: flink-conf + path: flink-conf.yaml + - key: base-config + path: base-config.conf + - key: {{ .Release.Name }} + path: {{ .Release.Name }}.conf + - key: log4j_console_properties + path: log4j-console.properties + restartPolicy: OnFailure +# imagePullSecrets: +# - name: {{ .Values.imagepullsecrets }} + containers: + - name: {{ .Release.Name }}-jobmanager + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Always + workingDir: /opt/flink + command: ["/opt/flink/bin/standalone-job.sh"] + args: ["start-foreground", +{{- if eq .Values.checkpoint_store_type "azure" }} + "-Dfs.azure.account.key.{{ .Values.azure_account }}.blob.core.windows.net={{ .Values.azure_secret }}", +{{- end }} +{{- if eq .Values.checkpoint_store_type "s3" }} + "-Ds3.access-key={{ .Values.s3_access_key }}", + "-Ds3.secret-key={{ .Values.s3_secret_key }}", + "-Ds3.endpoint={{ .Values.s3_endpoint }}", + "-Ds3.path.style.access={{ .Values.s3_path_style_access }}", +{{- end }} + "--job-classname={{ (index .Values $.Release.Name).job_classname }}", + "-Dweb.submit.enable=false", + 
"-Dmetrics.reporter.prom.class=org.apache.flink.metrics.prometheus.PrometheusReporter", + "-Dmetrics.reporter.prom.port={{ .Values.jobmanager.prom_port }}", + "-Djobmanager.rpc.address={{ .Release.Name }}-jobmanager", + "-Djobmanager.rpc.port={{ .Values.jobmanager.rpc_port }}", + "-Dparallelism.default=1", + "-Dblob.server.port={{ .Values.jobmanager.blob_port }}", + "-Dqueryable-state.server.ports={{ .Values.jobmanager.query_port }}", + "--config.file.path", + "/data/flink/conf/{{ .Release.Name }}.conf"] + ports: + - containerPort: {{ .Values.jobmanager.rpc_port }} + name: rpc + - containerPort: {{ .Values.jobmanager.blob_port }} + name: blob + - containerPort: {{ .Values.jobmanager.query_port }} + name: query + - containerPort: {{ .Values.jobmanager.ui_port }} + name: ui + volumeMounts: + - name: flink-config-volume + mountPath: /opt/flink/conf/flink-conf.yaml + subPath: flink-conf.yaml + - name: flink-config-volume + mountPath: /data/flink/conf/base-config.conf + subPath: base-config.conf + - name: flink-config-volume + mountPath: /data/flink/conf/{{ .Release.Name }}.conf + subPath: {{ .Release.Name }}.conf + - name: flink-config-volume + mountPath: /opt/flink/conf/log4j-console.properties + subPath: log4j-console.properties + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-taskmanager + namespace: {{ .Values.namespace }} +spec: + replicas: {{ .Values.taskmanager.replicas }} + selector: + matchLabels: + app: flink + component: {{ .Release.Name }}-taskmanager + template: + metadata: + labels: + app: flink + component: {{ .Release.Name }}-taskmanager + spec: + volumes: + - name: flink-config-volume + configMap: + name: {{ .Release.Name }}-config + items: + - key: flink-conf + path: flink-conf.yaml + - key: log4j_console_properties + path: log4j-console.properties +# imagePullSecrets: +# - name: {{ .Values.imagepullsecrets }} + containers: + - name: {{ .Release.Name }}-taskmanager + image: "{{ .Values.repository }}:{{ 
.Values.image_tag }}" + imagePullPolicy: Always + resources: + requests: + cpu: "{{ .Values.taskmanager.cpu_requests }}" + workingDir: {{ .Values.taskmanager.flink_work_dir }} + command: ["/opt/flink/bin/taskmanager.sh"] + args: ["start-foreground", +{{- if eq .Values.checkpoint_store_type "azure" }} + "-Dfs.azure.account.key.{{ .Values.azure_account }}.blob.core.windows.net={{ .Values.azure_secret }}", +{{- end }} +{{- if eq .Values.checkpoint_store_type "s3" }} + "-Ds3.access-key={{ .Values.s3_access_key }}", + "-Ds3.secret-key={{ .Values.s3_secret_key }}", + "-Ds3.endpoint={{ .Values.s3_endpoint }}", + "-Ds3.path.style.access={{ .Values.s3_path_style_access }}", +{{- end }} + "-Dweb.submit.enable=false", + "-Dmetrics.reporter.prom.class=org.apache.flink.metrics.prometheus.PrometheusReporter", + "-Dmetrics.reporter.prom.host={{ .Release.Name }}-taskmanager", + "-Dmetrics.reporter.prom.port=9251-9260", + "-Djobmanager.rpc.address={{ .Release.Name }}-jobmanager", + "-Dtaskmanager.rpc.port={{ .Values.taskmanager.rpc_port }}"] + ports: + - containerPort: {{ .Values.taskmanager.rpc_port }} + name: rpc + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + {{- end }} + volumeMounts: + - name: flink-config-volume + mountPath: /opt/flink/conf/flink-conf.yaml + subPath: flink-conf.yaml + - name: flink-config-volume + mountPath: /opt/flink/conf/log4j-console.properties + subPath: log4j-console.properties \ No newline at end of file diff --git a/knowlg-automation/helm_charts/flink/values.yaml b/knowlg-automation/helm_charts/flink/values.yaml new file mode 100644 index 000000000..1a6b6b81d --- /dev/null +++ b/knowlg-automation/helm_charts/flink/values.yaml @@ -0,0 +1,660 @@ +namespace: "knowlg-job" +imagepullsecrets: "" +dockerhub: "" +repository: "aimansharief/kp-jobs" +# image_tag: "1.0.0" +image_tag: "latest" +serviceMonitor: + enabled: false +replicaCount: 1 + +jobmanager: + rpc_port: 6123 + blob_port: 6124 + query_port: 6125 + 
ui_port: 8081 + prom_port: 9250 + heap_memory: 1024 + +rest_port: 80 +resttcp_port: 8081 +service: + type: ClusterIP + +taskmanager: + prom_port: 9251 + rpc_port: 6122 + heap_memory: 1024 + replicas: 1 + cpu_requests: 0.3 + +checkpoint_store_type: "" + +# AWS S3 Details +s3_access_key: "" +s3_secret_key: "" +s3_endpoint: "" +s3_path_style_access: "" + +# Azure Container Details +azure_account: "" +azure_secret: "" + + +log4j_console_properties: | + # This affects logging for both user code and Flink + rootLogger.level = INFO + rootLogger.appenderRef.console.ref = ConsoleAppender + + # Uncomment this if you want to _only_ change Flink's logging + #logger.flink.name = org.apache.flink + #logger.flink.level = INFO + + # The following lines keep the log level of common libraries/connectors on + # log level INFO. The root logger does not override this. You have to manually + # change the log levels here. + logger.akka.name = akka + logger.akka.level = ERROR + logger.kafka.name= org.apache.kafka + logger.kafka.level = ERROR + logger.hadoop.name = org.apache.hadoop + logger.hadoop.level = ERROR + logger.zookeeper.name = org.apache.zookeeper + logger.zookeeper.level = ERROR + + # Log all infos to the console + appender.console.name = ConsoleAppender + appender.console.type = CONSOLE + appender.console.layout.type = PatternLayout + appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n + + # Suppress the irrelevant (wrong) warnings from the Netty channel handler + logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline + logger.netty.level = OFF + +base_config: | + kafka { + broker-servers = "kafka-headless.knowlg-db.svc.cluster.local:9092" + producer.broker-servers = "kafka-headless.knowlg-db.svc.cluster.local:9092" + consumer.broker-servers = "kafka-headless.knowlg-db.svc.cluster.local:9092" + zookeeper = "kafka-zookeeper-headless.knowlg-db.svc.cluster.local:2181" + producer { + max-request-size = 1572864 
+ batch.size = 98304 + linger.ms = 10 + compression = "snappy" + } + output.system.event.topic = "dev.system.events" + } + job { + env = "local" + enable.distributed.checkpointing = false + statebackend { + blob { + storage { + account = "blob.storage.account" + container = "kp-checkpoints" + checkpointing.dir = "flink-jobs" + } + } + base.url = "" + } + } + task { + parallelism = 1 + consumer.parallelism = 1 + checkpointing.compressed = true + checkpointing.interval = 10 + checkpointing.pause.between.seconds = 3000 + restart-strategy.attempts = 3 + restart-strategy.delay = 30000 # in milli-seconds + } + + + redis { + host = redis-db.knowlg-db.svc.cluster.local + port = 6379 + connection { + max = 2 + idle.min = 1 + idle.max = 2 + minEvictableIdleTimeSeconds = 120 + timeBetweenEvictionRunsSeconds = 300 + } + } + lms-cassandra { + host = cassandra-db.knowlg-db.svc.cluster.local + port = "9042" + } + + neo4j { + routePath = "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + graph = "domain" + } + + es { + basePath = "elasticsearch.knowlg-db.svc.cluster.local:9200" + } + +search-indexer: + search-indexer: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = "dev" + } + kafka { + event.max.size = "1048576" # Max is only 1MB + input.topic = "dev.learning.graph.events" + error.topic = "dev.learning.events.failed" + groupId = "dev-search-indexer-group" + producer { + max-request-size = 5242880 + } + } + task { + consumer.parallelism = 1 + router.parallelism = 1 + compositeSearch.parallelism = 1 + dialcodeIndexer.parallelism = 1 + dialcodemetricsIndexer.parallelism = 1 + } + compositesearch.index.name = "compositesearch" + nested.fields = ["badgeAssertions", "targets", "badgeAssociations", "plugins", "me_totalTimeSpent", "me_totalPlaySessionCount", "me_totalTimeSpentInSec", "batches", "trackable", "credentials", "discussionForum", "provider", "osMetadata", "actions"] + schema.definition_cache.expiry = 14400 + restrict { + metadata.objectTypes = [] + 
objectTypes = ["EventSet", "Questionnaire", "Misconception", "FrameworkType", "Event"]
+    }
+    cloudstorage.metadata.replace_absolute_path=false
+    cloudstorage.relative_path_prefix= "CONTENT_STORAGE_BASE_PATH"
+    cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net"
+    cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"]
+    cloud_storage_container="sunbird-content-dev"
+
+  flink-conf: |+
+    jobmanager.memory.flink.size: 1024m
+    taskmanager.memory.flink.size: 1024m
+    taskmanager.numberOfTaskSlots: 1
+    jobManager.numberOfTaskSlots: 1
+    parallelism.default: 1
+    jobmanager.execution.failover-strategy: region
+    taskmanager.memory.network.fraction: 0.1
+    scheduler-mode: reactive
+    heartbeat.timeout: 8000
+    heartbeat.interval: 5000
+    taskmanager.memory.process.size: 1700m
+    jobmanager.memory.process.size: 1600m
+    # classloader.resolve-order: "parent-first"
+    # state.savepoints.dir: file:///tmp
+
+  job_classname: org.sunbird.job.searchindexer.task.SearchIndexerStreamTask
+
+audit-event-generator:
+  audit-event-generator: |+
+    include file("/data/flink/conf/base-config.conf")
+    job {
+      env = "dev"
+    }
+
+    kafka {
+      input.topic = "dev.learning.graph.events"
+      output.topic = "dev.telemetry.raw"
+      groupId = "dev-audit-event-generator-group"
+    }
+
+    task {
+      consumer.parallelism = 1
+      parallelism = 1
+      producer.parallelism = 1
+      window.time = 60
+    }
+
+    schema {
+      basePath = "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/local"
+    }
+
+    channel.default = "org.sunbird"
+
+  flink-conf: |+
+    jobmanager.memory.flink.size: 1024m
+    taskmanager.memory.flink.size: 1024m
+    taskmanager.numberOfTaskSlots: 1
+    jobManager.numberOfTaskSlots: 1
+    parallelism.default: 1
+    jobmanager.execution.failover-strategy: region
+    taskmanager.memory.network.fraction: 0.1
+    scheduler-mode: reactive
+    heartbeat.timeout: 8000
+    heartbeat.interval: 5000
+    
taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + job_classname: org.sunbird.job.auditevent.task.AuditEventGeneratorStreamTask + + +asset-enrichment: + asset-enrichment: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = "dev" + } + + kafka { + input.topic = "dev.learning.job.request" + groupId = "dev-asset-enrichment-group" + video_stream.topic = "dev.content.postpublish.request" + } + + task { + consumer.parallelism = 1 + router.parallelism = 1 + videoEnrichment.parallelism = 1 + imageEnrichment.parallelism = 1 + } + + content { + stream { + enabled = true + mimeType = ["video/mp4", "video/webm"] + } + youtube { + applicationName = "fetch-youtube-license" + regexPattern = ["\\?vi?=([^&]*)", "watch\\?.*v=([^&]*)", "(?:embed|vi?)/([^/?]*)", "^([A-Za-z0-9\\-\\_]*)"] + } + upload.context.driven = true + max.iteration.count = 2 + } + + thumbnail.max { + sample = 5 + size.pixel = 150 + } + + cloudstorage.metadata.replace_absolute_path=false + cloudstorage.relative_path_prefix= "CONTENT_STORAGE_BASE_PATH" + cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" + cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net","https://obj.dev.sunbird.org"] + cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"] + + cloud_storage_type="" + cloud_storage_key="" + cloud_storage_secret="" + cloud_storage_container="" + cloud_storage_endpoint="" + + + flink-conf: |+ + jobmanager.memory.flink.size: 1024m + taskmanager.memory.flink.size: 1024m + taskmanager.numberOfTaskSlots: 1 + jobManager.numberOfTaskSlots: 1 + parallelism.default: 1 + jobmanager.execution.failover-strategy: region + taskmanager.memory.network.fraction: 0.1 + scheduler-mode: reactive + heartbeat.timeout: 8000 + heartbeat.interval: 5000 + 
taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + job_classname: org.sunbird.job.assetenricment.task.AssetEnrichmentStreamTask + + +post-publish-processor: + post-publish-processor: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = "dev" + } + + kafka { + input.topic = "dev.content.postpublish.request" + groupId = "local-post-publish-processor-group" + publish.topic = "dev.learning.job.request" + qrimage.topic = "dev.qrimage.request" + dialcode.context.topic = "dev.dialcode.context.job.request" + } + + task { + consumer.parallelism = 1 + router.parallelism = 1 + shallow_copy.parallelism = 1 + link_dialcode.parallelism = 1 + batch_create.parallelism = 1 + dialcode_context_updater.parallelism = 1 + } + + lms-cassandra { + keyspace = "sunbird_courses" + batchTable = "course_batch" + } + + dialcode-cassandra { + keyspace = "dialcodes" + imageTable = "dialcode_images" + } + + service { + search.basePath = "http://localhost:9000/search" + lms.basePath = "http://localhost:9000/lms" + learning_service.basePath = "http://localhost:8080/learning-service" + dial.basePath = "https://dev.sunbirded.org/dial/" + } + + dialcode { + linkable.primaryCategory = ["Course"] + } + + cloudstorage.metadata.replace_absolute_path=false + cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" + cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net","https://obj.dev.sunbird.org"] + cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"] + + cloud_storage_type="azure" + cloud_storage_key="" + cloud_storage_secret="" + cloud_storage_container="" + cloud_storage_endpoint="" + + + flink-conf: |+ + jobmanager.memory.flink.size: 1024m + taskmanager.memory.flink.size: 1024m + taskmanager.numberOfTaskSlots: 1 + jobManager.numberOfTaskSlots: 1 + 
parallelism.default: 1 + jobmanager.execution.failover-strategy: region + taskmanager.memory.network.fraction: 0.1 + scheduler-mode: reactive + heartbeat.timeout: 8000 + heartbeat.interval: 5000 + taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + job_classname: org.sunbird.job.postpublish.task.PostPublishProcessorStreamTask + + +dialcode-context-updater: + dialcode-context-updater: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = "dev" + } + + kafka { + input.topic = "dev.dialcode.context.job.request" + failed.topic = "dev.dialcode.context.job.request.failed" + groupId = "dev-dialcode-group" + } + + task { + consumer.parallelism = 1 + parallelism = 1 + dialcode-context-updater.parallelism = 1 + } + + dialcode_context_updater { + actions="dialcode-context-update" + search_mode="Collection" + context_map_path = "https://raw.githubusercontent.com/project-sunbird/knowledge-platform-jobs/release-5.0.0/dialcode-context-updater/src/main/resources/contextMapping.json" + identifier_search_fields = ["identifier", "primaryCategory", "channel"] + dial_code_context_read_api_path = "/dialcode/v4/read/" + dial_code_context_update_api_path = "/dialcode/v4/update/" + } + + service { + search.basePath = "http://11.2.6.6/search" + dial_service.basePath = "http://11.2.6.6/dial" + } + + es_sync_wait_time = 5000 + + + flink-conf: |+ + jobmanager.memory.flink.size: 1024m + taskmanager.memory.flink.size: 1024m + taskmanager.numberOfTaskSlots: 1 + jobManager.numberOfTaskSlots: 1 + parallelism.default: 1 + jobmanager.execution.failover-strategy: region + taskmanager.memory.network.fraction: 0.1 + scheduler-mode: reactive + heartbeat.timeout: 8000 + heartbeat.interval: 5000 + taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + job_classname: 
org.sunbird.job.dialcodecontextupdater.task.DialcodeContextUpdaterStreamTask + + +qrcode-image-generator: + qrcode-image-generator: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = "dev" + } + + kafka { + input.topic = "dev.qrimage.request" + groupId = "dev-qrcode-image-generator-group" + } + + task { + consumer.parallelism = 1 + parallelism = 1 + window.time = 60 + } + + lp.tmp.file.location="/tmp" + + qr.image { + imageFormat="png" + bottomMargin=0 + margin=1 + } + + lms-cassandra { + keyspace = "dialcodes" + table { + image = "dialcode_images" + batch = "dialcode_batch" + } + } + + # Default value is 120 + max_allowed_character_for_file_name = 120 + + cloudstorage.metadata.replace_absolute_path=false + cloudstorage.relative_path_prefix= "DIAL_STORAGE_BASE_PATH" + cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" + cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net/dial","https://obj.dev.sunbird.org/dial"] + cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"] + + cloud_storage_type="" + cloud_storage_key="" + cloud_storage_secret="" + cloud_storage_container="" + cloud_storage_endpoint="" + + + flink-conf: |+ + jobmanager.memory.flink.size: 1024m + taskmanager.memory.flink.size: 1024m + taskmanager.numberOfTaskSlots: 1 + jobManager.numberOfTaskSlots: 1 + parallelism.default: 1 + jobmanager.execution.failover-strategy: region + taskmanager.memory.network.fraction: 0.1 + scheduler-mode: reactive + heartbeat.timeout: 8000 + heartbeat.interval: 5000 + taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + job_classname: org.sunbird.job.qrimagegenerator.task.QRCodeImageGeneratorTask + +video-stream-generator: + video-stream-generator: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = 
"dev" + } + + kafka { + input.topic = "dev.content.postpublish.request" + groupId = "dev-video-stream-generator-group" + } + + task { + consumer.parallelism = 1 + parallelism = 1 + timer.duration = 60 + max.retries = 10 + } + + lms-cassandra { + keyspace = "dev_platform_db" + table = "job_request" + } + + service { + content { + basePath = "http://11.2.6.6/content" + } + } + + # Azure Media Service Config + azure { + location = "centralindia" + tenant = "tenant" + subscription_id = "subscription id " + + login { + endpoint="https://login.microsoftonline.com" + } + + api { + endpoint="https://management.azure.com" + version = "2018-07-01" + } + + account_name = "account name" + resource_group_name = "group name" + + transform { + default = "media_transform_default" + hls = "media_transform_hls" + } + + stream { + base_url = "https://sunbirdspikemedia-inct.streaming.media.azure.net" + endpoint_name = "default" + protocol = "Hls" + policy_name = "Predefined_ClearStreamingOnly" + } + + token { + client_key = "client key" + client_secret = "client secret" + } + } + + azure_tenant="tenant" + azure_subscription_id="subscription id" + azure_account_name="account name" + azure_resource_group_name="group name" + azure_token_client_key="client key" + azure_token_client_secret="client secret" + + # CSP Name. 
e.g: aws or azure + media_service_type="aws" + + #AWS Elemental Media Convert Config + aws { + region="ap-south-1" + content_bucket_name="awsmedia-spike" + token { + access_key="access key" + access_secret="access secret" + } + api { + endpoint="API Endpoint for media convert" + version="2017-08-29" + } + service { + name="mediaconvert" + queue="Media Convert Queue Id" + role="Media Convert Role Name" + } + stream { + protocol="Hls" + } + } + + media_service_job_success_status=["FINISHED", "COMPLETE"] + + flink-conf: |+ + jobmanager.memory.flink.size: 1024m + taskmanager.memory.flink.size: 1024m + taskmanager.numberOfTaskSlots: 1 + jobManager.numberOfTaskSlots: 1 + parallelism.default: 1 + jobmanager.execution.failover-strategy: region + taskmanager.memory.network.fraction: 0.1 + scheduler-mode: reactive + heartbeat.timeout: 8000 + heartbeat.interval: 5000 + taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + job_classname: org.sunbird.job.videostream.task.VideoStreamGeneratorStreamTask + +audit-history-indexer: + audit-history-indexer: |+ + include file("/data/flink/conf/base-config.conf") + job { + env = "dev" + } + + kafka { + input.topic = "dev.learning.graph.events" + groupId = "dev-audit-history-indexer-group" + } + + task { + consumer.parallelism = 1 + parallelism = 1 + window.time = 60 + } + + timezone = "IST" + + flink-conf: |+ + jobmanager.memory.flink.size: 1024m + taskmanager.memory.flink.size: 1024m + taskmanager.numberOfTaskSlots: 1 + jobManager.numberOfTaskSlots: 1 + parallelism.default: 1 + jobmanager.execution.failover-strategy: region + taskmanager.memory.network.fraction: 0.1 + scheduler-mode: reactive + heartbeat.timeout: 8000 + heartbeat.interval: 5000 + taskmanager.memory.process.size: 1700m + jobmanager.memory.process.size: 1600m + # classloader.resolve-order: "parent-first" + # state.savepoints.dir: file:///tmp + + 
job_classname: org.sunbird.job.audithistory.task.AuditHistoryIndexerStreamTask \ No newline at end of file diff --git a/knowlg-automation/helm_charts/kafka/.helmignore b/knowlg-automation/helm_charts/kafka/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/knowlg-automation/helm_charts/kafka/Chart.lock b/knowlg-automation/helm_charts/kafka/Chart.lock new file mode 100644 index 000000000..4f0ea0177 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 11.0.2 +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.2 +digest: sha256:406b7c170751d9aeab272ff010ee5affc4a8b2a4487f0157a68a0f726ff155b6 +generated: "2022-12-18T23:00:36.711399047Z" diff --git a/knowlg-automation/helm_charts/kafka/Chart.yaml b/knowlg-automation/helm_charts/kafka/Chart.yaml new file mode 100644 index 000000000..cee687d00 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/Chart.yaml @@ -0,0 +1,33 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.3.1 +dependencies: +- condition: zookeeper.enabled + name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 11.x.x +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Apache Kafka is a distributed streaming platform designed to build real-time + pipelines and can be used as a message broker or as a replacement for a log aggregation + 
solution for big data applications. +home: https://github.com/bitnami/charts/tree/main/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: kafka +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/kafka +- https://kafka.apache.org/ +version: 20.0.2 diff --git a/knowlg-automation/helm_charts/kafka/README.md b/knowlg-automation/helm_charts/kafka/README.md new file mode 100644 index 000000000..5e922e050 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/README.md @@ -0,0 +1,1050 @@ + + +# Apache Kafka packaged by Bitnami + +Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. + +[Overview of Apache Kafka](http://kafka.apache.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command 
will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### Kafka parameters + +| Name | Description | Value | +| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image repository | `bitnami/kafka` | +| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.3.1-debian-11-r25` | +| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `""` | +| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` | +| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. 
Enabling auto creation of topics not recommended for production or similar environments | `true` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `_10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories in which kafka's log data is kept | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `_1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| 
`socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to ZooKeeper | `6000` | +| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | +| `authorizerClassName` | The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties | `""` | +| `allowEveryoneIfNoAclFound` | By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users | `true` | +| `superUsers` | You can add super users in server.properties | `User:admin` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.externalClientProtocol` | Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `""` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.sasl.mechanisms` | SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `plain` | +| `auth.sasl.jaas.clientUsers` | Kafka client user list | `["user"]` | +| `auth.sasl.jaas.clientPasswords` | Kafka client passwords. 
This is mandatory if more than one user is specified in clientUsers | `[]` | +| `auth.sasl.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.sasl.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperUser` | Kafka ZooKeeper user for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperPassword` | Kafka ZooKeeper password for SASL authentication | `""` | +| `auth.sasl.jaas.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser | `""` | +| `auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem` | `jks` | +| `auth.tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | +| `auth.tls.existingSecrets` | Array existing secrets containing the TLS certificates for the Kafka brokers | `[]` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` | `false` | +| `auth.tls.password` | Password to access the JKS files or PEM key when they are password-protected. | `""` | +| `auth.tls.existingSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. 
(`key`: `password`) | `""` | +| `auth.tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` | `""` | +| `auth.tls.jksKeystoreSAN` | The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate | `""` | +| `auth.tls.jksTruststore` | The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore | `""` | +| `auth.tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.zookeeper.tls.enabled` | Enable TLS for Zookeeper client connections. | `false` | +| `auth.zookeeper.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` | +| `auth.zookeeper.tls.verifyHostname` | Hostname validation. | `true` | +| `auth.zookeeper.tls.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` | +| `auth.zookeeper.tls.existingSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. | `zookeeper.keystore.jks` | +| `auth.zookeeper.tls.existingSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore. | `zookeeper.truststore.jks` | +| `auth.zookeeper.tls.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` | +| `auth.zookeeper.tls.passwordsSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `auth.zookeeper.tls.passwordsSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `listeners` | The address(es) the socket server listens on. 
Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `""` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | +| `command` | Override Kafka container command | `["/scripts/setup.sh"]` | +| `args` | Override Kafka container arguments | `[]` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of Kafka nodes | `1` | +| `minBrokerId` | Minimal broker.id value, nodes increment their `broker.id` respectively | `0` | +| `brokerRackAssignment` | Set Broker Assignment for multi tenant environment Allowed values: `aws-az` | `""` | +| `containerPorts.client` | Kafka client container port | `9092` | +| `containerPorts.internal` | Kafka inter-broker container port | `9093` | +| `containerPorts.external` | Kafka external container port | `9094` | +| `livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| 
`livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the container | `{}` | +| `resources.requests` | The requested resources for the container | `{}` | +| `podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set Kafka containers' 
Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as nonprivilege | `false` | +| `hostAliases` | Kafka pods host aliases | `[]` | +| `hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `podLabels` | Extra labels for Kafka pods | `{}` | +| `podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | Kafka statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | +| `pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.internal` | Kafka svc port for inter-broker connections | `9093` | +| `service.ports.external` | Kafka svc port for external connections | `9094` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the
sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `service.headless.labels` | Labels for the headless service. | `{}` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.5-debian-11-r2` | +| `externalAccess.autoDiscovery.image.digest` | Init container auto-discovery image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.service.usePodIPs` | using the MY_POD_IP address for external access. 
| `false` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.service.labels` | Service labels for external access | `{}` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` 
| +| `persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `logPersistence.annotations` | Annotations for the PVC | `{}` | +| `logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r63` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | +| 
`metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.6.0-debian-11-r40` | +| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.resources.limits` | The resources limits for the container | `{}` | +| `metrics.kafka.resources.requests` | The requested resources for the container | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security 
Context | `true` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. 
| `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | +| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.17.2-debian-11-r29` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resources.limits` | The resources limits for the JMX exporter container | `{}` | +| `metrics.jmx.resources.requests` | The requested resources for the JMX exporter container | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter 
service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | + + +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable kafka provisioning Job | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. 
| `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. | `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. 
| `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `false` | +| `provisioning.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `provisioning.resources.limits` | The resources limits for the Kafka provisioning container | `{}` | +| `provisioning.resources.requests` | The requested resources for the Kafka provisioning container | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.schedulerName` | Name of the 
k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart | `true` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.client.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. | `[]` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + my-repo/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml my-repo/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. 
+ +Using `extraEnvVars` with `KAFKA_CFG_` is the preferred and simplest way to add custom Kafka parameters not otherwise specified in this chart. Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`. +Setting either `config` or `existingConfigmap` will cause the chart to disregard `KAFKA_CFG_` settings, which are used by many other Kafka-related chart values described above, as well as dynamically generated parameters such as `zookeeper.connect`. This can cause unexpected behavior. + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 3 listeners: + +- One for inter-broker communications. +- A second one for communications with clients within the K8s cluster. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka and Zookeeper + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. This table shows the available protocols and the security they provide: + +| Method | Authentication | Encryption via TLS | +|-----------|------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +Learn more about how to configure Kafka to use the different authentication protocols in the [chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/kafka/administration/enable-security/). 
+ +If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: + +- `auth.sasl.jaas.clientUsers`/`auth.sasl.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. +- `auth.sasl.jaas.interBrokerUser`/`auth.sasl.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. +- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: In the case that the Zookeeper chart is deployed with SASL authentication enabled. + +In order to configure TLS authentication/encryption, you **can** create a secret per Kafka broker you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, you need pass the secret names with the `auth.tls.existingSecrets` parameter when deploying the chart. + +> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.tls.password` parameter to provide your password. + +For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers use the commands below to create the secrets: + +```console +kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks +kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks +``` + +> **Note**: the command above assumes you already created the truststore and keystores files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation. 
+ +If, for some reason (like using Cert-Manager) you can not use the default JKS secret scheme, you can use the additional parameters: + +- `auth.tls.jksTruststoreSecret` to define additional secret, where the `kafka.truststore.jks` is being kept. The truststore password **must** be the same as in `auth.tls.password` +- `auth.tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`). +- `auth.tls.jksKeystoreSAN` if you want to use a SAN certificate for your brokers. Setting this parameter would mean that the chart expects a existing key in the `auth.tls.jksTruststoreSecret` with the `auth.tls.jksKeystoreSAN` value and use this as a keystore for **all** brokers +> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates. To handle this, the `auth.tls.pemChainIncluded` property can be set to `true` and the initContainer created by this Chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain) + +> **Note**: The truststore/keystore from above **must** be protected with the same password as in `auth.tls.password` + +You can deploy the chart with authentication using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=tls +auth.tls.existingSecrets[0]=kafka-jks-0 +auth.tls.existingSecrets[1]=kafka-jks-1 +auth.tls.password=jksPassword +auth.sasl.jaas.clientUsers[0]=brokerUser +auth.sasl.jaas.clientPasswords[0]=brokerPassword +auth.sasl.jaas.zookeeperUser=zookeeperUser +auth.sasl.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +``` + +You can deploy the chart with AclAuthorizer using the following parameters: + +```console 
+replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=sasl_tls +auth.tls.existingSecrets[0]=kafka-jks-0 +auth.tls.existingSecrets[1]=kafka-jks-1 +auth.tls.password=jksPassword +auth.sasl.jaas.clientUsers[0]=brokerUser +auth.sasl.jaas.clientPasswords[0]=brokerPassword +auth.sasl.jaas.zookeeperUser=zookeeperUser +auth.sasl.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +authorizerClassName=kafka.security.authorizer.AclAuthorizer +allowEveryoneIfNoAclFound=false +superUsers=User:admin +``` + +If you are using Kafka ACLs, you might encounter in kafka-authorizer.log the following event: `[...] Principal = User:ANONYMOUS is Allowed Operation [...]`. + +By setting the following parameter: `auth.clientProtocol=mtls`, it will set the configuration in Kafka to `ssl.client.auth=required`. This option will require the clients to authenticate to Kafka brokers. + +As result, we will be able to see in kafka-authorizer.log the events specific Subject: `[...] Principal = User:CN=kafka,OU=...,O=...,L=...,C=..,ST=... is [...]`. + +If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificated used to sign the brokers certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags: + +```console +metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} +``` + +### Accessing Kafka brokers from outside the cluster + +In order to access Kafka Brokers from outside the cluster, an additional listener and advertised listener must be configured. 
Additionally, a specific service per kafka pod will be created. + +There are three ways of configuring external access. Using LoadBalancer services, using NodePort services or using ClusterIP services. + +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discover them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.ports.external=9094 +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the load balancer IPs: + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.ports.external=9094 +externalAccess.service.loadBalancerIPs[0]='external-ip-1' +externalAccess.service.loadBalancerIPs[1]='external-ip-2'} +``` + +Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. + +Following the aforementioned steps will also allow to connect the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. + +#### Using NodePort services + +You have two alternatives to use NodePort services: + +- Option A) Use random node ports using an **initContainer** that discover them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. 
+ +- Option B) Manually specify the node ports: + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.service.nodePorts[0]='node-port-1' +externalAccess.service.nodePorts[1]='node-port-2' +``` + +Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it. + +The pod will try to get the external ip of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` or `externalAccess.service.useHostIPs` is provided. + +#### Using ClusterIP services + +Note: This option requires that an ingress is deployed within your cluster + +```console +externalAccess.enabled=true +externalAccess.service.type=ClusterIP +externalAccess.service.ports.external=9094 +externalAccess.service.domain='ingress-ip' +``` + +Note: the deployed ingress must contain the following block: + +```console +tcp: + 9094: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-0-external:9094" + 9095: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-1-external:9094" + 9096: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-2-external:9094" +``` + +#### Name resolution with External-DNS + +You can use the following values to generate External-DNS annotations which automatically creates DNS records for each ReplicaSet pod: + +```yaml +externalAccess: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com" +``` + +### Sidecars + +If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB®: + +```yaml +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: + - | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "kafka.fullname" . }}-connect + - | + apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - | + apiVersion: v1 + kind: Service + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB® Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties +``` + +## Persistence + +The [Bitnami Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence. + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). 
+ +## Upgrading + +### To 20.0.0 + +This major updates the Zookeeper subchart to it newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100). + +### To 19.0.0 + +This major updates Kafka to its newest version, 3.3.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade). + +### To 18.0.0 + +This major updates the Zookeeper subchart to it newest major, 10.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000). + +### To 16.0.0 + +This major updates the Zookeeper subchart to it newest major, 9.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-900). + +### To 15.0.0 + +This major release bumps Kafka major version to `3.x` series. +It also renames several values in this chart and adds missing features, in order to be inline with the rest of assets in the Bitnami charts repository. Some affected values are: + +- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map. +- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map. +- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map. +- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map. +- Several parameters marked as deprecated `14.x.x` are not supported anymore. + +Additionally updates the ZooKeeper subchart to it newest major, `8.0.0`, which contains similar changes. 
+ +### To 14.0.0 + +In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one + +```yaml +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +``` + +VS + +```yaml +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +... +provisioning: + image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +``` + +See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes + +### To 13.0.0 + +This major updates the Zookeeper subchart to it newest major, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-700). + +### To 12.2.0 + +This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). 
[Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Move dependency information from the *requirements.yaml* to the *Chart.yaml* +- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock* +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +### To 11.8.0 + +External access to brokers can now be achieved through the cluster's Kafka service. + +- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external` + +### To 11.7.0 + +The way to configure the users and passwords changed. Now it is allowed to create multiple users during the installation by providing the list of users and passwords. + +- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array). +- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array). 
+ +### To 11.0.0 + +The way to configure listeners and athentication on Kafka is totally refactored allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information. + +Backwards compatibility is not guaranteed you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version: + +- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. +- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. +- `auth.certificatesSecret` -> renamed to `auth.jksSecret`. +- `auth.certificatesPassword` -> renamed to `auth.jksPassword`. +- `sslEndpointIdentificationAlgorithm` -> renamedo to `auth.tlsEndpointIdentificationAlgorithm`. +- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser` +- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword` +- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser` +- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword` +- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret` +- `service.sslPort` -> deprecated in favor of `service.internalPort` +- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort` +- `metrics.kafka.extraFlag` -> new parameter +- `metrics.kafka.certificatesSecret` -> new parameter + +### To 10.0.0 + +If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later. 
+ +### To 9.0.0 + +Backwards compatibility is not guaranteed you adapt your values.yaml to the new format. Here you can find some parameters that were renamed on this major version: + +```diff +- securityContext.enabled +- securityContext.fsGroup +- securityContext.fsGroup ++ podSecurityContext +- externalAccess.service.loadBalancerIP ++ externalAccess.service.loadBalancerIPs +- externalAccess.service.nodePort ++ externalAccess.service.nodePorts +- metrics.jmx.configMap.enabled +- metrics.jmx.configMap.overrideConfig ++ metrics.jmx.config +- metrics.jmx.configMap.overrideName ++ metrics.jmx.existingConfigmap +``` + +Ports names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/). + +### To 8.0.0 + +There is not backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028). + +### To 7.0.0 + +Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments. +Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: + +```console +helm upgrade kafka my-repo/kafka --version 6.1.8 --set metrics.kafka.enabled=false +helm upgrade kafka my-repo/kafka --version 7.0.0 --set metrics.kafka.enabled=true +``` + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. 
The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/knowlg-automation/helm_charts/kafka/charts/common/.helmignore b/knowlg-automation/helm_charts/kafka/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/knowlg-automation/helm_charts/kafka/charts/common/Chart.yaml b/knowlg-automation/helm_charts/kafka/charts/common/Chart.yaml new file mode 100644 index 000000000..f9ba944c8 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.2.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.2.2 diff --git a/knowlg-automation/helm_charts/kafka/charts/common/README.md b/knowlg-automation/helm_charts/kafka/charts/common/README.md new file mode 100644 index 000000000..ec43a5fab --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/README.md @@ -0,0 +1,351 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. 
| `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and 
`svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. 
| `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. 
It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. 
+ type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. 
To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_affinities.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..81902a681 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a 
podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_capabilities.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..9d9b76004 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. 
+*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_errors.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..a79cc2e32 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Throw error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_images.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_images.tpl new file mode 100644 index 000000000..46c659e79 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_ingress.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..831da9caa --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_labels.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_names.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_names.tpl new file mode 100644 index 000000000..617a23489 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_names.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
+*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_secrets.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a1708b2e8 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. 
+ +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. 
Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list 
$requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. 
+ +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previously generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_storage.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git 
a/knowlg-automation/helm_charts/kafka/charts/common/templates/_tplvalues.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_utils.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..b1ead50cf --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/_warnings.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. 
+Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_cassandra.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mariadb.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mongodb.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..f820ec107 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. 
+ +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- 
$requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mysql.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..74472a061 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_postgresql.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_redis.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..dcccfc1ae --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_validations.tpl b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/common/values.yaml b/knowlg-automation/helm_charts/kafka/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/.helmignore b/knowlg-automation/helm_charts/kafka/charts/zookeeper/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.lock b/knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.lock new file mode 100644 index 000000000..e6b30f2fb --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.1 +digest: sha256:6c67cfa9945bf608209d4e2ca8f17079fca4770907c7902d984187ab5b21811e +generated: "2022-12-09T23:46:15.313038016Z" diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.yaml new file mode 100644 index 000000000..26e072a05 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. 
+home: https://github.com/bitnami/charts/tree/main/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png +keywords: +- zookeeper +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: zookeeper +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper +- https://zookeeper.apache.org/ +version: 11.0.2 diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/README.md b/knowlg-automation/helm_charts/kafka/charts/zookeeper/README.md new file mode 100644 index 000000000..7dfdf5433 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,532 @@ + + +# Apache ZooKeeper packaged by Bitnami + +Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications. + +[Overview of Apache ZooKeeper](https://zookeeper.apache.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `namespaceOverride` | 
Override namespace for ZooKeeper resources | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.8.0-debian-11-r65` | +| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.client.enabled` | Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created | `""` | +| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. 
It uses SASL/Digest-MD5 | `false` | +| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. | `""` | +| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `preAllocSize` | Block size for transaction log file | `65536` | +| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | +| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | +| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | +| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | +| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | +| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | +| `autopurge.snapRetainCount` | The most recent snapshots amount (and corresponding transaction logs) to retain | `3` | +| `autopurge.purgeInterval` 
| The time interval (in hours) for which the purge task has to be triggered | `0` | +| `logLevel` | Log level for the ZooKeeper server. ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| 
`livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` | +| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` | +| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` | +| 
`podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled ZooKeeper containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as nonprivilege | `false` | +| `hostAliases` | ZooKeeper pods host aliases | `[]` | +| `podLabels` | Extra labels for ZooKeeper pods | `{}` | +| `podAnnotations` | Annotations for ZooKeeper pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | ZooKeeper client service port | `2181` | +| `service.ports.tls` | ZooKeeper TLS service port | `3181` | +| `service.ports.follower` | ZooKeeper follower service port | `2888` | +| `service.ports.election` | ZooKeeper election service port | `3888` | +| `service.nodePorts.client` | Node port for clients | `""` | +| `service.nodePorts.tls` | Node port for TLS | `""` | +| `service.disableBaseClientPort` | Remove client port from service definitions. 
| `false` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | ZooKeeper service Cluster IP | `""` | +| `service.loadBalancerIP` | ZooKeeper service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ZooKeeper service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ZooKeeper service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ZooKeeper service | `{}` | +| `service.extraPorts` | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for ZooKeeper pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- | +| `persistence.enabled` | Enable ZooKeeper data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use (only when deploying a single replica) | `""` | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `""` | +| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` | +| `persistence.dataLogDir.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image 
registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r60` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- | +| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` | +| `metrics.containerPort` | ZooKeeper Prometheus Exporter container port | `9141` | +| `metrics.service.type` | ZooKeeper Prometheus Exporter service type | `ClusterIP` | +| `metrics.service.port` | ZooKeeper Prometheus Exporter service port | `9141` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release 
Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + + +### TLS/SSL parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `tls.client.enabled` | Enable TLS for client connections | `false` | +| `tls.client.auth` | SSL Client auth. Can be "none", "want" or "need". 
| `none` | +| `tls.client.autoGenerated` | Generate automatically self-signed TLS certificates for ZooKeeper client communications | `false` | +| `tls.client.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications | `""` | +| `tls.client.existingSecretKeystoreKey` | The secret key from the tls.client.existingSecret containing the Keystore. | `""` | +| `tls.client.existingSecretTruststoreKey` | The secret key from the tls.client.existingSecret containing the Truststore. | `""` | +| `tls.client.keystorePath` | Location of the KeyStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks` | +| `tls.client.truststorePath` | Location of the TrustStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` | +| `tls.client.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.client.passwordsSecretKeystoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. | `""` | +| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""` | +| `tls.client.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.client.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.quorum.enabled` | Enable TLS for quorum protocol | `false` | +| `tls.quorum.auth` | SSL Quorum Client auth. Can be "none", "want" or "need". | `none` | +| `tls.quorum.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. | `false` | +| `tls.quorum.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol | `""` | +| `tls.quorum.existingSecretKeystoreKey` | The secret key from the tls.quorum.existingSecret containing the Keystore. 
| `""` | +| `tls.quorum.existingSecretTruststoreKey` | The secret key from the tls.quorum.existingSecret containing the Truststore. | `""` | +| `tls.quorum.keystorePath` | Location of the KeyStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks` | +| `tls.quorum.truststorePath` | Location of the TrustStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` | +| `tls.quorum.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.quorum.passwordsSecretKeystoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. | `""` | +| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` | +| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.resources.limits` | The resources limits for the TLS init container | `{}` | +| `tls.resources.requests` | The requested resources for the TLS init container | `{}` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set auth.clientUser=newUser \ + my-repo/zookeeper +``` + +The above command sets the ZooKeeper user to `newUser`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +$ helm install my-release -f values.yaml my-repo/zookeeper +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Configure log level + +You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs. + +In order to remove that log noise so levels can be set to 'INFO', two changes must be made. + +First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`. + +Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. 
The following is an example of this use of the Admin Server for probes: + +``` +livenessProbe: + enabled: false +readinessProbe: + enabled: false +customLivenessProbe: + exec: + command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok'] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 +customReadinessProbe: + exec: + command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null'] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 +``` + +You can also set the log4j logging level and what log appenders are turned on, by using `ZOO_LOG4J_PROP` set inside of conf/log4j.properties as zookeeper.root.logger by default to + +```console +zookeeper.root.logger=INFO, CONSOLE +``` +the available appender is + +- CONSOLE +- ROLLINGFILE +- RFAAUDIT +- TRACEFILE + +## Persistence + +The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. 
However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +### Configure the data log directory + +You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (Zookeeper's default behavior). + +When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information. + +### Set pod affinity + +This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 11.0.0 + +This major version removes `commonAnnotations` and `commonLabels` from `volumeClaimTemplates`. 
Now annotations and labels can be set in volume claims using `persistence.annotations` and `persistence.labels` values. If the previous deployment has already set `commonAnnotations` and/or `commonLabels` values, to ensure a clean upgrade from previous version without losing data, please set `persistence.annotations` and/or `persistence.labels` values with the same content as the common values. + +### To 10.0.0 + +This new version of the chart adds support for server-server authentication. +The chart previously supported client-server authentication, to avoid confusion, the previous parameters have been renamed from `auth.*` to `auth.client.*`. + +### To 9.0.0 + +This new version of the chart includes the new ZooKeeper major version 3.8.0. Upgrade compatibility is not guaranteed. + +### To 8.0.0 + +This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. + +Affected values: + +- `allowAnonymousLogin` is deprecated. +- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map. +- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map. +- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map. +- `podDisruptionBudget.*` parameters are renamed to `pdb.*`. + +### To 7.0.0 + +This new version renames the parameters used to configure TLS for both client and quorum. 
+ +- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort` +- `service.tls.client_port` is renamed to `service.tlsClientPort` +- `service.tls.client_enable` is renamed to `tls.client.enabled` +- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath` +- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath` +- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword` +- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword` +- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled` +- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath` +- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath` +- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword` +- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword` + +### To 6.1.0 + +This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 6.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/). 
+ +### To 5.21.0 + +A couple of parameters related to Zookeeper metrics were renamed or disappeared in favor of new ones: + +- `metrics.port` is renamed to `metrics.containerPort`. +- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`. + +### To 3.0.0 + +This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade +of the application, each node will need to have at least one snapshot file created in the data directory. If not, the +new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056) +in order to find ways to workaround this issue in case you are facing it. + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is zookeeper: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/.helmignore b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/Chart.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/Chart.yaml new file mode 100644 index 000000000..653c063f2 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.2.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.2.1 diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/README.md b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/README.md new file mode 100644 index 000000000..ec43a5fab --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/README.md @@ -0,0 +1,351 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. 
| `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and 
`svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|-----------------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. 
| `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. 
It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. 
| + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. 
+ type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. 
To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..81902a681 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a 
podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..9d9b76004 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. 
+*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..a79cc2e32 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl new file mode 100644 index 000000000..46c659e79 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..831da9caa --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. 
It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl new file mode 100644 index 000000000..1bdac8b77 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..4267d4204 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. 
Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. 
+ +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | 
quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..8c22b2a38 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value 
for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..f820ec107 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..74472a061 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. 
Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..dcccfc1ae --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. 
Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/values.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/NOTES.txt b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/NOTES.txt new file mode 100644 index 000000000..c287e1e56 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/NOTES.txt @@ -0,0 +1,76 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }} +------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true" + you have most likely exposed the ZooKeeper service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "auth.clientPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh + +{{- else }} + +ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }} + +To connect to your ZooKeeper server run the following commands: + + export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + kubectl exec -it $POD_NAME -- zkCli.sh + +To connect to your ZooKeeper server from outside the cluster execute the following commands: + +{{- if eq .Values.service.type "NodePort" }} + + export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . 
}}) + zkCli.sh $NODE_IP:$NODE_PORT + +{{- else if eq .Values.service.type "LoadBalancer" }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }} + +{{- else if eq .Values.service.type "ClusterIP" }} + + kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} & + zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }} + +{{- end }} +{{- end }} + +{{- include "zookeeper.validateValues" . }} +{{- include "zookeeper.checkRollingTags" . 
}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/_helpers.tpl b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100644 index 000000000..d855bada0 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,361 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ZooKeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "zookeeper.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Return ZooKeeper Namespace to use +*/}} +{{- define "zookeeper.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper client-server authentication credentials secret +*/}} +{{- define "zookeeper.client.secretName" -}} +{{- if .Values.auth.client.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}} +{{- else -}} + {{- printf "%s-client-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper server-server authentication credentials secret +*/}} +{{- define "zookeeper.quorum.secretName" -}} +{{- if .Values.auth.quorum.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}} +{{- else -}} + {{- printf "%s-quorum-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper client-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.client.createSecret" -}} +{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper server-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.quorum.createSecret" -}} +{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. 
+*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return the ZooKeeper configuration ConfigMap name +*/}} +{{- define "zookeeper.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ConfigMap object should be created for ZooKeeper configuration +*/}} +{{- define "zookeeper.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsSecret" -}} +{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper quorum TLS certificates +*/}} +{{- define "zookeeper.quorum.tlsSecretName" -}} +{{- $secretName := .Values.tls.quorum.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-crt" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.quorum.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.quorum.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsSecret" -}} +{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper client TLS certificates +*/}} +{{- define "zookeeper.client.tlsSecretName" -}} +{{- $secretName := .Values.tls.client.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum keystore key to be retrieved from tls.quorum.existingSecret. +*/}} +{{- define "zookeeper.quorum.tlsKeystoreKey" -}} +{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}} +{{- else -}} + {{- printf "zookeeper.keystore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum truststore key to be retrieved from tls.quorum.existingSecret. 
+*/}} +{{- define "zookeeper.quorum.tlsTruststoreKey" -}} +{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}} +{{- else -}} + {{- printf "zookeeper.truststore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client keystore key to be retrieved from tls.client.existingSecret. +*/}} +{{- define "zookeeper.client.tlsKeystoreKey" -}} +{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}} +{{- else -}} + {{- printf "zookeeper.keystore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client truststore key to be retrieved from tls.client.existingSecret. +*/}} +{{- define "zookeeper.client.tlsTruststoreKey" -}} +{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}} +{{- else -}} + {{- printf "zookeeper.truststore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.client.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.client.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum keystore password key to be retrieved from tls.quorum.passwordSecretName. 
+*/}} +{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}} +{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}} +{{- else -}} + {{- printf "keystore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum truststore password key to be retrieved from tls.quorum.passwordSecretName. +*/}} +{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}} +{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}} +{{- else -}} + {{- printf "truststore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client keystore password key to be retrieved from tls.client.passwordSecretName. +*/}} +{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}} +{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}} +{{- else -}} + {{- printf "keystore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client truststore password key to be retrieved from tls.client.passwordSecretName. +*/}} +{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}} +{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}} +{{- else -}} + {{- printf "truststore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "zookeeper.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) 
-}} +{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Authentication enabled +*/}} +{{- define "zookeeper.validateValues.client.auth" -}} +{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }} +zookeeper: auth.client.enabled + In order to enable client-server authentication, you need to provide the list + of users to be created and the user to use for clients authentication. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Authentication enabled +*/}} +{{- define "zookeeper.validateValues.quorum.auth" -}} +{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }} +zookeeper: auth.quorum.enabled + In order to enable server-server authentication, you need to provide the list + of users to be created and the user to use for quorum authentication. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Client TLS enabled +*/}} +{{- define "zookeeper.validateValues.client.tls" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }} +zookeeper: tls.client.enabled + In order to enable Client TLS encryption, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. 
+{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Quorum TLS enabled +*/}} +{{- define "zookeeper.validateValues.quorum.tls" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }} +zookeeper: tls.quorum.enabled + In order to enable Quorum TLS, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/configmap.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100644 index 000000000..12b4f489f --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "zookeeper.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/extra-list.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100644 index 000000000..5afc4b3e5 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }}-metrics + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100644 index 000000000..63532832c --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,41 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound connections to ZooKeeper + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.containerPorts.follower }} + - port: {{ .Values.containerPorts.election }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 14 }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/pdb.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/pdb.yaml new file mode 100644 index 000000000..f7faf65f9 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/prometheusrule.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/prometheusrule.yaml new file mode 100644 index 000000000..d76682339 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.db_namespace }} + namespace: {{ .Values.metrics.prometheusRule.db_namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . 
}} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} + diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml new file mode 100644 index 000000000..d0a7ddb49 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml @@ -0,0 +1,102 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + init-certs.sh: |- + #!/bin/bash + + {{- if .Values.tls.client.enabled }} + if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then + if [[ -f "/opt/bitnami/zookeeper/config/certs/client/.initialized" ]]; then + exit 0 + fi + openssl pkcs12 -export -in "/certs/client/tls.crt" \ + -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -inkey "/certs/client/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/client/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" 
\ + -noprompt + touch /opt/bitnami/zookeeper/config/certs/client/.initialized + {{- if .Values.tls.client.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled." + exit 1 + fi + {{- else }} + elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then + cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" + cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled." + exit 1 + fi + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then + openssl pkcs12 -export -in "/certs/quorum/tls.crt" \ + -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -inkey "/certs/quorum/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/quorum/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \ + -noprompt + {{- if .Values.tls.quorum.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when encryption Quorum via TLS is enabled." 
+ exit 1 + fi + {{- else }} + elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then + cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" + cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled." + exit 1 + fi + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID + # check ZOO_SERVER_ID in persistent volume via myid + # if not present, set based on POD hostname + if [[ -f "/bitnami/zookeeper/data/myid" ]]; then + export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)" + else + HOSTNAME="$(hostname -s)" + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))" + else + echo "Failed to get index from hostname $HOSTNAME" + exit 1 + fi + fi + exec /entrypoint.sh /run.sh diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/secrets.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/secrets.yaml new file mode 100644 index 000000000..82ebc2eed --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -0,0 +1,77 @@ +{{- if (include "zookeeper.client.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.client.clientPassword") "context" $) }} + server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "server-password" "providedValues" (list "auth.client.serverPasswords") "context" $) }} +{{- end }} +{{- if (include "zookeeper.quorum.createSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-quorum-auth" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }} + quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }} +{{- end }} +{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-client-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . 
}}-quorum-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100644 index 000000000..958a57ac2 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100644 index 000000000..e9ef58507 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.db_namespace }} + namespace: {{ .Values.metrics.serviceMonitor.db_namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "zookeeper.namespace" . 
}} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/statefulset.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100644 index 000000000..025d3630b --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,532 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "zookeeper.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) 
(include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }} + checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: 
{{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/zookeeper + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper + find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if .Values.dataLogDir }} + mkdir -p {{ .Values.dataLogDir }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }} + find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + {{- if .Values.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + 
- name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }} + - name: init-certs + image: {{ include "zookeeper.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /scripts/init-certs.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/init-certs.sh + subPath: init-certs.sh + {{- if or .Values.tls.client.enabled }} + - name: client-certificates + mountPath: /certs/client + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + {{- end }} + {{- if or .Values.tls.quorum.enabled }} + - name: quorum-certificates + mountPath: /certs/quorum + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: zookeeper + image: {{ template "zookeeper.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ZOO_DATA_LOG_DIR + value: {{ 
.Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.containerPorts.client | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_PRE_ALLOC_SIZE + value: {{ .Values.preAllocSize | quote }} + - name: ZOO_SNAPCOUNT + value: {{ .Values.snapCount | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $minServerId := int .Values.minServerId }} + {{- $followerPort := int .Values.containerPorts.follower }} + {{- $electionPort := int .Values.containerPorts.election }} + {{- $releaseNamespace := include "zookeeper.namespace" . }} + {{- $zookeeperFullname := include "common.names.fullname" . 
}} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }} + {{- if .Values.auth.client.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.client.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.client.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . }} + key: server-password + {{- end }} + - name: ZOO_ENABLE_QUORUM_AUTH + value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }} + {{- if .Values.auth.quorum.enabled }} + - name: ZOO_QUORUM_LEARNER_USER + value: {{ .Values.auth.quorum.learnerUser | quote }} + - name: ZOO_QUORUM_LEARNER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-learner-password + - name: ZOO_QUORUM_SERVER_USERS + value: {{ .Values.auth.quorum.serverUsers | quote }} + - name: ZOO_QUORUM_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . 
}} + key: quorum-server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: ZOO_TLS_PORT_NUMBER + value: {{ .Values.containerPorts.tls | quote }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.tls.client.enabled | quote }} + - name: ZOO_TLS_CLIENT_AUTH + value: {{ .Values.tls.client.auth | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.tls.client.keystorePath | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.tls.client.truststorePath | quote }} + {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.tls.quorum.enabled | quote }} + - name: ZOO_TLS_QUORUM_CLIENT_AUTH + value: {{ .Values.tls.quorum.auth | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.tls.quorum.keystorePath | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.tls.quorum.truststorePath | quote }} + {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: client + containerPort: {{ .Values.containerPorts.client }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-tls + containerPort: {{ .Values.containerPorts.tls }} + {{- end }} + - name: follower + containerPort: {{ .Values.containerPorts.follower }} + - name: election + containerPort: {{ .Values.containerPorts.election }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', 
'-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include 
"common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + {{- if not .Values.service.disableBaseClientPort }} + port: client + {{- else }} + port: follower + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + readOnly: true + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "zookeeper.configmapName" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) 
}} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-certificates + secret: + secretName: {{ include "zookeeper.client.tlsSecretName" . }} + defaultMode: 256 + - name: client-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-certificates + secret: + secretName: {{ include "zookeeper.quorum.tlsSecretName" . }} + defaultMode: 256 + - name: quorum-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.dataLogDir.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100644 index 000000000..ee05e1dec --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.headless.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc.yaml new file mode 100644 index 000000000..6ad0b1096 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ 
.Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }} + nodePort: {{ .Values.service.nodePorts.tls }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/tls-secrets.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/tls-secrets.yaml new file mode 100644 index 000000000..a07480d55 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/templates/tls-secrets.yaml @@ -0,0 +1,55 @@ +{{- if (include "zookeeper.client.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-client-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-client-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-quorum-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-quorum-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/charts/zookeeper/values.yaml b/knowlg-automation/helm_charts/kafka/charts/zookeeper/values.yaml new file mode 100644 index 000000000..50f7587a6 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,877 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Extra objects to deploy (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param namespaceOverride Override namespace for ZooKeeper resources +## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent +## +namespaceOverride: "" + +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section ZooKeeper chart parameters + +## Bitnami ZooKeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## @param image.registry ZooKeeper image registry +## @param image.repository ZooKeeper image repository +## @param image.tag ZooKeeper image tag (immutable tags are recommended) +## @param image.digest ZooKeeper image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy ZooKeeper image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.8.0-debian-11-r65 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## +auth: + client: + ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.client.clientUser User that will use ZooKeeper clients to auth + ## + clientUser: "" + ## @param auth.client.clientPassword Password that will use ZooKeeper clients to auth + ## + clientPassword: "" + ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of user to be created + ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.client.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" + quorum: + ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. 
It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## Note: Make sure the user is included in auth.quorum.serverUsers + ## + learnerUser: "" + ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## + learnerPassword: "" + ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers. + ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" +## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats +## +tickTime: 2000 +## @param initLimit ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 +## @param syncLimit How far out of date a server can be from a leader +## +syncLimit: 5 +## @param preAllocSize Block size for transaction log file +## +preAllocSize: 65536 +## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) +## +snapCount: 100000 +## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 +## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate +## Defaults to 20 times the tickTime +## +maxSessionTimeout: 40000 +## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms) +## This env 
var is ignored if Xmx an Xms are configured via `jvmFlags` +## +heapSize: 1024 +## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed +## +fourlwCommandsWhitelist: srvr, mntr, ruok +## @param minServerId Minimal SERVER_ID value, nodes increment their IDs respectively +## Servers increment their ID starting at this minimal value. +## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively. +## +minServerId: 1 +## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses +## +listenOnAllIPs: false +## Ongoing data directory cleanup configuration +## +autopurge: + ## @param autopurge.snapRetainCount The most recent snapshots amount (and corresponding transaction logs) to retain + ## + snapRetainCount: 3 + ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered + ## Set to a positive integer to enable the auto purging + ## + purgeInterval: 0 +## @param logLevel Log level for the ZooKeeper server. ERROR by default +## Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs +## +logLevel: ERROR +## @param jvmFlags Default JVM flags for the ZooKeeper process +## +jvmFlags: "" +## @param dataLogDir Dedicated data log directory +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots. +## E.g. +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" +## @param configuration Configure ZooKeeper with a custom zoo.cfg file +## e.g: +## configuration: |- +## deploy-working-dir=/bitnami/geode/data +## log-level=info +## ... 
+## +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set the `configuration` parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes +## +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] + +## @section Statefulset parameters + +## @param replicaCount Number of ZooKeeper nodes +## +replicaCount: 1 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port +## +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for 
livenessProbe +## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks for the ZooKeeper container(s) 
to automate configuration before or after startup +## +lifecycleHooks: {} +## ZooKeeper resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the ZooKeeper containers +## @param resources.requests.memory The requested memory for the ZooKeeper containers +## @param resources.requests.cpu The requested cpu for the ZooKeeper containers +## +resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m +## Configure Pods Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled ZooKeeper pods' Security Context +## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Configure Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enabled ZooKeeper containers' Security Context +## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as nonprivilege +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false +## @param hostAliases ZooKeeper pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param podLabels Extra labels for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for ZooKeeper pods +## ref: 
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Kubernetes pod scheduler registry +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type ZooKeeper statefulset strategy type +## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumes: +## - name: zookeeper-keystore +## secret: +## defaultMode: 288 +## secretName: zookeeper-keystore +## - name: zookeeper-truststore +## secret: +## defaultMode: 288 +## secretName: zookeeper-truststore +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumeMounts: +## - name: zookeeper-keystore +## mountPath: /certs/keystore +## readOnly: true +## - 
name: zookeeper-truststore +## mountPath: /certs/truststore +## readOnly: true +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the ZooKeeper pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## ZooKeeper Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the ZooKeeper pod +## @param pdb.minAvailable Minimum available ZooKeeper replicas +## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters + +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client ZooKeeper client service port + ## @param service.ports.tls ZooKeeper TLS service port + ## @param service.ports.follower ZooKeeper follower service port + ## @param service.ports.election ZooKeeper election service port + ## + ports: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param service.nodePorts.client Node port for clients + ## @param service.nodePorts.tls Node port for TLS + ## + nodePorts: + client: "" + tls: "" + ## @param service.disableBaseClientPort Remove client port from service definitions. 
+ ## + disableBaseClientPort: false + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP ZooKeeper service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for ZooKeeper service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.headless.annotations Annotations for the Headless Service + ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods + ## + headless: + publishNotReadyAddresses: true + annotations: {} +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled 
Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is + ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port). + ## + allowExternal: true + +## @section Other Parameters + +## Service account for ZooKeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable ZooKeeper data persistence using PVC.
If false, use emptyDir + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica) + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for ZooKeeper data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## Persistence for a dedicated data log directory + ## + dataLogDir: + ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory + ## + size: 8Gi + ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. 
+ ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r60 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.enabled Enabled init container Security Context + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + enabled: true + runAsUser: 0 + +## @section Metrics parameters +## + +## ZooKeeper Prometheus Exporter configuration +## +metrics: + ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint + ## + enabled: false + ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port + ## + containerPort: 9141 + ## Service configuration + ## + service: + ## @param metrics.service.type ZooKeeper Prometheus Exporter service type + ## + type: ClusterIP + ## @param metrics.service.port ZooKeeper Prometheus Exporter service port + ## + port: 9141 + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus 
Operator + ## + enabled: false + ## @param metrics.serviceMonitor.db_namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + db_namespace: "knowlg-db" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus.
+ ## + jobLabel: "" + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.db_namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + db_namespace: "knowlg-db" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## - alert: ZooKeeperSyncedFollowers + ## annotations: + ## message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + ## expr: max(synced_followers{service="my-release-metrics"}) < 2 + ## for: 5m + ## labels: + ## severity: critical + ## - alert: ZooKeeperOutstandingRequests + ## annotations: + ## message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster as a whole. + ## expr: outstanding_requests{service="my-release-metrics"} > 10 + ## for: 5m + ## labels: + ## severity: critical + ## + rules: [] + +## @section TLS/SSL parameters +## + +## Enable SSL/TLS encryption +## +tls: + client: + ## @param tls.client.enabled Enable TLS for client connections + ## + enabled: false + ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need".
+ ## + auth: "none" + ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications + ## Currently only supports PEM certificates + ## + autoGenerated: false + ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications + ## + existingSecret: "" + ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: "" + ## @param tls.client.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.client.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + quorum: + ## @param tls.quorum.enabled Enable TLS for quorum protocol + ## + enabled: false + ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need". 
+ ## + auth: "none" + ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol + ## + existingSecret: "" + ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. 
+ ## + passwordsSecretTruststoreKey: "" + ## @param tls.quorum.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.quorum.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param tls.resources.limits The resources limits for the TLS init container + ## @param tls.resources.requests The requested resources for the TLS init container + ## + resources: + limits: {} + requests: {} diff --git a/knowlg-automation/helm_charts/kafka/kafka b/knowlg-automation/helm_charts/kafka/kafka new file mode 100644 index 000000000..7e1f5439b --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/kafka @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: testclient + namespace: knowlg-db +spec: + containers: + - name: kafka + image: solsson/kafka:latest + command: + - sh + - -c + - "exec tail -f /dev/null" \ No newline at end of file diff --git a/knowlg-automation/helm_charts/kafka/templates/NOTES.txt b/knowlg-automation/helm_charts/kafka/templates/NOTES.txt new file mode 100644 index 000000000..1bdfb3315 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/NOTES.txt @@ -0,0 +1,310 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh + +{{- else }} + +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . 
)) -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms -}} +{{- $tlsEndpointIdentificationAlgorithm := default "" .Values.auth.tls.endpointIdentificationAlgorithm -}} +{{- $tlsPasswordSecret := printf "$(kubectl get secret %s --namespace %s -o jsonpath='{.data.password}' | base64 -d | cut -d , -f 1)" .Values.auth.tls.existingSecret $releaseNamespace -}} +{{- $tlsPassword := ternary .Values.auth.tls.password $tlsPasswordSecret (eq .Values.auth.tls.existingSecret "") -}} +{{- $servicePort := int .Values.service.ports.client -}} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $externalClientProtocol "PLAINTEXT") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also configure the Kafka authentication. + +--------------------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} +{{- if (include "kafka.client.saslAuthentication" .) 
}} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $clientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $clientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.client.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. 
To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $clientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.clientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} + +{{- end }} + +To create a pod that you can use as a Kafka client run the following commands: + + kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . }} --namespace {{ $releaseNamespace }} --command -- sleep infinity + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/kafka_jaas.conf {{ $fullname }}-client:/tmp/kafka_jaas.conf + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) 
(eq .Values.auth.tls.type "jks") }} + kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks + {{- if eq .Values.auth.clientProtocol "mtls" }} + kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks + {{- end }} + {{- end }} + kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash + {{- if (include "kafka.client.saslAuthentication" .) }} + export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/kafka_jaas.conf" + {{- end }} + + PRODUCER: + kafka-console-producer.sh \ + {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--producer.config /tmp/client.properties \{{ end }} + --broker-list {{ join "," $brokerList }} \ + --topic test + + CONSUMER: + kafka-console-consumer.sh \ + {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--consumer.config /tmp/client.properties \{{ end }} + --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \ + --topic test \ + --from-beginning + +{{- if .Values.externalAccess.enabled }} + +To connect to your Kafka server from outside the cluster, follow the instructions below: + +{{- if eq "NodePort" .Values.externalAccess.service.type }} +{{- if .Values.externalAccess.service.domain }} + + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }} + +{{- else }} + + Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener) + + 1. Obtain the pod name: + + kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka" + + 2. 
Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners + +{{- end }} + + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w' + + Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + Kafka Brokers port: {{ .Values.externalAccess.service.ports.external }} + +{{- else if eq "ClusterIP" .Values.externalAccess.service.type }} + + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }} + + Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.service.ports.external }} + +{{- end }} + +{{- if not (eq $clientProtocol $externalClientProtocol) }} +{{- if (include "kafka.client.saslAuthentication" .) 
}} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $externalClientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $externalClientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.externalClient.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. 
To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $externalClientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.externalClientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} + +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- include "kafka.checkRollingTags" . }} +{{- include "kafka.validateValues" . }} diff --git a/knowlg-automation/helm_charts/kafka/templates/_helpers.tpl b/knowlg-automation/helm_charts/kafka/templates/_helpers.tpl new file mode 100644 index 000000000..51ec867d5 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/_helpers.tpl @@ -0,0 +1,509 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kafka.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified zookeeper name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kafka.zookeeper.fullname" -}} +{{- if .Values.zookeeper.fullnameOverride -}} +{{- .Values.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "kafka.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Kafka image name +*/}} +{{- define "kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "kafka.externalAccess.autoDiscovery.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "kafka.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Create a default fully qualified Kafka exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kafka.metrics.kafka.fullname" -}} + {{- printf "%s-exporter" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} +{{- end -}} + +{{/* + Create the name of the service account to use for Kafka exporter pods + */}} +{{- define "kafka.metrics.kafka.serviceAccountName" -}} +{{- if .Values.metrics.kafka.serviceAccount.create -}} + {{ default (include "kafka.metrics.kafka.fullname" .) .Values.metrics.kafka.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.metrics.kafka.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Kafka exporter image name +*/}} +{{- define "kafka.metrics.kafka.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.kafka.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper JMX exporter image name +*/}} +{{- define "kafka.metrics.jmx.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.jmx.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "kafka.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.externalAccess.autoDiscovery.image .Values.volumePermissions.image .Values.metrics.kafka.image .Values.metrics.jmx.image) "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "kafka.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if authentication via SASL should be configured for client communications +*/}} +{{- define "kafka.client.saslAuthentication" -}} +{{- $saslProtocols := list "sasl" "sasl_tls" -}} +{{- if has .Values.auth.clientProtocol $saslProtocols -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if authentication via SASL should be configured for inter-broker communications +*/}} +{{- define "kafka.interBroker.saslAuthentication" -}} +{{- $saslProtocols := list "sasl" "sasl_tls" -}} +{{- if has .Values.auth.interBrokerProtocol $saslProtocols -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for client connections should be configured +*/}} +{{- define "kafka.client.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has .Values.auth.clientProtocol $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configured value for the external client protocol, defaults to the same value as clientProtocol +*/}} +{{- define "kafka.externalClientProtocol" -}} + {{- coalesce .Values.auth.externalClientProtocol 
.Values.auth.clientProtocol -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for external client connections should be configured +*/}} +{{- define "kafka.externalClient.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has (include "kafka.externalClientProtocol" . ) $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for inter broker communication connections should be configured +*/}} +{{- define "kafka.interBroker.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has .Values.auth.interBrokerProtocol $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS should be configured +*/}} +{{- define "kafka.tlsEncryption" -}} +{{- if or (include "kafka.client.tlsEncryption" .) (include "kafka.interBroker.tlsEncryption" .) (include "kafka.externalClient.tlsEncryption" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the type of listener +Usage: +{{ include "kafka.listenerType" ( dict "protocol" .Values.path.to.the.Value ) }} +*/}} +{{- define "kafka.listenerType" -}} +{{- if eq .protocol "plaintext" -}} +PLAINTEXT +{{- else if or (eq .protocol "tls") (eq .protocol "mtls") -}} +SSL +{{- else if eq .protocol "sasl_tls" -}} +SASL_SSL +{{- else if eq .protocol "sasl" -}} +SASL_PLAINTEXT +{{- end -}} +{{- end -}} + +{{/* +Return the protocol used with zookeeper +*/}} +{{- define "kafka.zookeeper.protocol" -}} +{{- if and .Values.auth.zookeeper.tls.enabled .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser -}} +SASL_SSL +{{- else if and .Values.auth.zookeeper.tls.enabled -}} +SSL +{{- else if and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser -}} +SASL +{{- else -}} +PLAINTEXT +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka JAAS credentials secret +*/}} +{{- define "kafka.jaasSecretName" -}} +{{- $secretName := 
.Values.auth.sasl.jaas.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-jaas" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a JAAS credentials secret object should be created +*/}} +{{- define "kafka.createJaasSecret" -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} +{{- if and (or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) (and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser)) (empty $secretName) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "kafka.createTlsSecret" -}} +{{- if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (eq .Values.auth.tls.type "pem") .Values.auth.tls.autoGenerated }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the secret name for the Kafka Provisioning client +*/}} +{{- define "kafka.client.passwordsSecretName" -}} +{{- if .Values.provisioning.auth.tls.passwordsSecret -}} + {{- printf "%s" (tpl .Values.provisioning.auth.tls.passwordsSecret $) -}} +{{- else -}} + {{- printf "%s-client-secret" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the Kafka Provisioning client +*/}} +{{- define "kafka.provisioning.serviceAccountName" -}} +{{- if .Values.provisioning.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) 
.Values.provisioning.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.provisioning.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.createConfigmap" -}} +{{- if and .Values.config (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka log4j ConfigMap name. +*/}} +{{- define "kafka.log4j.configMapName" -}} +{{- if .Values.existingLog4jConfigMap -}} + {{- printf "%s" (tpl .Values.existingLog4jConfigMap $) -}} +{{- else -}} + {{- printf "%s-log4j-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a log4j ConfigMap object should be created. +*/}} +{{- define "kafka.log4j.createConfigMap" -}} +{{- if and .Values.log4j (not .Values.existingLog4jConfigMap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the SASL mechanism to use for the Kafka exporter to access Kafka +The exporter uses a different nomenclature so we need to do this hack +*/}} +{{- define "kafka.metrics.kafka.saslMechanism" -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms }} +{{- if contains "scram-sha-512" $saslMechanisms }} + {{- print "scram-sha512" -}} +{{- else if contains "scram-sha-256" $saslMechanisms }} + {{- print "scram-sha256" -}} +{{- else -}} + {{- print "plain" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.metrics.jmx.configmapName" -}} +{{- if .Values.metrics.jmx.existingConfigmap -}} + {{- printf "%s" (tpl .Values.metrics.jmx.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-jmx-configuration" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.metrics.jmx.createConfigmap" -}} +{{- if and .Values.metrics.jmx.enabled .Values.metrics.jmx.config (not .Values.metrics.jmx.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "kafka.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "kafka.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "kafka.validateValues.authProtocols" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.domainSpecified" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . 
)) -}} +{{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets.length" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsPasswords" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - Authentication protocols for Kafka */}} +{{- define "kafka.validateValues.authProtocols" -}} +{{- $authProtocols := list "plaintext" "tls" "mtls" "sasl" "sasl_tls" -}} +{{- if or (not (has .Values.auth.clientProtocol $authProtocols)) (not (has .Values.auth.interBrokerProtocol $authProtocols)) (not (has (include "kafka.externalClientProtocol" . ) $authProtocols)) -}} +kafka: auth.clientProtocol auth.externalClientProtocol auth.interBrokerProtocol + Available authentication protocols are "plaintext", "tls", "mtls", "sasl" and "sasl_tls" +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as NodePort list */}} +{{- define "kafka.validateValues.nodePortListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }} +{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $nodePortListLength )) (eq .Values.externalAccess.service.type "NodePort") -}} +kafka: .Values.externalAccess.service.nodePorts + Number of replicas and nodePort array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and nodePorts = {{ $nodePortListLength }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - domain must be defined if external service type ClusterIP */}} +{{- define "kafka.validateValues.domainSpecified" -}} +{{- if and (eq .Values.externalAccess.service.type "ClusterIP") (eq .Values.externalAccess.service.domain "") -}} +kafka: .Values.externalAccess.service.domain + Domain must be specified if service type ClusterIP is set for external service +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - service type for external access */}} +{{- define "kafka.validateValues.externalAccessServiceType" -}} +{{- if and (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) (not (eq .Values.externalAccess.service.type "ClusterIP")) -}} +kafka: externalAccess.service.type + Available service types for external access are NodePort, LoadBalancer or ClusterIP. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - RBAC should be enabled when autoDiscovery is enabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryRBAC" -}} +{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }} +kafka: rbac.create + By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true" + an initContainer will be used to auto-detect the external IPs/ports by querying the + K8s API. Please note this initContainer requires specific RBAC resources. You can create them + by specifying "--set rbac.create=true". 
+{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - LoadBalancerIPs or LoadBalancerNames should be set when autoDiscovery is disabled */}} +{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}} +{{- $loadBalancerNameListLength := len .Values.externalAccess.service.loadBalancerNames -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }} +kafka: externalAccess.service.loadBalancerNames or externalAccess.service.loadBalancerIPs + By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and + "externalAccess.service.type=LoadBalancer" at least one of externalAccess.service.loadBalancerNames + or externalAccess.service.loadBalancerIPs must be set and the length of those arrays must be equal + to the number of replicas. +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - number of replicas must be the same as loadBalancerIPs list */}} +{{- define "kafka.validateValues.externalAccessServiceList" -}} +{{- $replicaCount := int .context.Values.replicaCount }} +{{- $listLength := len (get .context.Values.externalAccess.service .element) -}} +{{- if and .context.Values.externalAccess.enabled (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }} +kafka: externalAccess.service.{{ .element }} + Number of replicas and {{ .element }} array length must be the same. 
Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}} +{{- define "kafka.validateValues.saslMechanisms" -}} +{{- if and (or (.Values.auth.clientProtocol | regexFind "sasl") (.Values.auth.interBrokerProtocol | regexFind "sasl") (and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser)) (not .Values.auth.sasl.mechanisms) }} +kafka: auth.sasl.mechanisms + The SASL mechanisms are required when either auth.clientProtocol or auth.interBrokerProtocol use SASL or Zookeeper user is provided. +{{- end }} +{{- if not (contains .Values.auth.sasl.interBrokerMechanism .Values.auth.sasl.mechanisms) }} +kafka: auth.sasl.mechanisms + auth.sasl.interBrokerMechanism must be provided and it should be one of the specified mechanisms at auth.sasl.mechanisms +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}} +{{- define "kafka.validateValues.tlsSecrets" -}} +{{- if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "jks") (empty .Values.auth.tls.existingSecrets) }} +kafka: auth.tls.existingSecrets + A secret containing the Kafka JKS keystores and truststore is required + when TLS encryption is enabled and TLS format is "JKS" +{{- else if and (include "kafka.tlsEncryption" .) 
(eq .Values.auth.tls.type "pem") (empty .Values.auth.tls.existingSecrets) (not .Values.auth.tls.autoGenerated) }} +kafka: auth.tls.existingSecrets + A secret containing the Kafka TLS certificates and keys is required + when TLS encryption is enabled and TLS format is "PEM" +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka - The number of secrets containing TLS certs should be equal to the number of replicas */}} +{{- define "kafka.validateValues.tlsSecrets.length" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- if and (include "kafka.tlsEncryption" .) (not (empty .Values.auth.tls.existingSecrets)) }} +{{- $existingSecretsLength := len .Values.auth.tls.existingSecrets }} +{{- if ne $replicaCount $existingSecretsLength }} +kafka: .Values.auth.tls.existingSecrets + Number of replicas and existingSecrets array length must be the same. Currently: replicaCount = {{ $replicaCount }} and existingSecrets = {{ $existingSecretsLength }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}} +{{- define "kafka.validateValues.tlsPasswords" -}} +{{- if and (include "kafka.client.tlsEncryption" .) (not .Values.auth.tls.passwordsSecret) }} +{{- if or .Values.auth.tls.keyPasswordSecretKey .Values.auth.tls.keystorePasswordSecretKey .Values.auth.tls.truststorePasswordSecretKey }} +kafka: auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey + auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey + must not be used without passwordsSecret set. 
+{{- end -}} +{{- end -}} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/templates/configmap.yaml b/knowlg-automation/helm_charts/kafka/templates/configmap.yaml new file mode 100644 index 000000000..509fd1c4f --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "kafka.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + server.properties: |- + {{ .Values.config | nindent 4 }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/templates/extra-list.yaml b/knowlg-automation/helm_charts/kafka/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/jaas-secret.yaml b/knowlg-automation/helm_charts/kafka/templates/jaas-secret.yaml new file mode 100644 index 000000000..8f632e56e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/jaas-secret.yaml @@ -0,0 +1,40 @@ +{{- if (include "kafka.createJaasSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-jaas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + {{- $clientPasswords := .Values.auth.sasl.jaas.clientPasswords }} + {{- if $clientPasswords }} + client-passwords: {{ join "," $clientPasswords | b64enc | quote }} + system-user-password: {{ index $clientPasswords 0 | b64enc | quote }} + {{- else }} + {{- $passwords := list }} + {{- range $clientUsers }} + {{- $passwords = append $passwords (randAlphaNum 10) }} + {{- end }} + client-passwords: {{ join "," $passwords | b64enc | quote }} + system-user-password: {{ index $passwords 0 | b64enc | quote }} + {{- end }} + {{- end }} + {{- $zookeeperUser := .Values.auth.sasl.jaas.zookeeperUser }} + {{- if and .Values.zookeeper.auth.client.enabled $zookeeperUser }} + {{- $zookeeperPassword := .Values.auth.sasl.jaas.zookeeperPassword }} + zookeeper-password: {{ default (randAlphaNum 10) $zookeeperPassword | b64enc | quote }} + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + {{- $interBrokerPassword := .Values.auth.sasl.jaas.interBrokerPassword }} + inter-broker-password: {{ default (randAlphaNum 10) $interBrokerPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/jmx-configmap.yaml b/knowlg-automation/helm_charts/kafka/templates/jmx-configmap.yaml new file mode 100644 index 000000000..9207f6d1d --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/jmx-configmap.yaml @@ -0,0 +1,64 @@ +{{- if (include "kafka.metrics.jmx.createConfigmap" .) 
}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + jmx-kafka-prometheus.yml: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.config "context" $ ) | nindent 4 }} + rules: + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$4 + labels: + broker_id: "$3" + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Count) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$4 + labels: + client_id: "$3" + - pattern : kafka.network<>(Value) + name: kafka_network_$1_$2_$4 + labels: + network_processor: $3 + - pattern : kafka.network<>(Count) + name: kafka_network_$1_$2_$4 + labels: + request: $3 + - pattern: kafka.server<>(Count|OneMinuteRate) + name: kafka_server_$1_$2_$4 + labels: + topic: $3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$3_$4 + - pattern: kafka.server<>(Count|Value|OneMinuteRate) + name: kafka_server_$1_total_$2_$3 + - pattern: kafka.server<>(queue-size) + name: kafka_server_$1_$2 + - pattern: java.lang<(.+)>(\w+) + name: java_lang_$1_$4_$3_$2 + - pattern: java.lang<>(\w+) + name: java_lang_$1_$3_$2 + - pattern : java.lang + - pattern: kafka.log<>Value + name: kafka_log_$1_$2 + labels: + topic: $3 + partition: $4 + {{- if .Values.metrics.jmx.extraRules }} + {{- include 
"common.tplvalues.render" ( dict "value" .Values.metrics.jmx.extraRules "context" $ ) | nindent 6 }} + {{- end }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/templates/jmx-metrics-svc.yaml b/knowlg-automation/helm_charts/kafka/templates/jmx-metrics-svc.yaml new file mode 100644 index 000000000..35c79f41f --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/jmx-metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-deployment.yaml b/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100644 index 000000000..bf731f20b --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,171 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $servicePort := int .Values.service.ports.client -}} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "kafka.metrics.kafka.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: cluster-metrics + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.kafka.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.metrics.kafka.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.kafka.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.kafka.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.priorityClassName }} + priorityClassName: {{ .Values.metrics.kafka.priorityClassName }} + {{- end }} + {{- if .Values.metrics.kafka.schedulerName }} + schedulerName: {{ .Values.metrics.kafka.schedulerName }} + {{- end }} + {{- if .Values.metrics.kafka.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + {{- if .Values.metrics.kafka.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . 
}} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + {{- if .Values.metrics.kafka.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.command "context" $) | nindent 12 }} + {{- else }} + command: + - bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ce + - | + kafka_exporter \ + {{- range $i, $e := until $replicaCount }} + --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + --sasl.enabled \ + --sasl.username=$SASL_USERNAME \ + --sasl.password=$SASL_USER_PASSWORD \ + --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} \ + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) 
}} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} \ + --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} \ + {{- if .Values.metrics.kafka.tlsCaSecret }} + --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- else }} + --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + env: + - name: SASL_USERNAME + value: {{ index $clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: system-user-password + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.metrics.kafka.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) 
.Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + mountPath: /opt/bitnami/kafka-exporter/cacert/ + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.metrics.kafka.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + secret: + secretName: {{ .Values.metrics.kafka.tlsCaSecret }} + defaultMode: 0440 + {{- end }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-serviceaccount.yaml b/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-serviceaccount.yaml new file mode 100644 index 000000000..f8e3eb305 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.kafka.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.metrics.kafka.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-svc.yaml b/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100644 index 000000000..9daae4a1a --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.kafka.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }} + {{- if .Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-secret.yaml b/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-secret.yaml new file mode 100644 index 000000000..0c0fb1bc1 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-secret.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.provisioning.enabled (include "kafka.client.tlsEncryption" .) (not .Values.provisioning.auth.tls.passwordsSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kafka.client.passwordsSecretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }} + keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }} + key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-serviceaccount.yaml b/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-serviceaccount.yaml new file mode 100644 index 000000000..47614674c --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.provisioning.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.provisioning.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.provisioning.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning.yaml b/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning.yaml new file mode 100644 index 000000000..765e88315 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/kafka-provisioning.yaml @@ -0,0 +1,260 @@ +{{- if .Values.provisioning.enabled }} +{{- $replicaCount := int .Values.replicaCount }} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 8 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.provisioning.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.provisioning.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kafka.provisioning.serviceAccountName" . }} + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName | quote }} + {{- end }} + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.provisioning.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if or .Values.provisioning.initContainers .Values.provisioning.waitForKafka }} + initContainers: + {{- if .Values.provisioning.waitForKafka }} + - name: wait-for-available-kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + wait-for-port \ + --host={{ include "common.names.fullname" . 
}} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.client | int64 }}; + echo "Kafka is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka-provisioning + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + echo "Configuring environment" + . /opt/bitnami/scripts/libkafka.sh + export CLIENT_CONF="${CLIENT_CONF:-/opt/bitnami/kafka/config/client.properties}" + if [ ! -f "$CLIENT_CONF" ]; then + touch $CLIENT_CONF + + kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) 
}} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + ! is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD" + {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }} + file_to_multiline_property() { + awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}" + } + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")" + {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD" + ! is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" + {{- end }} + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) 
}} + {{- if contains "plain" .Values.auth.sasl.mechanisms }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism PLAIN + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if contains "scram-sha-256" .Values.auth.sasl.mechanisms }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-256 + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if contains "scram-sha-512" .Values.auth.sasl.mechanisms }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512 + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- end }} + {{- end }} + fi + + echo "Running pre-provisioning script if any given" + {{ .Values.provisioning.preScript | nindent 14 }} + + kafka_provisioning_commands=( + {{- range $topic := .Values.provisioning.topics }} + "/opt/bitnami/kafka/bin/kafka-topics.sh \ + --create \ + --if-not-exists \ + --bootstrap-server ${KAFKA_SERVICE} \ + --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \ + --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \ + {{- range $name, $value := $topic.config }} + --config {{ $name }}={{ $value }} \ + {{- end }} + --command-config ${CLIENT_CONF} \ + --topic {{ $topic.name }}" + {{- end }} + {{- range $command := .Values.provisioning.extraProvisioningCommands }} + {{- $command | quote | nindent 16 }} + {{- end }} + ) + + echo "Starting provisioning" + for ((index=0; index < ${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }})) + do + for j in $(seq ${index} $((${index}+{{ 
.Values.provisioning.parallel }}-1))) + do + ${kafka_provisioning_commands[j]} & # Async command + done + wait # Wait the end of the jobs + done + + echo "Running post-provisioning script if any given" + {{ .Values.provisioning.postScript | nindent 14 }} + + echo "Provisioning succeeded" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + - name: KAFKA_CLIENT_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }} + - name: KAFKA_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }} + - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }} + {{- end }} + - name: KAFKA_SERVICE + value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }} + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + - name: SASL_USERNAME + value: {{ index $clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . 
}} + key: system-user-password + {{- end }} + {{- if .Values.provisioning.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }} + envFrom: + {{- if .Values.provisioning.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.provisioning.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + {{- if (include "kafka.client.tlsEncryption" .) 
}} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + secret: + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/log4j-configmap.yaml b/knowlg-automation/helm_charts/kafka/templates/log4j-configmap.yaml new file mode 100644 index 000000000..8f7bc6c14 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.log4j "context" $ ) | nindent 4 }} +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/templates/networkpolicy-egress.yaml b/knowlg-automation/helm_charts/kafka/templates/networkpolicy-egress.yaml new file mode 100644 index 000000000..068024a0e --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/networkpolicy-egress.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . 
}} +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/networkpolicy-ingress.yaml b/knowlg-automation/helm_charts/kafka/templates/networkpolicy-ingress.yaml new file mode 100644 index 000000000..258dcabb6 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/networkpolicy-ingress.yaml @@ -0,0 +1,53 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ printf "%s-ingress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow client connections + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }} + {{- end }} + {{- end }} + # Allow communication inter-broker + - ports: + - port: {{ .Values.containerPorts.internal }} + from: + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + # Allow External connection + {{- if .Values.externalAccess.enabled }} + - ports: + - port: {{ .Values.containerPorts.external }} + {{- if .Values.externalAccess.from }} + from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.enabled }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/poddisruptionbudget.yaml b/knowlg-automation/helm_charts/kafka/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..e0a60151d --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/poddisruptionbudget.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/prometheusrule.yaml b/knowlg-automation/helm_charts/kafka/templates/prometheusrule.yaml new file mode 100644 index 000000000..f3744d517 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- if and (or .Values.metrics.kafka.enabled .Values.metrics.jmx.enabled) .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.groups }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.db_namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.labels "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" .) 
| nindent 4 }} + {{- end }} +spec: + groups: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.groups "context" .) | nindent 4 }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/role.yaml b/knowlg-automation/helm_charts/kafka/templates/role.yaml new file mode 100644 index 000000000..63215b3b8 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/role.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/knowlg-automation/helm_charts/kafka/templates/rolebinding.yaml b/knowlg-automation/helm_charts/kafka/templates/rolebinding.yaml new file mode 100644 index 000000000..fb5e3a157 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/scripts-configmap.yaml b/knowlg-automation/helm_charts/kafka/templates/scripts-configmap.yaml new file mode 100644 index 000000000..57e125053 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,202 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "common.names.fullname" . 
}} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $interBrokerPort := .Values.service.ports.internal }} + {{- $clientPort := .Values.service.ports.client }} + {{- $jksTruststoreSecret := .Values.auth.tls.jksTruststoreSecret -}} + {{- $jksTruststore := .Values.auth.tls.jksTruststore -}} + {{- $jksKeystoreSAN := .Values.auth.tls.jksKeystoreSAN -}} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local 
index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + if [[ -f "{{ .Values.logsDirs | splitList "," | first }}/meta.properties" ]]; then + export KAFKA_CFG_BROKER_ID="$(grep "broker.id" "{{ .Values.logsDirs | splitList "," | first }}/meta.properties" | awk -F '=' '{print $2}')" + else + export KAFKA_CFG_BROKER_ID="$((ID + {{ .Values.minBrokerId }}))" + fi + + {{- if eq .Values.brokerRackAssignment "aws-az" }} + export KAFKA_CFG_BROKER_RACK=$(curl "http://169.254.169.254/latest/meta-data/placement/availability-zone-id") + {{- end }} + + {{- if .Values.externalAccess.enabled }} + # Configure external ip and port + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_HOST=$(echo '{{ .Values.externalAccess.service.loadBalancerNames | default .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.ports.external }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + {{- if and .Values.externalAccess.service.usePodIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${MY_POD_IP}" + {{- else if or .Values.externalAccess.service.useHostIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${HOST_IP}" + {{- else if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} + {{- else }} + export EXTERNAL_ACCESS_HOST=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- if 
.Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_PORT="$((ID + {{ .Values.externalAccess.service.ports.external }}))" + {{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ join "," .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_HOST}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) 
}} + mkdir -p /opt/bitnami/kafka/config/certs + {{- if eq .Values.auth.tls.type "jks" }} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs-${ID}" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }} + JKS_KEYSTORE={{ printf "/certs-${ID}/%s" (default "kafka.keystore.jks" $jksKeystoreSAN) | quote }} + {{- else }} + JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }} + JKS_KEYSTORE={{ printf "/certs/%s" (default "kafka-${ID}.keystore.jks" $jksKeystoreSAN) | quote }} + {{- end }} + if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then + cp "$JKS_TRUSTSTORE" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "$JKS_KEYSTORE" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." 
+ exit 1 + fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + + {{- else if eq .Values.auth.tls.type "pem" }} + + {{- if or (not (empty .Values.auth.tls.existingSecrets)) .Values.auth.tls.autoGenerated }} + PEM_CA="/certs-${ID}/ca.crt" + PEM_CERT="/certs-${ID}/tls.crt" + PEM_KEY="/certs-${ID}/tls.key" + {{- else }} + PEM_CA="/certs/kafka.truststore.pem" + PEM_CERT="/certs/kafka-${ID}.keystore.pem" + PEM_KEY="/certs/kafka-${ID}.keystore.key" + {{- end }} + if [[ -f "$PEM_CERT" ]] && [[ -f "$PEM_KEY" ]]; then + CERT_DIR="/opt/bitnami/kafka/config/certs" + PEM_CA_LOCATION="${CERT_DIR}/kafka.truststore.pem" + PEM_CERT_LOCATION="${CERT_DIR}/kafka.keystore.pem" + {{- if .Values.auth.tls.pemChainIncluded }} + cat $PEM_CERT | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f ${CERT_DIR}/xx + FIND_CA_RESULT=$(find ${CERT_DIR} -not -name 'xx00' -name 'xx*') + if [[ $(echo $FIND_CA_RESULT | wc -l) < 1 ]]; then + echo "auth.tls.pemChainIncluded was set, but PEM chain only contained 1 cert" + exit 1 + fi + echo $FIND_CA_RESULT | sort | xargs cat >> "$PEM_CA_LOCATION" + cat ${CERT_DIR}/xx00 > "$PEM_CERT_LOCATION" + {{- else }} + if [[ -f "$PEM_CA" ]]; then + cp "$PEM_CA" "$PEM_CA_LOCATION" + cp "$PEM_CERT" "$PEM_CERT_LOCATION" + else + echo "PEM_CA not provided, and auth.tls.pemChainIncluded was not true. One of these values must be set when using PEM type for TLS." + exit 1 + fi + {{- end }} + + # Ensure the key used PEM format with PKCS#8 + openssl pkcs8 -topk8 -nocrypt -in "$PEM_KEY" > "/opt/bitnami/kafka/config/certs/kafka.keystore.key" + else + echo "Couldn't find the expected PEM files! They are mandatory when encryption via TLS is enabled." 
+ exit 1 + fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.pem" + {{- end }} + {{- end }} + + # Configure zookeeper client + {{- if and (not (empty .Values.auth.zookeeper.tls.existingSecret)) .Values.auth.zookeeper.tls.enabled }} + JKS_TRUSTSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretTruststoreKey) | quote }} + JKS_KEYSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretKeystoreKey) | quote }} + if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then + CERT_DIR="/opt/bitnami/kafka/config/certs" + TRUSTSTORE_LOCATION="${CERT_DIR}/zookeeper.truststore.jks" + cp "$JKS_TRUSTSTORE" "$TRUSTSTORE_LOCATION" + cp "$JKS_KEYSTORE" "${CERT_DIR}/zookeeper.keystore.jks" + export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${TRUSTSTORE_LOCATION}" + fi + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/knowlg-automation/helm_charts/kafka/templates/serviceaccount.yaml b/knowlg-automation/helm_charts/kafka/templates/serviceaccount.yaml new file mode 100644 index 000000000..73091f5d7 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/knowlg-automation/helm_charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100644 index 000000000..1919feebb --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git 
a/knowlg-automation/helm_charts/kafka/templates/servicemonitor-metrics.yaml b/knowlg-automation/helm_charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100644 index 000000000..edca22304 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- if .Values.metrics.serviceMonitor.db_namespace }} + namespace: {{ .Values.metrics.serviceMonitor.db_namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: cluster-metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/statefulset.yaml b/knowlg-automation/helm_charts/kafka/templates/statefulset.yaml new file mode 100644 index 000000000..2146a3d99 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/statefulset.yaml @@ -0,0 +1,610 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "common.names.fullname" . 
}} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.ports.internal }} +{{- $clientPort := .Values.service.ports.client }} +{{- $interBrokerProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.interBrokerProtocol) -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "kafka.createConfigmap" .) 
}} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/jaas-secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createTlsSecret" .) }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostIPC: {{ .Values.hostIPC }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if 
.Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka.serviceAccountName" . }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.initContainers }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + find "{{ .Values.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . 
}} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ printf "%s%s" (include "kafka.zookeeper.fullname" .) (tpl .Values.zookeeperChrootPath .) | quote }} + {{- else }} + value: {{ include "common.tplvalues.render" (dict "value" (printf "%s%s" (join "," .Values.externalZookeeper.servers) (tpl .Values.zookeeperChrootPath .)) "context" $) }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $externalClientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($externalClientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ upper .Values.auth.sasl.mechanisms | quote }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.sasl.interBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ join "," .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- if eq .Values.externalAccess.service.type "NodePort" }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- end }} + {{- else }} + - name: 
KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ join "," .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + value: {{ ternary "yes" "no" .Values.allowPlaintextListener | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.sasl.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.sasl.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.sasl.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: {{ include "kafka.zookeeper.protocol" . 
}} + {{- if .Values.auth.zookeeper.tls.enabled }} + - name: KAFKA_ZOOKEEPER_TLS_TYPE + value: {{ upper .Values.auth.zookeeper.tls.type | quote }} + - name: KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME + value: {{ .Values.auth.zookeeper.tls.verifyHostname | quote }} + {{- if .Values.auth.zookeeper.tls.passwordsSecret }} + - name: KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretKeystoreKey | quote }} + - name: KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretTruststoreKey | quote }} + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: KAFKA_TLS_TYPE + value: {{ upper .Values.auth.tls.type | quote }} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ default "" .Values.auth.tls.endpointIdentificationAlgorithm | quote }} + - name: KAFKA_TLS_CLIENT_AUTH + value: {{ ternary "required" "none" (or (eq (include "kafka.externalClientProtocol" . 
) "mtls") (eq .Values.auth.clientProtocol "mtls")) | quote }} + - name: KAFKA_CERTIFICATE_PASSWORD + {{- if .Values.auth.tls.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.auth.tls.existingSecret }} + key: password + {{- else }} + value: {{ default "" .Values.auth.tls.password | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: "5555" + {{- end }} + - name: KAFKA_VOLUME_DIR + value: {{ .Values.persistence.mountPath | quote }} + - name: KAFKA_LOG_DIR + value: {{ .Values.logPersistence.mountPath | quote }} + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: {{ .Values.deleteTopicEnable | quote }} + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: {{ .Values.autoCreateTopicsEnable | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ .Values.heapOpts | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: {{ .Values.logFlushIntervalMessages | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: {{ .Values.logFlushIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVAL_MS + value: {{ .Values.logRetentionCheckIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: {{ .Values.logRetentionHours | quote }} + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: {{ .Values.maxMessageBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: {{ .Values.logSegmentBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_DIRS + value: {{ .Values.logsDirs | quote }} + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: {{ .Values.defaultReplicationFactor | quote }} + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: {{ .Values.offsetsTopicReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: {{ .Values.transactionStateLogReplicationFactor | quote }} + - name: 
KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: {{ .Values.transactionStateLogMinIsr | quote }} + - name: KAFKA_CFG_NUM_IO_THREADS + value: {{ .Values.numIoThreads | quote }} + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: {{ .Values.numNetworkThreads | quote }} + - name: KAFKA_CFG_NUM_PARTITIONS + value: {{ .Values.numPartitions | quote }} + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: {{ .Values.numRecoveryThreadsPerDataDir | quote }} + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: {{ .Values.socketReceiveBufferBytes | quote }} + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: {{ .Values.socketSendBufferBytes | quote }} + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + - name: KAFKA_CFG_AUTHORIZER_CLASS_NAME + value: {{ .Values.authorizerClassName | quote }} + - name: KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND + value: {{ .Values.allowEveryoneIfNoAclFound | quote }} + - name: KAFKA_CFG_SUPER_USERS + value: {{ .Values.superUsers | quote }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: kafka-client + containerPort: {{ .Values.containerPorts.client }} + - name: kafka-internal + containerPort: {{ .Values.containerPorts.internal }} + {{- if .Values.externalAccess.enabled }} + - name: kafka-external + containerPort: {{ 
.Values.containerPorts.external }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + mountPath: {{ .Values.persistence.mountPath }}/config/server.properties + subPath: server.properties + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + 
mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + mountPath: /shared + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $_ := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- end }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + mountPath: /kafka-zookeeper-cert + readOnly: true + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + mountPath: /truststore + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . 
}} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - java + args: + - -XX:MaxRAMPercentage=100 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + configMap: + name: {{ include "kafka.configmapName" . }} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{- end }} + - name: scripts + configMap: + name: {{ include "common.names.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + emptyDir: {} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $secret := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ tpl $secret $ }} + defaultMode: 256 + {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $index }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + secret: + secretName: {{ .Values.auth.zookeeper.tls.existingSecret }} + defaultMode: 256 + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + secret: + secretName: {{ .Values.auth.tls.jksTruststoreSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} +{{- end }} +{{- if not .Values.logPersistence.enabled }} + - name: logs + emptyDir: {} +{{- else if .Values.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.logPersistence.existingClaim .) 
}} +{{- end }} + {{- if or (and .Values.persistence.enabled (not .Values.persistence.existingClaim)) (and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim)) }} + volumeClaimTemplates: + {{- end }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} +{{- if and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim) }} + - metadata: + name: logs + {{- if .Values.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.logPersistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.logPersistence.size | quote }} + {{ include "kafka.storageClass" . 
| nindent 8 }} + {{- if .Values.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/svc-external-access.yaml b/knowlg-automation/helm_charts/kafka/templates/svc-external-access.yaml new file mode 100644 index 000000000..8d77a4710 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,63 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "common.names.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + namespace: {{ $root.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $root.Values.externalAccess.service.labels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations $root.Values.externalAccess.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $root.Values.externalAccess.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $root.Values.externalAccess.service.loadBalancerAnnotations $i) 
"context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.externalAccess.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerIPs)) (eq (len $root.Values.externalAccess.service.loadBalancerIPs) $replicaCount) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + publishNotReadyAddresses: {{ $root.Values.externalAccess.service.publishNotReadyAddresses }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.ports.external }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + {{- if $root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/templates/svc-headless.yaml b/knowlg-automation/helm_charts/kafka/templates/svc-headless.yaml new file mode 100644 index 
000000000..af462126a --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.service.headless.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.headless.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.ports.internal }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/knowlg-automation/helm_charts/kafka/templates/svc.yaml b/knowlg-automation/helm_charts/kafka/templates/svc.yaml new file mode 100644 index 000000000..8e0472a1d --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/svc.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty 
.Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.externalAccess.enabled }} + - name: tcp-external + port: {{ .Values.service.ports.external }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/knowlg-automation/helm_charts/kafka/templates/tls-secrets.yaml b/knowlg-automation/helm_charts/kafka/templates/tls-secrets.yaml new file mode 100644 index 000000000..fdf350e87 --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/templates/tls-secrets.yaml @@ -0,0 +1,30 @@ +{{- if (include "kafka.createTlsSecret" .) }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . 
}} +{{- $ca := genCA "kafka-ca" 365 }} +{{- range $i := until $replicaCount }} +{{- $replicaHost := printf "%s-%d.%s-headless" $fullname $i $fullname }} +{{- $altNames := list (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $replicaHost $releaseNamespace) (printf "%s.%s" $fullname $releaseNamespace) $replicaHost $fullname }} +{{- $cert := genSignedCert $replicaHost nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} +--- +{{- end }} +{{- end }} diff --git a/knowlg-automation/helm_charts/kafka/values.yaml b/knowlg-automation/helm_charts/kafka/values.yaml new file mode 100644 index 000000000..5baca149a --- /dev/null +++ b/knowlg-automation/helm_charts/kafka/values.yaml @@ -0,0 +1,1808 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent 
Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section Kafka parameters + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## @param image.registry Kafka image registry +## @param image.repository Kafka image repository +## @param image.tag Kafka image tag (immutable tags are recommended) +## @param image.digest Kafka image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy Kafka image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 3.3.1-debian-11-r25 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## @param config Configuration file for Kafka. Auto-generated based on other parameters when not specified +## Specify content for server.properties +## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart) +## The server.properties is auto-generated based on other parameters when this parameter is not specified +## e.g: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## 
log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +config: "" +## @param existingConfigmap ConfigMap with Kafka Configuration +## NOTE: This will override `config` AND any KAFKA_CFG_ environment variables +## +existingConfigmap: "" +## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers +## An optional log4j.properties file to overwrite the default of the Kafka brokers +## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +log4j: "" +## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file +## The name of an existing ConfigMap containing a log4j.properties file +## NOTE: this will override `log4j` +## +existingLog4jConfigMap: "" +## @param heapOpts Kafka Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m +## @param deleteTopicEnable Switch to enable topic deletion or not +## +deleteTopicEnable: false +## @param autoCreateTopicsEnable Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments +## +autoCreateTopicsEnable: true +## @param logFlushIntervalMessages The number of messages to accept before forcing a flush of data to disk +## +logFlushIntervalMessages: _10000 +## @param logFlushIntervalMs The maximum amount of time a message can sit in a log before we force a flush +## +logFlushIntervalMs: 1000 +## @param logRetentionBytes A size-based retention policy for logs +## +logRetentionBytes: _1073741824 +## @param logRetentionCheckIntervalMs The interval at which log segments are checked to see if they can be deleted +## +logRetentionCheckIntervalMs: 300000 +## @param logRetentionHours The minimum age of a log file to be eligible for deletion due to age +## +logRetentionHours: 168 +## @param logSegmentBytes The maximum size of a log segment file. 
When this size is reached a new log segment will be created +## +logSegmentBytes: _1073741824 +## @param logsDirs A comma separated list of directories in which kafka's log data is kept +## ref: https://kafka.apache.org/documentation/#brokerconfigs_log.dirs +logsDirs: /bitnami/kafka/data +## @param maxMessageBytes The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 +## @param defaultReplicationFactor Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 +## @param offsetsTopicReplicationFactor The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 +## @param transactionStateLogReplicationFactor The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 +## @param transactionStateLogMinIsr Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 +## @param numIoThreads The number of threads doing disk I/O +## +numIoThreads: 8 +## @param numNetworkThreads The number of threads handling network requests +## +numNetworkThreads: 3 +## @param numPartitions The default number of log partitions per topic +## +numPartitions: 1 +## @param numRecoveryThreadsPerDataDir The number of threads per data directory to be used for log recovery at startup and flushing at shutdown +## +numRecoveryThreadsPerDataDir: 1 +## @param socketReceiveBufferBytes The receive buffer (SO_RCVBUF) used by the socket server +## +socketReceiveBufferBytes: 102400 +## @param socketRequestMaxBytes The maximum size of a request that the socket server will accept (protection against OOM) +## +socketRequestMaxBytes: _104857600 +## @param socketSendBufferBytes The send buffer (SO_SNDBUF) used by the socket server +## +socketSendBufferBytes: 102400 +## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to ZooKeeper +## +zookeeperConnectionTimeoutMs: 6000 +## @param zookeeperChrootPath Path which puts data under some path in the 
global ZooKeeper namespace +## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect +## +zookeeperChrootPath: "" +## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties +## +authorizerClassName: "" +## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users +## +allowEveryoneIfNoAclFound: true +## @param superUsers You can add super users in server.properties +## +superUsers: User:admin +## Authentication parameters +## https://github.com/bitnami/containers/tree/main/bitnami/kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## @param auth.clientProtocol Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.externalClientProtocol Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.interBrokerProtocol Authentication protocol for inter-broker communications. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## + clientProtocol: plaintext + # Note: empty by default for backwards compatibility reasons, find more information at + # https://github.com/bitnami/charts/pull/8902/ + externalClientProtocol: "" + interBrokerProtocol: plaintext + ## SASL configuration + ## + sasl: + ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` + ## + mechanisms: plain,scram-sha-256,scram-sha-512 + ## @param auth.sasl.interBrokerMechanism SASL mechanism for inter broker communication. + ## + interBrokerMechanism: plain + ## JAAS configuration for SASL authentication. + ## + jaas: + ## @param auth.sasl.jaas.clientUsers Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + ## @param auth.sasl.jaas.clientPasswords Kafka client passwords. This is mandatory if more than one user is specified in clientUsers + ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + ## @param auth.sasl.jaas.interBrokerUser Kafka inter broker communication user for SASL authentication + ## + interBrokerUser: admin + ## @param auth.sasl.jaas.interBrokerPassword Kafka inter broker communication password for SASL authentication + ## + interBrokerPassword: "" + ## @param auth.sasl.jaas.zookeeperUser Kafka ZooKeeper user for SASL authentication + ## + zookeeperUser: "" + ## @param auth.sasl.jaas.zookeeperPassword Kafka ZooKeeper password for SASL authentication + ## + zookeeperPassword: "" + ## @param auth.sasl.jaas.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME 
--from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + existingSecret: "" + ## TLS configuration + ## + tls: + ## @param auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem` + ## + type: jks + ## @param auth.tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. + ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. + ## + pemChainIncluded: false + ## @param auth.tls.existingSecrets Array existing secrets containing the TLS certificates for the Kafka brokers + ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore. + ## Create these secrets following the steps below: + ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks + ## kubectl create secret generic SECRET_NAME_1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks + ## ... + ## + ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key. 
+ ## Create these secrets following the steps below: + ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA + ## 2) Rename your CA file to `kafka.ca.crt`. + ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker. + ## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker. + ## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-0.tls.crt --from-file=tls.key=./kafka-0.tls.key + ## kubectl create secret generic SECRET_NAME_1 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-1.tls.crt --from-file=tls.key=./kafka-1.tls.key + ## ... + ## + existingSecrets: [] + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` + ## Note: ignored when using 'jks' format or `auth.tls.existingSecrets` is not empty + ## + autoGenerated: false + ## @param auth.tls.password Password to access the JKS files or PEM key when they are password-protected. + ## Note: ignored when using 'existingSecret'. + ## + password: "" + ## @param auth.tls.existingSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) + ## + existingSecret: "" + ## @param auth.tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` + ## Note: ignored when using 'pem' format for certificates. 
+ ## + jksTruststoreSecret: "" + ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate + ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services: + ## - kafka-0.kafka-headless.kafka.svc.cluster.local + ## - kafka-1.kafka-headless.kafka.svc.cluster.local + ## - kafka-2.kafka-headless.kafka.svc.cluster.local + ## Note: ignored when using 'pem' format for certificates. + ## + jksKeystoreSAN: "" + ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststore: "" + ## @param auth.tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate + ## Disable server host name verification by setting it to an empty string. + ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + endpointIdentificationAlgorithm: https + ## Zookeeper client configuration for kafka brokers + ## + zookeeper: + ## TLS configuration + ## + tls: + ## @param auth.zookeeper.tls.enabled Enable TLS for Zookeeper client connections. + ## + enabled: false + ## @param auth.zookeeper.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## + type: jks + ## @param auth.zookeeper.tls.verifyHostname Hostname validation. + ## + verifyHostname: true + ## @param auth.zookeeper.tls.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications. + ## + existingSecret: "" + ## @param auth.zookeeper.tls.existingSecretKeystoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. 
+ ## + existingSecretKeystoreKey: zookeeper.keystore.jks + ## @param auth.zookeeper.tls.existingSecretTruststoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: zookeeper.truststore.jks + ## @param auth.zookeeper.tls.passwordsSecret Existing secret containing Keystore and Truststore passwords. + ## + passwordsSecret: "" + ## @param auth.zookeeper.tls.passwordsSecretKeystoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: keystore-password + ## @param auth.zookeeper.tls.passwordsSecretTruststoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: truststore-password +## @param listeners The address(es) the socket server listens on. Auto-calculated it's set to an empty array +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] +## @param advertisedListeners The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] +## @param listenerSecurityProtocolMap The protocol->listener mapping. 
Auto-calculated it's set to nil +## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +listenerSecurityProtocolMap: "" +## @param allowPlaintextListener Allow to use the PLAINTEXT listener +## +allowPlaintextListener: true +## @param interBrokerListenerName The listener that the brokers should communicate on +## +interBrokerListenerName: INTERNAL +## @param command Override Kafka container command +## +command: + - /scripts/setup.sh +## @param args Override Kafka container arguments +## +args: [] +## @param extraEnvVars Extra environment variables to add to Kafka pods +## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration +## e.g: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" + +## @section Statefulset parameters + +## @param replicaCount Number of Kafka nodes +## +replicaCount: 1 +## @param minBrokerId Minimal broker.id value, nodes increment their `broker.id` respectively +## Brokers increment their ID starting at this minimal value. +## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively. 
+## +minBrokerId: 0 +## @param brokerRackAssignment Set Broker Assignment for multi tenant environment Allowed values: `aws-az` +## ref: https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica +## +brokerRackAssignment: "" +## @param containerPorts.client Kafka client container port +## @param containerPorts.internal Kafka inter-broker container port +## @param containerPorts.external Kafka external container port +## +containerPorts: + client: 9092 + internal: 9093 + external: 9094 +## Configure extra options for Kafka containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Kafka containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Kafka containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 +## @param 
startupProbe.enabled Enable startupProbe on Kafka containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup +## +lifecycleHooks: {} +## Kafka resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the container +## @param resources.requests The requested resources for the container +## +resources: + limits: {} + requests: {} +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable security context for the pods +## @param podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable Kafka containers' Security Context +## @param 
containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as nonprivilege +## e.g: +## containerSecurityContext: +## enabled: true +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false +## @param hostAliases Kafka pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param hostNetwork Specify if host network should be enabled for Kafka pods +## +hostNetwork: false +## @param hostIPC Specify if host IPC should be enabled for Kafka pods +## +hostIPC: false +## @param podLabels Extra labels for Kafka pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Extra annotations for Kafka pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. 
Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate +## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution +## +terminationGracePeriodSeconds: "" +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by kafka pods +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Name of the k8s scheduler (other than default) +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type Kafka statefulset strategy type +## @param updateStrategy.rollingUpdate Kafka statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s) +## e.g: +## extraVolumes: +## - name: kafka-jaas +## secret: +## secretName: kafka-jaas +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s) +## extraVolumeMounts: +## - name: kafka-jaas +## mountPath: /bitnami/kafka/config/kafka_jaas.conf +## subPath: kafka_jaas.conf +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the Kafka pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional Add init containers to the Kafka pod(s) +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## Kafka Pod Disruption Budget +## ref: 
https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the Kafka pod +## @param pdb.minAvailable Minimum number/percentage of available Kafka replicas +## @param pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters + +## Service parameters +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client Kafka svc port for client connections + ## @param service.ports.internal Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections + ## + ports: + client: 9092 + internal: 9093 + external: 9094 + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + client: "" + external: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## 
loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service + ## + annotations: {} + ## Headless service properties + ## + headless: + ## @param service.headless.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + publishNotReadyAddresses: false + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param service.headless.labels Labels for the headless service. + ## + labels: {} + ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value) + ## + extraPorts: [] +## External Access to Kafka brokers configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs/ports by querying the K8s API + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry + ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository + ## @param externalAccess.autoDiscovery.image.tag 
Init container auto-discovery image tag (immutable tags are recommended) + ## @param externalAccess.autoDiscovery.image.digest Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy + ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.25.5-debian-11-r2 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container + ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container + ## + resources: + limits: {} + requests: {} + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.service.type Kubernetes Service type for external access. 
It can be NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerNames: + ## - broker1.external.example.com + ## - broker2.external.example.com + ## + loadBalancerNames: [] + ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com. + ## + loadBalancerAnnotations: [] + ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort + ## + useHostIPs: false + ## @param externalAccess.service.usePodIPs using the MY_POD_IP address for external access. 
+ ## + usePodIPs: false + ## @param externalAccess.service.domain Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP + ## NodePort: If not specified, the container will try to get the kubernetes node external IP + ## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured + ## + domain: "" + ## @param externalAccess.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + publishNotReadyAddresses: false + ## @param externalAccess.service.labels Service labels for external access + ## + labels: {} + ## @param externalAccess.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port Kafka is + ## listening on. When true, zookeeper accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the kafka. 
+ ## But sometimes, we want the kafka to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## e.g: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port + ## e.g: + ## - ipBlock: + ## cidr: 172.9.0.0/16 + ## except: + ## - 172.9.1.0/24 + ## + externalAccess: + from: [] + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: [] + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: true + ## @param persistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for Kafka data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + storageClass: "" + ## @param persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.mountPath Mount path of the Kafka data volume + ## + mountPath: /bitnami/kafka +## Log Persistence parameters +## +logPersistence: + ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: false + ## @param logPersistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param logPersistence.storageClass PVC Storage Class for Kafka logs volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param logPersistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param logPersistence.size PVC Storage Request for Kafka logs volume + ## + size: 8Gi + ## @param logPersistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param logPersistence.mountPath Mount path of the Kafka logs volume + ## + mountPath: /opt/bitnami/kafka/logs + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r63 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters + +## ServiceAccount for Kafka +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods + ## + create: true + ## @param serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.serviceAccountName template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Role Based Access Control +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## @section Metrics parameters + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka exporter: exposes complimentary metrics to JMX exporter + ## + kafka: + ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics + ## + enabled: false + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## @param metrics.kafka.image.registry Kafka exporter image registry + ## @param metrics.kafka.image.repository Kafka exporter image repository + ## @param metrics.kafka.image.tag Kafka exporter image tag (immutable tags are recommended) + ## @param metrics.kafka.image.digest Kafka exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.kafka.image.pullPolicy Kafka exporter image pull policy + ## @param metrics.kafka.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.6.0-debian-11-r40 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files + ## for Kafka exporter client authentication + ## + certificatesSecret: "" + ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) + ## + tlsCert: cert-file + ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) + ## + tlsKey: key-file + ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication + ## + tlsCaSecret: "" + ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) + ## + tlsCaCert: ca-file + ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter + ## e.g: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + ## @param metrics.kafka.command Override Kafka exporter container command + ## + command: 
[] + ## @param metrics.kafka.args Override Kafka exporter container arguments + ## + args: [] + ## @param metrics.kafka.containerPorts.metrics Kafka exporter metrics container port + ## + containerPorts: + metrics: 9308 + ## Kafka exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.kafka.resources.limits The resources limits for the container + ## @param metrics.kafka.resources.requests The requested resources for the container + ## + resources: + limits: {} + requests: {} + ## Kafka exporter pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods + ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context + ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser + ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods + ## 
Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node metrics.kafka.affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match Ignored if `metrics.kafka.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.kafka.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.kafka.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param metrics.kafka.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.kafka.priorityClassName Kafka exporter pods' priorityClassName + ## + priorityClassName: "" + ## @param metrics.kafka.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s) + ## e.g: + ## sidecars: + ## - 
name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## Kafka exporter service configuration + ## + service: + ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port + ## + ports: + metrics: 9308 + ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## Kafka exporter pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods + ## + create: true + ## @param metrics.kafka.serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template + ## + name: "" + ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Prometheus JMX exporter: exposes the majority of Kafkas metrics + ## + jmx: + ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus + ## + enabled: false + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## @param metrics.jmx.image.registry JMX exporter image registry + ## @param metrics.jmx.image.repository JMX exporter image repository + ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) + ## @param metrics.jmx.image.digest JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy + ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.17.2-debian-11-r29 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Prometheus JMX exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context + ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port + ## + containerPorts: + metrics: 5556 + ## Prometheus JMX exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container + ## @param 
metrics.jmx.resources.requests The requested resources for the JMX exporter container + ## + resources: + limits: {} + requests: {} + ## Prometheus JMX exporter service configuration + ## + service: + ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port + ## + ports: + metrics: 5556 + ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}" + prometheus.io/path: "/" + ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter + ## Only whitelisted values will be exposed via JMX exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + ## @param metrics.jmx.config [string] Configuration file for JMX exporter + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. 
+ ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + existingConfigmap: "" + ## @param metrics.jmx.extraRules Add extra rules to JMX exporter configuration + ## e.g: + ## extraRules: |- + ## - pattern: kafka.server<>(connection-count) + ## name: kafka_server_socket_server_metrics_$3 + ## labels: + ## listener: $1 + extraRules: "" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## + namespace: "knowlg-db" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param
metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + + prometheusRule: + ## @param metrics.prometheusRule.enabled if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace in which Prometheus is running + ## + namespace: "knowlg-db" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.groups Prometheus Rule Groups for Kafka + ## + groups: [] + +## @section Kafka provisioning parameters + +## Kafka provisioning +## +provisioning: + ## @param provisioning.enabled Enable kafka provisioning Job + ## + enabled: true + ## @param provisioning.numPartitions Default number of partitions for topics when unspecified + ## + numPartitions: 1 + ## @param provisioning.replicationFactor Default replication factor for topics when unspecified + ## + replicationFactor: 1 + ## @param provisioning.topics Kafka topics to provision + + topics: + - name: "${content_publish_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${content_postpublish_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name:
"${learning_job_request_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${learning_graph_events_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${learning_events_failed_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${search_indexer_group_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${qrimage_request_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${telemetry_raw_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + - name: "${dialcode_context_job_request_topic}" + partitions: 1 + replicationFactor: 1 + # https://kafka.apache.org/documentation/#topicconfigs + config: + max.message.bytes: 64000 + flush.messages: 1 + ## @param provisioning.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param provisioning.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources + ## - echo "Allow user to consume from any topic" + ## - >- + ## /opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --add + ## --allow-principal 
User:user + ## --consumer --topic '*' + ## - "/opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --list" + ## + extraProvisioningCommands: [] + ## @param provisioning.parallel Number of provisioning commands to run at the same time + ## + parallel: 1 + ## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + preScript: "" + ## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + postScript: "" + ## Auth Configuration for kafka provisioning Job + ## + auth: + ## TLS configuration for kafka provisioning Job + ## + tls: + ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## Note: ignored if auth.tls.clientProtocol different from one of these values: "tls" "mtls" "sasl_tls". + ## + type: jks + ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job. + ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore. + ## When using 'pem' format for certificates, the secret should contain a public CA certificate, a public certificate and one private key. 
+ ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. 
+ ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. + ## + truststorePassword: "" + ## @param provisioning.command Override provisioning container command + ## + command: [] + ## @param provisioning.args Override provisioning container arguments + ## + args: [] + ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods + ## + podAnnotations: {} + ## @param provisioning.podLabels Extra labels for Kafka provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## Kafka provisioning pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param provisioning.serviceAccount.create Enable creation of 
ServiceAccount for Kafka provisioning pods + ## + create: false + ## @param provisioning.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the provisioning.serviceAccount.name template + ## + name: "" + ## @param provisioning.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Kafka provisioning resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container + ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container + ## + resources: + limits: {} + requests: {} + ## Kafka provisioning pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable security context for the pods + ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka provisioning containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context + ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## 
capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param provisioning.initContainers Add init containers to the Kafka provisioning pod(s) + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param provisioning.waitForKafka If true use an init container to wait until kafka is ready before starting provisioning + ## + waitForKafka: true + +## @section ZooKeeper chart parameters + +## ZooKeeper chart configuration +## https://github.com/bitnami/charts/blob/main/bitnami/zookeeper/values.yaml +## +zookeeper: + ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart + ## + enabled: true + ## @param zookeeper.replicaCount Number of
ZooKeeper nodes + ## + replicaCount: 1 + ## ZooKeeper authentication + ## + auth: + client: + ## @param zookeeper.auth.client.enabled Enable ZooKeeper auth + ## + enabled: false + ## @param zookeeper.auth.client.clientUser User that will use ZooKeeper clients to auth + ## + clientUser: "" + ## @param zookeeper.auth.client.clientPassword Password that will use ZooKeeper clients to auth + ## + clientPassword: "" + ## @param zookeeper.auth.client.serverUsers Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param zookeeper.auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## ZooKeeper Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s) + ## @param zookeeper.persistence.storageClass Persistent Volume storage class + ## @param zookeeper.persistence.accessModes Persistent Volume access modes + ## @param zookeeper.persistence.size Persistent Volume size + ## + persistence: + enabled: true + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi + +## External Zookeeper Configuration +## All of these values are only used if `zookeeper.enabled=false` +## +externalZookeeper: + ## @param externalZookeeper.servers List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'.
+ ## + servers: [] diff --git a/knowlg-automation/helm_charts/learning/Chart.yaml b/knowlg-automation/helm_charts/learning/Chart.yaml new file mode 100644 index 000000000..e1b159391 --- /dev/null +++ b/knowlg-automation/helm_charts/learning/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: learning +version: 0.1.0 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/learning/learning-service_application.conf b/knowlg-automation/helm_charts/learning/learning-service_application.conf new file mode 100644 index 000000000..cb6887435 --- /dev/null +++ b/knowlg-automation/helm_charts/learning/learning-service_application.conf @@ -0,0 +1,315 @@ +#Platform Configuration +platform.services=["learning", "language", "config"] + +# Learning-Service Configuration +learning.graph_ids=["domain"] +content.metadata.visibility.parent=["textbookunit", "courseunit", "lessonplanunit"] + +# Language-Service Configuration +language.graph_ids=["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd","pj"] + +# Redis Configuration +redis.host="redis-db.knowlg-db.svc.cluster.local" +redis.port=6379 +redis.maxConnections=128 + +# Cassandra Configuration +content.keyspace.name="content_store" +hierarchy.keyspace.name="hierarchy_store" +content.hierarchy.table="content_hierarchy" +framework.hierarchy.table="framework_hierarchy" + +dialcode.keyspace.name="dialcode_store" +dialcode.keyspace.table="dial_code" +dialcode.max_count=1000 + +system.config.keyspace.name="dialcode_store" +system.config.table="system_config" +publisher.keyspace.name="dialcode_store" +publisher.keyspace.table="publisher" + +#DIAL Code Generator Configuration +dialcode.strip.chars="0" +dialcode.length=6.0 +dialcode.large.prime_number=1679979167 + +dialcode.es_conn_info="{{ .Values.es_connection }}" +dialcode.search.limit=1000 + +#DIAL Code ElasticSearch Configuration +dialcode_store.dial_code.index=true 
+dialcode_store.dial_code.object_type="DialCode" + +audit.es_conn_info="{{ .Values.es_connection }}" + +#Assessment Item Configuration +assessment.keyspace.name="sunbird_content_store" +assessment.keyspace.table="question_data" + +# Actor System Configuration +LearningActorSystem { + akka { + actor { + default-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 1 + parallelism-factor = 2.0 + parallelism-max = 4 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + deployment { + /HealthCheckManager + { + router = smallest-mailbox-pool + nr-of-instances = 5 + } + } + } + } +} + +env= "" + +#Current environment +cloud_storage.env = "sunbird" + +#Folder configuration +cloud_storage.content.folder = "content" +cloud_storage.asset.folder = "assets" +cloud_storage.artefact.folder = "artifact" +cloud_storage.bundle.folder = "bundle" +cloud_storage.media.folder = "media" +cloud_storage.ecar.folder = "ecar_files" +cloud_storage.itemset.folder = "itemset" + +cloud_storage.upload.url.ttl = 600 + + +# Media download configuration +content.media.base.url="https://dev.knowlg.sunbird.org" +plugin.media.base.url="https://dev.knowlg.sunbird.org" + + +# Content Extraction Configuration + +#directory location where store unzip file +dist.directory = "/data/tmp/dist/" +output.zipfile = "/data/tmp/story.zip" +source.folder = "/data/tmp/temp2/" +save.directory = "/data/tmp/temp/" + +# FOR CONTENT WORKFLOW PIPELINE (CWP) + +#--Content Workflow Pipeline Mode +OPERATION_MODE = "TEST" + +#--Maximum Content Package File Size Limit in Bytes (50 MB) +MAX_CONTENT_PACKAGE_FILE_SIZE_LIMIT = 52428800 + +#--Maximum Asset File Size Limit in Bytes (20 MB - 20971520) +MAX_ASSET_FILE_SIZE_LIMIT = 52428800 + +#--No of Retry While File Download Fails +RETRY_ASSET_DOWNLOAD_COUNT = 1 + +# H5P Library Path +content.h5p.library.path="learning_content_h5p_library_path" + +# ElasticSearch Configuration 
+search.es_conn_info="{{ .Values.es_connection }}" +search.fields.query=["name^100","title^100","lemma^100","code^100","domain","subject","description^10","keywords^100","ageGroup^10","filter^10","theme^10","genre^10","objects^25","contentType^100","language^200","teachingMode^25","skills^10","learningObjective^10","curriculum^100","gradeLevel^100","developer^100","attributions^10","identifier^100","IL_UNIQUE_ID^100","owner^50","board^100", "creator^100", "dialcodes^100","text","words","releaseNotes"] +search.fields.date=["lastUpdatedOn","createdOn","versionDate","lastSubmittedOn","lastPublishedOn"] +search.batch.size=500 +search.connection.timeout=30 + +platform-api-url="http://localhost:8080/learning-service" + +# Language Index Configuration +ignoreStartWordsList=[""beginning" + type => "graph_event" + path => ["/txn-handler/learning_graph_event_neo4j.log"] + sincedb_path => "/usr/share/logstash/.sincedb_learning_graph_event_mw" + } + } + filter { + grok { + match => [ "message", + "%{TIMESTAMP_ISO8601:timestamp} %{GREEDYDATA:msg}"] + } + mutate { + gsub => [ "message","%{timestamp}","" ] + strip => [ "message" ] + } + json { + source => "message" + } + } + output { + kafka { + bootstrap_servers => "{{ .Values.kafka_connection}}" + codec => plain { + format => "%{message}" + } + message_key => "%{nodeUniqueId}" + topic_id => "{{ .Values.learning_graph_events}}" + retries => 20 + retry_backoff_ms => 180000 + } + } \ No newline at end of file diff --git a/knowlg-automation/helm_charts/neo4j/values.yaml b/knowlg-automation/helm_charts/neo4j/values.yaml new file mode 100644 index 000000000..79669627c --- /dev/null +++ b/knowlg-automation/helm_charts/neo4j/values.yaml @@ -0,0 +1,20 @@ +db_namespace: knowlg-db +replicaCount: 1 +image: pallakartheekreddy/neo4j:3.3.0 +authEnabled: false +network: + port: 7474 +neo4j: + name: neo4j + resources: + cpu: "0.5" + memory: "2Gi" + +volumes: + data: + mode: defaultStorageClass + defaultStorageClass: + requests: + storage: 2Gi 
+kafka_connection: kafka-headless.knowlg-db.svc.cluster.local:9092 +learning_graph_events: dev.learning.graph.events \ No newline at end of file diff --git a/knowlg-automation/helm_charts/redis/Chart.yaml b/knowlg-automation/helm_charts/redis/Chart.yaml new file mode 100644 index 000000000..554bef77b --- /dev/null +++ b/knowlg-automation/helm_charts/redis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: redis +version: 0.1.0 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/redis/templates/deployment.yaml b/knowlg-automation/helm_charts/redis/templates/deployment.yaml new file mode 100644 index 000000000..fa911c857 --- /dev/null +++ b/knowlg-automation/helm_charts/redis/templates/deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.db_namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: redis + ports: + - containerPort: {{ .Values.network.port }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-db + namespace: {{ .Values.db_namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - port: {{ .Values.network.port }} + targetPort: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} \ No newline at end of file diff --git a/knowlg-automation/helm_charts/redis/values.yaml b/knowlg-automation/helm_charts/redis/values.yaml new file mode 100644 index 000000000..5241e7586 --- /dev/null +++ b/knowlg-automation/helm_charts/redis/values.yaml @@ -0,0 +1,5 @@ +db_namespace: knowlg-db +replicaCount: 1 +network: + port: 6379 + targetport: 6379 \ No newline at end of file diff --git a/knowlg-automation/helm_charts/search/Chart.yaml b/knowlg-automation/helm_charts/search/Chart.yaml new file mode 
100644 index 000000000..a70e56598 --- /dev/null +++ b/knowlg-automation/helm_charts/search/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: search +version: 0.1.0 diff --git a/knowlg-automation/helm_charts/search/search-service_application.conf b/knowlg-automation/helm_charts/search/search-service_application.conf new file mode 100644 index 000000000..e6f0753e5 --- /dev/null +++ b/knowlg-automation/helm_charts/search/search-service_application.conf @@ -0,0 +1,315 @@ +# This is the main configuration file for the application. +# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ +# Play uses HOCON as its configuration file format. HOCON has a number +# of advantages over other config formats, but there are two things that +# can be used when modifying settings. +# +# You can include other configuration files in this main application.conf file: +#include "extra-config.conf" +# +# You can declare variables and substitute for them: +#mykey = ${some.value} +# +# And if an environment variable exists when there is no other substitution, then +# HOCON will fall back to substituting environment variable: +#mykey = ${JAVA_HOME} + +## Akka +# https://www.playframework.com/documentation/latest/ScalaAkka#Configuration +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +# Play uses Akka internally and exposes Akka Streams and actors in Websockets and +# other streaming HTTP responses. +akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. 
+ # + #log-config-on-start = true +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +# The secret key is used to sign Play's session cookie. +# This must be changed for production, but we don't recommend you change it in this file. +play.http.secret.key = a-long-secret-to-calm-the-rage-of-the-entropy-gods + +## Modules +# https://www.playframework.com/documentation/latest/Modules +# ~~~~~ +# Control which modules are loaded when Play starts. Note that modules are +# the replacement for "GlobalSettings", which are deprecated in 2.5.x. +# Please see https://www.playframework.com/documentation/latest/GlobalSettings +# for more information. +# +# You can also extend Play functionality by using one of the publically available +# Play modules: https://playframework.com/documentation/latest/ModuleDirectory +play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + #enabled += my.application.Module + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" + enabled += modules.SearchModule +} + +## IDE +# https://www.playframework.com/documentation/latest/IDE +# ~~~~~ +# Depending on your IDE, you can add a hyperlink for errors that will jump you +# directly to the code location in the IDE in dev mode. The following line makes +# use of the IntelliJ IDEA REST interface: +#play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# https://www.playframework.com/documentation/latest/ScalaI18N +# ~~~~~ +# Play comes with its own i18n settings, which allow the user's preferred language +# to map through to internal messages, or allow the language to be stored in a cookie. 
+play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. + # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. 
To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } +} + +play.http.parser.maxDiskBuffer = 10MB +parsers.anyContent.maxLength = 10MB + +play.server.provider = play.core.server.NettyServerProvider + +## Netty Provider +# https://www.playframework.com/documentation/latest/SettingsNetty +# ~~~~~ +play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. + transport = "native" +} + +## WS (HTTP Client) +# https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS +# ~~~~~ +# The HTTP client primarily used for REST APIs. The default client can be +# configured directly, but you can also create different client instances +# with customized settings. You must enable this by adding to build.sbt: +# +# libraryDependencies += ws // or javaWs if using java +# +play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. 
+ #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } +} + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. You must enable this by adding to build.sbt: +# +# libraryDependencies += cache +# +play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +## Filter Configuration +# https://www.playframework.com/documentation/latest/Filters +# ~~~~~ +# There are a number of built-in filters that can be enabled and configured +# to give Play greater security. +# +play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. + enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. + # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. 
+ # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } +} + +play.http.parser.maxMemoryBuffer = 50MB +akka.http.parsing.max-content-length = 50MB + +schema.base_path = "../../schemas/" + + +content.relatedBoards.properties=["channel","board","subject","medium"] + + +# Path for elasticsearch.properties file +search.config.path=/home/learning/platform/search + +# Cache-Manager Configuration +cache.type="redis" + +search.es_conn_info="elasticsearch.knowlg-db.svc.cluster.local:9200" +search.fields.query=["name^100","title^100","lemma^100","code^100","domain","subject","description^10","keywords^100","ageGroup^10","filter^10","theme^10","genre^10","objects^25","contentType^100","language^200","teachingMode^25","skills^10","learningObjective^10","curriculum^100","gradeLevel^100","developer^100","attributions^10","identifier^100","IL_UNIQUE_ID^100","owner^50","board^100","relatedBoards^100","creator^100", "dialcodes^100","text","words","releaseNotes"] +search.fields.date=["lastUpdatedOn","createdOn","versionDate","lastSubmittedOn","lastPublishedOn"] +search.fields.mode_collection=["identifier","name","objectType","contentType","mimeType","size","childNodes","board","subject","medium","gradeLevel","appIcon", "origin", "originData"] +search.batch.size=500 +search.connection.timeout=30 + +platform-api-url="http://localhost:8080/learning-service" +language.map={"Hindi":"hi", "English":"en", "Telugu":"te", "Kannada":"ka", "Tamil":"ta", "Assamese":"as", "Bengali":"bn", "Bodo":"bo", "Gujarati":"gu", "Konkani":"ko", "Malayalam":"ml", "Marathi":"mr", "Nepali":"ne", "Odia":"or", "Punjabi":"pj", "Sanskrit":"sk"} +#Top N Config for Search Telemetry +telemetry.search.topn=5 +telemetry_env=dev +installation.id=ekstep + +# Configuration for default channel ID +channel.default="in.ekstep" +ekstepPlatformApiUserId="search-service" + + +content.tagging.property=["subject","medium"] +search.payload.log_enable=true \ No newline at end of file diff --git 
a/ontology-engine/graph-engine_2.11/src/test/resources/logback.xml b/knowlg-automation/helm_charts/search/search-service_logback.xml similarity index 100% rename from ontology-engine/graph-engine_2.11/src/test/resources/logback.xml rename to knowlg-automation/helm_charts/search/search-service_logback.xml diff --git a/knowlg-automation/helm_charts/search/templates/deployment.yaml b/knowlg-automation/helm_charts/search/templates/deployment.yaml new file mode 100644 index 000000000..166a23d98 --- /dev/null +++ b/knowlg-automation/helm_charts/search/templates/deployment.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.api_namespace }} + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: {{ .Values.replicaCount }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.strategy.maxsurge }} + maxUnavailable: {{ .Values.strategy.maxunavailable }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + volumes: + - name: {{ .Chart.Name }}-config + configMap: + name: {{ .Chart.Name }}-config + - name: {{ .Chart.Name }}-xml-config + configMap: + name: {{ .Chart.Name }}-xml-config + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Always + env: + - name: JAVA_OPTIONS + value: {{ .Values.env.javaoptions | quote }} + - name: _JAVA_OPTIONS + value: -Dlog4j2.formatMsgNoLookups=true + envFrom: + - configMapRef: + name: {{ .Chart.Name }}-config + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: + - containerPort: {{ .Values.network.port }} + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + {{- end }} + volumeMounts: + - name: {{ .Chart.Name }}-config + mountPath: /home/sunbird/search-service-1.0-SNAPSHOT/config/application.conf + 
subPath: search-service_application.conf + - name: {{ .Chart.Name }}-xml-config + mountPath: /home/sunbird/search-service-1.0-SNAPSHOT/config/logback.xml + subPath: search-service_logback.xml + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-service + namespace: {{ .Values.api_namespace }} + labels: + app: {{ .Chart.Name }} +spec: + type: {{ .Values.serviceType }} + ports: + - name: http-{{ .Chart.Name }} + protocol: TCP + port: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} diff --git a/knowlg-automation/helm_charts/search/templates/search-configmap.yaml b/knowlg-automation/helm_charts/search/templates/search-configmap.yaml new file mode 100644 index 000000000..f4f92a3ed --- /dev/null +++ b/knowlg-automation/helm_charts/search/templates/search-configmap.yaml @@ -0,0 +1,393 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: search-xml-config + namespace: knowlg-api +data: + search-service_logback.xml: | + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: search-config + namespace: knowlg-api +data: + search-service_application.conf: | + # This is the main configuration file for the application. + # https://www.playframework.com/documentation/latest/ConfigFile + # ~~~~~ + # Play uses HOCON as its configuration file format. HOCON has a number + # of advantages over other config formats, but there are two things that + # can be used when modifying settings. 
+ # + # You can include other configuration files in this main application.conf file: + #include "extra-config.conf" + # + # You can declare variables and substitute for them: + #mykey = ${some.value} + # + # And if an environment variable exists when there is no other substitution, then + # HOCON will fall back to substituting environment variable: + #mykey = ${JAVA_HOME} + + ## Akka + # https://www.playframework.com/documentation/latest/ScalaAkka#Configuration + # https://www.playframework.com/documentation/latest/JavaAkka#Configuration + # ~~~~~ + # Play uses Akka internally and exposes Akka Streams and actors in Websockets and + # other streaming HTTP responses. + akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". 
+ task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /searchActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /healthActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + } + } + } + + ## Secret key + # http://www.playframework.com/documentation/latest/ApplicationSecret + # ~~~~~ + # The secret key is used to sign Play's session cookie. + # This must be changed for production, but we don't recommend you change it in this file. + play.http.secret.key = a-long-secret-to-calm-the-rage-of-the-entropy-gods + + ## Modules + # https://www.playframework.com/documentation/latest/Modules + # ~~~~~ + # Control which modules are loaded when Play starts. Note that modules are + # the replacement for "GlobalSettings", which are deprecated in 2.5.x. + # Please see https://www.playframework.com/documentation/latest/GlobalSettings + # for more information. + # + # You can also extend Play functionality by using one of the publically available + # Play modules: https://playframework.com/documentation/latest/ModuleDirectory + play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + #enabled += my.application.Module + + # If there are any built-in modules that you want to disable, you can list them here. 
+ #disabled += "" + enabled += modules.SearchModule + } + + ## IDE + # https://www.playframework.com/documentation/latest/IDE + # ~~~~~ + # Depending on your IDE, you can add a hyperlink for errors that will jump you + # directly to the code location in the IDE in dev mode. The following line makes + # use of the IntelliJ IDEA REST interface: + #play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + + ## Internationalisation + # https://www.playframework.com/documentation/latest/JavaI18N + # https://www.playframework.com/documentation/latest/ScalaI18N + # ~~~~~ + # Play comes with its own i18n settings, which allow the user's preferred language + # to map through to internal messages, or allow the language to be stored in a cookie. + play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true + } + + ## Play HTTP settings + # ~~~~~ + play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. 
+ # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } + } + + play.http.parser.maxDiskBuffer = 10MB + parsers.anyContent.maxLength = 10MB + + play.server.provider = play.core.server.NettyServerProvider + + ## Netty Provider + # https://www.playframework.com/documentation/latest/SettingsNetty + # ~~~~~ + play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. 
+ transport = "native" + } + + ## WS (HTTP Client) + # https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS + # ~~~~~ + # The HTTP client primarily used for REST APIs. The default client can be + # configured directly, but you can also create different client instances + # with customized settings. You must enable this by adding to build.sbt: + # + # libraryDependencies += ws // or javaWs if using java + # + play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } + } + + ## Cache + # https://www.playframework.com/documentation/latest/JavaCache + # https://www.playframework.com/documentation/latest/ScalaCache + # ~~~~~ + # Play comes with an integrated cache API that can reduce the operational + # overhead of repeated requests. You must enable this by adding to build.sbt: + # + # libraryDependencies += cache + # + play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] + } + + ## Filter Configuration + # https://www.playframework.com/documentation/latest/Filters + # ~~~~~ + # There are a number of built-in filters that can be enabled and configured + # to give Play greater security. + # + play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. 
+ enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. + # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } + } + + play.http.parser.maxMemoryBuffer = 50MB + akka.http.parsing.max-content-length = 50MB + + schema.base_path = "../../schemas/" + #schema.base_path="https://sunbirdstagingpublic.blob.core.windows.net/sunbird-content-staging/schemas/local" + + telemetry_env="sunbird" + installation.id="Sunbird_Stage" + + # ElasticSearch Configuration + ekstepPlatformApiUserId="search-service" + search.es_conn_info="{{ .Values.es_connection }}" + search.fields.query=["name^100","title^100","lemma^100","code^100","domain","subject","description^10","keywords^100","ageGroup^10","filter^10","theme^10","genre^10","objects^25","contentType^100","language^200","teachingMode^25","skills^10","learningObjective^10","curriculum^100","gradeLevel^100","developer^100","attributions^10","identifier^100","IL_UNIQUE_ID^100","owner^50","board^100","relatedBoards^100","creator^100", "dialcodes^100","text","words","releaseNotes"] + search.fields.date=["lastUpdatedOn","createdOn","versionDate","lastSubmittedOn","lastPublishedOn"] + search.fields.mode_collection=["identifier","name","objectType","contentType","mimeType","size","childNodes","board","subject","medium","gradeLevel","appIcon","resourceType","origin","originData"] + search.batch.size=500 + search.connection.timeout=30 + + language.map={"Hindi":"hi", "English":"en", "Telugu":"te", "Kannada":"ka", "Tamil":"ta", "Assamese":"as", "Bengali":"bn", "Bodo":"bo", "Gujarati":"gu", "Konkani":"ko", "Malayalam":"ml", "Marathi":"mr", "Nepali":"ne", "Odia":"or", "Punjabi":"pj", "Sanskrit":"san"} + + # Configuration for default channel ID + channel.default="in.ekstep" + compositesearch.index.name="compositesearch" + + content.tagging.backward_enable=false + content.tagging.property=["subject","medium"] + search.payload.log_enable=true diff --git a/knowlg-automation/helm_charts/search/values.yaml b/knowlg-automation/helm_charts/search/values.yaml new file mode 100644 index 000000000..b1784b919 --- 
/dev/null +++ b/knowlg-automation/helm_charts/search/values.yaml @@ -0,0 +1,28 @@ +### Default variable file for search-service ### + +api_namespace: knowlg-api + +env: + javaoptions: -Xmx600m + +replicaCount: 1 +repository: docker.io/aimansharief/search-service-modified +image_tag: R5.2.0 +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1 + memory: 1024Mi +network: + port: 9006 + targetport: 9000 +strategy: + type: RollingUpdate + maxsurge: 25% + maxunavailable: 25% +serviceType: ClusterIP +es_connection: elasticsearch.knowlg-db.svc.cluster.local:9200 + + diff --git a/knowlg-automation/helm_charts/taxonomy/Chart.yaml b/knowlg-automation/helm_charts/taxonomy/Chart.yaml new file mode 100644 index 000000000..8462df87c --- /dev/null +++ b/knowlg-automation/helm_charts/taxonomy/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: taxonomy +version: 0.1.0 diff --git a/knowlg-automation/helm_charts/taxonomy/taxonomy-service_application.conf b/knowlg-automation/helm_charts/taxonomy/taxonomy-service_application.conf new file mode 100644 index 000000000..68b072a6d --- /dev/null +++ b/knowlg-automation/helm_charts/taxonomy/taxonomy-service_application.conf @@ -0,0 +1,399 @@ +# This is the main configuration file for the application. +# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ +# Play uses HOCON as its configuration file format. HOCON has a number +# of advantages over other config formats, but there are two things that +# can be used when modifying settings. 
+# +# You can include other configuration files in this main application.conf file: +#include "extra-config.conf" +# +# You can declare variables and substitute for them: +#mykey = ${some.value} +# +# And if an environment variable exists when there is no other substitution, then +# HOCON will fall back to substituting environment variable: +#mykey = ${JAVA_HOME} + +## Akka +# https://www.playframework.com/documentation/latest/ScalaAkka#Configuration +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +# Play uses Akka internally and exposes Akka Streams and actors in Websockets and +# other streaming HTTP responses. +akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". 
+ task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /contentActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + } + } +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +# The secret key is used to sign Play's session cookie. +# This must be changed for production, but we don't recommend you change it in this file. +play.http.secret.key="" + +## Modules +# https://www.playframework.com/documentation/latest/Modules +# ~~~~~ +# Control which modules are loaded when Play starts. Note that modules are +# the replacement for "GlobalSettings", which are deprecated in 2.5.x. +# Please see https://www.playframework.com/documentation/latest/GlobalSettings +# for more information. +# +# You can also extend Play functionality by using one of the publically available +# Play modules: https://playframework.com/documentation/latest/ModuleDirectory +play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + enabled += modules.TaxonomyModule + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" +} + +## IDE +# https://www.playframework.com/documentation/latest/IDE +# ~~~~~ +# Depending on your IDE, you can add a hyperlink for errors that will jump you +# directly to the code location in the IDE in dev mode. 
The following line makes +# use of the IntelliJ IDEA REST interface: +#play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# https://www.playframework.com/documentation/latest/ScalaI18N +# ~~~~~ +# Play comes with its own i18n settings, which allow the user's preferred language +# to map through to internal messages, or allow the language to be stored in a cookie. +play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. 
+ # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } +} + +play.server.http.idleTimeout = 60s +play.http.parser.maxDiskBuffer = 10MB +parsers.anyContent.maxLength = 10MB + +## Netty Provider +# https://www.playframework.com/documentation/latest/SettingsNetty +# ~~~~~ +play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. 
+ transport = "native" +} + +## WS (HTTP Client) +# https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS +# ~~~~~ +# The HTTP client primarily used for REST APIs. The default client can be +# configured directly, but you can also create different client instances +# with customized settings. You must enable this by adding to build.sbt: +# +# libraryDependencies += ws // or javaWs if using java +# +play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } +} + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. You must enable this by adding to build.sbt: +# +# libraryDependencies += cache +# +play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +## Filter Configuration +# https://www.playframework.com/documentation/latest/Filters +# ~~~~~ +# There are a number of built-in filters that can be enabled and configured +# to give Play greater security. +# +play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. + enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. 
+ # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } +} + +play.http.parser.maxMemoryBuffer = 50MB +akka.http.parsing.max-content-length = 50MB +schema.base_path="https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/local" + +# Cassandra Configuration +cassandra { + lp { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } + lpa { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } +} + +# Redis Configuration +redis { + host: "redis-db.knowlg-db.svc.cluster.local" + port: 6379 + maxConnections: 128 +} + +# Configuration +akka.request_timeout: 30 +environment.id: 10000000 +graph { + dir: "/data/graphDB" + ids: ["domain"] + passport.key.base: "" +} +route { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + bolt { + write { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + }, + read { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + } + } +} +y +shard.id: 1 +platform { + auth.check.enabled: false + cache.ttl: 3600000 +} + +# Cloud Storage Config +cloud_storage_type: "azure" +azure_storage_key: "" +azure_storage_secret: "" +azure_storage_container: "" + +installation.id: ekstep + +kafka { + urls : "kafka.knowlg-db.svc.cluster.local:9092" +} + +channel { + default: "org.sunbird" +} + +languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" +} +objectcategorydefinition.keyspace="dev_category_store" + +# Framework master category validation Supported values are Yes/No +master.category.validation.enabled="Yes" diff --git a/knowlg-automation/helm_charts/taxonomy/taxonomy-service_logback.xml b/knowlg-automation/helm_charts/taxonomy/taxonomy-service_logback.xml new file mode 100644 index 
000000000..21cb53d9e --- /dev/null +++ b/knowlg-automation/helm_charts/taxonomy/taxonomy-service_logback.xml @@ -0,0 +1,31 @@ + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/knowlg-automation/helm_charts/taxonomy/templates/deployment.yaml b/knowlg-automation/helm_charts/taxonomy/templates/deployment.yaml new file mode 100644 index 000000000..bd39c7617 --- /dev/null +++ b/knowlg-automation/helm_charts/taxonomy/templates/deployment.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.api_namespace }} + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: {{ .Values.replicaCount }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.strategy.maxsurge }} + maxUnavailable: {{ .Values.strategy.maxunavailable }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + volumes: + - name: {{ .Chart.Name }}-config + configMap: + name: {{ .Chart.Name }}-config + - name: {{ .Chart.Name }}-xml-config + configMap: + name: {{ .Chart.Name }}-xml-config + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Always + env: + - name: JAVA_OPTIONS + value: {{ .Values.env.javaoptions | quote }} + - name: _JAVA_OPTIONS + value: -Dlog4j2.formatMsgNoLookups=true + envFrom: + - configMapRef: + name: {{ .Chart.Name }}-config + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: + - containerPort: {{ .Values.network.port }} + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + {{- end }} + volumeMounts: + - name: {{ .Chart.Name }}-config + mountPath: /home/sunbird/taxonomy-service-1.0-SNAPSHOT/config/application.conf + subPath: taxonomy-service_application.conf + - name: {{ .Chart.Name 
}}-xml-config + mountPath: /home/sunbird/taxonomy-service-1.0-SNAPSHOT/config/logback.xml + subPath: taxonomy-service_logback.xml + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-service + namespace: {{ .Values.api_namespace }} + labels: + app: {{ .Chart.Name }} +spec: + type: {{ .Values.serviceType }} + ports: + - name: http-{{ .Chart.Name }} + protocol: TCP + port: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} diff --git a/knowlg-automation/helm_charts/taxonomy/templates/taxonomy-configmap.yaml b/knowlg-automation/helm_charts/taxonomy/templates/taxonomy-configmap.yaml new file mode 100644 index 000000000..5f6bdd394 --- /dev/null +++ b/knowlg-automation/helm_charts/taxonomy/templates/taxonomy-configmap.yaml @@ -0,0 +1,446 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: taxonomy-xml-config + namespace: knowlg-api +data: + taxonomy-service_logback.xml: | + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + + + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: taxonomy-config + namespace: knowlg-api +data: + taxonomy-service_application.conf: | + # This is the main configuration file for the application. + # https://www.playframework.com/documentation/latest/ConfigFile + # ~~~~~ + # Play uses HOCON as its configuration file format. HOCON has a number + # of advantages over other config formats, but there are two things that + # can be used when modifying settings. 
+ # + # You can include other configuration files in this main application.conf file: + #include "extra-config.conf" + # + # You can declare variables and substitute for them: + #mykey = ${some.value} + # + # And if an environment variable exists when there is no other substitution, then + # HOCON will fall back to substituting environment variable: + #mykey = ${JAVA_HOME} + + ## Akka + # https://www.playframework.com/documentation/latest/ScalaAkka#Configuration + # https://www.playframework.com/documentation/latest/JavaAkka#Configuration + # ~~~~~ + # Play uses Akka internally and exposes Akka Streams and actors in Websockets and + # other streaming HTTP responses. + akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". 
+ task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /contentActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + } + } + } + + ## Secret key + # http://www.playframework.com/documentation/latest/ApplicationSecret + # ~~~~~ + # The secret key is used to sign Play's session cookie. + # This must be changed for production, but we don't recommend you change it in this file. + play.http.secret.key="jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" + + ## Modules + # https://www.playframework.com/documentation/latest/Modules + # ~~~~~ + # Control which modules are loaded when Play starts. Note that modules are + # the replacement for "GlobalSettings", which are deprecated in 2.5.x. + # Please see https://www.playframework.com/documentation/latest/GlobalSettings + # for more information. + # + # You can also extend Play functionality by using one of the publically available + # Play modules: https://playframework.com/documentation/latest/ModuleDirectory + play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + enabled += modules.TaxonomyModule + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" + } + + ## IDE + # https://www.playframework.com/documentation/latest/IDE + # ~~~~~ + # Depending on your IDE, you can add a hyperlink for errors that will jump you + # directly to the code location in the IDE in dev mode. 
The following line makes + # use of the IntelliJ IDEA REST interface: + #play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + + ## Internationalisation + # https://www.playframework.com/documentation/latest/JavaI18N + # https://www.playframework.com/documentation/latest/ScalaI18N + # ~~~~~ + # Play comes with its own i18n settings, which allow the user's preferred language + # to map through to internal messages, or allow the language to be stored in a cookie. + play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true + } + + ## Play HTTP settings + # ~~~~~ + play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. 
+ # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } + } + + play.server.http.idleTimeout = 60s + play.http.parser.maxDiskBuffer = 10MB + parsers.anyContent.maxLength = 10MB + + ## Netty Provider + # https://www.playframework.com/documentation/latest/SettingsNetty + # ~~~~~ + play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. 
+ transport = "native" + } + + ## WS (HTTP Client) + # https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS + # ~~~~~ + # The HTTP client primarily used for REST APIs. The default client can be + # configured directly, but you can also create different client instances + # with customized settings. You must enable this by adding to build.sbt: + # + # libraryDependencies += ws // or javaWs if using java + # + play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } + } + + ## Cache + # https://www.playframework.com/documentation/latest/JavaCache + # https://www.playframework.com/documentation/latest/ScalaCache + # ~~~~~ + # Play comes with an integrated cache API that can reduce the operational + # overhead of repeated requests. You must enable this by adding to build.sbt: + # + # libraryDependencies += cache + # + play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] + } + + ## Filter Configuration + # https://www.playframework.com/documentation/latest/Filters + # ~~~~~ + # There are a number of built-in filters that can be enabled and configured + # to give Play greater security. + # + play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. 
+ enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. + # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } + } + + play.http.parser.maxMemoryBuffer = 50MB + akka.http.parsing.max-content-length = 50MB + schema.base_path="https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/local" + + # Cassandra Configuration + cassandra { + lp { + connection: "{{ .Values.cassandra_connection }}" + } + lpa { + connection: "{{ .Values.cassandra_connection }}" + } + } + + # Redis Configuration + redis { + host: "{{ .Values.redis_connection }}" + port: 6379 + maxConnections: 128 + } + + # Configuration + akka.request_timeout: 30 + environment.id: 10000000 + graph { + dir: "/data/graphDB" + ids: ["domain"] + passport.key.base: "jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" + } + route { + domain: "bolt://{{ .Values.neo4j_domain_connection }}" + all: "bolt://{{ .Values.neo4j_all_connection }}" + bolt { + write { + domain: "bolt://{{ .Values.neo4j_domain_connection }}" + all: "bolt://{{ .Values.neo4j_all_connection }}" + }, + read { + domain: "bolt://{{ .Values.neo4j_domain_connection }}" + all: "bolt://{{ .Values.neo4j_all_connection }}" + } + } + } + + shard.id: 1 + platform { + auth.check.enabled: false + cache.ttl: 3600000 + } + + # Cloud Storage Config + cloud_storage_type: "azure" + azure_storage_key: "" + azure_storage_secret: "" + azure_storage_container: "" + + installation.id: ekstep + + kafka { + urls : "{{ .Values.kafka_connection }}" + } + + channel { + default: "org.sunbird" + } + + languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" + } + objectcategorydefinition.keyspace="{{ .Values.objectcategorydefinition_keyspace }}" + + # Framework master category validation Supported values are Yes/No + master.category.validation.enabled="Yes" diff --git a/knowlg-automation/helm_charts/taxonomy/values.yaml b/knowlg-automation/helm_charts/taxonomy/values.yaml new file mode 
100644 index 000000000..3a9451d19 --- /dev/null +++ b/knowlg-automation/helm_charts/taxonomy/values.yaml @@ -0,0 +1,31 @@ +### Default variable file for taxonomy-service ### + +api_namespace: knowlg-api + +env: + javaoptions: -Xmx600m + +replicaCount: 1 +repository: docker.io/aimansharief/taxonomy-service +image_tag: R5.2.0 +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1 + memory: 1024Mi +network: + port: 9006 + targetport: 9000 +strategy: + type: RollingUpdate + maxsurge: 25% + maxunavailable: 25% +serviceType: ClusterIP +cassandra_connection: cassandra-db.knowlg-db.svc.cluster.local:9042 +redis_connection: redis-db.knowlg-db.svc.cluster.local +neo4j_domain_connection: neo4j-db.knowlg-db.svc.cluster.local:7687 +neo4j_all_connection: neo4j-db.knowlg-db.svc.cluster.local:8687 +kafka_connection: kafka.knowlg-db.svc.cluster.local:9092 +objectcategorydefinition_keyspace: dev_category_store \ No newline at end of file diff --git a/knowlg-automation/terraform/aks.yaml b/knowlg-automation/terraform/aks.yaml new file mode 100644 index 000000000..599468588 --- /dev/null +++ b/knowlg-automation/terraform/aks.yaml @@ -0,0 +1,4 @@ +│ Warning: No outputs found +│  +│ The state file either has no outputs defined, or all the defined outputs are empty. Please define an output in your configuration with the `output` keyword and run `terraform refresh` for it to become available. If you are using interpolation, please +│ verify the interpolated value is not empty. You can use the `terraform console` command to assist. 
diff --git a/knowlg-automation/terraform/azure/aks.tf b/knowlg-automation/terraform/azure/aks.tf new file mode 100644 index 000000000..a8c1ac1f7 --- /dev/null +++ b/knowlg-automation/terraform/azure/aks.tf @@ -0,0 +1,21 @@ +resource "azurerm_kubernetes_cluster" "aks" { + name = "${local.environment_name}" + location = var.location + resource_group_name = data.azurerm_resource_group.rg.name + dns_prefix = "${local.environment_name}" + + default_node_pool { + name = var.aks_nodepool_name + node_count = var.aks_node_count + vm_size = var.aks_node_size + } + + identity { + type = var.aks_cluster_identity + } + + tags = merge( + local.common_tags, + var.additional_tags + ) +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/cassandra-provision.tf b/knowlg-automation/terraform/azure/cassandra-provision.tf new file mode 100644 index 000000000..c09ea8a3b --- /dev/null +++ b/knowlg-automation/terraform/azure/cassandra-provision.tf @@ -0,0 +1,11 @@ +resource "helm_release" "cassandra" { + name = "cassandra" + chart = var.CASSANDRA_CHART + namespace = var.CASSANDRA_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.neo4j] + wait_for_jobs = true + +} + \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/content-provision.tf b/knowlg-automation/terraform/azure/content-provision.tf new file mode 100644 index 000000000..02a5c9fd2 --- /dev/null +++ b/knowlg-automation/terraform/azure/content-provision.tf @@ -0,0 +1,11 @@ +resource "helm_release" "content" { + name = "content" + chart = var.CONTENT_CHART + namespace = var.CONTENT_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.redis] + wait_for_jobs = true + +} + \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/data.tf b/knowlg-automation/terraform/azure/data.tf new file mode 100644 index 000000000..536d1af11 --- /dev/null +++ b/knowlg-automation/terraform/azure/data.tf @@ 
-0,0 +1 @@ +data "azurerm_subscription" "current" {} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/dial-provision.tf b/knowlg-automation/terraform/azure/dial-provision.tf new file mode 100644 index 000000000..fd17c7630 --- /dev/null +++ b/knowlg-automation/terraform/azure/dial-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "dial" { + name = "dial" + chart = var.DIAL_CHART + namespace = var.DIAL_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.learning] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/elasticsearch-provision.tf b/knowlg-automation/terraform/azure/elasticsearch-provision.tf new file mode 100644 index 000000000..8c9b37a7b --- /dev/null +++ b/knowlg-automation/terraform/azure/elasticsearch-provision.tf @@ -0,0 +1,11 @@ + +resource "helm_release" "elasticsearch" { + name = "elasticsearch" + chart = var.ELASTICSEARCH_CHART + namespace = var.ELASTICSEARCH_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.cassandra] + wait_for_jobs = true + +} diff --git a/knowlg-automation/terraform/azure/flink-provision.tf b/knowlg-automation/terraform/azure/flink-provision.tf new file mode 100644 index 000000000..213d4345d --- /dev/null +++ b/knowlg-automation/terraform/azure/flink-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "flink" { + count = length(var.flink_release_name) + name = var.flink_release_name[count.index] + chart = var.flink_chart_path + namespace = "${var.flink_namespace}" + create_namespace = var.flink_create_namespace + depends_on = [helm_release.elasticsearch] + wait_for_jobs = var.flink_wait_for_jobs + timeout = var.flink_chart_install_timeout +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/kafka.tf b/knowlg-automation/terraform/azure/kafka.tf new file mode 120000 index 000000000..b0001ab94 --- /dev/null +++ 
b/knowlg-automation/terraform/azure/kafka.tf @@ -0,0 +1 @@ +../helm/kafka.tf \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/kafka_vars.tf b/knowlg-automation/terraform/azure/kafka_vars.tf new file mode 120000 index 000000000..b97fea54a --- /dev/null +++ b/knowlg-automation/terraform/azure/kafka_vars.tf @@ -0,0 +1 @@ +../helm/kafka_vars.tf \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/learning-provision.tf b/knowlg-automation/terraform/azure/learning-provision.tf new file mode 100644 index 000000000..c01f0c6f4 --- /dev/null +++ b/knowlg-automation/terraform/azure/learning-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "learning" { + name = "learning" + chart = var.LEARNING_CHART + namespace = var.LEARNING_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.taxonomy] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/local.tf b/knowlg-automation/terraform/azure/local.tf new file mode 100644 index 000000000..72aeb9d5f --- /dev/null +++ b/knowlg-automation/terraform/azure/local.tf @@ -0,0 +1,18 @@ + +locals { + common_tags = { + Environment = "${var.env}" + BuildingBlock = "${var.building_block}" + } + subid = split("-", "${data.azurerm_subscription.current.subscription_id}") + environment_name = "${var.building_block}-${var.env}" + uid = local.subid[0] + env_name_without_dashes = replace(local.environment_name, "-", "") + storage_account_name = "${local.env_name_without_dashes}${local.uid}" + + storage = { + azure_storage_account_name = azurerm_storage_account.storage_account.name + azure_storage_account_key = azurerm_storage_account.storage_account.primary_access_key + azure_storage_container = azurerm_storage_container.storage_container.name + } +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/local_file.tf b/knowlg-automation/terraform/azure/local_file.tf new file mode 100644 index 
000000000..1281a0361 --- /dev/null +++ b/knowlg-automation/terraform/azure/local_file.tf @@ -0,0 +1,4 @@ +resource "local_file" "kubeconfig" { + content = azurerm_kubernetes_cluster.aks.kube_config_raw + filename = "${local.environment_name}-kubeconfig.yaml" +} diff --git a/knowlg-automation/terraform/azure/neo4j-provision.tf b/knowlg-automation/terraform/azure/neo4j-provision.tf new file mode 100644 index 000000000..60f279c87 --- /dev/null +++ b/knowlg-automation/terraform/azure/neo4j-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "neo4j" { + name = "neo4j" + chart = var.NEO4J_CHART + namespace = var.NEO4J_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.kafka] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/provider.tf b/knowlg-automation/terraform/azure/provider.tf new file mode 100644 index 000000000..15eae68fc --- /dev/null +++ b/knowlg-automation/terraform/azure/provider.tf @@ -0,0 +1,17 @@ +provider "azurerm" { + features {} + skip_provider_registration = true +} + +data "azurerm_resource_group" "rg" { + name = "knowlg-testing" +} + +provider "helm" { + kubernetes { + host = azurerm_kubernetes_cluster.aks.kube_config.0.host + client_certificate = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.client_certificate) + client_key = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.client_key) + cluster_ca_certificate = base64decode(azurerm_kubernetes_cluster.aks.kube_config.0.cluster_ca_certificate) + } +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/redis-provision.tf b/knowlg-automation/terraform/azure/redis-provision.tf new file mode 100644 index 000000000..d96b453ae --- /dev/null +++ b/knowlg-automation/terraform/azure/redis-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "redis" { + name = "redis" + chart = var.REDIS_CHART + namespace = var.REDIS_NAMESPACE + create_namespace = true + 
dependency_update = true + depends_on = [helm_release.elasticsearch] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/search-provision.tf b/knowlg-automation/terraform/azure/search-provision.tf new file mode 100644 index 000000000..3f3ea7cc2 --- /dev/null +++ b/knowlg-automation/terraform/azure/search-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "search" { + name = "search" + chart = var.SEARCH_CHART + namespace = var.SEARCH_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.content] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/storage_account.tf b/knowlg-automation/terraform/azure/storage_account.tf new file mode 100644 index 000000000..8baac6a18 --- /dev/null +++ b/knowlg-automation/terraform/azure/storage_account.tf @@ -0,0 +1,12 @@ +resource "azurerm_storage_account" "storage_account" { + name = "${local.storage_account_name}" + resource_group_name = data.azurerm_resource_group.rg.name + location = var.location + account_tier = var.azure_storage_tier + account_replication_type = var.azure_storage_replication + + tags = merge( + local.common_tags, + var.additional_tags + ) +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/storage_container.tf b/knowlg-automation/terraform/azure/storage_container.tf new file mode 100644 index 000000000..811955bb8 --- /dev/null +++ b/knowlg-automation/terraform/azure/storage_container.tf @@ -0,0 +1,5 @@ +resource "azurerm_storage_container" "storage_container" { + name = "${local.environment_name}" + storage_account_name = azurerm_storage_account.storage_account.name + container_access_type = "private" +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/subnets.tf b/knowlg-automation/terraform/azure/subnets.tf new file mode 100644 index 000000000..657471fa0 --- /dev/null +++ 
b/knowlg-automation/terraform/azure/subnets.tf @@ -0,0 +1,7 @@ +resource "azurerm_subnet" "aks_subnet" { + name = "${local.environment_name}-aks" + resource_group_name = data.azurerm_resource_group.rg.name + virtual_network_name = azurerm_virtual_network.vnet.name + address_prefixes = var.aks_subnet_cidr + service_endpoints = var.aks_subnet_service_endpoints +} \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/taxonomy-provision.tf b/knowlg-automation/terraform/azure/taxonomy-provision.tf new file mode 100644 index 000000000..1284df7ae --- /dev/null +++ b/knowlg-automation/terraform/azure/taxonomy-provision.tf @@ -0,0 +1,11 @@ +resource "helm_release" "taxonomy" { + name = "taxonomy" + chart = var.TAXONOMY_CHART + namespace = var.TAXONOMY_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [helm_release.search] + wait_for_jobs = true + +} + \ No newline at end of file diff --git a/knowlg-automation/terraform/azure/variables.tf b/knowlg-automation/terraform/azure/variables.tf new file mode 100644 index 000000000..d2d582d77 --- /dev/null +++ b/knowlg-automation/terraform/azure/variables.tf @@ -0,0 +1,250 @@ +variable "env" { + type = string + description = "Environment name. All resources will be prefixed with this value." + default = "dev" +} + +variable "building_block" { + type = string + description = "Building block name. All resources will be prefixed with this value." + default = "knowlg" +} + +variable "location" { + type = string + description = "Azure location to create the resources." + default = "East US 2" +} + +variable "additional_tags" { + type = map(string) + description = "Additional tags for the resources. These tags will be applied to all the resources." + default = {} +} + +variable "vnet_cidr" { + type = list(string) + description = "Azure vnet CIDR range." + default = ["10.0.0.0/16"] +} + +variable "aks_subnet_cidr" { + type = list(string) + description = "Azure AKS subnet CIDR range." 
+ default = ["10.0.0.0/22"] +} + +variable "aks_subnet_service_endpoints" { + type = list(string) + description = "Azure AKS subnet service endpoints." + default = ["Microsoft.Sql", "Microsoft.Storage"] +} + +variable "azure_storage_tier" { + type = string + description = "Azure storage tier - Standard / Premium." + default = "Standard" +} + +variable "azure_storage_replication" { + type = string + description = "Azure storage replication - LRS / ZRS / GRS etc." + default = "LRS" +} + +variable "aks_nodepool_name" { + type = string + description = "AKS node pool name." + default = "aksnodepool1" +} + +variable "aks_node_count" { + type = number + description = "AKS node count." + default = 4 +} + +variable "aks_node_size" { + type = string + description = "AKS node size." + default = "Standard_D2s_v4" +} + +variable "aks_cluster_identity" { + type = string + description = "AKS cluster identity." + default = "SystemAssigned" +} + +variable "kubernetes_storage_class" { + type = string + description = "Storage class name for the AKS cluster" + default = "default" +} + +variable "druid_deepstorage_type" { + type = string + description = "Druid deep storage type." + default = "azure" +} + +variable "flink_checkpoint_store_type" { + type = string + description = "Flink checkpoint store type." 
+ default = "azure" +} + +variable "RESOURCE_GROUP" { + type = string + description = "RESOURCE GROUP name" + default = "knowlg-testing" +} + +#NEO4J +variable "NEO4J_CHART" { + description = "Neo4j Instance Running Namespace" + default = "../../helm_charts/neo4j" +} + +variable "NEO4J_NAMESPACE" { + description = "NEO4J Instance Running Namespace" + default = "knowlg-db" +} + +#CASSANDRA +variable "CASSANDRA_CHART" { + description = "Cassandra Instance Running Namespace" + default = "../../helm_charts/cassandra" +} + +variable "CASSANDRA_NAMESPACE" { + description = "CASSANDRA Instance Running Namespace" + default = "knowlg-db" +} + +#ELASTICSEARCH +variable "ELASTICSEARCH_CHART" { + description = "Elasticsearch Instance Running Namespace" + default = "../../helm_charts/elasticsearch" +} + +variable "ELASTICSEARCH_NAMESPACE" { + description = "Elasticsearch Instance Running Namespace" + default = "knowlg-db" +} + +#REDIS +variable "REDIS_CHART" { + description = "Redis Instance Running Namespace" + default = "../../helm_charts/redis" +} + +variable "REDIS_NAMESPACE" { + description = "Redis Instance Running Namespace" + default = "knowlg-db" +} + +#CONTENT +variable "CONTENT_CHART" { + description = "Content Instance Running Namespace" + default = "../../helm_charts/content" +} + +variable "CONTENT_NAMESPACE" { + description = "Content Instance Running Namespace" + default = "knowlg-api" +} + +#SEARCH +variable "SEARCH_CHART" { + description = "Search Instance Running Namespace" + default = "../../helm_charts/search" +} + +variable "SEARCH_NAMESPACE" { + description = "Search Instance Running Namespace" + default = "knowlg-api" +} + +#TAXONOMY +variable "TAXONOMY_CHART" { + description = "Taxonomy Instance Running Namespace" + default = "../../helm_charts/taxonomy" +} + +variable "TAXONOMY_NAMESPACE" { + description = "Taxonomy Instance Running Namespace" + default = "knowlg-api" +} + +#LEARNING +variable "LEARNING_CHART" { + description = "Learning Instance 
Running Namespace" + default = "../../helm_charts/learning" +} + +variable "LEARNING_NAMESPACE" { + description = "Learning Instance Running Namespace" + default = "knowlg-api" +} + +#DIAL +variable "DIAL_CHART" { + description = "DIAL Instance Running Namespace" + default = "../../helm_charts/dial" +} + +variable "DIAL_NAMESPACE" { + description = "DIAL Instance Running Namespace" + default = "knowlg-api" +} + +#FLINK +variable "flink_release_name" { + type = list(string) + description = "Flink helm release name." + default = ["search-indexer","audit-event-generator","asset-enrichment","post-publish-processor","dialcode-context-updater", "qrcode-image-generator","video-stream-generator","audit-history-indexer"] + # default = ["merged-pipeline"] +} + +variable "flink_namespace" { + type = string + description = "Flink namespace." + default = "knowlg-job" +} + +variable "flink_chart_path" { + type = string + description = "Flink chart path." + default = "../../helm_charts/flink" +} + +variable "flink_chart_install_timeout" { + type = number + description = "Flink chart install timeout." + default = 900 +} + +variable "flink_create_namespace" { + type = bool + description = "Create flink namespace." + default = true +} + +variable "flink_wait_for_jobs" { + type = bool + description = "Flink wait for jobs parameter." + default = false +} + +variable "flink_chart_template" { + type = string + description = "Flink chart values.yaml path." + default = "../terraform_helm_templates/flink.yaml.tfpl" +} + +variable "flink_kubernetes_service_name" { + type = string + description = "Flink kubernetes service name." 
+ default = "asset-enrichment-jobmanager" +} diff --git a/knowlg-automation/terraform/azure/virtual_network.tf b/knowlg-automation/terraform/azure/virtual_network.tf new file mode 100644 index 000000000..e617adbd4 --- /dev/null +++ b/knowlg-automation/terraform/azure/virtual_network.tf @@ -0,0 +1,10 @@ +resource "azurerm_virtual_network" "vnet" { + name = "${local.environment_name}" + location = var.location + resource_group_name = data.azurerm_resource_group.rg.name + address_space = var.vnet_cidr + tags = merge( + local.common_tags, + var.additional_tags + ) +} \ No newline at end of file diff --git a/knowlg-automation/terraform/create b/knowlg-automation/terraform/create new file mode 100644 index 000000000..7aa439520 --- /dev/null +++ b/knowlg-automation/terraform/create @@ -0,0 +1,16 @@ +echo "Enter the environment to deploy in: [Options azure/local]" +read x +echo "Deploying in $x" +terraform -chdir=$x init +terraform -chdir=$x apply --auto-approve +# if [ $x == "azure" ] +# then +# echo "$(terraform -chdir=$x output kube_config)" > $PWD/aks.yaml +# sed -i '' '1d; $d' $PWD/aks.yaml + +# fi +echo "$(terraform -chdir=$x output kube_config)" > $PWD/aks.yaml +sed -i '' '1d; $d' $PWD/aks.yaml +export KUBECONFIG=$PWD/aks.yaml +export KUBE_CONFIG_PATH=$PWD/aks.yaml +terraform -chdir=$x apply --auto-approve \ No newline at end of file diff --git a/knowlg-automation/terraform/destroy b/knowlg-automation/terraform/destroy new file mode 100644 index 000000000..396268045 --- /dev/null +++ b/knowlg-automation/terraform/destroy @@ -0,0 +1,5 @@ +echo "Enter the environment to deploy in: [Options azure/local]" +read x +echo "Terraform destroying $x infra" +terraform -chdir=$x destroy $1 +terraform -chdir=$x destroy $1 diff --git a/knowlg-automation/terraform/helm/kafka.tf b/knowlg-automation/terraform/helm/kafka.tf new file mode 100644 index 000000000..c0c21f87f --- /dev/null +++ b/knowlg-automation/terraform/helm/kafka.tf @@ -0,0 +1,36 @@ +resource "helm_release" "kafka" { 
+ name = var.kafka_release_name + chart = var.kafka_chart_path + namespace = var.kafka_namespace + create_namespace = var.kafka_create_namespace + dependency_update = var.kafka_chart_dependecy_update + wait_for_jobs = var.kafka_wait_for_jobs + values = [ + templatefile(var.kafka_chart_custom_values_yaml, + { + content_publish_topic = "${var.env}.${var.kafka_content_publish_topic}" + content_postpublish_topic = "${var.env}.${var.kafka_content_postpublish_topic}" + learning_job_request_topic = "${var.env}.${var.kafka_learning_job_request_topic}" + learning_graph_events_topic = "${var.env}.${var.kafka_learning_graph_events_topic}" + learning_events_failed_topic = "${var.env}.${var.kafka_learning_events_failed_topic}" + search_indexer_group_topic = "${var.env}.${var.kafka_search_indexer_group_topic}" + qrimage_request_topic = "${var.env}.${var.kafka_qrimage_request_topic}" + telemetry_raw_topic = "${var.env}.${var.kafka_telemetry_raw_topic}" + dialcode_context_job_request_topic = "${var.env}.${var.kafka_dialcode_context_job_request_topic}" + dialcode_context_job_request_failed_topic = "${var.env}.${var.kafka_dialcode_context_job_request_failed_topic}" + } + ) + ] +} + + + + + + + + + + + + diff --git a/knowlg-automation/terraform/helm/kafka_exporter.tf b/knowlg-automation/terraform/helm/kafka_exporter.tf new file mode 100644 index 000000000..626520cad --- /dev/null +++ b/knowlg-automation/terraform/helm/kafka_exporter.tf @@ -0,0 +1,15 @@ +resource "helm_release" "kafka_exporter" { + name = var.kafka_exporter_release_name + chart = var.kafka_exporter_chart_path + namespace = var.kafka_exporter_namespace + create_namespace = var.kafka_exporter_create_namespace + wait_for_jobs = var.kafka_exporter_wait_for_jobs + depends_on = [helm_release.druid_cluster,helm_release.kafka] + values = [ + templatefile(var.kafka_exporter_chart_template, + { + kafka_exporter_namespace = var.kafka_exporter_namespace + } + ) + ] +} \ No newline at end of file diff --git 
a/knowlg-automation/terraform/helm/kafka_exporter_vars.tf b/knowlg-automation/terraform/helm/kafka_exporter_vars.tf new file mode 100644 index 000000000..1ad0d6c13 --- /dev/null +++ b/knowlg-automation/terraform/helm/kafka_exporter_vars.tf @@ -0,0 +1,35 @@ +variable "kafka_exporter_release_name" { + type = string + description = "Kafka exporter helm release name." + default = "kafka-exporter" +} + +variable "kafka_exporter_namespace" { + type = string + description = "Kafka exporter namespace." + default = "kafka" +} + +variable "kafka_exporter_chart_path" { + type = string + description = "Kafka exporter chart path." + default = "../../helm_charts/kafka-exporter" +} + +variable "kafka_exporter_create_namespace" { + type = bool + description = "Create kafka exporter namespace." + default = true +} + +variable "kafka_exporter_wait_for_jobs" { + type = bool + description = "Kafka exporter wait for jobs parameter." + default = true +} + +variable "kafka_exporter_chart_template" { + type = string + default = "../../helm_charts/kafka-exporter/values.yaml" + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/helm/kafka_vars.tf b/knowlg-automation/terraform/helm/kafka_vars.tf new file mode 100644 index 000000000..f60314c9a --- /dev/null +++ b/knowlg-automation/terraform/helm/kafka_vars.tf @@ -0,0 +1,107 @@ +variable "kafka_release_name" { + type = string + description = "Kafka helm release name." + default = "kafka" +} + +variable "kafka_namespace" { + type = string + description = "Kafka namespace." + default = "knowlg-db" +} + +variable "kafka_chart_path" { + type = string + description = "Kafka chart path." + default = "../../helm_charts/kafka" +} + +variable "kafka_chart_install_timeout" { + type = number + description = "Kafka chart install timeout." + default = 3000 +} + +variable "kafka_create_namespace" { + type = bool + description = "Create kafka namespace." 
+ default = true +} + +variable "kafka_wait_for_jobs" { + type = bool + description = "Kafka wait for jobs parameter." + default = true +} + +variable "kafka_chart_custom_values_yaml" { + type = string + description = "Kafka chart values.yaml path." + default = "../../helm_charts/kafka/values.yaml" +} + +variable "kafka_chart_dependecy_update" { + type = bool + description = "Kafka chart dependency update." + default = true +} + +variable "kafka_content_publish_topic" { + type = string + description = "Kafka content publish topic." + default = "publish.job.request" +} + +variable "kafka_content_postpublish_topic" { + type = string + description = "Kafka content postpublish topic" + default = "content.postpublish.request" +} + +variable "kafka_learning_job_request_topic" { + type = string + description = "Kafka learning_job_request topic" + default = "learning_job_request" +} + +variable "kafka_learning_graph_events_topic" { + type = string + description = "Kafka learning.graph.events topic" + default = "learning.graph.events" +} + +variable "kafka_learning_events_failed_topic" { + type = string + description = "Kafka learning.events.failed topic" + default = "learning.events.failed" +} + +variable "kafka_search_indexer_group_topic" { + type = string + description = "Kafka search-indexer-group topic" + default = "search-indexer-group" +} + +variable "kafka_qrimage_request_topic" { + type = string + description = "Kafka qrimage.request topic" + default = "qrimage.request" +} + +variable "kafka_telemetry_raw_topic" { + type = string + description = "Kafka telemetry.raw topic" + default = "telemetry.raw" +} + +variable "kafka_dialcode_context_job_request_topic" { + type = string + description = "Kafka dialcode.context.job.request topic" + default = "dialcode.context.job.request" +} + +variable "kafka_dialcode_context_job_request_failed_topic" { + type = string + description = "Kafka dialcode.context.job.request.failed topic" + default = "dialcode.context.job.request.failed" 
+} \ No newline at end of file diff --git a/knowlg-automation/terraform/local/cassandra-provision.tf b/knowlg-automation/terraform/local/cassandra-provision.tf new file mode 100644 index 000000000..6a753a42b --- /dev/null +++ b/knowlg-automation/terraform/local/cassandra-provision.tf @@ -0,0 +1,11 @@ +resource "helm_release" "cassandra" { + name = "cassandra" + chart = var.CASSANDRA_CHART + namespace = var.CASSANDRA_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} + \ No newline at end of file diff --git a/knowlg-automation/terraform/local/content-provision.tf b/knowlg-automation/terraform/local/content-provision.tf new file mode 100644 index 000000000..6d4ead915 --- /dev/null +++ b/knowlg-automation/terraform/local/content-provision.tf @@ -0,0 +1,11 @@ +resource "helm_release" "content" { + name = "content" + chart = var.CONTENT_CHART + namespace = var.CONTENT_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} + \ No newline at end of file diff --git a/knowlg-automation/terraform/local/dial-provision.tf b/knowlg-automation/terraform/local/dial-provision.tf new file mode 100644 index 000000000..f26fe3c67 --- /dev/null +++ b/knowlg-automation/terraform/local/dial-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "dial" { + name = "dial" + chart = var.DIAL_CHART + namespace = var.DIAL_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/local/elasticsearch-provision.tf b/knowlg-automation/terraform/local/elasticsearch-provision.tf new file mode 100644 index 000000000..f2e51fb5b --- /dev/null +++ b/knowlg-automation/terraform/local/elasticsearch-provision.tf @@ -0,0 +1,11 @@ + +resource "helm_release" "elasticsearch" { + name = "elasticsearch" + chart = 
var.ELASTICSEARCH_CHART + namespace = var.ELASTICSEARCH_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} diff --git a/knowlg-automation/terraform/local/flink-provision.tf b/knowlg-automation/terraform/local/flink-provision.tf new file mode 100644 index 000000000..5078c8319 --- /dev/null +++ b/knowlg-automation/terraform/local/flink-provision.tf @@ -0,0 +1,9 @@ +resource "helm_release" "flink" { + name = var.flink_release_name + chart = var.flink_chart_path + namespace = "${var.flink_namespace}" + create_namespace = var.flink_create_namespace + depends_on = [helm_release.kafka] + wait_for_jobs = var.flink_wait_for_jobs + timeout = var.flink_chart_install_timeout +} \ No newline at end of file diff --git a/knowlg-automation/terraform/local/kafka-provision.tf b/knowlg-automation/terraform/local/kafka-provision.tf new file mode 100644 index 000000000..6c36803dd --- /dev/null +++ b/knowlg-automation/terraform/local/kafka-provision.tf @@ -0,0 +1,72 @@ + +provider "helm" { + kubernetes { + config_path = var.kind_cluster_config_path + config_context = var.kube_config_context + } +} + +resource "helm_release" "kafka" { + name = "kafka" + chart = var.KAFKA_CHART + namespace = var.KAFKA_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + values = [ + templatefile("../../helm_charts/kafka/values.yaml", + { + #kafka_namespace: "kafka", + # kafka_image_repository: "bitnami/kafka" + # kafka_image_tag: "2.8.1-debian-10-r31" + # kafka_delete_topic_enable: true + # kafka_replica_count: 1 + # # Kubernetes Service type for external access. 
It can be NodePort or LoadBalancer + # # service_type: "ClusterIP" + # service_type: "NodePort" + # service_port: 9092 + # # PV config + # kafka_persistence_size: "2Gi" + # #Zookeeper configs + # zookeeper_enabled: true + # zookeeper_heapsize: 256 + # zookeeper_replica_count: 1 + content_publish_topic = "dev.publish.job.request" + content_postpublish_topic = "dev.content.postpublish.request" + learning_job_request_topic = "dev.learning.job.request" + learning_graph_events_topic = "dev.learning.graph.events" + learning_events_failed_topic = "dev.learning.events.failed" + search_indexer_group_topic = "dev.search-indexer-group" + qrimage_request_topic = "dev.qrimage.request" + telemetry_raw_topic = "dev.telemetry.raw" + dialcode_context_job_request_topic = "dev.dialcode.context.job.request" + dialcode_context_job_request_failed_topic = "dev.dialcode.context.job.request.failed" + } + ) + ] +} + +# data "kubernetes_service" "kafka" { +# metadata { +# namespace = "kafka" +# name = "kafka" +# } +# depends_on = [kind_cluster.one-click, helm_release.kafka] +# } + +# data "kubernetes_service" "zookeeper" { +# metadata { +# namespace = "kafka" +# name = "kafka-zookeeper" +# } +# depends_on = [kind_cluster.one-click, helm_release.kafka] +# } + +# output "kafka-service-ip" { +# value = data.kubernetes_service.kafka.spec.0.cluster_ip +# } + +# output "zookeeper-service-ip" { +# value = data.kubernetes_service.zookeeper.spec.0.cluster_ip +# } diff --git a/knowlg-automation/terraform/local/learning-provision.tf b/knowlg-automation/terraform/local/learning-provision.tf new file mode 100644 index 000000000..4c0740844 --- /dev/null +++ b/knowlg-automation/terraform/local/learning-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "learning" { + name = "learning" + chart = var.LEARNING_CHART + namespace = var.LEARNING_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} \ No newline at end of file diff 
--git a/knowlg-automation/terraform/local/main.tf b/knowlg-automation/terraform/local/main.tf new file mode 100644 index 000000000..c9d4368c1 --- /dev/null +++ b/knowlg-automation/terraform/local/main.tf @@ -0,0 +1,51 @@ +provider "kind" { +} + +resource "kind_cluster" "one-click" { + name = var.kind_cluster_name + kubeconfig_path = pathexpand(var.kind_cluster_config_path) + wait_for_ready = true + + kind_config { + kind = "Cluster" + api_version = "kind.x-k8s.io/v1alpha4" + + node { + role = "control-plane" + + kubeadm_config_patches = [ + "kind: InitConfiguration\nnodeRegistration:\n kubeletExtraArgs:\n node-labels: \"ingress-ready=true\"\n" + ] + extra_port_mappings { + container_port = 80 + host_port = 80 + } + extra_port_mappings { + container_port = 443 + host_port = 443 + } + } + + node { + role = "worker" + kubeadm_config_patches = [ + "kind: InitConfiguration\nnodeRegistration:\n kubeletExtraArgs:\n node-labels: \"worker-node=true\"\n" + ] + } + + node { + role = "worker" + kubeadm_config_patches = [ + "kind: InitConfiguration\nnodeRegistration:\n kubeletExtraArgs:\n node-labels: \"worker-node=true\"\n" + ] + } + + node { + role = "worker" + kubeadm_config_patches = [ + "kind: InitConfiguration\nnodeRegistration:\n kubeletExtraArgs:\n node-labels: \"worker-node=true\"\n" + ] + } + } + +} diff --git a/knowlg-automation/terraform/local/neo4j-provision.tf b/knowlg-automation/terraform/local/neo4j-provision.tf new file mode 100644 index 000000000..731495c19 --- /dev/null +++ b/knowlg-automation/terraform/local/neo4j-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "neo4j" { + name = "neo4j" + chart = var.NEO4J_CHART + namespace = var.NEO4J_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/local/redis-provision.tf b/knowlg-automation/terraform/local/redis-provision.tf new file mode 100644 index 
000000000..9d35a879b --- /dev/null +++ b/knowlg-automation/terraform/local/redis-provision.tf @@ -0,0 +1,10 @@ +resource "helm_release" "redis" { + name = "redis" + chart = var.REDIS_CHART + namespace = var.REDIS_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} \ No newline at end of file diff --git a/knowlg-automation/terraform/local/search-provision.tf b/knowlg-automation/terraform/local/search-provision.tf new file mode 100644 index 000000000..4dbed7228 --- /dev/null +++ b/knowlg-automation/terraform/local/search-provision.tf @@ -0,0 +1,10 @@ +# resource "helm_release" "search" { +# name = "search" +# chart = var.SEARCH_CHART +# namespace = var.SEARCH_NAMESPACE +# create_namespace = true +# dependency_update = true +# depends_on = [kind_cluster.one-click] +# wait_for_jobs = true + +# } \ No newline at end of file diff --git a/knowlg-automation/terraform/local/taxonomy-provision.tf b/knowlg-automation/terraform/local/taxonomy-provision.tf new file mode 100644 index 000000000..f123d208c --- /dev/null +++ b/knowlg-automation/terraform/local/taxonomy-provision.tf @@ -0,0 +1,11 @@ +resource "helm_release" "taxonomy" { + name = "taxonomy" + chart = var.TAXONOMY_CHART + namespace = var.TAXONOMY_NAMESPACE + create_namespace = true + dependency_update = true + depends_on = [kind_cluster.one-click] + wait_for_jobs = true + +} + \ No newline at end of file diff --git a/knowlg-automation/terraform/local/vars.tf b/knowlg-automation/terraform/local/vars.tf new file mode 100644 index 000000000..fe950fb7d --- /dev/null +++ b/knowlg-automation/terraform/local/vars.tf @@ -0,0 +1,195 @@ +variable "kind_cluster_name" { + type = string + description = "The name of the cluster." + default = "one-click" +} + +variable "kind_cluster_config_path" { + type = string + description = "The location where this cluster's kubeconfig will be saved to." 
+ default = "~/.kube/config" +} + +variable "kube_config_context" { + type = string + description = "The config context in kubeconfig" + default = "kind-one-click" + } + +variable "ingress_nginx_helm_version" { + type = string + description = "The Helm version for the nginx ingress controller." + default = "4.0.6" +} + +variable "ingress_nginx_namespace" { + type = string + description = "The nginx ingress namespace (it will be created if needed)." + default = "ingress-nginx" +} + + +variable "STAGE" { + description = "Deployment Stage" + default = "dev" +} + +#KAFKA +variable "KAFKA_CHART" { + description = "Kafka Instance Running Namespace" + default = "../../helm_charts/kafka" +} + +variable "KAFKA_NAMESPACE" { + description = "Kafka Instance Running Namespace" + default = "knowlg-db" +} + +#NEO4J +variable "NEO4J_CHART" { + description = "Neo4j Instance Running Namespace" + default = "../../helm_charts/neo4j" +} + +variable "NEO4J_NAMESPACE" { + description = "NEO4J Instance Running Namespace" + default = "knowlg-db" +} + +#CASSANDRA +variable "CASSANDRA_CHART" { + description = "Cassandra Instance Running Namespace" + default = "../../helm_charts/cassandra" +} + +variable "CASSANDRA_NAMESPACE" { + description = "CASSANDRA Instance Running Namespace" + default = "knowlg-db" +} + +#ELASTICSEARCH +variable "ELASTICSEARCH_CHART" { + description = "Elasticsearch Instance Running Namespace" + default = "../../helm_charts/elasticsearch" +} + +variable "ELASTICSEARCH_NAMESPACE" { + description = "Elasticsearch Instance Running Namespace" + default = "knowlg-db" +} + +#REDIS +variable "REDIS_CHART" { + description = "Redis Instance Running Namespace" + default = "../../helm_charts/redis" +} + +variable "REDIS_NAMESPACE" { + description = "Redis Instance Running Namespace" + default = "knowlg-db" +} + + +#TAXONOMY +variable "TAXONOMY_CHART" { + description = "Taxonomy Instance Running Namespace" + default = "../../helm_charts/taxonomy" +} + +variable 
"TAXONOMY_NAMESPACE" { + description = "Taxonomy Instance Running Namespace" + default = "knowlg-api" +} + +#CONTENT +variable "CONTENT_CHART" { + description = "Content Instance Running Namespace" + default = "../../helm_charts/content" +} + +variable "CONTENT_NAMESPACE" { + description = "Content Instance Running Namespace" + default = "knowlg-api" +} + +#SEARCH +variable "SEARCH_CHART" { + description = "Search Instance Running Namespace" + default = "../../helm_charts/search" +} + +variable "SEARCH_NAMESPACE" { + description = "Search Instance Running Namespace" + default = "knowlg-api" +} + +#LEARNING +variable "LEARNING_CHART" { + description = "Learning Instance Running Namespace" + default = "../../helm_charts/learning" +} + +variable "LEARNING_NAMESPACE" { + description = "Learning Instance Running Namespace" + default = "knowlg-api" +} + +#DIAL +variable "DIAL_CHART" { + description = "Dial Instance Running Namespace" + default = "../../helm_charts/dial" +} + +variable "DIAL_NAMESPACE" { + description = "Dial Instance Running Namespace" + default = "knowlg-api" +} + +#FLINK +variable "flink_release_name" { + type = string + description = "Flink helm release name." + default = "qrcode-image-generator" +} + +variable "flink_namespace" { + type = string + description = "Flink namespace." + default = "knowlg-job" +} + +variable "flink_chart_path" { + type = string + description = "Flink chart path." + default = "../../helm_charts/flink" +} + +variable "flink_chart_install_timeout" { + type = number + description = "Flink chart install timeout." + default = 900 +} + +variable "flink_create_namespace" { + type = bool + description = "Create flink namespace." + default = true +} + +variable "flink_wait_for_jobs" { + type = bool + description = "Flink wait for jobs paramater." + default = false +} + +variable "flink_chart_template" { + type = string + description = "Flink chart values.yaml path." 
+ default = "../terraform_helm_templates/flink.yaml.tfpl" +} + +variable "flink_kubernetes_service_name" { + type = string + description = "Flink kubernetes service name." + default = "qrcode-image-generator-jobmanager" +} \ No newline at end of file diff --git a/knowlg-automation/terraform/local/versions.tf b/knowlg-automation/terraform/local/versions.tf new file mode 100644 index 000000000..fb0b06c79 --- /dev/null +++ b/knowlg-automation/terraform/local/versions.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + kind = { + source = "kyma-incubator/kind" + version = "0.0.11" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = "2.12.1" + } + + helm = { + source = "hashicorp/helm" + version = "2.6.0" + } + + } + + required_version = ">= 1.0.0" +} diff --git a/knowlg-docker-image.sh b/knowlg-docker-image.sh new file mode 100755 index 000000000..2d51dc341 --- /dev/null +++ b/knowlg-docker-image.sh @@ -0,0 +1,23 @@ +#!/bin/bash +TAG=$1 +docker rmi $(docker images -a | grep taxonomy-service | awk '{print $1":"$2}') +docker rmi $(docker images -a | grep content-service | awk '{print $1":"$2}') +docker rmi $(docker images -a | grep search-service | awk '{print $1":"$2}') + +# Taxonomy Service +cd taxonomy-api/taxonomy-service +mvn play2:dist +cd ../.. +docker build -f build/taxonomy-service/Dockerfile -t taxonomy-service:${TAG} . + +# Content Service +cd content-api/content-service +mvn play2:dist +cd ../.. +docker build -f build/content-service/Dockerfile -t content-service:${TAG} . + +# Search Service +cd search-api/search-service +mvn play2:dist +cd ../.. +docker build -f build/search-service/Dockerfile -t search-service:${TAG} . 
diff --git a/kubernetes/content/Chart.yaml b/kubernetes/content/Chart.yaml new file mode 100644 index 000000000..dbf9d485c --- /dev/null +++ b/kubernetes/content/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: content +version: 0.1.0 diff --git a/kubernetes/content/content-service_application.conf b/kubernetes/content/content-service_application.conf new file mode 100644 index 000000000..34f18a850 --- /dev/null +++ b/kubernetes/content/content-service_application.conf @@ -0,0 +1,657 @@ +# This is the main configuration file for the application. +# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ +# Play uses HOCON as its configuration file format. HOCON has a number +# of advantages over other config formats, but there are two things that +# can be used when modifying settings. +# +# You can include other configuration files in this main application.conf file: +#include "extra-config.conf" +# +# You can declare variables and substitute for them: +#mykey = ${some.value} +# +# And if an environment variable exists when there is no other substitution, then +# HOCON will fall back to substituting environment variable: +#mykey = ${JAVA_HOME} + +## Akka +# https://www.playframework.com/documentation/latest/ScalaAkka#Configuration +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +# Play uses Akka internally and exposes Akka Streams and actors in Websockets and +# other streaming HTTP responses. +akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. 
+ # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". + task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /contentActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /channelActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /collectionActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + /healthActor + { + router = smallest-mailbox-pool + nr-of-instances = 5 + dispatcher = actors-dispatcher + } + /licenseActor + { + router = smallest-mailbox-pool + nr-of-instances = 2 + dispatcher = actors-dispatcher + } + } + } +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +# The secret key is used to sign Play's session cookie. +# This must be changed for production, but we don't recommend you change it in this file. 
+play.http.secret.key="jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" + +## Modules +# https://www.playframework.com/documentation/latest/Modules +# ~~~~~ +# Control which modules are loaded when Play starts. Note that modules are +# the replacement for "GlobalSettings", which are deprecated in 2.5.x. +# Please see https://www.playframework.com/documentation/latest/GlobalSettings +# for more information. +# +# You can also extend Play functionality by using one of the publically available +# Play modules: https://playframework.com/documentation/latest/ModuleDirectory +play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + enabled += modules.ContentModule + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" +} + +## IDE +# https://www.playframework.com/documentation/latest/IDE +# ~~~~~ +# Depending on your IDE, you can add a hyperlink for errors that will jump you +# directly to the code location in the IDE in dev mode. The following line makes +# use of the IntelliJ IDEA REST interface: +#play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# https://www.playframework.com/documentation/latest/ScalaI18N +# ~~~~~ +# Play comes with its own i18n settings, which allow the user's preferred language +# to map through to internal messages, or allow the language to be stored in a cookie. 
+play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. + # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. 
To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } +} + +play.server.http.idleTimeout = 60s +play.http.parser.maxDiskBuffer = 100MB +parsers.anyContent.maxLength = 100MB + +## Netty Provider +# https://www.playframework.com/documentation/latest/SettingsNetty +# ~~~~~ +play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. + transport = "native" +} + +## WS (HTTP Client) +# https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS +# ~~~~~ +# The HTTP client primarily used for REST APIs. The default client can be +# configured directly, but you can also create different client instances +# with customized settings. You must enable this by adding to build.sbt: +# +# libraryDependencies += ws // or javaWs if using java +# +play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. 
+ #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } +} + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. You must enable this by adding to build.sbt: +# +# libraryDependencies += cache +# +play.cache { +# If you want to bind several caches, you can bind the individually +#bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +## Filter Configuration +# https://www.playframework.com/documentation/latest/Filters +# ~~~~~ +# There are a number of built-in filters that can be enabled and configured +# to give Play greater security. +# +play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. + enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. + # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. 
+ # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } +} + +play.http.parser.maxMemoryBuffer = 50MB +akka.http.parsing.max-content-length = 50MB +schema.base_path="https://sunbirddevbbpublic.blob.core.windows.net/sunbird-content-staging-knowlg/schemas/local" + +# Cassandra Configuration +cassandra { + lp { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } + lpa { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } +} + +# Consistency Level for Multi Node Cassandra cluster +cassandra.lp.consistency.level=QUORUM + +collection { + keyspace: "dev_hierarchy_store" + cache.enable: true + image.migration.enabled: true +} + +content { + keyspace: "dev_content_store" + cache { + ttl: 86400 + enable: true + } + hierarchy { + removed_props_for_leafNodes: ["collections", "children", "usedByContent", "item_sets", "methods", "libraries", "editorState"] + } + # Added for supporting backward compatibility - remove in release-2.7.0 + tagging { + backward_enable: true + property: "subject,medium" + } + h5p { + library { + path: "https://sunbirdpublic.blob.core.windows.net/installation/h5p-standalone-1.3.4.zip" + } + } + copy { + invalid_statusList: ["Flagged","FlaggedDraft","FraggedReview","Retired", "Processing"] + origin_data: ["name", "author", "license", "organisation"] + props_to_remove: ["downloadUrl", "artifactUrl", "variants", "createdOn", "collections", "children", "lastUpdatedOn", "SYS_INTERNAL_LAST_UPDATED_ON", "versionKey", "s3Key", "status", "pkgVersion", "toc_url", "mimeTypesCount", "contentTypesCount", "leafNodesCount", "childNodes", "prevState", "lastPublishedOn", "flagReasons", "compatibilityLevel", "size", "publishChecklist", "publishComment", "LastPublishedBy", "rejectReasons", "rejectComment", "gradeLevel", "subject", "medium", "board", "topic", "purpose", "subtopic", "contentCredits", "owner", "collaborators", "creators", "contributors", "badgeAssertions", "dialcodes", "concepts", "keywords", "reservedDialcodes", "dialcodeRequired", 
"leafNodes", "sYS_INTERNAL_LAST_UPDATED_ON", "prevStatus", "lastPublishedBy", "streamingUrl", "boardIds", "gradeLevelIds", "subjectIds", "mediumIds", "topicsIds", "targetFWIds", "targetBoardIds", "targetGradeLevelIds", "targetSubjectIds", "targetMediumIds", "targetTopicIds", "se_boards", "se_subjects", "se_mediums", "se_gradeLevels", "se_topics", "se_FWIds", "se_boardIds", "se_subjectIds", "se_mediumIds", "se_gradeLevelIds", "se_topicIds"] + } + # Need to depreacte + media { + base.url: "https://dev.knowlg.sunbird.org" + } +} + +# Redis Configuration +redis { + host: "redis-db.knowlg-db.svc.cluster.local" + port: 6379 + maxConnections: 128 +} + + +#--Maximum Content Package File Size Limit in Bytes (50 MB) +MAX_CONTENT_PACKAGE_FILE_SIZE_LIMIT=157286400 + +#--Maximum Asset File Size Limit in Bytes (50 MB) +MAX_ASSET_FILE_SIZE_LIMIT=157286400 + +#--No of Retry While File Download Fails +RETRY_ASSET_DOWNLOAD_COUNT=1 + +#Current environment - need this property post cloud-store implementation +cloud_storage { + env: staging + content.folder: "content" + asset.folder: "assets" + artefact.folder: "artifact" + bundle.folder: "bundle" + media.folder: "media" + ecar.folder: "ecar_files" + upload.url.ttl: "54000" +} + +# Configuration +akka.request_timeout: 30 +environment.id: 20000000 +graph { + dir: "/data/graphDB" + ids: ["domain"] + passport.key.base: "jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" +} +route { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + bolt { + write { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + }, + read { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + } + } +} + +shard.id: 1 +platform { + auth.check.enabled: false + cache.ttl: 3600000 + language.codes: 
["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd", "pj"] +} + +#Top N Config for Search Telemetry +telemetry_env: staging + +installation.id: ekstep + +channel { + default: "in.ekstep" + fetch.suggested_frameworks: true +} + +languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" +} +# Need to depreacte +composite { + search { + url : "http://search-service.knowlg.svc.cluster.local:9000/v3/search" + } +} + +cloud_storage_type: "azure" +cloud_storage_key: "" +cloud_storage_secret: "" +cloud_storage_endpoint: "" +cloud_storage_container: "" + +# Google Drive APIKEY +learning_content_drive_apiKey = "" + +#Youtube Standard Licence Validation +learning.content.youtube.application.name="" +learning_content_youtube_apikey="" +youtube.license.regex.pattern=["\\?vi?=([^&]*)", "watch\\?.*v=([^&]*)", "(?:embed|vi?)/([^/?]*)","^([A-Za-z0-9\\-\\_]*)"] +learning.valid_license=["creativeCommon"] + +kafka { + urls : "kafka.knowlg-db.svc.cluster.local:9092" + topic.send.enable : true + topics.instruction : "" + publish.request.topic : "" +} + +# Need to depreacte +# DIAL Link Config +dial_service { + api { + base_url : "http://dial-service.knowlg.svc.cluster.local:9000" + auth_key : "" + search : "/dialcode/v3/search" + generate : "/dialcode/v3/generate" + } +} +# Need to depreacte +reserve_dialcode { + mimeType : ["application/vnd.ekstep.content-collection"] + max_count : 250 +} + +content.link_dialcode.validation=true +content.link_dialcode.max_limit=10 + +# Content Import API Config +import { + request_size_limit : 1000 + output_topic_name : "sunbirdstaging.knowlg.auto.creation.job.request" + required_props : ["name","code","mimeType","primaryCategory","artifactUrl","framework"] + remove_props : 
["downloadUrl","variants","previewUrl","streamingUrl","itemSets","level1Name","level1Concept","level2Name","level2Concept","level3Name","level3Concept","me_totalPlaySessionCount","me_totalTimeSpentInSec","me_totalSessionsCount","me_totalTimespent","me_totalInteractions","me_creationSessions","me_creationTimespent","me_averageInteractionsPerMin","me_averageSessionsPerDevice","me_totalDevices","me_averageTimespentPerSession","me_averageRating","me_totalDownloads","me_totalSideloads","me_totalRatings","me_totalComments","me_totalDialcode","me_totalDialcodeLinkedToContent","me_totalDialcodeAttached","me_hierarchyLevel","origin","originData","contentPolicyCheck","questions"] +} + +# Need to depreacte +contentTypeToPrimaryCategory { + ClassroomTeachingVideo: "Explanation Content" + ConceptMap: "Learning Resource" + Course: "Course" + CuriosityQuestionSet: "Practice Question Set" + eTextBook: "eTextbook" + ExperientialResource: "Learning Resource" + ExplanationResource: "Explanation Content" + ExplanationVideo: "Explanation Content" + FocusSpot: "Teacher Resource" + LearningOutcomeDefinition: "Teacher Resource" + MarkingSchemeRubric: "Teacher Resource" + PedagogyFlow: "Teacher Resource" + PracticeQuestionSet: "Practice Question Set" + PracticeResource: "Practice Question Set" + SelfAssess: "Course Assessment" + TeachingMethod: "Teacher Resource" + TextBook: "Digital Textbook" + Collection: "Content Playlist" + ExplanationReadingMaterial: "Learning Resource" + LearningActivity: "Learning Resource" + LessonPlan: "Content Playlist" + LessonPlanResource: "Teacher Resource" + PreviousBoardExamPapers: "Learning Resource" + TVLesson: "Explanation Content" + OnboardingResource: "Learning Resource" + ReadingMaterial: "Learning Resource" + Template: "Template" + Asset: "Asset" + Plugin: "Plugin" + LessonPlanUnit: "Lesson Plan Unit" + CourseUnit: "Course Unit" + TextBookUnit: "Textbook Unit" + Asset: "Certificate Template" +} +# Need to depreacte +resourceTypeToPrimaryCategory { + 
Learn: "Learning Resource" + Read: "Learning Resource" + Practice: "Learning Resource" + Teach: "Teacher Resource" + Test: "Learning Resource" + Experiment: "Learning Resource" + LessonPlan: "Teacher Resource" +} +# Need to depreacte +mimeTypeToPrimaryCategory { + "application/vnd.ekstep.h5p-archive": ["Learning Resource"] + "application/vnd.ekstep.html-archive": ["Learning Resource"] + "application/vnd.android.package-archive": ["Learning Resource"] + "video/webm": ["Explanation Content"] + "video/x-youtube": ["Explanation Content"] + "video/mp4": ["Explanation Content"] + "application/pdf": ["Learning Resource", "Teacher Resource"] + "application/epub": ["Learning Resource", "Teacher Resource"] + "application/vnd.ekstep.ecml-archive": ["Learning Resource", "Teacher Resource"] + "text/x-url": ["Learnin Resource", "Teacher Resource"] +} + +objectcategorydefinition.keyspace="dev_category_store" + +# Need to depreacte +#Default objectCategory mapping for channel +channel { + content{ + primarycategories=["Course Assessment", "eTextbook", "Explanation Content", "Learning Resource", "Practice Question Set", "Teacher Resource", "Exam Question"] + additionalcategories= ["Classroom Teaching Video", "Concept Map", "Curiosity Question Set", "Experiential Resource", "Explanation Video", "Focus Spot", "Learning Outcome Definition", "Lesson Plan", "Marking Scheme Rubric", "Pedagogy Flow", "Previous Board Exam Papers", "TV Lesson", "Textbook"] + } + collection { + primarycategories=["Content Playlist", "Course", "Digital Textbook", "Question paper"] + additionalcategories=["Textbook", "Lesson Plan"] + } + asset { + primarycategories=["Asset", "CertAsset", "Certificate Template"] + additionalcategories=[] + } +} +master.category.validation.enabled="Yes" + +# Need to depreacte +#Collection CSV +sunbird_dialcode_search_api="http://dial-service.knowlg.svc.cluster.local:9000/dialcode/v3/list" +framework_read_api_url="http://localhost:8080/learning-service/framework/v3/read" 
+sunbird_link_dial_code_api="http://content-service.knowlg.svc.cluster.local:9000/collection/v4/dialcode/link" + +# Need to depreacte +collection { + csv { + maxRows = 6500 + allowedContentTypes = ["TextBook","Collection","Course"] + maxFirstLevelUnits=30 + ttl = 86400 + maxUnitFieldLength=120 + maxDescFieldLength=1500 + contentTypeToUnitType = {"TextBook": "TextBookUnit", "Course": "CourseUnit", "Collection":"Collection"} + headers { + folderIdentifier = ["Folder Identifier"] + hierarchy = ["Level 1 Folder","Level 2 Folder","Level 3 Folder","Level 4 Folder"] + QR = ["QR Code Required?","QR Code"] + topics = ["Mapped Topics"] + collectionName = ["Collection Name"] + linkedContents = ["Linked Content 1","Linked Content 2","Linked Content 3","Linked Content 4","Linked Content 5","Linked Content 6","Linked Content 7","Linked Content 8","Linked Content 9","Linked Content 10","Linked Content 11","Linked Content 12","Linked Content 13","Linked Content 14","Linked Content 15","Linked Content 16","Linked Content 17","Linked Content 18","Linked Content 19","Linked Content 20","Linked Content 21","Linked Content 22","Linked Content 23","Linked Content 24","Linked Content 25","Linked Content 26","Linked Content 27","Linked Content 28","Linked Content 29","Linked Content 30"] + output = ["Collection Name","Folder Identifier","Level 1 Folder","Level 2 Folder","Level 3 Folder","Level 4 Folder","Description","Mapped Topics","Keywords","QR Code Required?","QR Code","Linked Content 1","Linked Content 2","Linked Content 3","Linked Content 4","Linked Content 5","Linked Content 6","Linked Content 7","Linked Content 8","Linked Content 9","Linked Content 10","Linked Content 11","Linked Content 12","Linked Content 13","Linked Content 14","Linked Content 15","Linked Content 16","Linked Content 17","Linked Content 18","Linked Content 19","Linked Content 20","Linked Content 21","Linked Content 22","Linked Content 23","Linked Content 24","Linked Content 25","Linked Content 26","Linked 
Content 27","Linked Content 28","Linked Content 29","Linked Content 30"] + sequence { + create = {"Level 1 Folder":0,"Level 2 Folder":1,"Level 3 Folder":2,"Level 4 Folder":3,"Description":4} + update = {"Collection Name":0,"Folder Identifier":1,"Level 1 Folder":2,"Level 2 Folder":3,"Level 3 Folder":4,"Level 4 Folder":5,"Description":6,"Mapped Topics":7,"Keywords":8,"QR Code Required?":9,"QR Code":10,"Linked Content 1":11,"Linked Content 2":12,"Linked Content 3":13,"Linked Content 4":14,"Linked Content 5":15,"Linked Content 6":16,"Linked Content 7":17,"Linked Content 8":18,"Linked Content 9":19,"Linked Content 10":20,"Linked Content 11":21,"Linked Content 12":22,"Linked Content 13":23,"Linked Content 14":24,"Linked Content 15":25,"Linked Content 16":26,"Linked Content 17":27,"Linked Content 18":28,"Linked Content 19":29,"Linked Content 20":30,"Linked Content 21":31,"Linked Content 22":32,"Linked Content 23":33,"Linked Content 24":34,"Linked Content 25":35,"Linked Content 26":36,"Linked Content 27":37,"Linked Content 28":38,"Linked Content 29":39,"Linked Content 30":40} + linkedContents = {"Linked Content 1":0,"Linked Content 2":1,"Linked Content 3":2,"Linked Content 4":3,"Linked Content 5":4,"Linked Content 6":5,"Linked Content 7":6,"Linked Content 8":7,"Linked Content 9":8,"Linked Content 10":9,"Linked Content 11":10,"Linked Content 12":11,"Linked Content 13":12,"Linked Content 14":13,"Linked Content 15":14,"Linked Content 16":15,"Linked Content 17":16,"Linked Content 18":17,"Linked Content 19":18,"Linked Content 20":19,"Linked Content 21":20,"Linked Content 22":21,"Linked Content 23":22,"Linked Content 24":23,"Linked Content 25":24,"Linked Content 26":25,"Linked Content 27":26,"Linked Content 28":27,"Linked Content 29":28,"Linked Content 30":29} + } + } + mandatory { + create = ["Level 1 Folder"] + update = ["Collection Name","Folder Identifier"] + } + } +} + +plugin.media.base.url="https://dev.knowlg.sunbird.org" + +cloudstorage { + 
metadata.replace_absolute_path=true + relative_path_prefix=CONTENT_STORAGE_BASE_PATH + metadata.list=["appIcon", "artifactUrl", "posterImage", "previewUrl", "thumbnail", "assetsMap", "certTemplate", "itemSetPreviewUrl", "grayScaleAppIcon", "sourceURL", "variants", "downloadUrl", "streamingUrl", "toc_url", "data", "question", "solutions", "editorState", "media", "pdfUrl", "transcripts"] + read_base_path="https://sunbirddevbbpublic.blob.core.windows.net" + write_base_path=["https://sunbirdstagingpublic.blob.core.windows.net", "https://sunbirddevbbpublic.blob.core.windows.net"] +} \ No newline at end of file diff --git a/kubernetes/content/content-service_logback.xml b/kubernetes/content/content-service_logback.xml new file mode 100644 index 000000000..3b3a26ecc --- /dev/null +++ b/kubernetes/content/content-service_logback.xml @@ -0,0 +1,30 @@ + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + + diff --git a/kubernetes/content/templates/deployment.yaml b/kubernetes/content/templates/deployment.yaml new file mode 100644 index 000000000..947765d55 --- /dev/null +++ b/kubernetes/content/templates/deployment.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.namespace }} + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: {{ .Values.replicaCount }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.strategy.maxsurge }} + maxUnavailable: {{ .Values.strategy.maxunavailable }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + volumes: + - name: {{ .Chart.Name }}-config + configMap: + name: {{ .Chart.Name }}-config + - name: {{ .Chart.Name }}-xml-config + configMap: + name: {{ .Chart.Name }}-xml-config + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Never + env: + - name: JAVA_OPTIONS + value: {{ .Values.env.javaoptions | 
quote }} + - name: _JAVA_OPTIONS + value: -Dlog4j2.formatMsgNoLookups=true + envFrom: + - configMapRef: + name: {{ .Chart.Name }}-config + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: + - containerPort: {{ .Values.network.port }} + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + {{- end }} + volumeMounts: + - name: {{ .Chart.Name }}-config + mountPath: /home/sunbird/content-service-1.0-SNAPSHOT/config/application.conf + subPath: content-service_application.conf + - name: {{ .Chart.Name }}-xml-config + mountPath: /home/sunbird/content-service-1.0-SNAPSHOT/config/logback.xml + subPath: content-service_logback.xml + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-service + namespace: {{ .Values.namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - name: http-{{ .Chart.Name }} + protocol: TCP + port: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} diff --git a/kubernetes/content/values.yaml b/kubernetes/content/values.yaml new file mode 100644 index 000000000..cce4738a7 --- /dev/null +++ b/kubernetes/content/values.yaml @@ -0,0 +1,22 @@ +namespace: knowlg-api + +env: + javaoptions: -Xmx600m + +replicaCount: 1 +repository: content-service +image_tag: R5.2.0 +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1 + memory: 1024Mi +network: + port: 9000 + targetport: 9000 +strategy: + type: RollingUpdate + maxsurge: 25% + maxunavailable: 25% \ No newline at end of file diff --git a/kubernetes/sunbird-dbs/cassandra/Chart.yaml b/kubernetes/sunbird-dbs/cassandra/Chart.yaml new file mode 100644 index 000000000..4d9cd6c3a --- /dev/null +++ b/kubernetes/sunbird-dbs/cassandra/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: cassandra +version: 0.1.0 diff --git 
a/kubernetes/sunbird-dbs/cassandra/templates/deployment.yaml b/kubernetes/sunbird-dbs/cassandra/templates/deployment.yaml new file mode 100644 index 000000000..4d00c736b --- /dev/null +++ b/kubernetes/sunbird-dbs/cassandra/templates/deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: cassandra:3.11.11 + ports: + - containerPort: {{ .Values.network.port }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-db + namespace: {{ .Values.namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - port: {{ .Values.network.port }} + selector: + app: {{ .Chart.Name }} \ No newline at end of file diff --git a/kubernetes/sunbird-dbs/cassandra/values.yaml b/kubernetes/sunbird-dbs/cassandra/values.yaml new file mode 100644 index 000000000..ef6b4aad2 --- /dev/null +++ b/kubernetes/sunbird-dbs/cassandra/values.yaml @@ -0,0 +1,4 @@ +namespace: knowlg-db +replicaCount: 1 +network: + port: 9042 \ No newline at end of file diff --git a/kubernetes/sunbird-dbs/neo4j/Chart.yaml b/kubernetes/sunbird-dbs/neo4j/Chart.yaml new file mode 100644 index 000000000..e1d41363b --- /dev/null +++ b/kubernetes/sunbird-dbs/neo4j/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: neo4j +version: 0.1.0 diff --git a/kubernetes/sunbird-dbs/neo4j/templates/deployment.yaml b/kubernetes/sunbird-dbs/neo4j/templates/deployment.yaml new file mode 100644 index 000000000..c16035936 --- /dev/null +++ b/kubernetes/sunbird-dbs/neo4j/templates/deployment.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.namespace }} +spec: + replicas: {{ 
.Values.replicaCount }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: {{ .Values.image }} + imagePullPolicy: Never + env: ## Env variables to pass + - name: NEO4J_dbms_security_auth__enabled + value: "false" + ports: + - containerPort: 7474 + name: cypher-port + - containerPort: 7687 + name: bolt-port-1 + - containerPort: 8687 + name: bolt-port-2 + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-db + namespace: {{ .Values.namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - name: cypher-shell-port + port: 7474 + targetPort: 7474 + - name: bolt-port-1 + port: 7687 + targetPort: 7687 + - name: bolt-port-2 + port: 8687 + targetPort: 8687 + selector: + app: {{ .Chart.Name }} \ No newline at end of file diff --git a/kubernetes/sunbird-dbs/neo4j/values.yaml b/kubernetes/sunbird-dbs/neo4j/values.yaml new file mode 100644 index 000000000..d96c03466 --- /dev/null +++ b/kubernetes/sunbird-dbs/neo4j/values.yaml @@ -0,0 +1,18 @@ +namespace: knowlg-db +replicaCount: 1 +image: neo4j:3.3.0 +authEnabled: false +network: + port: 7474 +neo4j: + name: neo4j + resources: + cpu: "0.5" + memory: "2Gi" + +volumes: + data: + mode: defaultStorageClass + defaultStorageClass: + requests: + storage: 2Gi \ No newline at end of file diff --git a/kubernetes/sunbird-dbs/redis/Chart.yaml b/kubernetes/sunbird-dbs/redis/Chart.yaml new file mode 100644 index 000000000..dce1bc61d --- /dev/null +++ b/kubernetes/sunbird-dbs/redis/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: redis +version: 0.1.0 diff --git a/kubernetes/sunbird-dbs/redis/templates/deployment.yaml b/kubernetes/sunbird-dbs/redis/templates/deployment.yaml new file mode 100644 index 000000000..d57727710 --- /dev/null +++ b/kubernetes/sunbird-dbs/redis/templates/deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.namespace }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + containers: + - name: {{ .Chart.Name }} + image: redis + ports: + - containerPort: {{ .Values.network.port }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-db + namespace: {{ .Values.namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - port: {{ .Values.network.port }} + targetPort: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} \ No newline at end of file diff --git a/kubernetes/sunbird-dbs/redis/values.yaml b/kubernetes/sunbird-dbs/redis/values.yaml new file mode 100644 index 000000000..209d4b932 --- /dev/null +++ b/kubernetes/sunbird-dbs/redis/values.yaml @@ -0,0 +1,5 @@ +namespace: knowlg-db +replicaCount: 1 +network: + port: 6379 + targetport: 6379 \ No newline at end of file diff --git a/kubernetes/taxonomy/Chart.yaml b/kubernetes/taxonomy/Chart.yaml new file mode 100644 index 000000000..8462df87c --- /dev/null +++ b/kubernetes/taxonomy/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for Kubernetes +name: taxonomy +version: 0.1.0 diff --git a/kubernetes/taxonomy/taxonomy-service_application.conf b/kubernetes/taxonomy/taxonomy-service_application.conf new file mode 100644 index 000000000..b48fa7441 --- /dev/null +++ b/kubernetes/taxonomy/taxonomy-service_application.conf @@ -0,0 +1,399 @@ +# This is the main configuration file for the application. +# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ +# Play uses HOCON as its configuration file format. HOCON has a number +# of advantages over other config formats, but there are two things that +# can be used when modifying settings. 
+# +# You can include other configuration files in this main application.conf file: +#include "extra-config.conf" +# +# You can declare variables and substitute for them: +#mykey = ${some.value} +# +# And if an environment variable exists when there is no other substitution, then +# HOCON will fall back to substituting environment variable: +#mykey = ${JAVA_HOME} + +## Akka +# https://www.playframework.com/documentation/latest/ScalaAkka#Configuration +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +# Play uses Akka internally and exposes Akka Streams and actors in Websockets and +# other streaming HTTP responses. +akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true + default-dispatcher { + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 32.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". 
+ task-peeking-mode = "FIFO" + } + } + actors-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 32.0 + parallelism-max = 64 + } + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 1 + } + actor { + deployment { + /contentActor + { + router = smallest-mailbox-pool + nr-of-instances = 10 + dispatcher = actors-dispatcher + } + } + } +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +# The secret key is used to sign Play's session cookie. +# This must be changed for production, but we don't recommend you change it in this file. +play.http.secret.key="jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" + +## Modules +# https://www.playframework.com/documentation/latest/Modules +# ~~~~~ +# Control which modules are loaded when Play starts. Note that modules are +# the replacement for "GlobalSettings", which are deprecated in 2.5.x. +# Please see https://www.playframework.com/documentation/latest/GlobalSettings +# for more information. +# +# You can also extend Play functionality by using one of the publically available +# Play modules: https://playframework.com/documentation/latest/ModuleDirectory +play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + enabled += modules.TaxonomyModule + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" +} + +## IDE +# https://www.playframework.com/documentation/latest/IDE +# ~~~~~ +# Depending on your IDE, you can add a hyperlink for errors that will jump you +# directly to the code location in the IDE in dev mode. 
The following line makes +# use of the IntelliJ IDEA REST interface: +#play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# https://www.playframework.com/documentation/latest/ScalaI18N +# ~~~~~ +# Play comes with its own i18n settings, which allow the user's preferred language +# to map through to internal messages, or allow the language to be stored in a cookie. +play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. + # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. 
+ # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + } +} + +play.server.http.idleTimeout = 60s +play.http.parser.maxDiskBuffer = 10MB +parsers.anyContent.maxLength = 10MB + +## Netty Provider +# https://www.playframework.com/documentation/latest/SettingsNetty +# ~~~~~ +play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. 
+ transport = "native" +} + +## WS (HTTP Client) +# https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS +# ~~~~~ +# The HTTP client primarily used for REST APIs. The default client can be +# configured directly, but you can also create different client instances +# with customized settings. You must enable this by adding to build.sbt: +# +# libraryDependencies += ws // or javaWs if using java +# +play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } +} + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. You must enable this by adding to build.sbt: +# +# libraryDependencies += cache +# +play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +## Filter Configuration +# https://www.playframework.com/documentation/latest/Filters +# ~~~~~ +# There are a number of built-in filters that can be enabled and configured +# to give Play greater security. +# +play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. + enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. 
+ # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. + #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. 
+ #allowed = [".example.com", "localhost:9000"] + } +} + +play.http.parser.maxMemoryBuffer = 50MB +akka.http.parsing.max-content-length = 50MB +schema.base_path="https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/local" + +# Cassandra Configuration +cassandra { + lp { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } + lpa { + connection: "cassandra-db.knowlg-db.svc.cluster.local:9042" + } +} + +# Redis Configuration +redis { + host: "redis-db.knowlg-db.svc.cluster.local" + port: 6379 + maxConnections: 128 +} + +# Configuration +akka.request_timeout: 30 +environment.id: 10000000 +graph { + dir: "/data/graphDB" + ids: ["domain"] + passport.key.base: "jd5ECm/o0BXwQCe8PfZY1NoUkB9HN41QjA80p22MKyRIcP5RW4qHw8sZztCzv87M" +} +route { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + bolt { + write { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + }, + read { + domain: "bolt://neo4j-db.knowlg-db.svc.cluster.local:7687" + all: "bolt://neo4j-db.knowlg-db.svc.cluster.local:8687" + } + } +} + +shard.id: 1 +platform { + auth.check.enabled: false + cache.ttl: 3600000 +} + +# Cloud Storage Config +cloud_storage_type: "azure" +azure_storage_key: "" +azure_storage_secret: "" +azure_storage_container: "" + +installation.id: ekstep + +kafka { + urls : "kafka.knowlg-db.svc.cluster.local:9092" +} + +channel { + default: "org.sunbird" +} + +languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" +} +objectcategorydefinition.keyspace="dev_category_store" + +# Framework master category validation Supported values are Yes/No +master.category.validation.enabled="Yes" diff --git a/kubernetes/taxonomy/taxonomy-service_logback.xml b/kubernetes/taxonomy/taxonomy-service_logback.xml new file mode 
100644 index 000000000..21cb53d9e --- /dev/null +++ b/kubernetes/taxonomy/taxonomy-service_logback.xml @@ -0,0 +1,31 @@ + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/kubernetes/taxonomy/templates/deployment.yaml b/kubernetes/taxonomy/templates/deployment.yaml new file mode 100644 index 000000000..064f43fb2 --- /dev/null +++ b/kubernetes/taxonomy/templates/deployment.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Chart.Name }} + namespace: {{ .Values.namespace }} + annotations: + reloader.stakater.com/auto: "true" +spec: + replicas: {{ .Values.replicaCount }} + strategy: + rollingUpdate: + maxSurge: {{ .Values.strategy.maxsurge }} + maxUnavailable: {{ .Values.strategy.maxunavailable }} + selector: + matchLabels: + app: {{ .Chart.Name }} + template: + metadata: + labels: + app: {{ .Chart.Name }} + spec: + volumes: + - name: {{ .Chart.Name }}-config + configMap: + name: {{ .Chart.Name }}-config + - name: {{ .Chart.Name }}-xml-config + configMap: + name: {{ .Chart.Name }}-xml-config + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.repository }}:{{ .Values.image_tag }}" + imagePullPolicy: Never + env: + - name: JAVA_OPTIONS + value: {{ .Values.env.javaoptions | quote }} + - name: _JAVA_OPTIONS + value: -Dlog4j2.formatMsgNoLookups=true + envFrom: + - configMapRef: + name: {{ .Chart.Name }}-config + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: + - containerPort: {{ .Values.network.port }} + {{- if .Values.healthcheck }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 10 }} + {{- end }} + volumeMounts: + - name: {{ .Chart.Name }}-config + mountPath: /home/sunbird/taxonomy-service-1.0-SNAPSHOT/config/application.conf + subPath: taxonomy-service_application.conf + - name: {{ .Chart.Name }}-xml-config + mountPath: 
/home/sunbird/taxonomy-service-1.0-SNAPSHOT/config/logback.xml + subPath: taxonomy-service_logback.xml + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ .Chart.Name }}-service + namespace: {{ .Values.namespace }} + labels: + app: {{ .Chart.Name }} +spec: + ports: + - name: http-{{ .Chart.Name }} + protocol: TCP + port: {{ .Values.network.targetport }} + selector: + app: {{ .Chart.Name }} diff --git a/kubernetes/taxonomy/values.yaml b/kubernetes/taxonomy/values.yaml new file mode 100644 index 000000000..76033d47e --- /dev/null +++ b/kubernetes/taxonomy/values.yaml @@ -0,0 +1,28 @@ +### Default variable file for taxonomy-service ### + +namespace: knowlg-api + +env: + javaoptions: -Xmx600m + +replicaCount: 1 +repository: taxonomy-service +image_tag: R5.2.0 +resources: + requests: + cpu: 100m + memory: 100Mi + limits: + cpu: 1 + memory: 1024Mi +network: + port: 9006 + targetport: 9000 +strategy: + type: RollingUpdate + maxsurge: 25% + maxunavailable: 25% + + + + diff --git a/local-setup.sh b/local-setup.sh new file mode 100755 index 000000000..a65dfad9b --- /dev/null +++ b/local-setup.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +mkdir -p ~/sunbird-dbs/neo4j ~/sunbird-dbs/cassandra ~/sunbird-dbs/redis ~/sunbird-dbs/es ~/sunbird-dbs/kafka +export sunbird_dbs_path=~/sunbird-dbs + +docker compose up diff --git a/ontology-engine/graph-common/pom.xml b/ontology-engine/graph-common/pom.xml index e18eb0beb..5c90022aa 100644 --- a/ontology-engine/graph-common/pom.xml +++ b/ontology-engine/graph-common/pom.xml @@ -37,7 +37,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/ontology-engine/graph-common/src/test/resources/application.conf b/ontology-engine/graph-common/src/test/resources/application.conf index dfa892deb..6592a72e8 100644 --- a/ontology-engine/graph-common/src/test/resources/application.conf +++ b/ontology-engine/graph-common/src/test/resources/application.conf @@ -464,7 +464,7 @@ max.thumbnail.size.pixels=150 
play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" //schema.base_path = "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/" collection.image.migration.enabled=true diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/GraphService.scala b/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/GraphService.scala deleted file mode 100644 index f7986ffbc..000000000 --- a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/GraphService.scala +++ /dev/null @@ -1,74 +0,0 @@ -package org.sunbird.graph - -import java.util - -import org.sunbird.common.dto.{Property, Request, Response} -import org.sunbird.graph.dac.model.{Node, SearchCriteria} -import org.sunbird.graph.external.ExternalPropsManager -import org.sunbird.graph.external.store.ExternalStore -import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} - -import scala.concurrent.{ExecutionContext, Future} - -class GraphService { - implicit val ec: ExecutionContext = ExecutionContext.global - - def addNode(graphId: String, node: Node): Future[Node] = { - NodeAsyncOperations.addNode(graphId, node) - } - - def upsertNode(graphId: String, node: Node, request: Request): Future[Node] = { - NodeAsyncOperations.upsertNode(graphId, node, request) - } - - def upsertRootNode(graphId: String, request: Request): Future[Node] = { - NodeAsyncOperations.upsertRootNode(graphId, request) - } - - def getNodeByUniqueId(graphId: String, nodeId: String, getTags: Boolean, request: Request): Future[Node] = { - SearchAsyncOperations.getNodeByUniqueId(graphId, nodeId, getTags, request) - } - - def deleteNode(graphId: String, nodeId: String, request: Request): Future[java.lang.Boolean] = { - NodeAsyncOperations.deleteNode(graphId, nodeId, request) - } - - def getNodeProperty(graphId: String, 
identifier: String, property: String): Future[Property] = { - SearchAsyncOperations.getNodeProperty(graphId, identifier, property) - } - def updateNodes(graphId: String, identifiers:util.List[String], metadata:util.Map[String,AnyRef]):Future[util.Map[String, Node]] = { - NodeAsyncOperations.updateNodes(graphId, identifiers, metadata) - } - - def getNodeByUniqueIds(graphId:String, searchCriteria: SearchCriteria): Future[util.List[Node]] = { - SearchAsyncOperations.getNodeByUniqueIds(graphId, searchCriteria) - } - - def readExternalProps(request: Request, fields: List[String]): Future[Response] = { - ExternalPropsManager.fetchProps(request, fields) - } - - def saveExternalProps(request: Request): Future[Response] = { - ExternalPropsManager.saveProps(request) - } - - def updateExternalProps(request: Request): Future[Response] = { - ExternalPropsManager.update(request) - } - - def deleteExternalProps(request: Request): Future[Response] = { - ExternalPropsManager.deleteProps(request) - } - def checkCyclicLoop(graphId:String, endNodeId: String, startNodeId: String, relationType: String) = { - Neo4JBoltSearchOperations.checkCyclicLoop(graphId, endNodeId, relationType, startNodeId) - } - - def removeRelation(graphId: String, relationMap: util.List[util.Map[String, AnyRef]]) = { - GraphAsyncOperations.removeRelation(graphId, relationMap) - } - - def createRelation(graphId: String, relationMap: util.List[util.Map[String, AnyRef]]) = { - GraphAsyncOperations.createRelation(graphId, relationMap) - } -} - diff --git a/ontology-engine/graph-core_2.11/pom.xml b/ontology-engine/graph-core_2.12/pom.xml similarity index 98% rename from ontology-engine/graph-core_2.11/pom.xml rename to ontology-engine/graph-core_2.12/pom.xml index 9a0f5f9b1..d85bea120 100644 --- a/ontology-engine/graph-core_2.11/pom.xml +++ b/ontology-engine/graph-core_2.12/pom.xml @@ -7,7 +7,7 @@ 1.0-SNAPSHOT org.sunbird - graph-core_2.11 + graph-core_2.12 1.0-SNAPSHOT @@ -94,7 +94,7 @@ net.alchim31.maven 
scala-maven-plugin - 4.4.0 + 3.2.2 ${scala.version} false diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala new file mode 100644 index 000000000..c1df3377a --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala @@ -0,0 +1,101 @@ +package org.sunbird.graph + +import org.sunbird.common.Platform +import org.sunbird.common.dto.{Property, Request, Response, ResponseHandler} +import org.sunbird.common.exception.ResponseCode +import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph} +import org.sunbird.graph.external.ExternalPropsManager +import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} +import org.sunbird.graph.util.CSPMetaUtil + +import java.lang +import scala.concurrent.{ExecutionContext, Future} + +class GraphService { + implicit val ec: ExecutionContext = ExecutionContext.global + val isrRelativePathEnabled: lang.Boolean = Platform.getBoolean("cloudstorage.metadata.replace_absolute_path", false) + + def addNode(graphId: String, node: Node): Future[Node] = { + if(isrRelativePathEnabled) { + val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) + node.setMetadata(metadata) + } + NodeAsyncOperations.addNode(graphId, node).map(resNode => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) + } + + def upsertNode(graphId: String, node: Node, request: Request): Future[Node] = { + if(isrRelativePathEnabled) { + val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) + node.setMetadata(metadata) + } + NodeAsyncOperations.upsertNode(graphId, node, request).map(resNode => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) + } + + def upsertRootNode(graphId: String, request: Request): Future[Node] = { + 
NodeAsyncOperations.upsertRootNode(graphId, request) + } + + def getNodeByUniqueId(graphId: String, nodeId: String, getTags: Boolean, request: Request): Future[Node] = { + SearchAsyncOperations.getNodeByUniqueId(graphId, nodeId, getTags, request).map(node => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(node) else node) + } + + def deleteNode(graphId: String, nodeId: String, request: Request): Future[java.lang.Boolean] = { + NodeAsyncOperations.deleteNode(graphId, nodeId, request) + } + + def getNodeProperty(graphId: String, identifier: String, property: String): Future[Property] = { + SearchAsyncOperations.getNodeProperty(graphId, identifier, property).map(property => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(property) else property) + } + def updateNodes(graphId: String, identifiers:java.util.List[String], metadata:java.util.Map[String,AnyRef]):Future[java.util.Map[String, Node]] = { + val updatedMetadata = if(isrRelativePathEnabled) CSPMetaUtil.updateRelativePath(metadata) else metadata + NodeAsyncOperations.updateNodes(graphId, identifiers, updatedMetadata) + } + + def getNodeByUniqueIds(graphId:String, searchCriteria: SearchCriteria): Future[java.util.List[Node]] = { + SearchAsyncOperations.getNodeByUniqueIds(graphId, searchCriteria).map(nodes => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(nodes) else nodes) + } + + def readExternalProps(request: Request, fields: List[String]): Future[Response] = { + ExternalPropsManager.fetchProps(request, fields).map(res => { + if(isrRelativePathEnabled && res.getResponseCode == ResponseCode.OK) { + val updatedResult = CSPMetaUtil.updateExternalAbsolutePath(res.getResult) + val response = ResponseHandler.OK() + response.putAll(updatedResult) + response + } else res}) + } + + def saveExternalProps(request: Request): Future[Response] = { + val externalProps: java.util.Map[String, AnyRef] = request.getRequest + val updatedExternalProps = if(isrRelativePathEnabled) 
CSPMetaUtil.saveExternalRelativePath(externalProps) else externalProps + request.setRequest(updatedExternalProps) + ExternalPropsManager.saveProps(request) + } + + def updateExternalProps(request: Request): Future[Response] = { + val externalProps: java.util.Map[String, AnyRef] = request.getRequest + val updatedExternalProps = if (isrRelativePathEnabled) CSPMetaUtil.updateExternalRelativePath(externalProps) else externalProps + request.setRequest(updatedExternalProps) + ExternalPropsManager.update(request) + } + + def deleteExternalProps(request: Request): Future[Response] = { + ExternalPropsManager.deleteProps(request) + } + def checkCyclicLoop(graphId:String, endNodeId: String, startNodeId: String, relationType: String) = { + Neo4JBoltSearchOperations.checkCyclicLoop(graphId, endNodeId, relationType, startNodeId) + } + + def removeRelation(graphId: String, relationMap: java.util.List[java.util.Map[String, AnyRef]]) = { + GraphAsyncOperations.removeRelation(graphId, relationMap) + } + + def createRelation(graphId: String, relationMap: java.util.List[java.util.Map[String, AnyRef]]) = { + GraphAsyncOperations.createRelation(graphId, relationMap) + } + + def getSubGraph(graphId: String, nodeId: String, depth: Int): Future[SubGraph] = { + GraphAsyncOperations.getSubGraph(graphId, nodeId, depth) + } +} + diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala similarity index 100% rename from ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala rename to ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/exception/GraphErrorCodes.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/exception/GraphErrorCodes.scala similarity index 100% rename from 
ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/exception/GraphErrorCodes.scala rename to ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/exception/GraphErrorCodes.scala diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/ExternalPropsManager.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/ExternalPropsManager.scala similarity index 100% rename from ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/ExternalPropsManager.scala rename to ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/ExternalPropsManager.scala diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala similarity index 95% rename from ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala rename to ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala index c9cb9f11e..60c52cf9b 100644 --- a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala @@ -28,7 +28,10 @@ class ExternalStore(keySpace: String , table: String , primaryKey: java.util.Lis import scala.collection.JavaConverters._ for ((key, value) <- request.asScala) { propsMapping.getOrElse(key, "") match { - case "blob" => insertQuery.value(key, QueryBuilder.fcall("textAsBlob", value)) + case "blob" => value match { + case value: String => insertQuery.value(key, QueryBuilder.fcall("textAsBlob", value)) + case _ => insertQuery.value(key, QueryBuilder.fcall("textAsBlob", JsonUtils.serialize(value))) + } case "string" => request.getOrDefault(key, "") match { case value: String => insertQuery.value(key, value) 
case _ => insertQuery.value(key, JsonUtils.serialize(request.getOrDefault(key, ""))) @@ -105,7 +108,8 @@ class ExternalStore(keySpace: String , table: String , primaryKey: java.util.Lis }) } val selectQuery = select.from(keySpace, table) - import scala.collection.JavaConversions._ + import scala.collection.JavaConverters._ + import scala.collection.convert.ImplicitConversions._ val clause: Clause = QueryBuilder.in(primaryKey.get(0), seqAsJavaList(identifiers)) selectQuery.where.and(clause) try { @@ -135,7 +139,7 @@ class ExternalStore(keySpace: String , table: String , primaryKey: java.util.Lis def delete(identifiers: List[String])(implicit ec: ExecutionContext): Future[Response] = { val delete = QueryBuilder.delete() - import scala.collection.JavaConversions._ + import scala.collection.JavaConverters._ val deleteQuery = delete.from(keySpace, table).where(QueryBuilder.in(primaryKey.get(0), seqAsJavaList(identifiers))) try { val session: Session = CassandraConnector.getSession diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStoreFactory.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStoreFactory.scala similarity index 100% rename from ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStoreFactory.scala rename to ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStoreFactory.scala diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala new file mode 100644 index 000000000..c8c4b51d3 --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala @@ -0,0 +1,176 @@ +package org.sunbird.graph.util + +import java.util + +import org.apache.commons.collections4.MapUtils +import org.apache.commons.lang3.StringUtils +import 
org.slf4j.LoggerFactory +import org.sunbird.common.dto.Property +import org.sunbird.common.{JsonUtils, Platform} +import org.sunbird.graph.dac.model.Node + +import scala.collection.JavaConverters._ +import scala.collection.immutable.Map + +object CSPMetaUtil { + private[this] val logger = LoggerFactory.getLogger(classOf[CSPMetaUtil]) + + def updateAbsolutePath(data: java.util.Map[String, AnyRef]): java.util.Map[String, AnyRef] = { + logger.info("CSPMetaUtil ::: updateAbsolutePath util.Map[String, AnyRef] ::: data before url replace :: " + data) + val relativePathPrefix: String = Platform.getString("cloudstorage.relative_path_prefix", "") + val cspMeta = Platform.getStringList("cloudstorage.metadata.list", new java.util.ArrayList[String]()).asScala.toList + val absolutePath = Platform.getString("cloudstorage.read_base_path", "") + java.io.File.separator + Platform.getString("cloud_storage_container", "") + val returnData = if (MapUtils.isNotEmpty(data)) { + val updatedMeta: java.util.Map[String, AnyRef] = new java.util.HashMap[String, AnyRef] + data.asScala.map(x => + if (cspMeta.contains(x._1)) { + x._2 match { + case value: String => + updatedMeta.put(x._1, value.replace(relativePathPrefix, absolutePath)) + case _ => updatedMeta.put(x._1, x._2) + } + } else updatedMeta.put(x._1, x._2) + ).asJava + updatedMeta + } else data + logger.info("CSPMetaUtil ::: updateAbsolutePath util.Map[String, AnyRef] ::: updateAbsolutePath returnData :: " + returnData) + returnData + } + + def updateAbsolutePath(node: Node): Node = { + val metadata = updateAbsolutePath(node.getMetadata) + node.setMetadata(metadata) + node + } + + def updateAbsolutePath(nodes: java.util.List[Node]): java.util.List[Node] = { + nodes.asScala.toList.map(node => { + updateAbsolutePath(node) + }).asJava + } + + def updateAbsolutePath(property: Property): Property = { + val relativePathPrefix: String = Platform.getString("cloudstorage.relative_path_prefix", "") + val cspMeta = 
Platform.getStringList("cloudstorage.metadata.list", new java.util.ArrayList[String]()) + val absolutePath = Platform.getString("cloudstorage.read_base_path", "") + java.io.File.separator + Platform.getString("cloud_storage_container", "") + if(cspMeta.contains(property.getPropertyName)) { + val value = property.getPropertyValue + value match { + case str: String => + property.setPropertyValue(str.replace(relativePathPrefix, absolutePath)) + case _ => + } + } + property + } + + def updateRelativePath(data: java.util.Map[String, AnyRef]): java.util.Map[String, AnyRef] = { + logger.info("CSPMetaUtil ::: updateRelativePath util.Map[String, AnyRef] ::: data before url replace :: " + data) + val relativePathPrefix: String = Platform.getString("cloudstorage.relative_path_prefix", "") + val cspMeta: java.util.List[String] = Platform.getStringList("cloudstorage.metadata.list", new java.util.ArrayList[String]()) + val validCSPSource: List[String] = Platform.getStringList("cloudstorage.write_base_path", new java.util.ArrayList[String]()).asScala.toList + val basePaths: Array[String] = validCSPSource.map(source => source + java.io.File.separator + Platform.getString("cloud_storage_container", "")).toArray + val repArray = getReplacementData(basePaths, relativePathPrefix) + val result = if (MapUtils.isNotEmpty(data)) { + val updatedMeta: java.util.Map[String, AnyRef] = new java.util.HashMap[String, AnyRef] + data.asScala.map(x => + if (cspMeta.contains(x._1)) + updatedMeta.put(x._1, getBasePath(x._1, x._2, basePaths, repArray)) + else updatedMeta.put(x._1, x._2) + ).asJava + updatedMeta + } else data + logger.info("CSPMetaUtil ::: updateRelativePath util.Map[String, AnyRef] ::: data after url replace :: " + result) + result + } + + def saveExternalRelativePath(data: java.util.Map[String, AnyRef]): java.util.Map[String, AnyRef] = { + logger.info("CSPMetaUtil ::: saveExternalRelativePath util.Map[String, AnyRef] ::: data before url replace :: " + data) + val relativePathPrefix: 
String = Platform.getString("cloudstorage.relative_path_prefix", "") + val validCSPSource: List[String] = Platform.getStringList("cloudstorage.write_base_path", new java.util.ArrayList[String]()).asScala.toList + val basePaths: Array[String] = validCSPSource.map(source => source + java.io.File.separator + Platform.getString("cloud_storage_container", "")).toArray + val repArray = getReplacementData(basePaths, relativePathPrefix) + + val updatedObjString = StringUtils.replaceEach(JsonUtils.serialize(data), basePaths, repArray) + val updatedData = JsonUtils.deserialize(updatedObjString, classOf[java.util.Map[String, AnyRef]]) + + logger.info("CSPMetaUtil ::: saveExternalRelativePath util.Map[String, AnyRef] ::: data after url replace :: " + updatedData) + updatedData + } + + def updateExternalRelativePath(data: java.util.Map[String, AnyRef]): java.util.Map[String, AnyRef] = { + logger.info("CSPMetaUtil ::: updateExternalRelativePath util.Map[String, AnyRef] ::: data before url replace :: " + data) + val relativePathPrefix: String = Platform.getString("cloudstorage.relative_path_prefix", "") + val validCSPSource: List[String] = Platform.getStringList("cloudstorage.write_base_path", new java.util.ArrayList[String]()).asScala.toList + val basePaths: Array[String] = validCSPSource.map(source => source + java.io.File.separator + Platform.getString("cloud_storage_container", "")).toArray + val repArray = getReplacementData(basePaths, relativePathPrefix) + val values = data.get("values") + val updatedValues = values match { + case x: List[AnyRef] => x.map(value => getBasePath("", value, basePaths, repArray)) + case _ => values + } + data.put("values", updatedValues) + logger.info("CSPMetaUtil ::: updateExternalRelativePath util.Map[String, AnyRef] ::: data after url replace :: " + data) + data + } + + def updateExternalAbsolutePath(data: java.util.Map[String, AnyRef]): java.util.Map[String, AnyRef] = { + //No need to check the metadata fields because that will be taken care 
while writing data. + logger.info("CSPMetaUtil ::: updateExternalAbsolutePath util.Map[String, AnyRef] ::: data before url replace :: " + data) + val relativePathPrefix: String = Platform.getString("cloudstorage.relative_path_prefix", "") + //Not Implemented logic based on external field key, because while writing data it is not considered. + //val extFieldList = Platform.getStringList("cloudstorage.external_field_list", new java.util.ArrayList[String]()).asScala.toList + val absolutePath = Platform.getString("cloudstorage.read_base_path", "") + java.io.File.separator + Platform.getString("cloud_storage_container", "") + val returnData = if (MapUtils.isNotEmpty(data)) { + val updatedMeta: java.util.Map[String, AnyRef] = new java.util.HashMap[String, AnyRef] + data.asScala.map(x => updatedMeta.put(x._1, getBasePath(x._1, x._2, Array(relativePathPrefix), Array(absolutePath))) + ).asJava + updatedMeta + } else data + logger.info("CSPMetaUtil ::: updateExternalAbsolutePath util.Map[String, AnyRef] ::: data before url replace :: " + returnData) + returnData + } + + private def getBasePath(key: String, value: AnyRef, oldPath: Array[String], newPath: Array[String]): AnyRef = { + logger.info(s"CSPMetaUtil ::: getBasePath ::: Updating Path for Key : $key & Value : $value") + val res = if (null != value) { + value match { + case p: String => if (StringUtils.isNotBlank(p)) StringUtils.replaceEach(p, oldPath, newPath) else p + case q: Map[String, AnyRef] => { + val updatedObjString = StringUtils.replaceEach(ScalaJsonUtil.serialize(q), oldPath, newPath) + val updatedData = ScalaJsonUtil.deserialize[Map[String, AnyRef]](updatedObjString) + updatedData + } + case r: java.util.Map[String, AnyRef] => { + val updatedObjString = StringUtils.replaceEach(JsonUtils.serialize(r), oldPath, newPath) + val updatedData = JsonUtils.deserialize(updatedObjString, classOf[java.util.Map[String, AnyRef]]) + updatedData + } + case s: util.List[AnyRef] => { + val updatedObjString = 
StringUtils.replaceEach(JsonUtils.serialize(s), oldPath, newPath) + val updatedData = JsonUtils.deserialize(updatedObjString, classOf[java.util.List[AnyRef]]) + updatedData + } + case t: List[AnyRef] => { + val updatedObjString = StringUtils.replaceEach(ScalaJsonUtil.serialize(t), oldPath, newPath) + val updatedData = ScalaJsonUtil.deserialize[List[AnyRef]](updatedObjString) + updatedData + } + case _ => value + } + } else value + logger.info(s"CSPMetaUtil ::: getBasePath ::: Updated Path for Key : $key & Updated Value is : $res") + res + } + + private def getReplacementData(oldPath: Array[String], repStr: String): Array[String] = { + val repArray = new Array[String](oldPath.length) + for (i <- oldPath.indices) { + repArray(i) = repStr + } + repArray + } + +} + +class CSPMetaUtil {} diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/ScalaJsonUtil.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/ScalaJsonUtil.scala new file mode 100644 index 000000000..f2c0abd30 --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/ScalaJsonUtil.scala @@ -0,0 +1,37 @@ +package org.sunbird.graph.util + +import java.lang.reflect.{ParameterizedType, Type} + +import com.fasterxml.jackson.core.`type`.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.module.scala.DefaultScalaModule + +object ScalaJsonUtil { + + @transient val mapper = new ObjectMapper() + mapper.registerModule(DefaultScalaModule) + + @throws(classOf[Exception]) + def serialize(obj: AnyRef): String = { + mapper.writeValueAsString(obj); + } + + @throws(classOf[Exception]) + def deserialize[T: Manifest](value: String): T = mapper.readValue(value, typeReference[T]); + + private[this] def typeReference[T: Manifest] = new TypeReference[T] { + override def getType = typeFromManifest(manifest[T]) + } + + + private[this] def typeFromManifest(m: Manifest[_]): Type = { + if 
(m.typeArguments.isEmpty) { m.runtimeClass } + // $COVERAGE-OFF$Disabling scoverage as this code is impossible to test + else new ParameterizedType { + def getRawType = m.runtimeClass + def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray + def getOwnerType = null + } + // $COVERAGE-ON$ + } +} diff --git a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala similarity index 97% rename from ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala rename to ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala index 5e102a05f..3d533f7dd 100644 --- a/ontology-engine/graph-core_2.11/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/validator/NodeValidator.scala @@ -10,7 +10,7 @@ import org.sunbird.graph.dac.model.{Filter, MetadataCriterion, Node, SearchCondi import org.sunbird.graph.exception.GraphErrorCodes import org.sunbird.graph.service.operation.SearchAsyncOperations -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} diff --git a/ontology-engine/graph-core_2.11/src/test/resources/application.conf b/ontology-engine/graph-core_2.12/src/test/resources/application.conf similarity index 96% rename from ontology-engine/graph-core_2.11/src/test/resources/application.conf rename to ontology-engine/graph-core_2.12/src/test/resources/application.conf index b5fb5b0cb..0b7d96a63 100644 --- a/ontology-engine/graph-core_2.11/src/test/resources/application.conf +++ b/ontology-engine/graph-core_2.12/src/test/resources/application.conf @@ -464,7 +464,7 @@ max.thumbnail.size.pixels=150 play.http.parser.maxMemoryBuffer = 50MB 
akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" //schema.base_path = "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/" collection.image.migration.enabled=true @@ -486,3 +486,10 @@ languageCode { platform.language.codes=["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd"] objectcategorydefinition.keyspace=category_store + +cloud_storage_container="sunbird-content-dev" +cloudstorage.metadata.replace_absolute_path=true +cloudstorage.relative_path_prefix= "CONTENT_STORAGE_BASE_PATH" +cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" +cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net","https://obj.dev.sunbird.org"] +cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"] \ No newline at end of file diff --git a/ontology-engine/graph-core_2.11/src/test/resources/cassandra-unit.yaml b/ontology-engine/graph-core_2.12/src/test/resources/cassandra-unit.yaml similarity index 100% rename from ontology-engine/graph-core_2.11/src/test/resources/cassandra-unit.yaml rename to ontology-engine/graph-core_2.12/src/test/resources/cassandra-unit.yaml diff --git a/ontology-engine/graph-core_2.12/src/test/resources/logback.xml b/ontology-engine/graph-core_2.12/src/test/resources/logback.xml new file mode 100644 index 000000000..73529d622 --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/test/resources/logback.xml @@ -0,0 +1,28 @@ + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ontology-engine/graph-core_2.11/src/test/scala/org/sunbird/graph/BaseSpec.scala b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/BaseSpec.scala similarity index 100% rename from ontology-engine/graph-core_2.11/src/test/scala/org/sunbird/graph/BaseSpec.scala rename to 
ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/BaseSpec.scala diff --git a/ontology-engine/graph-core_2.11/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala similarity index 86% rename from ontology-engine/graph-core_2.11/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala rename to ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala index 723289473..d0f51c61b 100644 --- a/ontology-engine/graph-core_2.11/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala +++ b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/external/ExternalPropsManagerTest.scala @@ -3,11 +3,12 @@ package org.sunbird.graph.external import java.util import org.sunbird.graph.BaseSpec import org.apache.commons.lang3.StringUtils +import org.scalatest.Ignore import org.sunbird.common.dto.{Request, Response} -import org.sunbird.common.exception.{ ResponseCode} +import org.sunbird.common.exception.ResponseCode import scala.concurrent.Future - +@Ignore class ExternalPropsManagerTest extends BaseSpec { def getContextMap(): java.util.Map[String, AnyRef] = { @@ -136,6 +137,34 @@ class ExternalPropsManagerTest extends BaseSpec { } } + "saveProps with object data" should "create a cassandra record successfully" in { + val request = new Request() + request.setObjectType("Content") + request.setContext(new util.HashMap[String, AnyRef]() { + { + put("graph_id", "domain") + put("version", "1.0") + put("objectType", "Content") + put("schemaName", "content") + } + }) + request.put("identifier", "do_1234111") + request.put("body", new util.HashMap[String, AnyRef]() { + { + put("en", "test body in english language") + put("hi", "test body in hindi language") + } + }) + request.put("oldBody", "sample body text") + + val future: Future[Response] = 
ExternalPropsManager.saveProps(request) + future map { response => { + assert(null != response) + assert(response.getResponseCode == ResponseCode.OK) + } + } + } + } diff --git a/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/CSPMetaUtilTest.scala b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/CSPMetaUtilTest.scala new file mode 100644 index 000000000..deb913596 --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/CSPMetaUtilTest.scala @@ -0,0 +1,234 @@ +package org.sunbird.graph.util + +import org.scalatest.Ignore +import org.sunbird.graph.BaseSpec +import org.sunbird.graph.dac.model.Node + +import scala.collection.JavaConverters._ +import java.util +@Ignore +class CSPMetaUtilTest extends BaseSpec { + + "saveExternalRelativePath" should "return map with relative Paths for question data" in { + val migratedData = CSPMetaUtil.saveExternalRelativePath(getQuestionData()) + assert(migratedData != null && migratedData.toString.contains("CONTENT_STORAGE_BASE_PATH")) + } + + "updateExternalRelativePath" should "return map with relative Paths" in { + val dataMap: util.HashMap[String, AnyRef] = new util.HashMap[String, AnyRef]() { + { + put("identifier", "do_1234567") + put("values", List("https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4")) + } + } + val migratedData = CSPMetaUtil.updateExternalRelativePath(dataMap) + assert(migratedData != null && migratedData.get("values").toString.contains("CONTENT_STORAGE_BASE_PATH")) + } + + "updateRelativePath" should "return map with relative Paths" in { + val migratedData = CSPMetaUtil.updateRelativePath(getNode.getMetadata) + assert(migratedData != null && migratedData.toString.contains("CONTENT_STORAGE_BASE_PATH")) + } + + "updateAbsolutePath" should "return map with relative Paths" in { + val migratedData = CSPMetaUtil.updateAbsolutePath(new util.ArrayList[Node]() { + 
add(getRelativeNode()) + }) + assert(migratedData != null && migratedData.get(0).getMetadata.toString.contains("https://sunbirddev.blob.core.windows.net/sunbird-content-dev")) +} + + "updateExternalRelativePath" should "return data having relative path" in { + val data = new java.util.HashMap[String, AnyRef]{{ + put("identifier", "do_213680293213650944125"); + put("values", List(new util.HashMap[String, AnyRef](){{ + put("default", "

Question Set

") + }}, new util.HashMap[String, AnyRef](){{ + put("identifier", "do_213680293213650944125"); + put("children",List(new util.HashMap[String, AnyRef](){{ + put("identifier", "do_123") + put("appIcon", "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_21367965089393049618/b1d586453b4e0b1535bf55332347934e.jpg") + }}).asJava) + }}, + new util.ArrayList[util.HashMap[String, String]](){{ + add(new util.HashMap[String, String](){{ + put("src", "/asset/public/content/assets/do_21367965089393049618/b1d586453b4e0b1535bf55332347934e.jpg"); + put("basePath", "https://inquiry.sunbird.org") + }}); + add(new util.HashMap[String, String](){{ + put("src", "/asset/public/content/assets/do_21367965089393049618/b1d586453b4e0b1535bf55332347934e.jpg"); + put("basePath", "https://inquiry.sunbird.org") + }}) + }})); + put("fields", List("instructions", "hierarchy")) + }} + val output = CSPMetaUtil.updateExternalRelativePath(data) + assert(!output.toString.contains("https://sunbirddev.blob.core.windows.net/sunbird-content-dev")) + assert(output.toString.contains("CONTENT_STORAGE_BASE_PATH")) + } + + "updateExternalAbsolutePath" should "return data with absolute cloud path" in { + val data = new util.HashMap[String, AnyRef](){{ + put("identifier", "do_123") + put("hierarchy", new util.HashMap[String, AnyRef](){{ + put("identifier", "do_213680293213650944125"); + put("children",List(new util.HashMap[String, AnyRef](){{ + put("identifier", "do_123") + put("appIcon", "CONTENT_STORAGE_BASE_PATH/content/assets/do_21367965089393049618/b1d586453b4e0b1535bf55332347934e.jpg") + }}).asJava) + }}) + }} + val output = CSPMetaUtil.updateExternalAbsolutePath(data) + assert(output.toString.contains("https://sunbirddev.blob.core.windows.net/sunbird-content-dev")) + assert(!output.toString.contains("CONTENT_STORAGE_BASE_PATH")) + } + + def getReplaceData(): String = { + """{"ownershipType": ["createdBy"],"subject": ["Mathematics"],"channel": 
"0126825293972439041","organisation": ["Sunbird"], + |"language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","objectType": "Content","gradeLevel": ["Class 4"], + |"primaryCategory": "Digital Textbook","children": [{"ownershipType": ["createdBy"],"parent": "do_1132828073514926081518","code": "do_1132828084877148161531", + |"keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "This chapter describes about human body", + |"language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.470+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716", + |"code": "do_1132828084876574721523","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041", + |"description": "This section describes about various part of the body such as head, hands, legs etc.","language": ["English"], + |"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.466+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712", + |"code": "do_1132828084876738561525","keywords": ["legs etc."],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "xyz", + |"language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.475+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215872001720", + |"previewUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4", + |"channel": "b00bc992ef25f1a9a8d63291e20efc8d", + 
|"downhttps://sunbirddev.blob.core.windows.netloadUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132339274094346241120/untitled-content_1616331971279_do_1132339274094346241120_14.0.ecar", + |"organisation": ["Sunbird"],"language": ["English"],"mimeType": "video/mp4","variants": {"spine": + |{"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132339274094346241120/untitled-content_1616331975047_do_1132339274094346241120_14.0_spine.ecar","size": 1381.0}},"objectType": "Content","primaryCategory": "Learning Resource","appId": "local.sunbird.portal","contentEncoding": "identity","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4", + |"lockKey": "34a029c4-ac81-4934-9792-11b7a57d6c13","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-21T13:20:24.579+0000","contentType": "Resource", + |"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132339274094346241120","audience": ["Student"],"visibility": "Default", + |"consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 1,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"], + |"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"license": "CC BY 4.0","prevState": "Live","size": 849897.0, + |"lastPublishedOn": "2021-03-21T13:06:11.272+0000","name": "Untitled Content","status": "Live","code": "a88b0257-670b-455b-98b8-6e359ebac009","credentials": {"enabled": "No"},"prevStatus": "Processing","description": "updated","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/9c0ebb33-af08-403f-afb4-eb24749f40a1/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","idealScreenSize": "normal","createdOn": "2021-03-11T13:34:14.475+0000","contentDisposition": "inline","lastUpdatedOn": "2021-03-21T13:06:09.526+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-21T18:36:15.799+0530","createdFor": 
["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 14.0,"versionKey": "1616331969523","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 4,"s3Key": "ecar_files/do_1132339274094346241120/untitled-content_1616331971279_do_1132339274094346241120_14.0.ecar","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"resourceType": "Learn"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215872001720","unitIdentifiers": ["do_1132239562839900161634"],"copyright": "2021 MIT","previewUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_113223967141863424174-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Hindi"],"channel": "01309282781705830427","downloadUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_113223967141863424174/esa_1614253812772_do_113223967141863424174_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_1132239617341767681638","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_113223967141863424174/esa_1614253813394_do_113223967141863424174_1.0_spine.ecar","size": 24293.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_113223967141863424174/artifact/1614253223147_do_1132239617341767681638.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-25T11:50:16.032+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_113223967141863424174","audience": ["Student"],"visibility": "Default","author": "anusha","maxQuestions": 1,"consumerId": 
"b7054510-3ca4-49fd-b373-b100b3f65e18","index": 2,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Hindi"],"license": "CC BY 4.0","prevState": "Review","size": 384798.0,"lastPublishedOn": "2021-02-25T11:50:12.771+0000","name": "esa","topic": ["तोप"],"status": "Live","code": "d19f43ce-753d-2c70-a9fd-70302af424a0","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_1132239617341767681638","streamingUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_113223967141863424174-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-25T11:50:01.500+0000","se_boards": ["CBSE"],"processId": "2b00cca7-42d5-4e35-aa03-1c22dfe03de8","contentDisposition": "inline","lastUpdatedOn": "2021-02-25T11:50:12.678+0000","originData": {"identifier": "do_1132239617341767681638","repository": "https://dock.sunbirded.org/api/content/v1/read/do_1132239617341767681638"},"collectionId": "do_1132239562836049921627","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-25T11:50:16.017+0000","creator": "anusha","os": ["All"],"questionCategories": ["SA"],"cloudStorageKey": "content/do_113223967141863424174/artifact/1614253223147_do_1132239617341767681638.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "12","bloomsLevel": ["Apply"],"pkgVersion": 1.0,"versionKey": "1614253812678","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 4,"s3Key": "ecar_files/do_113223967141863424174/esa_1614253812772_do_113223967141863424174_1.0.ecar","lastSubmittedOn": "2021-02-25T11:50:11.539+0000","createdBy": "19ba0e4e-9285-4335-8dd0-f674bf03fa4d","se_topics": ["तोप"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_1132239617341767681638/artifact/do_1132239617341767681638_1614253222002.pdf","board": "CBSE","programId": 
"463cfa30-775c-11eb-8c56-93946e419809"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.475+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371215872001720","lastStatusChangedOn": "2021-05-20T08:58:33.475+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113475","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.1.1 Key parts in the head","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712","code": "do_1132828084876165121519","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.473+0000","objectType": "Content","primaryCategory": "Textbook Unit","contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.473+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215708161718","lastStatusChangedOn": "2021-05-20T08:58:33.473+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 2,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113473","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.1.2 Other parts","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712","unitIdentifiers": ["do_11323721176414617611924"],"copyright": "2021 MIT","organisationId": "e7328d77-42a7-44c8-84f4-8cfea235f07d","previewUrl": 
"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_1132372524622561281279-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Mathematics"],"channel": "01309282781705830427","downloadUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132372524622561281279/untitled_1615875562931_do_1132372524622561281279_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_11323724954450329611930","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132372524622561281279/untitled_1615875563539_do_1132372524622561281279_1.0_spine.ecar","size": 19563.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 5"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_1132372524622561281279/artifact/1615875430184_do_11323724954450329611930.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-16T06:19:26.162+0000","contentType": "Resource","se_gradeLevels": ["Class 5"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132372524622561281279","audience": ["Student"],"visibility": "Default","author": "N18","maxQuestions": 1,"consumerId": "f73cfcc5-4d43-4fa0-8b81-46166c81bc2b","learningOutcome": ["identify the need to find area and perimeter of rectangle and square."],"index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Mathematics"],"license": "CC BY 4.0","prevState": "Review","size": 374996.0,"lastPublishedOn": "2021-03-16T06:19:22.931+0000","name": "Untitled","topic": ["Speed, Distance and Time"],"status": "Live","code": 
"2544c8b8-7946-b6c0-e1c7-ced4aee4ea8c","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_11323724954450329611930","streamingUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_1132372524622561281279-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-03-16T06:19:04.712+0000","se_boards": ["CBSE"],"processId": "9995e013-a7c9-4da1-b2c9-2f59da33414a","contentDisposition": "inline","lastUpdatedOn": "2021-03-16T06:19:20.817+0000","originData": {"identifier": "do_11323724954450329611930","repository": "https://dock.sunbirded.org/api/content/v1/read/do_11323724954450329611930"},"collectionId": "do_11323721176353996811921","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-03-16T06:19:26.147+0000","creator": "N18","os": ["All"],"questionCategories": ["MTF"],"cloudStorageKey": "content/do_1132372524622561281279/artifact/1615875430184_do_11323724954450329611930.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "2","bloomsLevel": ["Apply"],"pkgVersion": 1.0,"versionKey": "1615875560817","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 3,"s3Key": "ecar_files/do_1132372524622561281279/untitled_1615875562931_do_1132372524622561281279_1.0.ecar","lastSubmittedOn": "2021-03-16T06:19:17.005+0000","createdBy": "60f91e9e-34ee-4f9f-a907-d312d0e8063e","se_topics": ["Speed, Distance and Time"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_11323724954450329611930/artifact/do_11323724954450329611930_1615875429226.pdf","board": "CBSE","programId": "800eb440-8613-11eb-a663-4f63bbe94184"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.466+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371215134721712","lastStatusChangedOn": "2021-05-20T08:58:33.466+0000","audience": ["Student"],"os": ["All"],"visibility": 
"Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113466","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 2,"compatibilityLevel": 1,"name": "5.1 Parts of Body","topic": ["Role Of The Sense Organs"],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716","code": "do_1132828084877066241529","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.476+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215953921722","code": "do_1132828084876492801521","keywords": ["test key","check"],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.468+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215298561714","code": "do_1132828084876820481527","keywords": ["abcd","cgf"],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "labeled new","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.464+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","previewUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132338069147811841118/test-874-kb.mp4","channel": "b00bc992ef25f1a9a8d63291e20efc8d","downloadUrl": 
"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132338069147811841118/untitled-content_1615468830522_do_1132338069147811841118_2.0.ecar","organisation": ["Sunbird"],"language": ["English"],"mimeType": "video/mp4","variants": {"spine": {"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132338069147811841118/untitled-content_1615468834470_do_1132338069147811841118_2.0_spine.ecar","size": 1361.0}},"objectType": "Content","primaryCategory": "Learning Resource","appId": "dev.sunbird.portal","contentEncoding": "identity","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132338069147811841118/test-874-kb.mp4","lockKey": "d73707c8-9999-4fc9-9b34-0207f74faf43","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-12T08:10:31.335+0000","contentType": "Resource","trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132338069147811841118","audience": ["Student"],"visibility": "Default","consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 1,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"license": "CC BY 4.0","prevState": "Live","size": 849876.0,"lastPublishedOn": "2021-03-11T13:20:30.514+0000","name": "Untitled Content","status": "Live","code": "9deb2c69-7240-472a-98e7-ed438e76262b","credentials": {"enabled": "No"},"prevStatus": "Processing","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/f17bccc5-cab3-4da8-a5eb-11d7211f1507/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","idealScreenSize": "normal","createdOn": "2021-03-11T09:29:05.654+0000","contentDisposition": "inline","lastUpdatedOn": "2021-03-11T13:20:28.256+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-11T18:50:28.256+0530","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 2.0,"versionKey": 
"1615455090358","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 5,"s3Key": "ecar_files/do_1132338069147811841118/untitled-content_1615468830522_do_1132338069147811841118_2.0.ecar","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"resourceType": "Learn"},{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","unitIdentifiers": ["do_1132238266042040321422"],"copyright": "2021 MIT","previewUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_11322383952751820816-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Hindi"],"channel": "01309282781705830427","downloadUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_11322383952751820816/sapractice_1614238238045_do_11322383952751820816_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_1132238287156183041424","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_11322383952751820816/sapractice_1614238238800_do_11322383952751820816_1.0_spine.ecar","size": 13171.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_11322383952751820816/artifact/1614237122171_do_1132238287156183041424.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-25T07:30:44.916+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_11322383952751820816","audience": ["Student"],"visibility": "Default","author": "anusha","maxQuestions": 1,"consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 2,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": 
["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Hindi"],"license": "CC BY 4.0","prevState": "Review","size": 362236.0,"lastPublishedOn": "2021-02-25T07:30:38.043+0000","name": "sa:practice","status": "Live","code": "f239c77e-ed71-9133-0145-7468a92bce79","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_1132238287156183041424","streamingUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_11322383952751820816-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-25T07:30:23.577+0000","se_boards": ["CBSE"],"processId": "04d5aec9-ed09-4a57-963d-9fa654fecf8d","contentDisposition": "inline","lastUpdatedOn": "2021-02-25T07:30:37.956+0000","originData": {"identifier": "do_1132238287156183041424","repository": "https://dock.sunbirded.org/api/content/v1/read/do_1132238287156183041424"},"collectionId": "do_1132238266036551681415","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-25T07:30:44.908+0000","creator": "anusha","os": ["All"],"questionCategories": ["SA"],"cloudStorageKey": "content/do_11322383952751820816/artifact/1614237122171_do_1132238287156183041424.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "12","pkgVersion": 1.0,"versionKey": "1614238237956","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 5,"s3Key": "ecar_files/do_11322383952751820816/sapractice_1614238238045_do_11322383952751820816_1.0.ecar","lastSubmittedOn": "2021-02-25T07:30:36.709+0000","createdBy": "19ba0e4e-9285-4335-8dd0-f674bf03fa4d","compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_1132238287156183041424/artifact/do_1132238287156183041424_1614237121022.pdf","board": "CBSE","programId": "94564340-7737-11eb-96e0-29a9f8ed81cf"},{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","unitIdentifiers": 
["do_11322165488232038412588"],"copyright": "2021 MIT","previewUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_1132216902566133761410-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Environmental Studies"],"channel": "01309282781705830427","downloadUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132216902566133761410/mcqmcq_1613975872529_do_1132216902566133761410_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_11322168163282944012605","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132216902566133761410/mcqmcq_1613975873161_do_1132216902566133761410_1.0_spine.ecar","size": 17182.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_1132216902566133761410/artifact/1613975740738_do_11322168163282944012605.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-22T06:37:55.328+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132216902566133761410","audience": ["Student"],"visibility": "Default","author": "color4","maxQuestions": 1,"consumerId": "7411b6bd-89f3-40ec-98d1-229dc64ce77d","learningOutcome": ["Understand the importance of values in life"],"index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "ae94b68c-a535-4dce-8e7a-fb9662b0ad68","version": 2,"se_subjects": ["Environmental Studies"],"license": "CC BY 4.0","prevState": "Review","size": 370363.0,"lastPublishedOn": "2021-02-22T06:37:52.529+0000","name": "MCQMCQ","topic": ["Animals"],"status": 
"Live","code": "0cbae0f8-e3eb-1d31-e2e5-0337dc7d697d","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_11322168163282944012605","streamingUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/ecml/do_1132216902566133761410-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-22T06:37:41.405+0000","se_boards": ["CBSE"],"processId": "fbcec2af-cb7a-4ed1-8683-ff04b475947e","contentDisposition": "inline","lastUpdatedOn": "2021-02-22T06:37:52.447+0000","originData": {"identifier": "do_11322168163282944012605","repository": "https://dock.sunbirded.org/api/content/v1/read/do_11322168163282944012605"},"collectionId": "do_11322165488181248012584","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-22T06:37:55.314+0000","creator": "color4","os": ["All"],"questionCategories": ["MCQ"],"cloudStorageKey": "content/do_1132216902566133761410/artifact/1613975740738_do_11322168163282944012605.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "1","bloomsLevel": ["Understand"],"pkgVersion": 1.0,"versionKey": "1613975872447","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 5,"s3Key": "ecar_files/do_1132216902566133761410/mcqmcq_1613975872529_do_1132216902566133761410_1.0.ecar","lastSubmittedOn": "2021-02-22T06:37:51.179+0000","createdBy": "0ce5b67e-b48e-489b-a818-e938e8bfc14b","se_topics": ["Animals"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_11322168163282944012605/artifact/do_11322168163282944012605_1613975739805.pdf","board": "CBSE","programId": "b2433a00-74cd-11eb-9f3c-f39a9ab9f5ce"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.464+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371214970881710","lastStatusChangedOn": "2021-05-20T08:58:33.464+0000","audience": ["Student"],"os": ["All"],"visibility": 
"Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113464","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 4,"compatibilityLevel": 1,"name": "dsffgdg","topic": [],"status": "Draft"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.468+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215298561714","lastStatusChangedOn": "2021-05-20T08:58:33.468+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113468","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.2.1 Respiratory System","topic": ["Look and say","Role Of The Sense Organs"],"status": "Draft"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.476+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215953921722","lastStatusChangedOn": "2021-05-20T08:58:33.476+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 2,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113476","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 2,"compatibilityLevel": 1,"name": "5.2 Organ Systems","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716","copyright": "Sunbird","previewUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132344630588948481134/test-874-kb.mp4","subject": ["Math"],"channel": 
"b00bc992ef25f1a9a8d63291e20efc8d","downloadUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132344630588948481134/untitled-content_1615535618825_do_1132344630588948481134_2.0.ecar","organisation": ["Sunbird"],"showNotification": true,"language": ["English"],"mimeType": "video/mp4","variants": {"spine": {"ecarUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/ecar_files/do_1132344630588948481134/untitled-content_1615535619590_do_1132344630588948481134_2.0_spine.ecar","size": 35301.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Grade 1"],"appIcon": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_1132344630588948481134/artifact/2a4b8abd789184932399d222d03d9b5c.thumb.jpg","primaryCategory": "Learning Resource","appId": "dev.sunbird.portal","contentEncoding": "identity","artifactUrl": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_1132344630588948481134/test-874-kb.mp4","lockKey": "1d28d983-2704-44bd-803e-5feb4e62da62","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-12T08:10:34.367+0000","contentType": "Resource","se_gradeLevels": ["Grade 1"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132344630588948481134","lastUpdatedBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","audience": ["Student"],"visibility": "Default","consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"se_subjects": ["Math"],"license": "CC BY 4.0","prevState": "Review","size": 883817.0,"lastPublishedOn": "2021-03-12T07:53:38.825+0000","name": "Untitled Content","status": "Live","code": "8851e754-6e20-44d4-9070-e1a9664163ad","credentials": {"enabled": "No"},"prevStatus": "Review","description": "updated desrciption","streamingUrl": 
"https://sunbirddevmedia-inct.streaming.media.azure.net/40ae07aa-069e-4056-8f2b-014bc9a2d21b/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","medium": ["English"],"posterImage": "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_11299104587967692816/artifact/2a4b8abd789184932399d222d03d9b5c.jpg","idealScreenSize": "normal","createdOn": "2021-03-12T07:44:01.371+0000","se_boards": ["NCERT"],"copyrightYear": 2020,"contentDisposition": "inline","licenseterms": "By creating any type of content (resources, books, courses etc.) on DIKSHA, you consent to publish it under the Creative Commons License Framework. Please choose the applicable creative commons license you wish to apply to your content.","lastUpdatedOn": "2021-03-12T07:53:38.505+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-12T07:53:38.494+0000","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 2.0,"versionKey": "1615535618583","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 2,"s3Key": "ecar_files/do_1132344630588948481134/untitled-content_1615535618825_do_1132344630588948481134_2.0.ecar","lastSubmittedOn": "2021-03-12T07:53:10.005+0000","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"board": "NCERT","resourceType": "Learn"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.470+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215462401716","lastStatusChangedOn": "2021-05-20T08:58:33.470+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113470","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 1,"compatibilityLevel": 1,"name": "5. 
Human Body","topic": [],"status": "Draft"}],"appId": "dev.sunbird.portal","contentEncoding": "gzip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-05-20T09:12:06.988+0000","contentType": "TextBook","trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132828073514926081518","audience": ["Student"],"visibility": "Default","consumerId": "01814e02-fc27-4165-ae53-3d1816e55817","childNodes": ["do_1132339274094346241120","do_1132833371215872001720","do_1132833371215134721712","do_1132833371215462401716","do_113223967141863424174","do_1132833371214970881710","do_1132833371215708161718","do_1132372524622561281279","do_1132338069147811841118","do_1132833371215298561714","do_1132833371215953921722","do_11322383952751820816","do_1132216902566133761410","do_1132344630588948481134"],"discussionForum": {"enabled": "Yes"},"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"version": 2,"license": "CC BY 4.0","name": "TestCSVUpload","status": "Draft","code": "org.sunbird.yhqB6L","credentials": {"enabled": "No"},"description": "Enter description for TextBook","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-05-19T15:00:44.279+0000","contentDisposition": "inline","additionalCategories": ["Textbook"],"lastUpdatedOn": "2021-05-20T07:10:32.805+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-05-19T15:00:44.279+0000","createdFor": ["0126825293972439041"],"creator": "Book Creator","os": ["All"],"versionKey": "1621501113536","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 0,"createdBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","compatibilityLevel": 1,"userConsent": "Yes","board": "State (Tamil Nadu)","resourceType": "Book"}""".stripMargin + + } + + def getMigratedData(): String = { + s"""{identifier=do_1234567, hierarchy={"ownershipType": ["createdBy"],"subject": ["Mathematics"],"channel": "0126825293972439041","organisation": ["Sunbird"], + |"language": ["English"],"mimeType": 
"application/vnd.ekstep.content-collection","objectType": "Content","gradeLevel": ["Class 4"], + |"primaryCategory": "Digital Textbook","children": [{"ownershipType": ["createdBy"],"parent": "do_1132828073514926081518","code": "do_1132828084877148161531", + |"keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "This chapter describes about human body", + |"description": "This section describes about various part of the body such as head, hands, legs etc.","language": ["English"], + |"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.466+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712", + |"code": "do_1132828084876738561525","keywords": ["legs etc."],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "xyz", + |"language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.475+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215872001720", + |"previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4", + |"channel": "b00bc992ef25f1a9a8d63291e20efc8d", + |"downhttps://sunbirddev.blob.core.windows.netloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132339274094346241120/untitled-content_1616331971279_do_1132339274094346241120_14.0.ecar", + |"organisation": ["Sunbird"],"language": ["English"],"mimeType": "video/mp4","variants": {"spine": + |{"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132339274094346241120/untitled-content_1616331975047_do_1132339274094346241120_14.0_spine.ecar","size": 1381.0}},"objectType": "Content","primaryCategory": "Learning 
Resource","appId": "local.sunbird.portal","contentEncoding": "identity","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4", + |"lockKey": "34a029c4-ac81-4934-9792-11b7a57d6c13","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-21T13:20:24.579+0000","contentType": "Resource", + |"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132339274094346241120","audience": ["Student"],"visibility": "Default", + |"consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 1,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"], + |"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"license": "CC BY 4.0","prevState": "Live","size": 849897.0, + |"lastPublishedOn": "2021-03-21T13:06:11.272+0000","name": "Untitled Content","status": "Live","code": "a88b0257-670b-455b-98b8-6e359ebac009","credentials": {"enabled": "No"},"prevStatus": "Processing","description": "updated","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/9c0ebb33-af08-403f-afb4-eb24749f40a1/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","idealScreenSize": "normal","createdOn": "2021-03-11T13:34:14.475+0000","contentDisposition": "inline","lastUpdatedOn": "2021-03-21T13:06:09.526+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-21T18:36:15.799+0530","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 14.0,"versionKey": "1616331969523","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 4,"s3Key": "ecar_files/do_1132339274094346241120/untitled-content_1616331971279_do_1132339274094346241120_14.0.ecar","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"resourceType": "Learn"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215872001720","unitIdentifiers": ["do_1132239562839900161634"],"copyright": "2021 MIT","previewUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_113223967141863424174-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Hindi"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_113223967141863424174/esa_1614253812772_do_113223967141863424174_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_1132239617341767681638","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_113223967141863424174/esa_1614253813394_do_113223967141863424174_1.0_spine.ecar","size": 24293.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_113223967141863424174/artifact/1614253223147_do_1132239617341767681638.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-25T11:50:16.032+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_113223967141863424174","audience": ["Student"],"visibility": "Default","author": "anusha","maxQuestions": 1,"consumerId": "b7054510-3ca4-49fd-b373-b100b3f65e18","index": 2,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Hindi"],"license": "CC BY 4.0","prevState": "Review","size": 384798.0,"lastPublishedOn": "2021-02-25T11:50:12.771+0000","name": "esa","topic": ["तोप"],"status": "Live","code": "d19f43ce-753d-2c70-a9fd-70302af424a0","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_1132239617341767681638","streamingUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_113223967141863424174-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-25T11:50:01.500+0000","se_boards": ["CBSE"],"processId": "2b00cca7-42d5-4e35-aa03-1c22dfe03de8","contentDisposition": "inline","lastUpdatedOn": "2021-02-25T11:50:12.678+0000","originData": {"identifier": "do_1132239617341767681638","repository": "https://dock.sunbirded.org/api/content/v1/read/do_1132239617341767681638"},"collectionId": "do_1132239562836049921627","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-25T11:50:16.017+0000","creator": "anusha","os": ["All"],"questionCategories": ["SA"],"cloudStorageKey": "content/do_113223967141863424174/artifact/1614253223147_do_1132239617341767681638.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "12","bloomsLevel": ["Apply"],"pkgVersion": 1.0,"versionKey": "1614253812678","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 4,"s3Key": "ecar_files/do_113223967141863424174/esa_1614253812772_do_113223967141863424174_1.0.ecar","lastSubmittedOn": "2021-02-25T11:50:11.539+0000","createdBy": "19ba0e4e-9285-4335-8dd0-f674bf03fa4d","se_topics": ["तोप"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_1132239617341767681638/artifact/do_1132239617341767681638_1614253222002.pdf","board": "CBSE","programId": "463cfa30-775c-11eb-8c56-93946e419809"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.475+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371215872001720","lastStatusChangedOn": "2021-05-20T08:58:33.475+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113475","license": "CC BY 
4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.1.1 Key parts in the head","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712","code": "do_1132828084876165121519","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.473+0000","objectType": "Content","primaryCategory": "Textbook Unit","contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.473+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215708161718","lastStatusChangedOn": "2021-05-20T08:58:33.473+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 2,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113473","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.1.2 Other parts","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712","unitIdentifiers": ["do_11323721176414617611924"],"copyright": "2021 MIT","organisationId": "e7328d77-42a7-44c8-84f4-8cfea235f07d","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132372524622561281279-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Mathematics"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132372524622561281279/untitled_1615875562931_do_1132372524622561281279_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_11323724954450329611930","mimeType": 
"application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132372524622561281279/untitled_1615875563539_do_1132372524622561281279_1.0_spine.ecar","size": 19563.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 5"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_1132372524622561281279/artifact/1615875430184_do_11323724954450329611930.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-16T06:19:26.162+0000","contentType": "Resource","se_gradeLevels": ["Class 5"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132372524622561281279","audience": ["Student"],"visibility": "Default","author": "N18","maxQuestions": 1,"consumerId": "f73cfcc5-4d43-4fa0-8b81-46166c81bc2b","learningOutcome": ["identify the need to find area and perimeter of rectangle and square."],"index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Mathematics"],"license": "CC BY 4.0","prevState": "Review","size": 374996.0,"lastPublishedOn": "2021-03-16T06:19:22.931+0000","name": "Untitled","topic": ["Speed, Distance and Time"],"status": "Live","code": "2544c8b8-7946-b6c0-e1c7-ced4aee4ea8c","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_11323724954450329611930","streamingUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132372524622561281279-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-03-16T06:19:04.712+0000","se_boards": ["CBSE"],"processId": "9995e013-a7c9-4da1-b2c9-2f59da33414a","contentDisposition": "inline","lastUpdatedOn": "2021-03-16T06:19:20.817+0000","originData": {"identifier": "do_11323724954450329611930","repository": 
"https://dock.sunbirded.org/api/content/v1/read/do_11323724954450329611930"},"collectionId": "do_11323721176353996811921","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-03-16T06:19:26.147+0000","creator": "N18","os": ["All"],"questionCategories": ["MTF"],"cloudStorageKey": "content/do_1132372524622561281279/artifact/1615875430184_do_11323724954450329611930.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "2","bloomsLevel": ["Apply"],"pkgVersion": 1.0,"versionKey": "1615875560817","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 3,"s3Key": "ecar_files/do_1132372524622561281279/untitled_1615875562931_do_1132372524622561281279_1.0.ecar","lastSubmittedOn": "2021-03-16T06:19:17.005+0000","createdBy": "60f91e9e-34ee-4f9f-a907-d312d0e8063e","se_topics": ["Speed, Distance and Time"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_11323724954450329611930/artifact/do_11323724954450329611930_1615875429226.pdf","board": "CBSE","programId": "800eb440-8613-11eb-a663-4f63bbe94184"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.466+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371215134721712","lastStatusChangedOn": "2021-05-20T08:58:33.466+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113466","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 2,"compatibilityLevel": 1,"name": "5.1 Parts of Body","topic": ["Role Of The Sense Organs"],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716","code": "do_1132828084877066241529","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": 
["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.476+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215953921722","code": "do_1132828084876492801521","keywords": ["test key","check"],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.468+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215298561714","code": "do_1132828084876820481527","keywords": ["abcd","cgf"],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "labeled new","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.464+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132338069147811841118/test-874-kb.mp4","channel": "b00bc992ef25f1a9a8d63291e20efc8d","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132338069147811841118/untitled-content_1615468830522_do_1132338069147811841118_2.0.ecar","organisation": ["Sunbird"],"language": ["English"],"mimeType": "video/mp4","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132338069147811841118/untitled-content_1615468834470_do_1132338069147811841118_2.0_spine.ecar","size": 1361.0}},"objectType": "Content","primaryCategory": "Learning Resource","appId": "dev.sunbird.portal","contentEncoding": "identity","artifactUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132338069147811841118/test-874-kb.mp4","lockKey": "d73707c8-9999-4fc9-9b34-0207f74faf43","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-12T08:10:31.335+0000","contentType": "Resource","trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132338069147811841118","audience": ["Student"],"visibility": "Default","consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 1,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"license": "CC BY 4.0","prevState": "Live","size": 849876.0,"lastPublishedOn": "2021-03-11T13:20:30.514+0000","name": "Untitled Content","status": "Live","code": "9deb2c69-7240-472a-98e7-ed438e76262b","credentials": {"enabled": "No"},"prevStatus": "Processing","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/f17bccc5-cab3-4da8-a5eb-11d7211f1507/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","idealScreenSize": "normal","createdOn": "2021-03-11T09:29:05.654+0000","contentDisposition": "inline","lastUpdatedOn": "2021-03-11T13:20:28.256+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-11T18:50:28.256+0530","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 2.0,"versionKey": "1615455090358","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 5,"s3Key": "ecar_files/do_1132338069147811841118/untitled-content_1615468830522_do_1132338069147811841118_2.0.ecar","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"resourceType": "Learn"},{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","unitIdentifiers": ["do_1132238266042040321422"],"copyright": "2021 MIT","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_11322383952751820816-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": 
"1.1"}],"subject": ["Hindi"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_11322383952751820816/sapractice_1614238238045_do_11322383952751820816_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_1132238287156183041424","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_11322383952751820816/sapractice_1614238238800_do_11322383952751820816_1.0_spine.ecar","size": 13171.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_11322383952751820816/artifact/1614237122171_do_1132238287156183041424.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-25T07:30:44.916+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_11322383952751820816","audience": ["Student"],"visibility": "Default","author": "anusha","maxQuestions": 1,"consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 2,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Hindi"],"license": "CC BY 4.0","prevState": "Review","size": 362236.0,"lastPublishedOn": "2021-02-25T07:30:38.043+0000","name": "sa:practice","status": "Live","code": "f239c77e-ed71-9133-0145-7468a92bce79","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_1132238287156183041424","streamingUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_11322383952751820816-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-25T07:30:23.577+0000","se_boards": ["CBSE"],"processId": 
"04d5aec9-ed09-4a57-963d-9fa654fecf8d","contentDisposition": "inline","lastUpdatedOn": "2021-02-25T07:30:37.956+0000","originData": {"identifier": "do_1132238287156183041424","repository": "https://dock.sunbirded.org/api/content/v1/read/do_1132238287156183041424"},"collectionId": "do_1132238266036551681415","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-25T07:30:44.908+0000","creator": "anusha","os": ["All"],"questionCategories": ["SA"],"cloudStorageKey": "content/do_11322383952751820816/artifact/1614237122171_do_1132238287156183041424.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "12","pkgVersion": 1.0,"versionKey": "1614238237956","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 5,"s3Key": "ecar_files/do_11322383952751820816/sapractice_1614238238045_do_11322383952751820816_1.0.ecar","lastSubmittedOn": "2021-02-25T07:30:36.709+0000","createdBy": "19ba0e4e-9285-4335-8dd0-f674bf03fa4d","compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_1132238287156183041424/artifact/do_1132238287156183041424_1614237121022.pdf","board": "CBSE","programId": "94564340-7737-11eb-96e0-29a9f8ed81cf"},{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","unitIdentifiers": ["do_11322165488232038412588"],"copyright": "2021 MIT","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132216902566133761410-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Environmental Studies"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132216902566133761410/mcqmcq_1613975872529_do_1132216902566133761410_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_11322168163282944012605","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132216902566133761410/mcqmcq_1613975873161_do_1132216902566133761410_1.0_spine.ecar","size": 17182.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_1132216902566133761410/artifact/1613975740738_do_11322168163282944012605.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-22T06:37:55.328+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132216902566133761410","audience": ["Student"],"visibility": "Default","author": "color4","maxQuestions": 1,"consumerId": "7411b6bd-89f3-40ec-98d1-229dc64ce77d","learningOutcome": ["Understand the importance of values in life"],"index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "ae94b68c-a535-4dce-8e7a-fb9662b0ad68","version": 2,"se_subjects": ["Environmental Studies"],"license": "CC BY 4.0","prevState": "Review","size": 370363.0,"lastPublishedOn": "2021-02-22T06:37:52.529+0000","name": "MCQMCQ","topic": ["Animals"],"status": "Live","code": "0cbae0f8-e3eb-1d31-e2e5-0337dc7d697d","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_11322168163282944012605","streamingUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132216902566133761410-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-22T06:37:41.405+0000","se_boards": ["CBSE"],"processId": "fbcec2af-cb7a-4ed1-8683-ff04b475947e","contentDisposition": "inline","lastUpdatedOn": "2021-02-22T06:37:52.447+0000","originData": {"identifier": "do_11322168163282944012605","repository": "https://dock.sunbirded.org/api/content/v1/read/do_11322168163282944012605"},"collectionId": "do_11322165488181248012584","dialcodeRequired": "No","editorVersion": 
3,"lastStatusChangedOn": "2021-02-22T06:37:55.314+0000","creator": "color4","os": ["All"],"questionCategories": ["MCQ"],"cloudStorageKey": "content/do_1132216902566133761410/artifact/1613975740738_do_11322168163282944012605.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "1","bloomsLevel": ["Understand"],"pkgVersion": 1.0,"versionKey": "1613975872447","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 5,"s3Key": "ecar_files/do_1132216902566133761410/mcqmcq_1613975872529_do_1132216902566133761410_1.0.ecar","lastSubmittedOn": "2021-02-22T06:37:51.179+0000","createdBy": "0ce5b67e-b48e-489b-a818-e938e8bfc14b","se_topics": ["Animals"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_11322168163282944012605/artifact/do_11322168163282944012605_1613975739805.pdf","board": "CBSE","programId": "b2433a00-74cd-11eb-9f3c-f39a9ab9f5ce"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.464+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371214970881710","lastStatusChangedOn": "2021-05-20T08:58:33.464+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113464","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 4,"compatibilityLevel": 1,"name": "dsffgdg","topic": [],"status": "Draft"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.468+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215298561714","lastStatusChangedOn": "2021-05-20T08:58:33.468+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": 
"org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113468","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.2.1 Respiratory System","topic": ["Look and say","Role Of The Sense Organs"],"status": "Draft"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.476+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215953921722","lastStatusChangedOn": "2021-05-20T08:58:33.476+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 2,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113476","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 2,"compatibilityLevel": 1,"name": "5.2 Organ Systems","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716","copyright": "Sunbird","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132344630588948481134/test-874-kb.mp4","subject": ["Math"],"channel": "b00bc992ef25f1a9a8d63291e20efc8d","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132344630588948481134/untitled-content_1615535618825_do_1132344630588948481134_2.0.ecar","organisation": ["Sunbird"],"showNotification": true,"language": ["English"],"mimeType": "video/mp4","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132344630588948481134/untitled-content_1615535619590_do_1132344630588948481134_2.0_spine.ecar","size": 35301.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Grade 1"],"appIcon": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_1132344630588948481134/artifact/2a4b8abd789184932399d222d03d9b5c.thumb.jpg","primaryCategory": "Learning Resource","appId": 
"dev.sunbird.portal","contentEncoding": "identity","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132344630588948481134/test-874-kb.mp4","lockKey": "1d28d983-2704-44bd-803e-5feb4e62da62","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-12T08:10:34.367+0000","contentType": "Resource","se_gradeLevels": ["Grade 1"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132344630588948481134","lastUpdatedBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","audience": ["Student"],"visibility": "Default","consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"se_subjects": ["Math"],"license": "CC BY 4.0","prevState": "Review","size": 883817.0,"lastPublishedOn": "2021-03-12T07:53:38.825+0000","name": "Untitled Content","status": "Live","code": "8851e754-6e20-44d4-9070-e1a9664163ad","credentials": {"enabled": "No"},"prevStatus": "Review","description": "updated desrciption","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/40ae07aa-069e-4056-8f2b-014bc9a2d21b/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","medium": ["English"],"posterImage": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_11299104587967692816/artifact/2a4b8abd789184932399d222d03d9b5c.jpg","idealScreenSize": "normal","createdOn": "2021-03-12T07:44:01.371+0000","se_boards": ["NCERT"],"copyrightYear": 2020,"contentDisposition": "inline","licenseterms": "By creating any type of content (resources, books, courses etc.) on DIKSHA, you consent to publish it under the Creative Commons License Framework. 
Please choose the applicable creative commons license you wish to apply to your content.","lastUpdatedOn": "2021-03-12T07:53:38.505+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-12T07:53:38.494+0000","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 2.0,"versionKey": "1615535618583","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 2,"s3Key": "ecar_files/do_1132344630588948481134/untitled-content_1615535618825_do_1132344630588948481134_2.0.ecar","lastSubmittedOn": "2021-03-12T07:53:10.005+0000","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"board": "NCERT","resourceType": "Learn"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.470+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215462401716","lastStatusChangedOn": "2021-05-20T08:58:33.470+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113470","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 1,"compatibilityLevel": 1,"name": "5. 
Human Body","topic": [],"status": "Draft"}],"appId": "dev.sunbird.portal","contentEncoding": "gzip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-05-20T09:12:06.988+0000","contentType": "TextBook","trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132828073514926081518","audience": ["Student"],"visibility": "Default","consumerId": "01814e02-fc27-4165-ae53-3d1816e55817","childNodes": ["do_1132339274094346241120","do_1132833371215872001720","do_1132833371215134721712","do_1132833371215462401716","do_113223967141863424174","do_1132833371214970881710","do_1132833371215708161718","do_1132372524622561281279","do_1132338069147811841118","do_1132833371215298561714","do_1132833371215953921722","do_11322383952751820816","do_1132216902566133761410","do_1132344630588948481134"],"discussionForum": {"enabled": "Yes"},"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"version": 2,"license": "CC BY 4.0","name": "TestCSVUpload","status": "Draft","code": "org.sunbird.yhqB6L","credentials": {"enabled": "No"},"description": "Enter description for TextBook","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-05-19T15:00:44.279+0000","contentDisposition": "inline","additionalCategories": ["Textbook"],"lastUpdatedOn": "2021-05-20T07:10:32.805+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-05-19T15:00:44.279+0000","createdFor": ["0126825293972439041"],"creator": "Book Creator","os": ["All"],"versionKey": "1621501113536","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 0,"createdBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","compatibilityLevel": 1,"userConsent": "Yes","board": "State (Tamil Nadu)","resourceType": "Book"}} + |"primaryCategory": "Digital Textbook","children": [{"ownershipType": ["createdBy"],"parent": "do_1132828073514926081518","code": "do_1132828084877148161531", + |"keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "This chapter describes about human 
body", + |"language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.470+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716", + |"code": "do_1132828084876574721523","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041", + |"description": "This section describes about various part of the body such as head, hands, legs etc.","language": ["English"], + |"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.466+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712", + |"code": "do_1132828084876738561525","keywords": ["legs etc."],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "xyz", + |"language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.475+0000", + |"objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215872001720", + |"previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4", + |"downhttps://sunbirddev.blob.core.windows.netloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132339274094346241120/untitled-content_1616331971279_do_1132339274094346241120_14.0.ecar", + |"organisation": ["Sunbird"],"language": ["English"],"mimeType": "video/mp4","variants": {"spine": + |{"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132339274094346241120/untitled-content_1616331975047_do_1132339274094346241120_14.0_spine.ecar","size": 1381.0}},"objectType": "Content","primaryCategory": "Learning Resource","appId": 
"local.sunbird.portal","contentEncoding": "identity","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132339274094346241120/test-874-kb.mp4", + |"lockKey": "34a029c4-ac81-4934-9792-11b7a57d6c13","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-21T13:20:24.579+0000","contentType": "Resource", + |"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132339274094346241120","audience": ["Student"],"visibility": "Default", + |"consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 1,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"], + |"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"license": "CC BY 4.0","prevState": "Live","size": 849897.0, + |"lastPublishedOn": "2021-03-21T13:06:11.272+0000","name": "Untitled Content","status": "Live","code": "a88b0257-670b-455b-98b8-6e359ebac009","credentials": {"enabled": "No"},"prevStatus": "Processing","description": "updated","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/9c0ebb33-af08-403f-afb4-eb24749f40a1/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","idealScreenSize": "normal","createdOn": "2021-03-11T13:34:14.475+0000","contentDisposition": "inline","lastUpdatedOn": "2021-03-21T13:06:09.526+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-21T18:36:15.799+0530","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 14.0,"versionKey": "1616331969523","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 4,"s3Key": "ecar_files/do_1132339274094346241120/untitled-content_1616331971279_do_1132339274094346241120_14.0.ecar","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"resourceType": "Learn"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215872001720","unitIdentifiers": ["do_1132239562839900161634"],"copyright": "2021 MIT","previewUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_113223967141863424174-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Hindi"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_113223967141863424174/esa_1614253812772_do_113223967141863424174_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_1132239617341767681638","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_113223967141863424174/esa_1614253813394_do_113223967141863424174_1.0_spine.ecar","size": 24293.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_113223967141863424174/artifact/1614253223147_do_1132239617341767681638.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-25T11:50:16.032+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_113223967141863424174","audience": ["Student"],"visibility": "Default","author": "anusha","maxQuestions": 1,"consumerId": "b7054510-3ca4-49fd-b373-b100b3f65e18","index": 2,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Hindi"],"license": "CC BY 4.0","prevState": "Review","size": 384798.0,"lastPublishedOn": "2021-02-25T11:50:12.771+0000","name": "esa","topic": ["तोप"],"status": "Live","code": "d19f43ce-753d-2c70-a9fd-70302af424a0","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_1132239617341767681638","streamingUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_113223967141863424174-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-25T11:50:01.500+0000","se_boards": ["CBSE"],"processId": "2b00cca7-42d5-4e35-aa03-1c22dfe03de8","contentDisposition": "inline","lastUpdatedOn": "2021-02-25T11:50:12.678+0000","originData": {"identifier": "do_1132239617341767681638","repository": "https://dock.sunbirded.org/api/content/v1/read/do_1132239617341767681638"},"collectionId": "do_1132239562836049921627","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-25T11:50:16.017+0000","creator": "anusha","os": ["All"],"questionCategories": ["SA"],"cloudStorageKey": "content/do_113223967141863424174/artifact/1614253223147_do_1132239617341767681638.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "12","bloomsLevel": ["Apply"],"pkgVersion": 1.0,"versionKey": "1614253812678","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 4,"s3Key": "ecar_files/do_113223967141863424174/esa_1614253812772_do_113223967141863424174_1.0.ecar","lastSubmittedOn": "2021-02-25T11:50:11.539+0000","createdBy": "19ba0e4e-9285-4335-8dd0-f674bf03fa4d","se_topics": ["तोप"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_1132239617341767681638/artifact/do_1132239617341767681638_1614253222002.pdf","board": "CBSE","programId": "463cfa30-775c-11eb-8c56-93946e419809"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.475+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371215872001720","lastStatusChangedOn": "2021-05-20T08:58:33.475+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113475","license": "CC BY 
4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.1.1 Key parts in the head","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712","code": "do_1132828084876165121519","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.473+0000","objectType": "Content","primaryCategory": "Textbook Unit","contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.473+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215708161718","lastStatusChangedOn": "2021-05-20T08:58:33.473+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 2,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113473","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.1.2 Other parts","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215134721712","unitIdentifiers": ["do_11323721176414617611924"],"copyright": "2021 MIT","organisationId": "e7328d77-42a7-44c8-84f4-8cfea235f07d","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132372524622561281279-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Mathematics"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132372524622561281279/untitled_1615875562931_do_1132372524622561281279_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_11323724954450329611930","mimeType": 
"application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132372524622561281279/untitled_1615875563539_do_1132372524622561281279_1.0_spine.ecar","size": 19563.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 5"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_1132372524622561281279/artifact/1615875430184_do_11323724954450329611930.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-16T06:19:26.162+0000","contentType": "Resource","se_gradeLevels": ["Class 5"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132372524622561281279","audience": ["Student"],"visibility": "Default","author": "N18","maxQuestions": 1,"consumerId": "f73cfcc5-4d43-4fa0-8b81-46166c81bc2b","learningOutcome": ["identify the need to find area and perimeter of rectangle and square."],"index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Mathematics"],"license": "CC BY 4.0","prevState": "Review","size": 374996.0,"lastPublishedOn": "2021-03-16T06:19:22.931+0000","name": "Untitled","topic": ["Speed, Distance and Time"],"status": "Live","code": "2544c8b8-7946-b6c0-e1c7-ced4aee4ea8c","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_11323724954450329611930","streamingUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132372524622561281279-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-03-16T06:19:04.712+0000","se_boards": ["CBSE"],"processId": "9995e013-a7c9-4da1-b2c9-2f59da33414a","contentDisposition": "inline","lastUpdatedOn": "2021-03-16T06:19:20.817+0000","originData": {"identifier": "do_11323724954450329611930","repository": 
"https://dock.sunbirded.org/api/content/v1/read/do_11323724954450329611930"},"collectionId": "do_11323721176353996811921","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-03-16T06:19:26.147+0000","creator": "N18","os": ["All"],"questionCategories": ["MTF"],"cloudStorageKey": "content/do_1132372524622561281279/artifact/1615875430184_do_11323724954450329611930.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "2","bloomsLevel": ["Apply"],"pkgVersion": 1.0,"versionKey": "1615875560817","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 3,"s3Key": "ecar_files/do_1132372524622561281279/untitled_1615875562931_do_1132372524622561281279_1.0.ecar","lastSubmittedOn": "2021-03-16T06:19:17.005+0000","createdBy": "60f91e9e-34ee-4f9f-a907-d312d0e8063e","se_topics": ["Speed, Distance and Time"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_11323724954450329611930/artifact/do_11323724954450329611930_1615875429226.pdf","board": "CBSE","programId": "800eb440-8613-11eb-a663-4f63bbe94184"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.466+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371215134721712","lastStatusChangedOn": "2021-05-20T08:58:33.466+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113466","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 2,"compatibilityLevel": 1,"name": "5.1 Parts of Body","topic": ["Role Of The Sense Organs"],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716","code": "do_1132828084877066241529","keywords": [],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": 
["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.476+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215953921722","code": "do_1132828084876492801521","keywords": ["test key","check"],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.468+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371215298561714","code": "do_1132828084876820481527","keywords": ["abcd","cgf"],"credentials": {"enabled": "No"},"channel": "0126825293972439041","description": "labeled new","language": ["English"],"mimeType": "application/vnd.ekstep.content-collection","idealScreenSize": "normal","createdOn": "2021-05-20T08:58:33.464+0000","objectType": "Content","primaryCategory": "Textbook Unit","children": [{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132338069147811841118/test-874-kb.mp4","channel": "b00bc992ef25f1a9a8d63291e20efc8d","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132338069147811841118/untitled-content_1615468830522_do_1132338069147811841118_2.0.ecar","organisation": ["Sunbird"],"language": ["English"],"mimeType": "video/mp4","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132338069147811841118/untitled-content_1615468834470_do_1132338069147811841118_2.0_spine.ecar","size": 1361.0}},"objectType": "Content","primaryCategory": "Learning Resource","appId": "dev.sunbird.portal","contentEncoding": "identity","artifactUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132338069147811841118/test-874-kb.mp4","lockKey": "d73707c8-9999-4fc9-9b34-0207f74faf43","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-12T08:10:31.335+0000","contentType": "Resource","trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132338069147811841118","audience": ["Student"],"visibility": "Default","consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 1,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"license": "CC BY 4.0","prevState": "Live","size": 849876.0,"lastPublishedOn": "2021-03-11T13:20:30.514+0000","name": "Untitled Content","status": "Live","code": "9deb2c69-7240-472a-98e7-ed438e76262b","credentials": {"enabled": "No"},"prevStatus": "Processing","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/f17bccc5-cab3-4da8-a5eb-11d7211f1507/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","idealScreenSize": "normal","createdOn": "2021-03-11T09:29:05.654+0000","contentDisposition": "inline","lastUpdatedOn": "2021-03-11T13:20:28.256+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-11T18:50:28.256+0530","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 2.0,"versionKey": "1615455090358","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 5,"s3Key": "ecar_files/do_1132338069147811841118/untitled-content_1615468830522_do_1132338069147811841118_2.0.ecar","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"resourceType": "Learn"},{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","unitIdentifiers": ["do_1132238266042040321422"],"copyright": "2021 MIT","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_11322383952751820816-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": 
"1.1"}],"subject": ["Hindi"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_11322383952751820816/sapractice_1614238238045_do_11322383952751820816_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_1132238287156183041424","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_11322383952751820816/sapractice_1614238238800_do_11322383952751820816_1.0_spine.ecar","size": 13171.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_11322383952751820816/artifact/1614237122171_do_1132238287156183041424.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-25T07:30:44.916+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_11322383952751820816","audience": ["Student"],"visibility": "Default","author": "anusha","maxQuestions": 1,"consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 2,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "5a587cc1-e018-4859-a0a8-e842650b9d64","version": 2,"se_subjects": ["Hindi"],"license": "CC BY 4.0","prevState": "Review","size": 362236.0,"lastPublishedOn": "2021-02-25T07:30:38.043+0000","name": "sa:practice","status": "Live","code": "f239c77e-ed71-9133-0145-7468a92bce79","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_1132238287156183041424","streamingUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_11322383952751820816-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-25T07:30:23.577+0000","se_boards": ["CBSE"],"processId": 
"04d5aec9-ed09-4a57-963d-9fa654fecf8d","contentDisposition": "inline","lastUpdatedOn": "2021-02-25T07:30:37.956+0000","originData": {"identifier": "do_1132238287156183041424","repository": "https://dock.sunbirded.org/api/content/v1/read/do_1132238287156183041424"},"collectionId": "do_1132238266036551681415","dialcodeRequired": "No","editorVersion": 3,"lastStatusChangedOn": "2021-02-25T07:30:44.908+0000","creator": "anusha","os": ["All"],"questionCategories": ["SA"],"cloudStorageKey": "content/do_11322383952751820816/artifact/1614237122171_do_1132238287156183041424.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "12","pkgVersion": 1.0,"versionKey": "1614238237956","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 5,"s3Key": "ecar_files/do_11322383952751820816/sapractice_1614238238045_do_11322383952751820816_1.0.ecar","lastSubmittedOn": "2021-02-25T07:30:36.709+0000","createdBy": "19ba0e4e-9285-4335-8dd0-f674bf03fa4d","compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_1132238287156183041424/artifact/do_1132238287156183041424_1614237121022.pdf","board": "CBSE","programId": "94564340-7737-11eb-96e0-29a9f8ed81cf"},{"ownershipType": ["createdBy"],"parent": "do_1132833371214970881710","unitIdentifiers": ["do_11322165488232038412588"],"copyright": "2021 MIT","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132216902566133761410-latest","plugins": [{"identifier": "org.sunbird.questionunit.quml","semanticVersion": "1.1"}],"subject": ["Environmental Studies"],"channel": "01309282781705830427","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132216902566133761410/mcqmcq_1613975872529_do_1132216902566133761410_1.0.ecar","language": ["English"],"source": "https://dock.sunbirded.org/api/content/v1/read/do_11322168163282944012605","mimeType": "application/vnd.ekstep.ecml-archive","variants": {"spine": {"ecarUrl": 
"CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132216902566133761410/mcqmcq_1613975873161_do_1132216902566133761410_1.0_spine.ecar","size": 17182.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Class 10"],"primaryCategory": "Exam Question","appId": "dev.dock.portal","contentEncoding": "gzip","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_1132216902566133761410/artifact/1613975740738_do_11322168163282944012605.zip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-02-22T06:37:55.328+0000","contentType": "Resource","se_gradeLevels": ["Class 10"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132216902566133761410","audience": ["Student"],"visibility": "Default","author": "color4","maxQuestions": 1,"consumerId": "7411b6bd-89f3-40ec-98d1-229dc64ce77d","learningOutcome": ["Understand the importance of values in life"],"index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "ae94b68c-a535-4dce-8e7a-fb9662b0ad68","version": 2,"se_subjects": ["Environmental Studies"],"license": "CC BY 4.0","prevState": "Review","size": 370363.0,"lastPublishedOn": "2021-02-22T06:37:52.529+0000","name": "MCQMCQ","topic": ["Animals"],"status": "Live","code": "0cbae0f8-e3eb-1d31-e2e5-0337dc7d697d","credentials": {"enabled": "No"},"prevStatus": "Processing","origin": "do_11322168163282944012605","streamingUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/ecml/do_1132216902566133761410-latest","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-02-22T06:37:41.405+0000","se_boards": ["CBSE"],"processId": "fbcec2af-cb7a-4ed1-8683-ff04b475947e","contentDisposition": "inline","lastUpdatedOn": "2021-02-22T06:37:52.447+0000","originData": {"identifier": "do_11322168163282944012605","repository": "https://dock.sunbirded.org/api/content/v1/read/do_11322168163282944012605"},"collectionId": "do_11322165488181248012584","dialcodeRequired": "No","editorVersion": 
3,"lastStatusChangedOn": "2021-02-22T06:37:55.314+0000","creator": "color4","os": ["All"],"questionCategories": ["MCQ"],"cloudStorageKey": "content/do_1132216902566133761410/artifact/1613975740738_do_11322168163282944012605.zip","se_FWIds": ["ekstep_ncert_k-12"],"marks": "1","bloomsLevel": ["Understand"],"pkgVersion": 1.0,"versionKey": "1613975872447","idealScreenDensity": "hdpi","framework": "ekstep_ncert_k-12","depth": 5,"s3Key": "ecar_files/do_1132216902566133761410/mcqmcq_1613975872529_do_1132216902566133761410_1.0.ecar","lastSubmittedOn": "2021-02-22T06:37:51.179+0000","createdBy": "0ce5b67e-b48e-489b-a818-e938e8bfc14b","se_topics": ["Animals"],"compatibilityLevel": 1,"itemSetPreviewUrl": "https://dockstorage.blob.core.windows.net/sunbird-content-dock/content/do_11322168163282944012605/artifact/do_11322168163282944012605_1613975739805.pdf","board": "CBSE","programId": "b2433a00-74cd-11eb-9f3c-f39a9ab9f5ce"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.464+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "Yes","identifier": "do_1132833371214970881710","lastStatusChangedOn": "2021-05-20T08:58:33.464+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113464","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 4,"compatibilityLevel": 1,"name": "dsffgdg","topic": [],"status": "Draft"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.468+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215298561714","lastStatusChangedOn": "2021-05-20T08:58:33.468+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": 
"org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113468","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 3,"compatibilityLevel": 1,"name": "5.2.1 Respiratory System","topic": ["Look and say","Role Of The Sense Organs"],"status": "Draft"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.476+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215953921722","lastStatusChangedOn": "2021-05-20T08:58:33.476+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 2,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113476","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 2,"compatibilityLevel": 1,"name": "5.2 Organ Systems","topic": [],"status": "Draft"},{"ownershipType": ["createdBy"],"parent": "do_1132833371215462401716","copyright": "Sunbird","previewUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132344630588948481134/test-874-kb.mp4","subject": ["Math"],"channel": "b00bc992ef25f1a9a8d63291e20efc8d","downloadUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132344630588948481134/untitled-content_1615535618825_do_1132344630588948481134_2.0.ecar","organisation": ["Sunbird"],"showNotification": true,"language": ["English"],"mimeType": "video/mp4","variants": {"spine": {"ecarUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/ecar_files/do_1132344630588948481134/untitled-content_1615535619590_do_1132344630588948481134_2.0_spine.ecar","size": 35301.0}},"objectType": "Content","se_mediums": ["English"],"gradeLevel": ["Grade 1"],"appIcon": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_1132344630588948481134/artifact/2a4b8abd789184932399d222d03d9b5c.thumb.jpg","primaryCategory": "Learning Resource","appId": 
"dev.sunbird.portal","contentEncoding": "identity","artifactUrl": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/assets/do_1132344630588948481134/test-874-kb.mp4","lockKey": "1d28d983-2704-44bd-803e-5feb4e62da62","sYS_INTERNAL_LAST_UPDATED_ON": "2021-03-12T08:10:34.367+0000","contentType": "Resource","se_gradeLevels": ["Grade 1"],"trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132344630588948481134","lastUpdatedBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","audience": ["Student"],"visibility": "Default","consumerId": "273f3b18-5dda-4a27-984a-060c7cd398d3","index": 3,"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"lastPublishedBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","version": 2,"se_subjects": ["Math"],"license": "CC BY 4.0","prevState": "Review","size": 883817.0,"lastPublishedOn": "2021-03-12T07:53:38.825+0000","name": "Untitled Content","status": "Live","code": "8851e754-6e20-44d4-9070-e1a9664163ad","credentials": {"enabled": "No"},"prevStatus": "Review","description": "updated desrciption","streamingUrl": "https://sunbirddevmedia-inct.streaming.media.azure.net/40ae07aa-069e-4056-8f2b-014bc9a2d21b/test-874-kb.ism/manifest(format=m3u8-aapl-v3)","medium": ["English"],"posterImage": "CONTENT_STORAGE_BASE_PATHsunbird-content-dev/content/do_11299104587967692816/artifact/2a4b8abd789184932399d222d03d9b5c.jpg","idealScreenSize": "normal","createdOn": "2021-03-12T07:44:01.371+0000","se_boards": ["NCERT"],"copyrightYear": 2020,"contentDisposition": "inline","licenseterms": "By creating any type of content (resources, books, courses etc.) on DIKSHA, you consent to publish it under the Creative Commons License Framework. 
Please choose the applicable creative commons license you wish to apply to your content.","lastUpdatedOn": "2021-03-12T07:53:38.505+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-03-12T07:53:38.494+0000","createdFor": ["ORG_001"],"creator": "Reviewer User","os": ["All"],"se_FWIds": ["NCFCOPY"],"pkgVersion": 2.0,"versionKey": "1615535618583","idealScreenDensity": "hdpi","framework": "NCFCOPY","depth": 2,"s3Key": "ecar_files/do_1132344630588948481134/untitled-content_1615535618825_do_1132344630588948481134_2.0.ecar","lastSubmittedOn": "2021-03-12T07:53:10.005+0000","createdBy": "95e4942d-cbe8-477d-aebd-ad8e6de4bfc8","compatibilityLevel": 1,"board": "NCERT","resourceType": "Learn"}],"contentDisposition": "inline","lastUpdatedOn": "2021-05-20T08:58:33.470+0000","contentEncoding": "gzip","contentType": "TextBookUnit","dialcodeRequired": "No","identifier": "do_1132833371215462401716","lastStatusChangedOn": "2021-05-20T08:58:33.470+0000","audience": ["Student"],"os": ["All"],"visibility": "Parent","discussionForum": {"enabled": "Yes"},"index": 1,"mediaType": "content","osId": "org.ekstep.launcher","languageCode": ["en"],"version": 2,"versionKey": "1621501113470","license": "CC BY 4.0","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 1,"compatibilityLevel": 1,"name": "5. 
Human Body","topic": [],"status": "Draft"}],"appId": "dev.sunbird.portal","contentEncoding": "gzip","sYS_INTERNAL_LAST_UPDATED_ON": "2021-05-20T09:12:06.988+0000","contentType": "TextBook","trackable": {"enabled": "No","autoBatch": "No"},"identifier": "do_1132828073514926081518","audience": ["Student"],"visibility": "Default","consumerId": "01814e02-fc27-4165-ae53-3d1816e55817","childNodes": ["do_1132339274094346241120","do_1132833371215872001720","do_1132833371215134721712","do_1132833371215462401716","do_113223967141863424174","do_1132833371214970881710","do_1132833371215708161718","do_1132372524622561281279","do_1132338069147811841118","do_1132833371215298561714","do_1132833371215953921722","do_11322383952751820816","do_1132216902566133761410","do_1132344630588948481134"],"discussionForum": {"enabled": "Yes"},"mediaType": "content","osId": "org.ekstep.quiz.app","languageCode": ["en"],"version": 2,"license": "CC BY 4.0","name": "TestCSVUpload","status": "Draft","code": "org.sunbird.yhqB6L","credentials": {"enabled": "No"},"description": "Enter description for TextBook","medium": ["English"],"idealScreenSize": "normal","createdOn": "2021-05-19T15:00:44.279+0000","contentDisposition": "inline","additionalCategories": ["Textbook"],"lastUpdatedOn": "2021-05-20T07:10:32.805+0000","dialcodeRequired": "No","lastStatusChangedOn": "2021-05-19T15:00:44.279+0000","createdFor": ["0126825293972439041"],"creator": "Book Creator","os": ["All"],"versionKey": "1621501113536","idealScreenDensity": "hdpi","framework": "tn_k-12","depth": 0,"createdBy": "8454cb21-3ce9-4e30-85b5-fade097880d8","compatibilityLevel": 1,"userConsent": "Yes","board": "State (Tamil Nadu)","resourceType": "Book"}""".stripMargin + } + + def getQuestionData(): util.HashMap[String, AnyRef] = { + new util.HashMap[String, AnyRef]() { + put("mimeType","application/vnd.sunbird.question") + put("editorState", new util.HashMap[String, AnyRef](){ + put("answer","
\"b1d586453b4e0b1535bf55332347934e\"
") + put("question","
\"b1d586453b4e0b1535bf55332347934e\"
") + put("solutions", new util.ArrayList[AnyRef]() { + new util.HashMap[String, AnyRef](){ + put("id", "4eb6d17a-94e1-5742-5344-41201b049ee6") + put("type", "html") + put("value", "
\"b1d586453b4e0b1535bf55332347934e\"
") + } + }) + }) + put("answer", "
\"b1d586453b4e0b1535bf55332347934e\"
") + put("solutions", new util.ArrayList[AnyRef]() { + new util.HashMap[String, AnyRef](){ + put("id", "4eb6d17a-94e1-5742-5344-41201b049ee6") + put("type", "html") + put("value", "
\"b1d586453b4e0b1535bf55332347934e\"
") + } + }) + put("body", "
\"b1d586453b4e0b1535bf55332347934e\"
") + put("responseDeclaration", new util.HashMap[String, AnyRef](){ + put("response1", new util.HashMap[String, AnyRef](){ + put("type", "string") + }) + }) + } + } + + + private def getNode(): Node = { + val node = new Node() + node.setGraphId("domain") + node.setIdentifier("do_1234") + node.setNodeType("DATA_NODE") + node.setObjectType("Content") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "do_1234") + put("mimeType", "application/pdf") + put("status", "Draft") + put("contentType", "Resource") + put("primaryCategory", "Learning Resource") + put("name", "Copy content") + put("artifactUrl", "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/assets/do_212959046431154176151/hindi3.pdf") + put("channel", "in.ekstep") + put("code", "xyz") + put("versionKey", "1234") + } + }) + node + } + + private def getRelativeNode(): Node = { + val node = new Node() + node.setGraphId("domain") + node.setIdentifier("do_1234") + node.setNodeType("DATA_NODE") + node.setObjectType("Content") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "do_1234") + put("mimeType", "application/pdf") + put("status", "Draft") + put("contentType", "Resource") + put("primaryCategory", "Learning Resource") + put("name", "Copy content") + put("artifactUrl", "CONTENT_STORAGE_BASE_PATH/content/assets/do_212959046431154176151/hindi3.pdf") + put("channel", "in.ekstep") + put("code", "xyz") + put("versionKey", "1234") + } + }) + node + } +} + + diff --git a/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/ScalaJsonUtilTest.scala b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/ScalaJsonUtilTest.scala new file mode 100644 index 000000000..59d634ea9 --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/test/scala/org/sunbird/graph/util/ScalaJsonUtilTest.scala @@ -0,0 +1,52 @@ +package org.sunbird.graph.util + +import java.util + +import 
com.fasterxml.jackson.databind.exc.{InvalidDefinitionException, MismatchedInputException} +import org.apache.commons.lang3.StringUtils +import org.codehaus.jackson.JsonProcessingException +import org.scalatest.{FlatSpec, Matchers} + +class ScalaJsonUtilTest extends FlatSpec with Matchers { + + "serializing an empty object" should "Throw InvalidDefinitionException" in { + assertThrows[InvalidDefinitionException] { // Result type: Assertion + ScalaJsonUtil.serialize(new Object) + } + } + + "serializing an empty object" should "Throw JsonProcessingException" ignore { + assertThrows[JsonProcessingException] { // Result type: Assertion + ScalaJsonUtil.serialize(new util.HashMap()) + } + } + + "serializing a valid Map object" should "Should serialize the object" in { + val value: String = ScalaJsonUtil.serialize(Map("identifier" -> "do_1234", "status" -> "Draft")) + assert(StringUtils.equalsIgnoreCase(value, "{\"identifier\":\"do_1234\",\"status\":\"Draft\"}")) + } + + "serializing a valid List object" should "Should serialize the object" in { + val value: String = ScalaJsonUtil.serialize(List("identifier", "do_1234", "status", "Draft")) + assert(StringUtils.equalsIgnoreCase(value, "[\"identifier\",\"do_1234\",\"status\",\"Draft\"]")) + } + + "deserializing a stringified map" should "Should deserialize the string to map" in { + val value: Map[String, AnyRef] = ScalaJsonUtil.deserialize[Map[String, AnyRef]]("{\"identifier\":\"do_1234\",\"status\":\"Draft\"}") + assert(value != null) + assert(value.getOrElse("status", "").asInstanceOf[String] == "Draft") + } + + "deserializing a stringified list to map" should "Should throw Exception" in { + assertThrows[MismatchedInputException] { + ScalaJsonUtil.deserialize[Map[String, AnyRef]]("[\"identifier\",\"do_1234\",\"status\",\"Draft\"]") + } + } + + "deserializing a stringified list" should "Should deserialize the string to list" in { + val value:List[String] = 
ScalaJsonUtil.deserialize[List[String]]("[\"identifier\",\"do_1234\",\"status\",\"Draft\"]") + assert(value != null) + assert(value.size == 4) + } + +} diff --git a/ontology-engine/graph-dac-api/pom.xml b/ontology-engine/graph-dac-api/pom.xml index 62669e72e..698acbba8 100644 --- a/ontology-engine/graph-dac-api/pom.xml +++ b/ontology-engine/graph-dac-api/pom.xml @@ -8,6 +8,11 @@ graph-dac-api + + org.scala-lang + scala-library + ${scala.version} + org.sunbird graph-common @@ -68,10 +73,18 @@ + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.1 + + 11 + + org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Node.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Node.java index 5a35a4a95..d843a8f5a 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Node.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Node.java @@ -112,7 +112,9 @@ public void setMetadata(Map metadata) { } public List getOutRelations() { - return outRelations; + if (!CollectionUtils.isEmpty(outRelations)) + return outRelations; + else return new ArrayList<>(); } public void setOutRelations(List outRelations) { @@ -120,7 +122,9 @@ public void setOutRelations(List outRelations) { } public List getInRelations() { - return inRelations; + if (!CollectionUtils.isEmpty(inRelations)) + return inRelations; + else return new ArrayList<>(); } public void setInRelations(List inRelations) { diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Relation.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Relation.java index 097ee969b..b3e3e0088 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Relation.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Relation.java @@ -245,6 +245,12 @@ 
public Map getMetadata() { return new HashMap(); } + public Relation updateMetadata(Map metadata) { + if (!MapUtils.isEmpty(metadata)) + this.metadata = metadata; + return this; + } + public void setMetadata(Map metadata) { this.metadata = metadata; } diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/SubGraph.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/SubGraph.java new file mode 100644 index 000000000..452ab304c --- /dev/null +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/SubGraph.java @@ -0,0 +1,33 @@ +package org.sunbird.graph.dac.model; + +import java.io.Serializable; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class SubGraph implements Serializable { + + private Map nodes; + private List relations; + + public SubGraph(Map nodes, List relations) { + this.nodes = nodes; + this.relations = relations; + } + + public Map getNodes() { + return nodes; + } + + public void setNodes(Map nodes) { + this.nodes = nodes; + } + + public List getRelations() { + return relations; + } + + public void setRelations(List relations) { + this.relations = relations; + } +} diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/GraphAsyncOperations.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/GraphAsyncOperations.java index 8ec0f6beb..e58f27ed0 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/GraphAsyncOperations.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/GraphAsyncOperations.java @@ -3,12 +3,20 @@ import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.neo4j.driver.v1.Driver; +import org.neo4j.driver.v1.Record; import org.neo4j.driver.v1.Session; +import org.neo4j.driver.v1.Value; import org.sunbird.common.dto.Response; 
import org.sunbird.common.dto.ResponseHandler; import org.sunbird.common.exception.ClientException; import org.sunbird.common.exception.MiddlewareException; import org.sunbird.common.exception.ServerException; +import org.sunbird.graph.common.enums.SystemProperties; +import org.sunbird.graph.dac.model.Node; +import org.sunbird.graph.dac.model.Relation; +import org.sunbird.graph.dac.model.SubGraph; +import org.sunbird.graph.dac.util.Neo4jNodeUtil; + import org.sunbird.graph.service.common.DACErrorCodeConstants; import org.sunbird.graph.service.common.DACErrorMessageConstants; import org.sunbird.graph.service.common.GraphOperation; @@ -18,10 +26,9 @@ import scala.compat.java8.FutureConverters; import scala.concurrent.Future; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CompletionStage; +import java.util.stream.Collectors; public class GraphAsyncOperations { @@ -95,4 +102,85 @@ public static Future removeRelation(String graphId, List getSubGraph(String graphId, String nodeId, Integer depth) { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name(), + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Get SubGraph Operation Failed.]"); + if (StringUtils.isBlank(nodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name(), + DACErrorMessageConstants.INVALID_IDENTIFIER + " | [Please Provide Node Identifier.]"); + if (null == depth) depth = 5; + + Driver driver = DriverUtil.getDriver(graphId, GraphOperation.WRITE); + TelemetryManager.log("Driver Initialised. 
| [Graph Id: " + graphId + "]"); + String query = GraphQueryGenerationUtil.generateSubGraphCypherQuery(graphId, nodeId, depth); + try (Session session = driver.session()) { + CompletionStage cs = session.runAsync(query) + .thenCompose(fn -> fn.listAsync()).thenApply(records -> { + Map relationMap = new HashMap(); + Set nodes = new HashSet<>(); + Set relations = new HashSet<>(); + Map startNodeMap = new HashMap<>(); + Map endNodeMap = new HashMap<>(); + for (Record record : records) { + org.neo4j.driver.v1.types.Node startNode = record.get("startNode").asNode(); + org.neo4j.driver.v1.types.Node endNode = record.get("endNode").asNode(); + String relationName = record.get("relationName").asString(); + Map relationMetadata = record.get("relationMedatadata").asMap(); + nodes.add(Neo4jNodeUtil.getNode(graphId, startNode, relationMap, startNodeMap, endNodeMap)); + nodes.add(Neo4jNodeUtil.getNode(graphId, endNode, relationMap, startNodeMap, endNodeMap)); + //Relation MetaData + Relation relData = new Relation(startNode.get(SystemProperties.IL_UNIQUE_ID.name()).asString(), relationName, endNode.get(SystemProperties.IL_UNIQUE_ID.name()).asString()); + relData.setMetadata(relationMetadata); + relData.setStartNodeObjectType(startNode.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).asString()); + relData.setStartNodeName(startNode.get("name").asString()); + relData.setStartNodeType(startNode.get(SystemProperties.IL_SYS_NODE_TYPE.name()).asString()); + relData.setEndNodeObjectType(endNode.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).asString()); + relData.setEndNodeName(endNode.get("name").asString()); + relData.setEndNodeType(endNode.get(SystemProperties.IL_SYS_NODE_TYPE.name()).asString()); + relations.add(relData); + } + Set uniqNodes = nodes.stream().collect(Collectors.groupingBy(n -> n.getIdentifier())).values().stream().map(a -> a.get(0)).collect(Collectors.toSet()); + Map nodeMap = new HashMap<>(); + for (Node nodeObj: uniqNodes) { + 
nodeMap.put(nodeObj.getIdentifier(), nodeObj); + } + List relationsList = new ArrayList<>(); + relationsList = relations.stream().collect(Collectors.toList()); + return new SubGraph(nodeMap, relationsList); + }).exceptionally(error -> { + error.printStackTrace(); + throw new ServerException(DACErrorCodeConstants.SERVER_ERROR.name(), + "Error! Something went wrong while creating node object. ", error.getCause()); + }); + return FutureConverters.toScala(cs); + } catch (Throwable e) { + e.printStackTrace(); + if (!(e instanceof MiddlewareException)) { + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name(), + DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage(), e); + } else { + throw e; + } + } + } + + private static void getRecordValues(Record record, Map nodeMap, Map relationMap, + Map startNodeMap, Map endNodeMap) { + if (null != startNodeMap) { + Value startNodeValue = record.get("startNode"); + if (null != startNodeValue && StringUtils.equalsIgnoreCase("NODE", startNodeValue.type().name())) { + org.neo4j.driver.v1.types.Node startNode = record.get("startNode").asNode(); + nodeMap.put(startNode.id(), startNode); + } + } + if (null != endNodeMap) { + Value endNodeValue = record.get("endNode"); + if (null != endNodeValue && StringUtils.equalsIgnoreCase("NODE", endNodeValue.type().name())) { + org.neo4j.driver.v1.types.Node endNode = record.get("endNode").asNode(); + nodeMap.put(endNode.id(), endNode); + } + } + } } diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/SearchAsyncOperations.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/SearchAsyncOperations.java index 6e4b5d4db..8827fb8a3 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/SearchAsyncOperations.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/operation/SearchAsyncOperations.java @@ -193,9 +193,7 @@ public static 
Future getNodeByUniqueId(String graphId, String nodeId, Bool node= Neo4jNodeUtil.getNode(graphId, (org.neo4j.driver.v1.types.Node) entry.getValue(), relationMap, startNodeMap, endNodeMap); } - if (StringUtils.equalsIgnoreCase("Concept", node.getObjectType())) { - TelemetryManager.info("Saving concept to in-memory cache: "+node.getIdentifier()); - } + return node; }).exceptionally(error -> { if(error.getCause() instanceof NoSuchRecordException || error.getCause() instanceof ResourceNotFoundException) diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/util/GraphQueryGenerationUtil.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/util/GraphQueryGenerationUtil.java index b3ae5f546..460202ab6 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/util/GraphQueryGenerationUtil.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/service/util/GraphQueryGenerationUtil.java @@ -834,4 +834,11 @@ public static String generateDeleteBulkRelationsCypherQuery(String graphId) { return query.toString(); } + public static String generateSubGraphCypherQuery(String graphId, String nodeId, Integer depth) { + StringBuilder query = new StringBuilder(); + query.append("MATCH p=(n:"+graphId+" {"+SystemProperties.IL_UNIQUE_ID.name()+":'"+nodeId+"'})-[r1*0.."+depth+"]->(m)-[r2]->(l)"); + query.append(" with p, m as startNode, l as endNode, collect(r2) as r unwind r as relation return type(relation) as relationName, relation as relationMedatadata, startNode , endNode;"); + return query.toString(); + } + } diff --git a/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsExceptionTest.java b/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsExceptionTest.java index 6271c7789..5feda11f3 100644 --- 
a/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsExceptionTest.java +++ b/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsExceptionTest.java @@ -23,13 +23,13 @@ import java.util.Map; import java.util.concurrent.CompletionException; +@Ignore public class NodeAsyncOperationsExceptionTest { protected static GraphDatabaseService graphDb = null; - @BeforeClass public static void setup() throws Exception { - startEmbeddedNeo4jWithReadOnly(); +// startEmbeddedNeo4jWithReadOnly(); } @AfterClass @@ -60,7 +60,6 @@ public void testUpdateNodesExpectServerException() throws Exception { Map result = Await.result(resultFuture, Duration.apply("30s")); } - private static void startEmbeddedNeo4jWithReadOnly() { GraphDatabaseSettings.BoltConnector bolt = GraphDatabaseSettings.boltConnector("0"); graphDb = new GraphDatabaseFactory() diff --git a/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsTest.java b/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsTest.java index 79335e156..797ebe1d0 100644 --- a/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsTest.java +++ b/ontology-engine/graph-dac-api/src/test/java/org/sunbird/graph/service/operation/NodeAsyncOperationsTest.java @@ -3,6 +3,7 @@ import com.mashape.unirest.http.JsonNode; import org.junit.Assert; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; import org.sunbird.common.dto.Request; import org.sunbird.common.exception.ClientException; @@ -21,7 +22,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CompletionException; - +@Ignore public class NodeAsyncOperationsTest extends BaseTest { @BeforeClass diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala 
b/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala deleted file mode 100644 index f09c086ee..000000000 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala +++ /dev/null @@ -1,306 +0,0 @@ -package org.sunbird.graph.schema - -import java.util -import java.util.concurrent.CompletionException - -import org.apache.commons.collections4.{CollectionUtils, MapUtils} -import org.apache.commons.lang3.StringUtils -import org.sunbird.cache.impl.RedisCache -import org.sunbird.common.JsonUtils -import org.sunbird.common.dto.Request -import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.{Node, Relation} - -import scala.collection.JavaConversions._ -import scala.concurrent.{ExecutionContext, Future} - -object DefinitionNode { - - def validate(request: Request, setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { - val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] - val version: String = request.getContext.get("version").asInstanceOf[String] - val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] - val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(request.getRequest.getOrDefault("primaryCategory", "").asInstanceOf[String], - schemaName, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, objectCategoryDefinition) - definition.validateRequest(request) - val inputNode = definition.getNode(request.getRequest) - updateRelationMetadata(inputNode) - definition.validate(inputNode, "create", setDefaultValue) recoverWith { case e: CompletionException => throw e.getCause} - } - - def getExternalProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: 
ExecutionContext, oec: OntologyEngineContext): List[String] = { - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.getExternalProps() - } - - def fetchJsonProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.fetchJsonProps() - } - - def getInRelations(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[Map[String, AnyRef]] = { - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.getInRelations() - } - - def getOutRelations(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[Map[String, AnyRef]] = { - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.getOutRelations() - } - - def getRelationDefinitionMap(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): Map[String, AnyRef] = { - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.getRelationDefinitionMap() - } - - def getRelationsMap(request: Request, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): java.util.HashMap[String, AnyRef] = { - val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] - val version: String = request.getContext.get("version").asInstanceOf[String] - val schemaName: String = 
request.getContext.get("schemaName").asInstanceOf[String] - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.getRelationsMap() - } - - def getRestrictedProperties(graphId: String, version: String, operation: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.getRestrictPropsConfig(operation) - } - - def getNode(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { - val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] - val definition = DefinitionFactory.getDefinition(request.getContext.get("graph_id").asInstanceOf[String] - , schemaName, request.getContext.get("version").asInstanceOf[String]) - definition.getNode(request.get("identifier").asInstanceOf[String], "read", if(request.getRequest.containsKey("mode")) request.get("mode").asInstanceOf[String] else "read") - } - - @throws[Exception] - def validate(identifier: String, request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { - val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] - val version: String = request.getContext.get("version").asInstanceOf[String] - val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String].replaceAll("image", "") - val reqVersioning: String = request.getContext.getOrDefault("versioning", "").asInstanceOf[String] - val versioning = if(StringUtils.isBlank(reqVersioning)) None else Option(reqVersioning) - val req:util.HashMap[String, AnyRef] = new util.HashMap[String, AnyRef](request.getRequest) - val skipValidation: Boolean = {if(request.getContext.containsKey("skipValidation")) request.getContext.get("skipValidation").asInstanceOf[Boolean] else false} - val definition = 
DefinitionFactory.getDefinition(graphId, schemaName, version) - definition.getNode(identifier, "update", null, versioning).map(dbNode => { - val schema = dbNode.getObjectType.toLowerCase.replace("image", "") - val primaryCategory: String = if(null != dbNode.getMetadata) dbNode.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String] else "" - val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(primaryCategory, schema, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) - val categoryDefinition = DefinitionFactory.getDefinition(graphId, schema, version, objectCategoryDefinition) - categoryDefinition.validateRequest(request) - resetJsonProperties(dbNode, graphId, version, schema, objectCategoryDefinition) - val inputNode: Node = categoryDefinition.getNode(dbNode.getIdentifier, request.getRequest, dbNode.getNodeType) - val dbRels = getDBRelations(graphId, schema, version, req, dbNode, objectCategoryDefinition) - setRelationship(dbNode, inputNode, dbRels) - if (dbNode.getIdentifier.endsWith(".img") && StringUtils.equalsAnyIgnoreCase("Yes", dbNode.getMetadata.getOrDefault("isImageNodeCreated", "").asInstanceOf[String])) { - inputNode.getMetadata.put("versionKey", dbNode.getMetadata.getOrDefault("versionKey", "")) - dbNode.getMetadata.remove("isImageNodeCreated") - } - dbNode.getMetadata.putAll(inputNode.getMetadata) - if (MapUtils.isNotEmpty(inputNode.getExternalData)) { - if (MapUtils.isNotEmpty(dbNode.getExternalData)) - dbNode.getExternalData.putAll(inputNode.getExternalData) - else - dbNode.setExternalData(inputNode.getExternalData) - } - - if (!skipValidation) - categoryDefinition.validate(dbNode, "update") - else Future (dbNode) - - }).flatMap(f => f) - } - - def postProcessor(request: Request, node: Node)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Node = { - val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] - val version: String = 
request.getContext.get("version").asInstanceOf[String] - val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] - val primaryCategory: String = if(null!=node.getMetadata) node.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String] else "" - val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(primaryCategory, schemaName, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) - val categoryDefinition = DefinitionFactory.getDefinition(graphId, schemaName, version, objectCategoryDefinition) - val edgeKey = categoryDefinition.getEdgeKey() - if (null != edgeKey && !edgeKey.isEmpty) { - val metadata = node.getMetadata - val cacheKey = "edge_" + request.getObjectType.toLowerCase() - val data = metadata.containsKey(edgeKey) match { - case true => List[String](metadata.get(edgeKey).asInstanceOf[String]) - case _ => List[String]() - } - if (!data.isEmpty) { - metadata.get("status") match { - case "Live" => RedisCache.addToList(cacheKey, data) - case "Retired" => RedisCache.removeFromList(cacheKey, data) - } - } - } - node - } - - private def setRelationship(dbNode: Node, inputNode: Node, dbRels:util.Map[String, util.List[Relation]]): Unit = { - var addRels: util.List[Relation] = new util.ArrayList[Relation]() - var delRels: util.List[Relation] = new util.ArrayList[Relation]() - val inRel: util.List[Relation] = dbNode.getInRelations - val outRel: util.List[Relation] = dbNode.getOutRelations - val inRelReq: util.List[Relation] = if(CollectionUtils.isNotEmpty(inputNode.getInRelations)) new util.ArrayList[Relation](inputNode.getInRelations) else new util.ArrayList[Relation]() - val outRelReq: util.List[Relation] = if(CollectionUtils.isNotEmpty(inputNode.getOutRelations)) new util.ArrayList[Relation](inputNode.getOutRelations) else new util.ArrayList[Relation]() - if (CollectionUtils.isNotEmpty(inRelReq)) { - if(CollectionUtils.isNotEmpty(dbRels.get("in"))){ - 
inRelReq.addAll(dbRels.get("in")) - inputNode.setInRelations(inRelReq) - } - getNewRelationsList(inRel, inRelReq, addRels, delRels) - } - if (CollectionUtils.isNotEmpty(outRelReq)) { - if(CollectionUtils.isNotEmpty(dbRels.get("out"))){ - outRelReq.addAll(dbRels.get("out")) - inputNode.setOutRelations(outRelReq) - } - getNewRelationsList(outRel, outRelReq, addRels, delRels) - } - if (CollectionUtils.isNotEmpty(addRels)) { - dbNode.setAddedRelations(addRels) - updateRelationMetadata(dbNode) - } - if (CollectionUtils.isNotEmpty(delRels)) - dbNode.setDeletedRelations(delRels) - } - - private def getNewRelationsList(dbRelations: util.List[Relation], newRelations: util.List[Relation], addRels: util.List[Relation], delRels: util.List[Relation]): Unit = { - val relList = new util.ArrayList[String] - for (rel <- newRelations) { - addRels.add(rel) - val relKey = rel.getStartNodeId + rel.getRelationType + rel.getEndNodeId - if (!relList.contains(relKey)) relList.add(relKey) - } - if (null != dbRelations && !dbRelations.isEmpty) { - for (rel <- dbRelations) { - val relKey = rel.getStartNodeId + rel.getRelationType + rel.getEndNodeId - if (!relList.contains(relKey)) delRels.add(rel) - } - } - } - - def updateRelationMetadata(node: Node): Unit = { - var relOcr = new util.HashMap[String, Integer]() - val rels = node.getAddedRelations - for (rel <- rels) { - val relKey = rel.getStartNodeObjectType + rel.getRelationType + rel.getEndNodeObjectType - if (relOcr.containsKey(relKey)) - relOcr.put(relKey, relOcr.get(relKey) + 1) - else relOcr.put(relKey, 1) - - if (relKey.contains("hasSequenceMember")) { - rel.setMetadata(new util.HashMap[String, AnyRef]() {{ - put("IL_SEQUENCE_INDEX", relOcr.get(relKey)); - }}) - } else rel.setMetadata(new util.HashMap[String, AnyRef]()) - } - node.setAddedRelations(rels) - } - - def resetJsonProperties(node: Node, graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: 
ExecutionContext, oec: OntologyEngineContext):Node = { - val jsonPropList = fetchJsonProps(graphId, version, schemaName, ocd) - if(!jsonPropList.isEmpty){ - node.getMetadata.entrySet().map(entry => { - if(jsonPropList.contains(entry.getKey)){ - entry.getValue match { - case value: String => entry.setValue(JsonUtils.deserialize(value.asInstanceOf[String], classOf[Object])) - case _ => entry - } - } - }) - } - node - } - - def getDBRelations(graphId:String, schemaName:String, version:String, request: util.Map[String, AnyRef], dbNode: Node, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext):util.Map[String, util.List[Relation]] = { - val inRelations = new util.ArrayList[Relation]() - val outRelations = new util.ArrayList[Relation]() - val relDefMap = getRelationDefinitionMap(graphId, version, schemaName, ocd); - if (null != dbNode) { - if (CollectionUtils.isNotEmpty(dbNode.getInRelations)) { - for (inRel <- dbNode.getInRelations()) { - val key = inRel.getRelationType() + "_in_" + inRel.getStartNodeObjectType() - if (relDefMap.containsKey(key)) { - val value = relDefMap.get(key).get - if (!request.containsKey(value)) { - inRelations.add(inRel) - } - } - } - } - if (CollectionUtils.isNotEmpty(dbNode.getOutRelations)) { - for (outRel <- dbNode.getOutRelations()) { - val key = outRel.getRelationType() + "_out_" + outRel.getEndNodeObjectType() - if (relDefMap.containsKey(key)) { - val value = relDefMap.get(key).get - if (!request.containsKey(value)) { - outRelations.add(outRel) - } - } - } - } - } - new util.HashMap[String, util.List[Relation]](){{ - put("in", inRelations) - put("out",outRelations) - }} - } - - def validateContentNodes(nodes: List[Node], graphId: String, schemaName: String, version: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[List[Node]] = { - val futures = nodes.map(node => { - val ocd = 
ObjectCategoryDefinition(node.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String], node.getObjectType, node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String]) - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) - definition.validate(node, "update") recoverWith { case e: CompletionException => throw e.getCause } - }) - Future.sequence(futures) - } - def updateJsonPropsInNodes(nodes: List[Node], graphId: String, schemaName: String, version: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext) = { - nodes.map(node => { - val schema = node.getObjectType.toLowerCase.replace("image", "") - val jsonProps = fetchJsonProps(graphId, version, schema) - val metadata = node.getMetadata - metadata.filter(entry => jsonProps.contains(entry._1)).map(entry => node.getMetadata.put(entry._1, convertJsonProperties(entry, jsonProps))) - }) - } - def convertJsonProperties(entry: (String, AnyRef), jsonProps: scala.List[String]) = { - try { - JsonUtils.deserialize(entry._2.asInstanceOf[String], classOf[Object]) - } catch { - case e: Exception => entry._2 - } - } - - def getAllCopyScheme(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { - val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] - val version: String = request.getContext.get("version").asInstanceOf[String] - val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version) - definition.getAllCopySchemes() - } - - def getCopySchemeContentType(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): java.util.HashMap[String, Object] = { - val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] - val version: String = request.getContext.get("version").asInstanceOf[String] - val schemaName: String = 
request.getContext.get("schemaName").asInstanceOf[String] - val definition = DefinitionFactory.getDefinition(graphId, schemaName, version) - definition.getCopySchemeMap(request) - } - - - def getPrimaryCategory(request: java.util.Map[String, AnyRef], schemaName: String, channel: String = "all"): String = { - if(null != request && request.containsKey("primaryCategory")) { - val categoryName = request.get("primaryCategory").asInstanceOf[String] - ObjectCategoryDefinitionMap.prepareCategoryId(categoryName, schemaName, channel) - } else "" - } - - def getObjectCategoryDefinition(primaryCategory: String, objectType: String, channel: String = "all"): ObjectCategoryDefinition = { - if(StringUtils.isNotBlank(primaryCategory)) - ObjectCategoryDefinition(primaryCategory, objectType, channel) - else ObjectCategoryDefinition() - } -} - diff --git a/ontology-engine/graph-engine_2.11/pom.xml b/ontology-engine/graph-engine_2.12/pom.xml similarity index 97% rename from ontology-engine/graph-engine_2.11/pom.xml rename to ontology-engine/graph-engine_2.12/pom.xml index a85149bf0..781056c1e 100644 --- a/ontology-engine/graph-engine_2.11/pom.xml +++ b/ontology-engine/graph-engine_2.12/pom.xml @@ -9,12 +9,12 @@ 4.0.0 - graph-engine_2.11 + graph-engine_2.12 org.sunbird - graph-core_2.11 + graph-core_2.12 1.0-SNAPSHOT @@ -75,7 +75,7 @@ net.alchim31.maven scala-maven-plugin - 4.4.0 + 3.2.2 ${scala.version} false diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/engine/CaseClasses.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/engine/CaseClasses.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/engine/CaseClasses.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/engine/CaseClasses.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala similarity index 95% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala index 5baf6639a..eadb37333 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/external/store/ExternalStore.scala @@ -29,7 +29,10 @@ class ExternalStore(keySpace: String , table: String , primaryKey: java.util.Lis import scala.collection.JavaConverters._ for ((key, value) <- request.asScala) { propsMapping.getOrElse(key, "") match { - case "blob" => insertQuery.value(key, QueryBuilder.fcall("textAsBlob", value)) + case "blob" => value match { + case value: String => insertQuery.value(key, QueryBuilder.fcall("textAsBlob", value)) + case _ => insertQuery.value(key, QueryBuilder.fcall("textAsBlob", JsonUtils.serialize(value))) + } case "string" => request.getOrDefault(key, "") match { case value: String => insertQuery.value(key, value) case _ => insertQuery.value(key, JsonUtils.serialize(request.getOrDefault(key, ""))) @@ -95,7 +98,7 @@ class ExternalStore(keySpace: String , table: String , primaryKey: java.util.Lis def delete(identifiers: List[String])(implicit ec: ExecutionContext): Future[Response] = { val delete = QueryBuilder.delete() - import scala.collection.JavaConversions._ + import scala.collection.JavaConverters._ val deleteQuery = delete.from(keySpace, table).where(QueryBuilder.in(primaryKey.get(0), seqAsJavaList(identifiers))) try { val session: Session = CassandraConnector.getSession @@ -123,7 +126,8 @@ class ExternalStore(keySpace: String , table: String , primaryKey: java.util.Lis }) } val selectQuery = select.from(keySpace, table) - import scala.collection.JavaConversions._ 
+ import scala.collection.JavaConverters._ + import scala.collection.convert.ImplicitConversions._ val clause: Clause = QueryBuilder.in(primaryKey.get(0), seqAsJavaList(identifiers)) selectQuery.where.and(clause) try { diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/health/HealthCheckManager.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/health/HealthCheckManager.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/health/HealthCheckManager.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/health/HealthCheckManager.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/nodes/DataNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala similarity index 96% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/nodes/DataNode.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala index 1e7b83178..1cd022a11 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/nodes/DataNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala @@ -14,13 +14,13 @@ import org.sunbird.graph.dac.model.{Filter, MetadataCriterion, Node, Relation, S import org.sunbird.graph.schema.{DefinitionDTO, DefinitionFactory, DefinitionNode} import org.sunbird.parseq.Task -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} object DataNode { - + private val SYSTEM_UPDATE_ALLOWED_CONTENT_STATUS = List("Live", "Unlisted") @throws[Exception] @@ -40,7 +40,7 @@ object DataNode { def update(request: Request, dataModifier: (Node) => Node = defaultDataModifier)(implicit oec: OntologyEngineContext, ec: 
ExecutionContext): Future[Node] = { val identifier: String = request.getContext.get("identifier").asInstanceOf[String] DefinitionNode.validate(identifier, request).map(node => { - request.getContext().put("schemaName", node.getObjectType.toLowerCase.replace("image", "")) + request.getContext().put("schemaName", node.getObjectType.toLowerCase.replace("image", "")) val response = oec.graphService.upsertNode(request.graphId, dataModifier(node), request) response.map(node => DefinitionNode.postProcessor(request, node)).map(result => { val futureList = Task.parallel[Response]( @@ -58,7 +58,12 @@ object DataNode { val objectType : String = request.getContext.get("objectType").asInstanceOf[String] request.getContext.put("schemaName", schema) val fields: List[String] = Optional.ofNullable(request.get("fields").asInstanceOf[util.List[String]]).orElse(new util.ArrayList[String]()).toList - val extPropNameList = DefinitionNode.getExternalProps(request.getContext.get("graph_id").asInstanceOf[String], request.getContext.get("version").asInstanceOf[String], schema) + val version: String = if (null != node && null != node.getMetadata) { + val schemaVersion: String = node.getMetadata.getOrDefault("schemaVersion", "0.0").asInstanceOf[String] + val scVer = if (StringUtils.isNotBlank(schemaVersion) && schemaVersion.toDouble != 0.0) schemaVersion else request.getContext.get("version").asInstanceOf[String] + scVer + } else request.getContext.get("version").asInstanceOf[String] + val extPropNameList = DefinitionNode.getExternalProps(request.getContext.get("graph_id").asInstanceOf[String], version, schema) if (CollectionUtils.isNotEmpty(extPropNameList) && null != fields && fields.exists(field => extPropNameList.contains(field))) populateExternalProperties(fields, node, request, extPropNameList) else diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala new file mode 100644 index 000000000..e7605e4e4 --- /dev/null +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala @@ -0,0 +1,136 @@ +package org.sunbird.graph.path + +import org.apache.commons.collections4.CollectionUtils +import org.apache.commons.lang3.StringUtils +import org.sunbird.common.dto.Request +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.common.enums.SystemProperties +import org.sunbird.graph.dac.model.{Node, Relation, SubGraph} +import org.sunbird.graph.nodes.DataNode +import org.sunbird.graph.schema.{DefinitionFactory, DefinitionNode, ObjectCategoryDefinition} +import org.sunbird.graph.utils.NodeUtil +import org.sunbird.graph.utils.NodeUtil.{convertJsonProperties, handleKeyNames} + +import java.util +import java.util.{ArrayList, HashMap, Map} +import scala.collection.JavaConversions.mapAsScalaMap +import scala.collection.JavaConverters._ +import scala.concurrent.{ExecutionContext, Future, future} +import scala.util.Try + +object DataSubGraph { + + + def read(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[SubGraph] = { + val identifier: String = request.get("identifier").asInstanceOf[String] + val depth: Int = request.getOrDefault("depth", 5).asInstanceOf[Int] + val subGraph: Future[SubGraph] = oec.graphService.getSubGraph(request.graphId, identifier, depth) + subGraph + } + + def readSubGraph(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Unit] = { + val identifier: String = request.get("identifier").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val schemaVersion = request.getContext.get("version").asInstanceOf[String] + val definition = DefinitionFactory.getDefinition(request.getContext.get("graph_id").asInstanceOf[String], schemaName, schemaVersion) + 
val getRelationsMapKeys = definition.getRelationsMap().keySet() + val dataMap = new util.HashMap[String, AnyRef] + val relMap = new util.HashMap[String, AnyRef] + readSubGraphData(request, dataMap, relMap).map(sub => { + println("subGraphData out " + sub) + sub + }) + } + + private def readSubGraphData(request: Request, dataMap: util.HashMap[String, AnyRef], relMap: util.HashMap[String, AnyRef])(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[util.HashMap[String, AnyRef]] = { + val finalDataMap = new util.HashMap[String, AnyRef] + val identifier: String = request.get("identifier").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val schemaVersion = request.getContext.get("version").asInstanceOf[String] + var isRoot = false + if (isRoot != null && StringUtils.equalsAnyIgnoreCase(request.getContext.get("isRoot").asInstanceOf[String], "true")) + isRoot = true + val node: Future[Node] = oec.graphService.getNodeByUniqueId(request.graphId, identifier, false, request) + node.map(n => { + val newDataMap = new util.HashMap[String, AnyRef] + newDataMap.put("metadata", n.getMetadata) + newDataMap.put("outRelations", n.getOutRelations) + finalDataMap.put(n.getIdentifier, newDataMap) + finalDataMap + }) + finalDataMap.map(entry => { + val mapData = entry._2.asInstanceOf[java.util.Map[String, AnyRef]].asScala + println("mapData " + mapData.toString()) + val outRelations: util.List[Relation] = mapData.getOrElse("outRelations", new util.ArrayList[Relation]).asInstanceOf[util.List[Relation]] + for (rel <- outRelations.asScala) { + val subReq = new Request() + val context = new util.HashMap[String, Object]() + context.putAll(request.getContext) + subReq.setContext(context) + subReq.getContext.put("schemaName", rel.getEndNodeObjectType.toLowerCase()) + subReq.getContext.put("objectType", rel.getEndNodeObjectType) + subReq.getContext.put("isRoot", "true") + subReq.put("identifier", rel.getEndNodeId) + 
println("readSubGraphData "+ readSubGraphData(subReq, dataMap, relMap)) + } + }) + Future{finalDataMap} + } + + private def getRelationData(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[util.HashMap[String, AnyRef]] = { + val relMap = new util.HashMap[String, AnyRef] + val identifier: String = request.get("identifier").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val schemaVersion = request.getContext.get("version").asInstanceOf[String] + var isRoot = false + if (isRoot != null && StringUtils.equalsAnyIgnoreCase(request.getContext.get("isRoot").asInstanceOf[String], "true")) + isRoot = true + val node: Future[Node] = oec.graphService.getNodeByUniqueId(request.graphId, identifier, false, request) + node.map(n => { + val finalMetadata = serialize(n, null, schemaName, schemaVersion, isRoot) + relMap.put(n.getIdentifier, finalMetadata) + relMap + }) + } + + private def serialize(node: Node, fields: util.List[String], schemaName: String, schemaVersion: String, isRoot: Boolean = false)(implicit oec: OntologyEngineContext, ec: ExecutionContext): util.Map[String, AnyRef] = { + val metadataMap = node.getMetadata + val objectCategoryDefinition: ObjectCategoryDefinition = DefinitionNode.getObjectCategoryDefinition(node.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String], node.getObjectType.toLowerCase().replace("image", ""), node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String]) + val jsonProps = DefinitionNode.fetchJsonProps(node.getGraphId, schemaVersion, node.getObjectType.toLowerCase().replace("image", ""), objectCategoryDefinition) + val updatedMetadataMap: util.Map[String, AnyRef] = metadataMap.entrySet().asScala.filter(entry => null != entry.getValue).map((entry: util.Map.Entry[String, AnyRef]) => handleKeyNames(entry, fields) -> convertJsonProperties(entry, jsonProps)).toMap.asJava + val definitionMap = 
DefinitionNode.getRelationDefinitionMap(node.getGraphId, schemaVersion, node.getObjectType.toLowerCase().replace("image", ""), objectCategoryDefinition).asJava + val finalMetadata = new util.HashMap[String, AnyRef]() + if(!isRoot) { + finalMetadata.put("objectType", node.getObjectType) + finalMetadata.putAll(updatedMetadataMap) + if (CollectionUtils.isNotEmpty(fields)) + finalMetadata.keySet.retainAll(fields) + finalMetadata.put("identifier", node.getIdentifier) + } + println("definitionMap "+ definitionMap) + val relMap: util.Map[String, util.List[util.Map[String, AnyRef]]] = geOutRelationMap(node, updatedMetadataMap, definitionMap) + finalMetadata.putAll(relMap) + finalMetadata + } + + + private def geOutRelationMap(node: Node, updatedMetadataMap: util.Map[String, AnyRef], relationMap: util.Map[String, AnyRef]): util.Map[String, util.List[util.Map[String, AnyRef]]] = { + val outRelations: util.List[Relation] = { + if (CollectionUtils.isEmpty(node.getOutRelations)) new util.ArrayList[Relation] else node.getOutRelations + } + val relMap = new util.HashMap[String, util.List[util.Map[String, AnyRef]]] + for (rel <- outRelations.asScala) { + val relKey: String = rel.getRelationType + "_out_" + rel.getEndNodeObjectType + if (relMap.containsKey(relationMap.get(relKey))) relMap.get(relationMap.get(relKey)).add(NodeUtil.populateRelationMaps(rel, "out")) + else { + if (null != relationMap.get(relKey)) { + relMap.put(relationMap.get(relKey).asInstanceOf[String], new util.ArrayList[util.Map[String, AnyRef]]() { + add(NodeUtil.populateRelationMaps(rel, "out")) + }) + } + } + } + relMap + } + +} diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/AbstractRelation.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/AbstractRelation.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/AbstractRelation.scala rename to 
ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/AbstractRelation.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/AssociationRelation.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/AssociationRelation.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/AssociationRelation.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/AssociationRelation.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/IRelation.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/IRelation.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/IRelation.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/IRelation.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/RelationHandler.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/RelationHandler.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/RelationHandler.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/RelationHandler.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/SequenceMembershipRelation.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/SequenceMembershipRelation.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/relations/SequenceMembershipRelation.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/relations/SequenceMembershipRelation.scala diff --git 
a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/CategoryDefinitionValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/CategoryDefinitionValidator.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/CategoryDefinitionValidator.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/CategoryDefinitionValidator.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/CoreDomainObject.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/CoreDomainObject.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/CoreDomainObject.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/CoreDomainObject.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala similarity index 97% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala index 64a3cd5b5..ab228612a 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala @@ -1,7 +1,6 @@ package org.sunbird.graph.schema import java.util - import org.apache.commons.collections4.MapUtils import org.apache.commons.lang3.StringUtils import org.sunbird.common.dto.Request @@ -127,4 +126,8 @@ class DefinitionDTO(graphId: String, schemaName: String, version: String = "1.0" } } + def fetchOneOfProps(): List[String] = { + if(schemaValidator.getConfig().hasPath("oneOfProps")) 
schemaValidator.getConfig.getStringList("oneOfProps").asScala.toList else List[String]() + } + } diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionFactory.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionFactory.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/DefinitionFactory.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionFactory.scala diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala new file mode 100644 index 000000000..daa00b3bc --- /dev/null +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala @@ -0,0 +1,321 @@ +package org.sunbird.graph.schema + +import com.typesafe.config.Config + +import java.util +import java.util.concurrent.CompletionException +import org.apache.commons.collections4.{CollectionUtils, MapUtils} +import org.apache.commons.lang3.StringUtils +import org.sunbird.cache.impl.RedisCache +import org.sunbird.common.JsonUtils +import org.sunbird.common.dto.Request +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.dac.model.{Node, Relation} + +import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ +import scala.concurrent.{ExecutionContext, Future} + +object DefinitionNode { + + def validate(request: Request, setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val objectCategoryDefinition: 
ObjectCategoryDefinition = getObjectCategoryDefinition(request.getRequest.getOrDefault("primaryCategory", "").asInstanceOf[String], + schemaName, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, objectCategoryDefinition) + definition.validateRequest(request) + val inputNode = definition.getNode(request.getRequest) + updateRelationMetadata(inputNode) + definition.validate(inputNode, "create", setDefaultValue) recoverWith { case e: CompletionException => throw e.getCause } + } + + def getExternalProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.getExternalProps() + } + + def fetchJsonProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.fetchJsonProps() + } + + def getInRelations(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[Map[String, AnyRef]] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.getInRelations() + } + + def getOutRelations(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[Map[String, AnyRef]] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.getOutRelations() + } + + def getRelationDefinitionMap(graphId: 
String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): Map[String, AnyRef] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.getRelationDefinitionMap() + } + + def getRelationsMap(request: Request, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): java.util.HashMap[String, AnyRef] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.getRelationsMap() + } + + def getRestrictedProperties(graphId: String, version: String, operation: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.getRestrictPropsConfig(operation) + } + + def getNode(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val definition = DefinitionFactory.getDefinition(request.getContext.get("graph_id").asInstanceOf[String] + , schemaName, request.getContext.get("version").asInstanceOf[String]) + val disableCache: Option[Boolean] = if (request.getRequest.containsKey("disableCache")) request.get("disableCache").asInstanceOf[Option[Boolean]] else None + definition.getNode(request.get("identifier").asInstanceOf[String], "read", if (request.getRequest.containsKey("mode")) request.get("mode").asInstanceOf[String] else "read", None, disableCache) + } + + 
@throws[Exception] + def validate(identifier: String, request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String].replaceAll("image", "") + val reqVersioning: String = request.getContext.getOrDefault("versioning", "").asInstanceOf[String] + val versioning = if (StringUtils.isBlank(reqVersioning)) None else Option(reqVersioning) + val req: util.HashMap[String, AnyRef] = new util.HashMap[String, AnyRef](request.getRequest) + val skipValidation: Boolean = { + if (request.getContext.containsKey("skipValidation")) request.getContext.get("skipValidation").asInstanceOf[Boolean] else false + } + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version) + val removeProps = request.getContext.getOrDefault("removeProps", new util.ArrayList[String]()).asInstanceOf[util.List[String]] + definition.getNode(identifier, "update", null, versioning, None).map(dbNode => { + val schema = dbNode.getObjectType.toLowerCase.replace("image", "") + val primaryCategory: String = if (null != dbNode.getMetadata) dbNode.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String] else "" + val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(primaryCategory, schema, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) + val categoryDefinition = DefinitionFactory.getDefinition(graphId, schema, version, objectCategoryDefinition) + categoryDefinition.validateRequest(request) + resetJsonProperties(dbNode, graphId, version, schema, objectCategoryDefinition) + val inputNode: Node = categoryDefinition.getNode(dbNode.getIdentifier, request.getRequest, dbNode.getNodeType) + val dbRels = getDBRelations(graphId, schema, version, req, dbNode, 
objectCategoryDefinition) + setRelationship(dbNode, inputNode, dbRels) + if (dbNode.getIdentifier.endsWith(".img") && StringUtils.equalsAnyIgnoreCase("Yes", dbNode.getMetadata.getOrDefault("isImageNodeCreated", "").asInstanceOf[String])) { + inputNode.getMetadata.put("versionKey", dbNode.getMetadata.getOrDefault("versionKey", "")) + dbNode.getMetadata.remove("isImageNodeCreated") + } + dbNode.getMetadata.putAll(inputNode.getMetadata) + if (MapUtils.isNotEmpty(inputNode.getExternalData)) { + if (MapUtils.isNotEmpty(dbNode.getExternalData)) + dbNode.getExternalData.putAll(inputNode.getExternalData) + else + dbNode.setExternalData(inputNode.getExternalData) + } + if (!removeProps.isEmpty) removeProps.toList.foreach(prop => dbNode.getMetadata.remove(prop)) + val validatedNode = if (!skipValidation) categoryDefinition.validate(dbNode, "update") else Future(dbNode) + validatedNode.map(node => { + if (!removeProps.isEmpty) removeProps.toList.foreach(prop => dbNode.getMetadata.put(prop, null)) + node + }) + + }).flatMap(f => f) + } + + def postProcessor(request: Request, node: Node)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Node = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val primaryCategory: String = if (null != node.getMetadata) node.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String] else "" + val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(primaryCategory, schemaName, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) + val categoryDefinition = DefinitionFactory.getDefinition(graphId, schemaName, version, objectCategoryDefinition) + val edgeKey = categoryDefinition.getEdgeKey() + if (null != edgeKey && !edgeKey.isEmpty) { + val metadata = node.getMetadata + val cacheKey = 
"edge_" + request.getObjectType.toLowerCase() + val data = metadata.containsKey(edgeKey) match { + case true => List[String](metadata.get(edgeKey).asInstanceOf[String]) + case _ => List[String]() + } + if (!data.isEmpty) { + metadata.get("status") match { + case "Live" => RedisCache.addToList(cacheKey, data) + case "Retired" => RedisCache.removeFromList(cacheKey, data) + } + } + } + node + } + + private def setRelationship(dbNode: Node, inputNode: Node, dbRels: util.Map[String, util.List[Relation]]): Unit = { + var addRels: util.List[Relation] = new util.ArrayList[Relation]() + var delRels: util.List[Relation] = new util.ArrayList[Relation]() + val inRel: util.List[Relation] = dbNode.getInRelations + val outRel: util.List[Relation] = dbNode.getOutRelations + val inRelReq: util.List[Relation] = if (CollectionUtils.isNotEmpty(inputNode.getInRelations)) new util.ArrayList[Relation](inputNode.getInRelations) else new util.ArrayList[Relation]() + val outRelReq: util.List[Relation] = if (CollectionUtils.isNotEmpty(inputNode.getOutRelations)) new util.ArrayList[Relation](inputNode.getOutRelations) else new util.ArrayList[Relation]() + if (CollectionUtils.isNotEmpty(inRelReq)) { + if (CollectionUtils.isNotEmpty(dbRels.get("in"))) { + inRelReq.addAll(dbRels.get("in")) + inputNode.setInRelations(inRelReq) + } + getNewRelationsList(inRel, inRelReq, addRels, delRels) + } + if (CollectionUtils.isNotEmpty(outRelReq)) { + if (CollectionUtils.isNotEmpty(dbRels.get("out"))) { + outRelReq.addAll(dbRels.get("out")) + inputNode.setOutRelations(outRelReq) + } + getNewRelationsList(outRel, outRelReq, addRels, delRels) + } + if (CollectionUtils.isNotEmpty(addRels)) { + dbNode.setAddedRelations(addRels) + updateRelationMetadata(dbNode) + } + if (CollectionUtils.isNotEmpty(delRels)) + dbNode.setDeletedRelations(delRels) + } + + private def getNewRelationsList(dbRelations: util.List[Relation], newRelations: util.List[Relation], addRels: util.List[Relation], delRels: util.List[Relation]): 
Unit = { + val relList = new util.ArrayList[String] + for (rel <- newRelations) { + addRels.add(rel) + val relKey = rel.getStartNodeId + rel.getRelationType + rel.getEndNodeId + if (!relList.contains(relKey)) relList.add(relKey) + } + if (null != dbRelations && !dbRelations.isEmpty) { + for (rel <- dbRelations) { + val relKey = rel.getStartNodeId + rel.getRelationType + rel.getEndNodeId + if (!relList.contains(relKey)) delRels.add(rel) + } + } + } + + def updateRelationMetadata(node: Node): Unit = { + var relOcr = new util.HashMap[String, Integer]() + val rels = node.getAddedRelations + for (rel <- rels) { + val relKey = rel.getStartNodeObjectType + rel.getRelationType + rel.getEndNodeObjectType + if (relOcr.containsKey(relKey)) + relOcr.put(relKey, relOcr.get(relKey) + 1) + else relOcr.put(relKey, 1) + if (relKey.contains("hasSequenceMember")) { + val index = if (rel.getMetadata.containsKey("index")) rel.getMetadata.get("index").asInstanceOf[Integer] else relOcr.get(relKey) + rel.setMetadata(Map[String, AnyRef]("IL_SEQUENCE_INDEX" -> index).asJava) + } else rel.setMetadata(new util.HashMap[String, AnyRef]()) + } + node.setAddedRelations(rels) + } + + def resetJsonProperties(node: Node, graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): Node = { + val jsonPropList = fetchJsonProps(graphId, version, schemaName, ocd) + if (!jsonPropList.isEmpty) { + node.getMetadata.entrySet().map(entry => { + if (jsonPropList.contains(entry.getKey)) { + entry.getValue match { + case value: String => entry.setValue(JsonUtils.deserialize(value.asInstanceOf[String], classOf[Object])) + case _ => entry + } + } + }) + } + node + } + + def getDBRelations(graphId: String, schemaName: String, version: String, request: util.Map[String, AnyRef], dbNode: Node, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: 
OntologyEngineContext): util.Map[String, util.List[Relation]] = { + val inRelations = new util.ArrayList[Relation]() + val outRelations = new util.ArrayList[Relation]() + val relDefMap = getRelationDefinitionMap(graphId, version, schemaName, ocd); + if (null != dbNode) { + if (CollectionUtils.isNotEmpty(dbNode.getInRelations)) { + for (inRel <- dbNode.getInRelations()) { + val key = inRel.getRelationType() + "_in_" + inRel.getStartNodeObjectType() + if (relDefMap.containsKey(key)) { + val value = relDefMap.get(key).get + if (!request.containsKey(value)) { + inRelations.add(inRel) + } + } + } + } + if (CollectionUtils.isNotEmpty(dbNode.getOutRelations)) { + for (outRel <- dbNode.getOutRelations()) { + val key = outRel.getRelationType() + "_out_" + outRel.getEndNodeObjectType() + if (relDefMap.containsKey(key)) { + val value = relDefMap.get(key).get + if (!request.containsKey(value)) { + outRelations.add(outRel) + } + } + } + } + } + new util.HashMap[String, util.List[Relation]]() { + { + put("in", inRelations) + put("out", outRelations) + } + } + } + + def validateContentNodes(nodes: List[Node], graphId: String, schemaName: String, version: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[List[Node]] = { + val futures = nodes.map(node => { + val ocd = ObjectCategoryDefinition(node.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String], node.getObjectType, node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String]) + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.validate(node, "update") recoverWith { case e: CompletionException => throw e.getCause } + }) + Future.sequence(futures) + } + + def updateJsonPropsInNodes(nodes: List[Node], graphId: String, schemaName: String, version: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext) = { + nodes.map(node => { + val schema = node.getObjectType.toLowerCase.replace("image", "") + val jsonProps = 
fetchJsonProps(graphId, version, schema) + val metadata = node.getMetadata + metadata.filter(entry => jsonProps.contains(entry._1)).map(entry => node.getMetadata.put(entry._1, convertJsonProperties(entry, jsonProps))) + }) + } + + def convertJsonProperties(entry: (String, AnyRef), jsonProps: scala.List[String]) = { + try { + JsonUtils.deserialize(entry._2.asInstanceOf[String], classOf[Object]) + } catch { + case e: Exception => entry._2 + } + } + + def getAllCopyScheme(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version) + definition.getAllCopySchemes() + } + + def getCopySchemeContentType(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): java.util.HashMap[String, Object] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version) + definition.getCopySchemeMap(request) + } + + + def getPrimaryCategory(request: java.util.Map[String, AnyRef], schemaName: String, channel: String = "all"): String = { + if (null != request && request.containsKey("primaryCategory")) { + val categoryName = request.get("primaryCategory").asInstanceOf[String] + ObjectCategoryDefinitionMap.prepareCategoryId(categoryName, schemaName, channel) + } else "" + } + + def getObjectCategoryDefinition(primaryCategory: String, objectType: String, channel: String = "all"): ObjectCategoryDefinition = { + if (StringUtils.isNotBlank(primaryCategory)) + 
ObjectCategoryDefinition(primaryCategory, objectType, channel) + else ObjectCategoryDefinition() + } + + def fetchOneOfProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) + definition.fetchOneOfProps() + } +} + diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/FrameworkMasterCategoryMap.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/FrameworkMasterCategoryMap.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/FrameworkMasterCategoryMap.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/FrameworkMasterCategoryMap.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/IDefinition.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala similarity index 88% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/IDefinition.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala index 083c0d0fe..b173f0fe2 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/IDefinition.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala @@ -16,7 +16,7 @@ abstract class IDefinition(graphId: String, schemaName: String, version: String def validate(node: Node, operation: String = "update", setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] @throws[Exception] - def getNode(identifier: String, operation: String = "read", mode: String, versioning: Option[String] = None)(implicit oec: 
OntologyEngineContext, ec: ExecutionContext): Future[Node] + def getNode(identifier: String, operation: String = "read", mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] def getSchemaName(): String ={ schemaName diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinition.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinition.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinition.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinition.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinitionMap.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinitionMap.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinitionMap.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/ObjectCategoryDefinitionMap.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala similarity index 91% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala index 24a5a9cc1..10a11200d 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala +++ 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala @@ -58,7 +58,7 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = Future{node} } - override def getNode(identifier: String, operation: String, mode: String, versioning: Option[String] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { + override def getNode(identifier: String, operation: String, mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { val request: Request = new Request() val node: Future[Node] = oec.graphService.getNodeByUniqueId(graphId, identifier, false, request) node @@ -74,10 +74,13 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = val relData = entry._2.asInstanceOf[java.util.List[java.util.Map[String, AnyRef]]] relData.asScala.map(r => { val relation = { - if(StringUtils.equalsAnyIgnoreCase("out", direction)) + if(StringUtils.equalsAnyIgnoreCase("out", direction)) { new Relation(node.getIdentifier, relSchema.get("type").get.asInstanceOf[String], r.get("identifier").asInstanceOf[String]) - else + .updateMetadata((r.asScala - "identifier").asJava) + } else { new Relation(r.get("identifier").asInstanceOf[String], relSchema.get("type").get.asInstanceOf[String], node.getIdentifier) + .updateMetadata((r.asScala - "identifier").asJava) + } } relation }) diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala similarity index 99% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala 
index 4e74cab31..86c209efd 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/FrameworkValidator.scala @@ -13,7 +13,7 @@ import org.sunbird.graph.common.enums.SystemProperties import org.sunbird.graph.dac.model._ import org.sunbird.graph.schema.{FrameworkMasterCategoryMap, IDefinition} -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.collection.JavaConverters._ import scala.collection.Map import scala.concurrent.{ExecutionContext, Future} diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala similarity index 97% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala index a0ef9353d..fed42747a 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/PropAsEdgeValidator.scala @@ -7,7 +7,7 @@ import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.dac.model.Node import org.sunbird.graph.schema.IDefinition -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.{ExecutionContext, Future} trait PropAsEdgeValidator extends IDefinition { diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/RelationValidator.scala 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/RelationValidator.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/RelationValidator.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/RelationValidator.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/VersionKeyValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersionKeyValidator.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/VersionKeyValidator.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersionKeyValidator.scala diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala similarity index 85% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala index 36190dca2..0e81b68f5 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala +++ 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala @@ -17,7 +17,7 @@ import org.sunbird.graph.service.operation.{NodeAsyncOperations, SearchAsyncOper import org.sunbird.graph.utils.{NodeUtil, ScalaJsonUtils} import org.sunbird.telemetry.logger.TelemetryManager -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.{ExecutionContext, Future} trait VersioningNode extends IDefinition { @@ -28,11 +28,11 @@ trait VersioningNode extends IDefinition { val COLLECTION_MIME_TYPE = "application/vnd.ekstep.content-collection" - abstract override def getNode(identifier: String, operation: String, mode: String = "read", versioning: Option[String] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { + abstract override def getNode(identifier: String, operation: String, mode: String = "read", versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { operation match { case "update" => getNodeToUpdate(identifier, versioning); - case "read" => getNodeToRead(identifier, mode) - case _ => getNodeToRead(identifier, mode) + case "read" => getNodeToRead(identifier, mode, disableCache) + case _ => getNodeToRead(identifier, mode, disableCache) } } @@ -49,7 +49,7 @@ trait VersioningNode extends IDefinition { }).flatMap(f => f) } - private def getNodeToRead(identifier: String, mode: String)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { + private def getNodeToRead(identifier: String, mode: String, disableCache: Option[Boolean])(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { if ("edit".equalsIgnoreCase(mode)) { val imageNode = super.getNode(identifier + IMAGE_SUFFIX, "read", mode) imageNode recoverWith { @@ -61,14 +61,20 @@ trait VersioningNode extends IDefinition { } } } else { - val 
cacheKey = getSchemaName().toLowerCase() + ".cache.enable" - if (Platform.getBoolean(cacheKey, false)) { - val ttl: Integer = if (Platform.config.hasPath(getSchemaName().toLowerCase() + ".cache.ttl")) Platform.config.getInt(getSchemaName().toLowerCase() + ".cache.ttl") else 86400 - getCachedNode(identifier, ttl) - } else - super.getNode(identifier, "read", mode) + if(disableCache.nonEmpty){ + if(disableCache.get) super.getNode(identifier, "read", mode) + else getNodeFromCache(identifier) + } else{ + val cacheKey = getSchemaName().toLowerCase() + ".cache.enable" + if (Platform.getBoolean(cacheKey, false)) getNodeFromCache(identifier) + else super.getNode(identifier, "read", mode) + } } } + private def getNodeFromCache(identifier: String)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node]= { + val ttl: Integer = if (Platform.config.hasPath(getSchemaName().toLowerCase() + ".cache.ttl")) Platform.config.getInt(getSchemaName().toLowerCase() + ".cache.ttl") else 86400 + getCachedNode(identifier, ttl) + } private def getEditableNode(identifier: String, node: Node)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { val status = node.getMetadata.get("status").asInstanceOf[String] diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala similarity index 90% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala index 79c672cc9..dc0d53869 100644 --- a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala @@ -14,7 +14,6 @@ import org.sunbird.graph.schema.{DefinitionNode, ObjectCategoryDefinition, Objec import 
scala.collection.JavaConverters import scala.collection.JavaConverters._ -import scala.collection.JavaConversions._ import scala.concurrent.ExecutionContext object NodeUtil { @@ -124,7 +123,7 @@ object NodeUtil { val outRelations:util.List[Relation] = { if (CollectionUtils.isEmpty(node.getOutRelations)) new util.ArrayList[Relation] else node.getOutRelations } val relMap = new util.HashMap[String, util.List[util.Map[String, AnyRef]]] for (rel <- inRelations.asScala) { - val relKey:String = rel.getRelationType + "_in_" + rel.getEndNodeObjectType + val relKey:String = rel.getRelationType + "_in_" + rel.getStartNodeObjectType if (relMap.containsKey(relationMap.get(relKey))) relMap.get(relationMap.get(relKey)).add(populateRelationMaps(rel, "in")) else { if(null != relationMap.get(relKey)) { @@ -158,21 +157,23 @@ object NodeUtil { } def populateRelationMaps(rel: Relation, direction: String): util.Map[String, AnyRef] = { - if("out".equalsIgnoreCase(direction)) { - val objectType = rel.getEndNodeObjectType.replace("Image", "") - val relData = Map("identifier" -> rel.getEndNodeId.replace(".img", ""), - "name" -> rel.getEndNodeName, - "objectType" -> objectType, - "relation" -> rel.getRelationType) ++ relationObjectAttributes(objectType).map(key => (key -> rel.getEndNodeMetadata.get(key))).toMap - mapAsJavaMap(relData) - } else { - val objectType = rel.getStartNodeObjectType.replace("Image", "") - val relData = Map("identifier" -> rel.getStartNodeId.replace(".img", ""), - "name" -> rel.getStartNodeName, - "objectType" -> objectType, - "relation" -> rel.getRelationType) ++ relationObjectAttributes(objectType).map(key => (key -> rel.getStartNodeMetadata.get(key))).toMap - mapAsJavaMap(relData) - } + if ("out".equalsIgnoreCase(direction)) { + val objectType = rel.getEndNodeObjectType.replace("Image", "") + val relData = Map("identifier" -> rel.getEndNodeId.replace(".img", ""), + "name" -> rel.getEndNodeName, + "objectType" -> objectType, + "relation" -> rel.getRelationType) ++ 
relationObjectAttributes(objectType).map(key => (key -> rel.getEndNodeMetadata.get(key))).toMap + val indexMap = if(rel.getRelationType.equals("hasSequenceMember")) Map("index" -> rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX",1.asInstanceOf[Number]).asInstanceOf[Number]) else Map() + val completeRelData = relData ++ indexMap + mapAsJavaMap(completeRelData) + } else { + val objectType = rel.getStartNodeObjectType.replace("Image", "") + val relData = Map("identifier" -> rel.getStartNodeId.replace(".img", ""), + "name" -> rel.getStartNodeName, + "objectType" -> objectType, + "relation" -> rel.getRelationType) ++ relationObjectAttributes(objectType).map(key => (key -> rel.getStartNodeMetadata.get(key))).toMap + mapAsJavaMap(relData) + } } def getLanguageCodes(node: Node): util.List[String] = { diff --git a/ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/utils/ScalaJsonUtils.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/ScalaJsonUtils.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/main/scala/org/sunbird/graph/utils/ScalaJsonUtils.scala rename to ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/ScalaJsonUtils.scala diff --git a/ontology-engine/graph-engine_2.11/src/test/resources/application.conf b/ontology-engine/graph-engine_2.12/src/test/resources/application.conf similarity index 99% rename from ontology-engine/graph-engine_2.11/src/test/resources/application.conf rename to ontology-engine/graph-engine_2.12/src/test/resources/application.conf index b5fb5b0cb..8446d3dd6 100644 --- a/ontology-engine/graph-engine_2.11/src/test/resources/application.conf +++ b/ontology-engine/graph-engine_2.12/src/test/resources/application.conf @@ -464,7 +464,7 @@ max.thumbnail.size.pixels=150 play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" //schema.base_path = 
"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/" collection.image.migration.enabled=true diff --git a/ontology-engine/graph-engine_2.11/src/test/resources/cassandra-unit.yaml b/ontology-engine/graph-engine_2.12/src/test/resources/cassandra-unit.yaml similarity index 100% rename from ontology-engine/graph-engine_2.11/src/test/resources/cassandra-unit.yaml rename to ontology-engine/graph-engine_2.12/src/test/resources/cassandra-unit.yaml diff --git a/ontology-engine/graph-engine_2.12/src/test/resources/logback.xml b/ontology-engine/graph-engine_2.12/src/test/resources/logback.xml new file mode 100644 index 000000000..73529d622 --- /dev/null +++ b/ontology-engine/graph-engine_2.12/src/test/resources/logback.xml @@ -0,0 +1,28 @@ + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/BaseSpec.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/BaseSpec.scala similarity index 99% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/BaseSpec.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/BaseSpec.scala index 830e612c5..83359dedb 100644 --- a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/BaseSpec.scala +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/BaseSpec.scala @@ -1,7 +1,6 @@ package org.sunbird.graph import java.io.File -import java.util import com.datastax.driver.core.Session import org.apache.commons.io.FileUtils @@ -134,7 +133,7 @@ class BaseSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll { node.setIdentifier("board") node.setNodeType("DATA_NODE") node.setObjectType("Category") - node.setMetadata(new util.HashMap[String, AnyRef]() { + node.setMetadata(new java.util.HashMap[String, AnyRef]() { { put("code", "board") put("orgIdFieldName", "boardIds") diff --git 
a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala similarity index 97% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala index 26e759a8f..e112df270 100644 --- a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/health/TestHealthCheckManager.scala @@ -1,11 +1,12 @@ package org.sunbird.graph.health +import org.scalatest.Ignore import org.sunbird.common.dto.Response import org.sunbird.common.exception.ResponseCode import org.sunbird.graph.BaseSpec import scala.concurrent.Future - +@Ignore class TestHealthCheckManager extends BaseSpec { "check health api" should "return true" in { diff --git a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala similarity index 96% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala index e8aaf9029..b1382701e 100644 --- a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/nodes/TestDataNode.scala @@ -1,8 +1,8 @@ package org.sunbird.graph.nodes import java.util - import org.neo4j.graphdb.Result +import org.scalatest.Ignore import org.sunbird.cache.impl.RedisCache import org.sunbird.common.JsonUtils import org.sunbird.common.dto.{Request, Response, ResponseHandler} @@ -13,7 +13,7 @@ 
import org.sunbird.graph.utils.ScalaJsonUtils import scala.concurrent.Future - +@Ignore class TestDataNode extends BaseSpec { def getContextMap(): java.util.Map[String, AnyRef] = { @@ -732,6 +732,45 @@ class TestDataNode extends BaseSpec { recoverToSucceededIf[ClientException](DataNode.search(request)) } + "update node with valid data and delete some unwanted data" should "update node" in { + val request = new Request() + request.setObjectType("Content") + request.setContext(getContextMap()) + + request.put("code", "test") + request.put("name", "testResource") + request.put("mimeType", "application/pdf") + request.put("contentType", "Resource") + request.put("description", "test") + request.put("channel", "in.ekstep") + request.put("primaryCategory", "Learning Resource") + request.put("semanticVersion", "1.0") + request.put("programId", "test_prog") + val future: Future[Node] = DataNode.create(request) + future map { node => { + assert(null != node) + print(node) + assert(node.getMetadata.get("name").asInstanceOf[String].equalsIgnoreCase("testResource")) + val req = new Request() + req.setContext(getContextMap()) + req.getContext.put("identifier", node.getIdentifier) + val propsList: util.List[String] = new util.ArrayList[String](){{ + add("semanticVersion") + add("programId") + }} + req.getContext.put("removeProps", propsList) + req.put("name", "updated name") + val updateFuture = DataNode.update(req) + updateFuture map { node => { + assert(node.getMetadata.get("name").asInstanceOf[String].equalsIgnoreCase("updated name")) + assert(null == node.getMetadata.get("semanticVersion")) + assert(null == node.getMetadata.get("programId")) + } + } + } + } flatMap (f => f) + } + def getHierarchy(request: Request) : Future[Response] = { val hierarchyString: String = "'{\"identifier\": \"do_11283193441064550414\"}'" val rootHierarchy = JsonUtils.deserialize(hierarchyString, classOf[java.util.Map[String, AnyRef]]) diff --git 
a/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestDefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestDefinitionNode.scala new file mode 100644 index 000000000..0ee541cc6 --- /dev/null +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestDefinitionNode.scala @@ -0,0 +1,12 @@ +package org.sunbird.graph.schema + +import org.scalatest.Ignore +import org.sunbird.graph.BaseSpec +@Ignore +class TestDefinitionNode extends BaseSpec { + + "fetchOneOfProps" should "return oneOfProps from definition if available" in { + val oneOfProps = DefinitionNode.fetchOneOfProps("domain", "1.0","content") + assert(oneOfProps.isEmpty) + } +} diff --git a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala similarity index 95% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala index e73483caa..1efed35f5 100644 --- a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/TestObjectCategoryDefinitionMap.scala @@ -1,7 +1,8 @@ package org.sunbird.graph.schema +import org.scalatest.Ignore import org.sunbird.graph.BaseSpec - +@Ignore class TestObjectCategoryDefinitionMap extends BaseSpec { "CategoryDefinitionMap" should "store cache for given id and value" in { diff --git a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala 
similarity index 97% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala index d63a3f395..693702caa 100644 --- a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/schema/validator/TestSchemaValidator.scala @@ -1,13 +1,14 @@ package org.sunbird.graph.schema.validator -import java.util +import org.scalatest.Ignore +import java.util import org.sunbird.graph.BaseSpec import org.sunbird.graph.dac.model.Node import org.sunbird.graph.schema.DefinitionFactory import scala.concurrent.Future - +@Ignore class TestSchemaValidator extends BaseSpec { /*"check health api" should "return true" in { diff --git a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala similarity index 97% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala index 7a5fb5241..97caeff65 100644 --- a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala +++ b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/utils/NodeUtilTest.scala @@ -1,13 +1,12 @@ package org.sunbird.graph.utils import java.util - -import org.scalatest.{FlatSpec, Matchers} +import org.scalatest.{FlatSpec, Ignore, Matchers} import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.dac.model.Node import scala.concurrent.ExecutionContext - +@Ignore class NodeUtilTest extends FlatSpec with Matchers { implicit val oec: OntologyEngineContext = new OntologyEngineContext implicit 
val ec: ExecutionContext = ExecutionContext.global diff --git a/ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/utils/ScalaJsonUtilsTest.scala b/ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/utils/ScalaJsonUtilsTest.scala similarity index 100% rename from ontology-engine/graph-engine_2.11/src/test/scala/org/sunbird/graph/utils/ScalaJsonUtilsTest.scala rename to ontology-engine/graph-engine_2.12/src/test/scala/org/sunbird/graph/utils/ScalaJsonUtilsTest.scala diff --git a/ontology-engine/pom.xml b/ontology-engine/pom.xml index b44cb78ef..3dc8826f6 100644 --- a/ontology-engine/pom.xml +++ b/ontology-engine/pom.xml @@ -13,8 +13,8 @@ graph-common graph-dac-api - graph-core_2.11 - graph-engine_2.11 + graph-core_2.12 + graph-engine_2.12 parseq diff --git a/platform-core/platform-common/pom.xml b/platform-core/platform-common/pom.xml index fbfc44f9d..5a52316a6 100644 --- a/platform-core/platform-common/pom.xml +++ b/platform-core/platform-common/pom.xml @@ -99,7 +99,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/platform-core/platform-telemetry/pom.xml b/platform-core/platform-telemetry/pom.xml index cce55052a..a0d3f1ff8 100644 --- a/platform-core/platform-telemetry/pom.xml +++ b/platform-core/platform-telemetry/pom.xml @@ -44,7 +44,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/platform-core/pom.xml b/platform-core/pom.xml index 915ae188d..b93967fb7 100755 --- a/platform-core/pom.xml +++ b/platform-core/pom.xml @@ -58,7 +58,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/platform-core/schema-validator/pom.xml b/platform-core/schema-validator/pom.xml index 42ebafdfb..4379d830d 100644 --- a/platform-core/schema-validator/pom.xml +++ b/platform-core/schema-validator/pom.xml @@ -60,7 +60,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git 
a/platform-core/schema-validator/src/test/resources/application.conf b/platform-core/schema-validator/src/test/resources/application.conf index 497ded178..577191fd1 100644 --- a/platform-core/schema-validator/src/test/resources/application.conf +++ b/platform-core/schema-validator/src/test/resources/application.conf @@ -464,7 +464,7 @@ max.thumbnail.size.pixels=150 play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" //schema.base_path = "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/schemas/" collection.image.migration.enabled=true diff --git a/platform-modules/import-manager/pom.xml b/platform-modules/import-manager/pom.xml index 7914fcf5c..f9f36de48 100644 --- a/platform-modules/import-manager/pom.xml +++ b/platform-modules/import-manager/pom.xml @@ -24,7 +24,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar @@ -49,7 +49,7 @@ net.alchim31.maven scala-maven-plugin - 4.4.0 + 3.2.2 ${scala.version} false diff --git a/platform-modules/mimetype-manager/pom.xml b/platform-modules/mimetype-manager/pom.xml index ea978cbd5..748ae1730 100644 --- a/platform-modules/mimetype-manager/pom.xml +++ b/platform-modules/mimetype-manager/pom.xml @@ -10,7 +10,7 @@ 4.0.0 mimetype-manager - 2.11 + 2.12 2.7.2 @@ -23,14 +23,14 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar - org.sunbird - cloud-store-sdk - 1.4.3 + ${CLOUD_STORE_GROUP_ID} + ${CLOUD_STORE_ARTIFACT_ID} + ${CLOUD_STORE_VERSION} org.scala-lang diff --git a/platform-modules/mimetype-manager/src/main/scala/org/sunbird/cloudstore/StorageService.scala b/platform-modules/mimetype-manager/src/main/scala/org/sunbird/cloudstore/StorageService.scala index 3904888a0..753f298ae 100644 --- a/platform-modules/mimetype-manager/src/main/scala/org/sunbird/cloudstore/StorageService.scala +++ 
b/platform-modules/mimetype-manager/src/main/scala/org/sunbird/cloudstore/StorageService.scala @@ -18,36 +18,30 @@ class StorageService { @throws[Exception] def getService: BaseStorageService = { if (null == storageService) { - if (StringUtils.equalsIgnoreCase(storageType, "azure")) { - val storageKey = Platform.config.getString("azure_storage_key") - val storageSecret = Platform.config.getString("azure_storage_secret") - storageService = StorageServiceFactory.getStorageService(StorageConfig(storageType, storageKey, storageSecret)) - } else if (StringUtils.equalsIgnoreCase(storageType, "aws")) { - val storageKey = Platform.config.getString("aws_storage_key") - val storageSecret = Platform.config.getString("aws_storage_secret") - storageService = StorageServiceFactory.getStorageService(StorageConfig(storageType, storageKey, storageSecret)) - } else if (StringUtils.equalsIgnoreCase(storageType, "gcloud")) { - val storageKey = Platform.config.getString("gcloud_client_key") - val storageSecret = Platform.config.getString("gcloud_private_secret") - storageService = StorageServiceFactory.getStorageService(StorageConfig(storageType, storageKey, storageSecret)) - } -// else if (StringUtils.equalsIgnoreCase(storageType, "cephs3")) { -// val storageKey = Platform.config.getString("cephs3_storage_key") -// val storageSecret = Platform.config.getString("cephs3_storage_secret") -// val endpoint = Platform.config.getString("cephs3_storage_endpoint") -// storageService = StorageServiceFactory.getStorageService(new StorageConfig(storageType, storageKey, storageSecret, Option(endpoint))) -// } - else throw new ServerException("ERR_INVALID_CLOUD_STORAGE", "Error while initialising cloud storage") + val storageKey = Platform.config.getString("cloud_storage_key") + val storageSecret = Platform.config.getString("cloud_storage_secret") + // TODO: endPoint defined to support "cephs3". Make code changes after cloud-store-sdk 2.11 support it. 
+ val endPoint = if (Platform.config.hasPath("cloud_storage_endpoint")) Option(Platform.config.getString("cloud_storage_endpoint")) else None + storageService = StorageServiceFactory.getStorageService(new StorageConfig(storageType, storageKey, storageSecret, endPoint)) } storageService } def getContainerName: String = { - storageType match { - case "azure" => Platform.config.getString("azure_storage_container") - case "aws" => Platform.config.getString("aws_storage_container") - case "gcloud" => Platform.config.getString("gcloud_storage_bucket") - case _ => throw new ServerException("ERR_INVALID_CLOUD_STORAGE", "Container name not configured.") + if(Platform.config.hasPath("cloud_storage_container")) + Platform.config.getString("cloud_storage_container") + else + throw new ServerException("ERR_INVALID_CLOUD_STORAGE", "Cloud Storage Container name not configured.") + } + + def formatUrl(url: String): String = { + if (storageType == "oci") { + val newHostname: String = if (Platform.config.hasPath("cloud_storage_proxy_host")) Platform.config.getString("cloud_storage_proxy_host") else "" + val regex = "(?<=://)([^/]+)".r + val replacedUrl = regex.replaceAllIn(url, newHostname) + replacedUrl + } else { + url } } @@ -55,14 +49,14 @@ class StorageService { val slugFile = if (slug.getOrElse(true)) Slug.createSlugFile(file) else file val objectKey = folderName + "/" + slugFile.getName val url = getService.upload(getContainerName, slugFile.getAbsolutePath, objectKey, Option.apply(false), Option.apply(1), Option.apply(5), Option.empty) - Array[String](objectKey, url) + Array[String](objectKey, formatUrl(url)) } def uploadDirectory(folderName: String, directory: File, slug: Option[Boolean] = Option(true)): Array[String] = { val slugFile = if (slug.getOrElse(true)) Slug.createSlugFile(directory) else directory val objectKey = folderName + File.separator val url = getService.upload(getContainerName, slugFile.getAbsolutePath, objectKey, Option.apply(true), Option.apply(1), 
Option.apply(5), Option.empty) - Array[String](objectKey, url) + Array[String](objectKey, formatUrl(url)) } def uploadDirectoryAsync(folderName: String, directory: File, slug: Option[Boolean] = Option(true))(implicit ec: ExecutionContext): Future[List[String]] = { @@ -85,10 +79,7 @@ class StorageService { } def getSignedURL(key: String, ttl: Option[Int], permission: Option[String]): String = { - storageType match { - case "gcloud" => getService.getPutSignedURL(getContainerName, key, ttl, permission, Option.apply(getMimeType(key))) - case _ => getService.getSignedURL (getContainerName, key, ttl, permission) - } + getService.getPutSignedURL(getContainerName, key, ttl, permission, Option.apply(getMimeType(key))) } def getUri(key: String): String = { diff --git a/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/CollectionMimeTypeMgrImpl.scala b/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/CollectionMimeTypeMgrImpl.scala index 0f833977d..b1c7bd8bc 100644 --- a/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/CollectionMimeTypeMgrImpl.scala +++ b/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/CollectionMimeTypeMgrImpl.scala @@ -15,7 +15,7 @@ import org.sunbird.graph.utils.ScalaJsonUtils import org.sunbird.mimetype.mgr.{BaseMimeTypeManager, MimeTypeManager} import scala.collection.JavaConverters._ -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.{ExecutionContext, Future} class CollectionMimeTypeMgrImpl(implicit ss: StorageService) extends BaseMimeTypeManager with MimeTypeManager { diff --git a/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImpl.scala b/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImpl.scala index 773ca2842..e1c3c77f0 100644 --- 
a/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImpl.scala +++ b/platform-modules/mimetype-manager/src/main/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImpl.scala @@ -18,7 +18,7 @@ import org.sunbird.telemetry.logger.TelemetryManager import org.xml.sax.{InputSource, SAXException} import scala.collection.JavaConverters._ -import scala.collection.JavaConversions._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.{ExecutionContext, Future} class EcmlMimeTypeMgrImpl(implicit ss: StorageService) extends BaseMimeTypeManager with MimeTypeManager { diff --git a/platform-modules/mimetype-manager/src/test/resources/application.conf b/platform-modules/mimetype-manager/src/test/resources/application.conf index cad20dd73..5fa5a0884 100644 --- a/platform-modules/mimetype-manager/src/test/resources/application.conf +++ b/platform-modules/mimetype-manager/src/test/resources/application.conf @@ -459,8 +459,9 @@ framework.cache.read=true # Max size(width/height) of thumbnail in pixels max.thumbnail.size.pixels=150 -schema.base_path="../../../../schemas/" +schema.base_path = "../../schemas" content.hierarchy.removed_props_for_leafNodes=["collections","children","usedByContent","item_sets","methods","libraries","editorState"] +learning_content_youtube_apikey="" collection.keyspace = "hierarchy_store" content.keyspace = "content_store" @@ -469,13 +470,13 @@ collection.image.migration.enabled=true -content.h5p.library.path="https://s3.ap-south-1.amazonaws.com/ekstep-public-dev/content/templates/h5p-library-latest.zip" +content.h5p.library.path="https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/templates/h5p-library-latest.zip" # This is added to handle large artifacts sizes differently content.artifact.size.for_online=209715200 cloud_storage_type="azure" -azure_storage_key="asdfgh" -azure_storage_secret="jhgfdcvb" -azure_storage_container="sunbird-content-dev" 
+cloud_storage_key="asdfgh" +cloud_storage_secret="jhgfdcvb" +cloud_storage_container="sunbird-content-dev" validation.strictMimeType = ["image/svg+xml"] diff --git a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/cloudstore/StorageServiceTest.scala b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/cloudstore/StorageServiceTest.scala index 071a9e20c..8bdddd854 100644 --- a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/cloudstore/StorageServiceTest.scala +++ b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/cloudstore/StorageServiceTest.scala @@ -7,49 +7,49 @@ import java.io.File class StorageServiceTest extends AsyncFlatSpec with Matchers { val ss = new StorageService - "getService" should "return a Storage Service" in { - val service = ss.getService - assert(service != null) - } - - "getContainerName" should "return the container name" in { - val container = ss.getContainerName - assert(container == "sunbird-content-dev") - } - - "getSignedURL" should "return the signed url" in { - val objectKey = "content" + File.separator + "asset" + File.separator + "do_53245" + File.separator + "abc.png" - val preSignedURL = ss.getSignedURL(objectKey, Option.apply(600), Option.apply("w")) - assert(preSignedURL.contains(objectKey)) - } - - "getUri" should "return the signed url" in { - val uri = ss.getUri("content/abc.json") - assert(uri != null) - } - - "getMimeType" should "return the mimetype application/epub+zip for epub" in { - val result = ss.getMimeType("test.alert.epub") - assert(result == "application/epub+zip") - } - - "getMimeType" should "return the mimetype application/octet-stream for h5p" in { - val result = ss.getMimeType("test.alert.h5p") - assert(result == "application/octet-stream") - } - - "getMimeType" should "return the mimetype text/csv for csv" in { - val result = ss.getMimeType("test.alert.csv") - assert(result == "text/csv") - } - - "getMimeType" should "return the mimetype application/pdf for pdf" 
in { - val result = ss.getMimeType("test.alert.pdf") - assert(result == "application/pdf") - } - - "getMimeType" should "return the mimetype application/zip for zip" in { - val result = ss.getMimeType("test.alert.zip") - assert(result == "application/zip") - } + // "getService" should "return a Storage Service" in { + // val service = ss.getService + // assert(service != null) + // } + + // "getContainerName" should "return the container name" in { + // val container = ss.getContainerName + // assert(container == "sunbird-content-dev") + // } + + // "getSignedURL" should "return the signed url" in { + // val objectKey = "content" + File.separator + "asset" + File.separator + "do_53245" + File.separator + "abc.png" + // val preSignedURL = ss.getSignedURL(objectKey, Option.apply(600), Option.apply("w")) + // assert(preSignedURL.contains(objectKey)) + // } + + // "getUri" should "return the signed url" in { + // val uri = ss.getUri("content/abc.json") + // assert(uri != null) + // } + + // "getMimeType" should "return the mimetype application/epub+zip for epub" in { + // val result = ss.getMimeType("test.alert.epub") + // assert(result == "application/epub+zip") + // } + + // "getMimeType" should "return the mimetype application/octet-stream for h5p" in { + // val result = ss.getMimeType("test.alert.h5p") + // assert(result == "application/octet-stream") + // } + + // "getMimeType" should "return the mimetype text/csv for csv" in { + // val result = ss.getMimeType("test.alert.csv") + // assert(result == "text/csv") + // } + + // "getMimeType" should "return the mimetype application/pdf for pdf" in { + // val result = ss.getMimeType("test.alert.pdf") + // assert(result == "application/pdf") + // } + + // "getMimeType" should "return the mimetype application/zip for zip" in { + // val result = ss.getMimeType("test.alert.zip") + // assert(result == "application/zip") + // } } diff --git 
a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImplTest.scala b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImplTest.scala index 5cccbaea2..e3bdd7d7b 100644 --- a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImplTest.scala +++ b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/EcmlMimeTypeMgrImplTest.scala @@ -1,20 +1,18 @@ package org.sunbird.mimetype.mgr.impl -import java.io.File -import java.util - import com.google.common.io.Resources import org.scalamock.scalatest.AsyncMockFactory import org.scalatest.{AsyncFlatSpec, Matchers} -import org.sunbird.models.UploadParams import org.sunbird.cloudstore.StorageService import org.sunbird.common.dto.{Request, Response} import org.sunbird.common.exception.ClientException -import org.sunbird.graph.{GraphService, OntologyEngineContext} import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.{GraphService, OntologyEngineContext} +import org.sunbird.models.UploadParams -import scala.concurrent.{ExecutionContext, Future} - +import java.io.File +import java.util +import scala.concurrent.Future class EcmlMimeTypeMgrImplTest extends AsyncFlatSpec with Matchers with AsyncMockFactory{ implicit val ss = mock[StorageService] @@ -34,7 +32,7 @@ class EcmlMimeTypeMgrImplTest extends AsyncFlatSpec with Matchers with AsyncMock } - it should "upload ECML zip file and return public url" in { + ignore should "upload ECML zip file and return public url" in { val node = getNode() val identifier = "do_1234" implicit val ss = mock[StorageService] @@ -65,7 +63,6 @@ class EcmlMimeTypeMgrImplTest extends AsyncFlatSpec with Matchers with AsyncMock assert(true) } - it should "upload ECML with json zip file URL and return public url" in { val node = getNode() val identifier = "do_1234" @@ -82,7 +79,7 @@ class EcmlMimeTypeMgrImplTest extends AsyncFlatSpec with 
Matchers with AsyncMock assert(true) } - it should "review ECML having json body and return result" in { + ignore should "review ECML having json body and return result" in { implicit val ss = mock[StorageService] implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val graphDB = mock[GraphService] @@ -104,7 +101,7 @@ class EcmlMimeTypeMgrImplTest extends AsyncFlatSpec with Matchers with AsyncMock }) } - it should "review ECML having xml body and return result" in { + ignore should "review ECML having xml body and return result" in { implicit val ss = mock[StorageService] implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] val graphDB = mock[GraphService] diff --git a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/H5PMimeTypeMgrImplTest.scala b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/H5PMimeTypeMgrImplTest.scala index 12e2d06be..531dfa55e 100644 --- a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/H5PMimeTypeMgrImplTest.scala +++ b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/H5PMimeTypeMgrImplTest.scala @@ -90,7 +90,7 @@ class H5PMimeTypeMgrImplTest extends AsyncFlatSpec with Matchers with AsyncMockF implicit val ss = mock[StorageService] (ss.uploadFile(_:String, _: File, _: Option[Boolean])).expects(*, *, *).returns(Array(identifier, identifier)) (ss.uploadDirectoryAsync(_:String, _:File, _: Option[Boolean])(_: ExecutionContext)).expects(*, *, *, *).returns(Future(List(identifier, identifier))) - val resFuture = new H5PMimeTypeMgrImpl().upload(identifier, node,"https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/content/do_112499826618179584111/artifact/1525857774447_do_112499826618179584111.zip", None, UploadParams(Some("composed-h5p-zip"))) + val resFuture = new H5PMimeTypeMgrImpl().upload(identifier, 
node,"https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_112499826618179584111/artifact/1525857774447_do_112499826618179584111.zip", None, UploadParams(Some("composed-h5p-zip"))) resFuture.map(result => { assert("do_1234" == result.getOrElse("identifier", "do_1234")) assert(result.get("artifactUrl") != null) diff --git a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/PluginMimeTypeMgrImplTest.scala b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/PluginMimeTypeMgrImplTest.scala index d2a1278c7..f0a154fe5 100644 --- a/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/PluginMimeTypeMgrImplTest.scala +++ b/platform-modules/mimetype-manager/src/test/scala/org/sunbird/mimetype/mgr/impl/PluginMimeTypeMgrImplTest.scala @@ -44,7 +44,7 @@ class PluginMimeTypeMgrImplTest extends AsyncFlatSpec with Matchers with AsyncMo it should "upload Invalid plugin zip file url and Throw Client Exception" in { implicit val ss = new StorageService val exception = intercept[ClientException] { - new PluginMimeTypeMgrImpl().upload("org.ekstep.video", new Node(), "https://ekstep-public-dev.s3-ap-south-1.amazonaws.com/content/do_11218758555843788817/artifact/akshara_kan_1487743191313.zip", None, UploadParams()) + new PluginMimeTypeMgrImpl().upload("org.ekstep.video", new Node(), "https://sunbirddev.blob.core.windows.net/sunbird-content-dev/content/do_11218758555843788817/artifact/akshara_kan_1487743191313.zip", None, UploadParams()) } exception.getMessage shouldEqual "Error !Invalid Content Package File Structure. 
| [manifest.json should be at root location]" } diff --git a/platform-modules/pom.xml b/platform-modules/pom.xml index 4fe723a78..394e37f27 100644 --- a/platform-modules/pom.xml +++ b/platform-modules/pom.xml @@ -55,7 +55,7 @@ org.jacoco jacoco-maven-plugin - 0.7.9 + 0.8.8 default-prepare-agent diff --git a/platform-modules/url-manager/pom.xml b/platform-modules/url-manager/pom.xml index 8badd2073..158a47121 100644 --- a/platform-modules/url-manager/pom.xml +++ b/platform-modules/url-manager/pom.xml @@ -69,7 +69,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/platform-modules/url-manager/src/main/java/org/sunbird/url/util/GoogleDriveUrlUtil.java b/platform-modules/url-manager/src/main/java/org/sunbird/url/util/GoogleDriveUrlUtil.java index 1ebc9baf4..9e2375d33 100644 --- a/platform-modules/url-manager/src/main/java/org/sunbird/url/util/GoogleDriveUrlUtil.java +++ b/platform-modules/url-manager/src/main/java/org/sunbird/url/util/GoogleDriveUrlUtil.java @@ -19,8 +19,6 @@ import org.sunbird.telemetry.logger.TelemetryManager; import org.sunbird.url.common.URLErrorCodes; -import java.io.FileOutputStream; -import java.io.OutputStream; import java.util.Arrays; import java.util.HashMap; import java.util.List; diff --git a/platform-modules/url-manager/src/main/java/org/sunbird/url/util/HTTPUrlUtil.java b/platform-modules/url-manager/src/main/java/org/sunbird/url/util/HTTPUrlUtil.java index ff122593f..bbbde9abf 100644 --- a/platform-modules/url-manager/src/main/java/org/sunbird/url/util/HTTPUrlUtil.java +++ b/platform-modules/url-manager/src/main/java/org/sunbird/url/util/HTTPUrlUtil.java @@ -8,10 +8,7 @@ import org.sunbird.url.common.URLErrorCodes; import java.io.*; -import java.net.HttpURLConnection; -import java.net.URL; -import java.net.URLConnection; -import java.net.UnknownHostException; +import java.net.*; import java.util.HashMap; import java.util.Map; @@ -46,6 +43,8 @@ public static Map getMetadata(String fileUrl) { throw new 
ClientException(URLErrorCodes.ERR_INVALID_URL.name(), "Please Provide Valid Url."); } catch (FileNotFoundException e) { throw new ClientException(URLErrorCodes.ERR_FILE_NOT_FOUND.name(), "File Not Found."); + } catch (ConnectException e) { + throw new ClientException(URLErrorCodes.ERR_INVALID_URL.name(), "Please Provide Valid Url."); } catch (Exception e) { throw new ServerException(URLErrorCodes.SYSTEM_ERROR.name(), "Something Went Wrong While Processing Your Request. Please Try Again After Sometime!"); diff --git a/platform-modules/url-manager/src/test/java/org/sunbird/url/mgr/impl/YouTubeURLManagerImplTest.java b/platform-modules/url-manager/src/test/java/org/sunbird/url/mgr/impl/YouTubeURLManagerImplTest.java index 4dccab286..de60ac099 100644 --- a/platform-modules/url-manager/src/test/java/org/sunbird/url/mgr/impl/YouTubeURLManagerImplTest.java +++ b/platform-modules/url-manager/src/test/java/org/sunbird/url/mgr/impl/YouTubeURLManagerImplTest.java @@ -26,7 +26,7 @@ public class YouTubeURLManagerImplTest { @Test public void testValidateUrlWithValidUrlValidCriteria() { - String videoUrl = "https://www.youtube.com/watch?v=owr198WQpM8"; + String videoUrl = "https://www.youtube.com/watch?v=GHmQ8euNwv8"; Map result = youtubeMgr.validateURL(videoUrl, "license"); assertTrue(MapUtils.isNotEmpty(result)); assertTrue(result.size() == 2); @@ -34,6 +34,7 @@ public void testValidateUrlWithValidUrlValidCriteria() { assertTrue(result.containsKey("valid")); assertTrue((Boolean) result.get("valid")); assertEquals("creativeCommon", (String) result.get("value")); + //test } @Test diff --git a/platform-modules/url-manager/src/test/java/org/sunbird/url/util/GoogleDriveUtilTest.java b/platform-modules/url-manager/src/test/java/org/sunbird/url/util/GoogleDriveUtilTest.java index 17a5b8723..e25c29efe 100644 --- a/platform-modules/url-manager/src/test/java/org/sunbird/url/util/GoogleDriveUtilTest.java +++ 
b/platform-modules/url-manager/src/test/java/org/sunbird/url/util/GoogleDriveUtilTest.java @@ -2,15 +2,11 @@ import com.google.api.services.drive.model.File; import org.apache.commons.collections4.MapUtils; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.sunbird.common.exception.ClientException; -import org.sunbird.common.exception.ServerException; - -import java.io.IOException; import java.util.Map; import static org.junit.Assert.assertEquals; @@ -43,7 +39,7 @@ public void testGetDriveUrlWithInvalidUrl() { @Test public void testGetDriveFileWithValidUrl() { - String driveUrl = "https://drive.google.com/file/d/1ZUSXrODwNK52pzDJZ_fuNKK9lXBzxCsS/view?usp=sharing"; + String driveUrl = "https://drive.google.com/file/d/1ZUSXrODwNK52pzDJZ_fuNKK9lXBzxCsS/view"; String fileId = GoogleDriveUrlUtil.getDriveFileId(driveUrl); File driveFile = GoogleDriveUrlUtil.getDriveFile(fileId); assertNotNull(driveFile); diff --git a/platform-modules/url-manager/src/test/java/org/sunbird/url/util/HttpUrlUtilTest.java b/platform-modules/url-manager/src/test/java/org/sunbird/url/util/HttpUrlUtilTest.java index 48e3ff5f4..170ad2640 100644 --- a/platform-modules/url-manager/src/test/java/org/sunbird/url/util/HttpUrlUtilTest.java +++ b/platform-modules/url-manager/src/test/java/org/sunbird/url/util/HttpUrlUtilTest.java @@ -1,6 +1,7 @@ package org.sunbird.url.util; import org.apache.commons.io.FileUtils; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -25,7 +26,7 @@ public class HttpUrlUtilTest { @Test public void testGetMetadataWithValidUrl() { - String url = "https://ekstep-public-prod.s3-ap-south-1.amazonaws.com/content/do_31225266105597952011502/artifact/2246d6bdafaeae551d93e245fa484cc9_1495686214370.jpeg"; + String url = 
"https://sunbirddevbbpublic.blob.core.windows.net/sunbird-content-staging/content/assets/do_2137327580080128001217/gateway-of-india.jpg"; Map result = HTTPUrlUtil.getMetadata(url); assertTrue(result.size() == 2); assertTrue(result.containsKey("size")); @@ -61,7 +62,7 @@ public void testGetMetadataWithValidUrlExpectServerException() { @Test public void testDownloadFileWithValidUrl() { String downloadFolder = "/tmp/content/" + System.currentTimeMillis() + "_temp/do_123"; - String driveUrl = "https://dockpreprodall.blob.core.windows.net/dock-content-preprod/content/do_21334055220373913611129/artifact/screenshot-520.thumb.png"; + String driveUrl = "https://sunbirddevbbpublic.blob.core.windows.net/sunbird-content-staging/content/assets/do_2137327580080128001217/gateway-of-india.jpg"; java.io.File appIconFile = HTTPUrlUtil.downloadFile(driveUrl,downloadFolder); assertTrue(appIconFile.exists()); try {FileUtils.deleteDirectory(appIconFile.getParentFile().getParentFile());} catch(IOException io) {} diff --git a/platform-modules/url-manager/src/test/java/org/sunbird/url/util/YouTubeUrlUtilTest.java b/platform-modules/url-manager/src/test/java/org/sunbird/url/util/YouTubeUrlUtilTest.java index 913e2bb0b..068023802 100644 --- a/platform-modules/url-manager/src/test/java/org/sunbird/url/util/YouTubeUrlUtilTest.java +++ b/platform-modules/url-manager/src/test/java/org/sunbird/url/util/YouTubeUrlUtilTest.java @@ -26,7 +26,7 @@ public class YouTubeUrlUtilTest { @Test public void testGetLicenseWithValidUrlPattern1() { - String videoUrl = "https://www.youtube.com/watch?v=owr198WQpM8"; + String videoUrl = "https://www.youtube.com/watch?v=GHmQ8euNwv8"; String result = YouTubeUrlUtil.getLicense(videoUrl); assertEquals("creativeCommon", result); } @@ -60,7 +60,6 @@ public void testGetLicenseWithValidUrlPattern5() { String videoUrl = "http://www.youtube.com/attribution_link?a=JdfC0C9V6ZI&u=%2Fwatch%3Fv%3DEhxJLojIE_o%26feature%3Dshare"; String result = YouTubeUrlUtil.getLicense(videoUrl); 
} - @Test public void testGetLicenseExpectYoutubeLicense() { String videoUrl = "https://www.youtube.com/watch?v=_UR-l3QI2nE"; @@ -92,7 +91,7 @@ public void testIsValidLicenseWithInvalidLicense() { @Test public void testGetVideoListWithValidVideoId(){ - String videoId = "owr198WQpM8"; + String videoId = "GHmQ8euNwv8"; List org.powermock - powermock-api-mockito - 1.7.4 + powermock-api-mockito2 + 2.0.9 test org.powermock powermock-module-junit4 - 1.7.4 + 2.0.9 test @@ -75,7 +75,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/search-api/search-actors/src/test/java/org/sunbird/actors/HealthActorTest.java b/search-api/search-actors/src/test/java/org/sunbird/actors/HealthActorTest.java index 8d696ea26..a61193457 100644 --- a/search-api/search-actors/src/test/java/org/sunbird/actors/HealthActorTest.java +++ b/search-api/search-actors/src/test/java/org/sunbird/actors/HealthActorTest.java @@ -4,7 +4,6 @@ import akka.testkit.TestKit; import org.junit.AfterClass; import org.junit.Assert; -import org.junit.Ignore; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mockito; @@ -20,7 +19,6 @@ import java.util.concurrent.TimeUnit; -@Ignore @RunWith(PowerMockRunner.class) @PrepareForTest({HealthActor.class, ElasticSearchUtil.class}) @PowerMockIgnore({"jdk.internal.reflect.*", "javax.management.*", "sun.security.ssl.*", "javax.net.ssl.*" , "javax.crypto.*"}) diff --git a/search-api/search-actors/src/test/resources/application.conf b/search-api/search-actors/src/test/resources/application.conf index 4508be2b3..442fedebd 100644 --- a/search-api/search-actors/src/test/resources/application.conf +++ b/search-api/search-actors/src/test/resources/application.conf @@ -280,7 +280,7 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path = "../../schemas" content.relatedBoards.properties=["channel","board","subject","medium"] diff 
--git a/search-api/search-core/pom.xml b/search-api/search-core/pom.xml index fbdf374ff..2fd68790e 100644 --- a/search-api/search-core/pom.xml +++ b/search-api/search-core/pom.xml @@ -14,7 +14,7 @@ UTF-8 2.5.22 - 2.11 + 2.12 @@ -82,7 +82,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/search-api/search-core/src/test/java/org/sunbird/search/ElasticSearchUtilTest.java b/search-api/search-core/src/test/java/org/sunbird/search/ElasticSearchUtilTest.java index 8691bd1af..f057b8e48 100644 --- a/search-api/search-core/src/test/java/org/sunbird/search/ElasticSearchUtilTest.java +++ b/search-api/search-core/src/test/java/org/sunbird/search/ElasticSearchUtilTest.java @@ -29,6 +29,7 @@ * @author pradyumna * */ + public class ElasticSearchUtilTest extends BaseSearchTest { private static String[] contentTypes = new String[] { "Resources", "PracticeSet", "TextBook", "Collection", "Asset" }; diff --git a/search-api/search-core/src/test/java/org/sunbird/search/SearchProcessorTest.java b/search-api/search-core/src/test/java/org/sunbird/search/SearchProcessorTest.java index 1be77ac4f..4581896c1 100644 --- a/search-api/search-core/src/test/java/org/sunbird/search/SearchProcessorTest.java +++ b/search-api/search-core/src/test/java/org/sunbird/search/SearchProcessorTest.java @@ -14,7 +14,6 @@ import org.sunbird.search.util.SearchConstants; import scala.concurrent.ExecutionContext; import scala.concurrent.Future; - import java.util.ArrayList; import java.util.Arrays; import java.util.Date; @@ -28,6 +27,7 @@ * @author pradyumna * */ + public class SearchProcessorTest extends BaseSearchTest { private static SearchProcessor searchprocessor = new SearchProcessor(); diff --git a/search-api/search-core/src/test/java/org/sunbird/search/util/DefinitionUtilTest.java b/search-api/search-core/src/test/java/org/sunbird/search/util/DefinitionUtilTest.java index eb75b2f79..6416bc765 100644 --- 
a/search-api/search-core/src/test/java/org/sunbird/search/util/DefinitionUtilTest.java +++ b/search-api/search-core/src/test/java/org/sunbird/search/util/DefinitionUtilTest.java @@ -7,7 +7,6 @@ import java.util.Map; public class DefinitionUtilTest { - @Test public void testAddDocumentWithId() throws Exception { Map metadata = DefinitionUtil.getMetaData("content"); diff --git a/search-api/search-core/src/test/resources/application.conf b/search-api/search-core/src/test/resources/application.conf index 4508be2b3..87db66626 100644 --- a/search-api/search-core/src/test/resources/application.conf +++ b/search-api/search-core/src/test/resources/application.conf @@ -280,7 +280,7 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" content.relatedBoards.properties=["channel","board","subject","medium"] diff --git a/search-api/search-service/app/controllers/SearchBaseController.scala b/search-api/search-service/app/controllers/SearchBaseController.scala index 9e1735b35..faecaf067 100644 --- a/search-api/search-service/app/controllers/SearchBaseController.scala +++ b/search-api/search-service/app/controllers/SearchBaseController.scala @@ -11,7 +11,7 @@ import play.api.mvc._ import java.util import java.util.UUID -import scala.collection.JavaConversions._ +import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} abstract class SearchBaseController(protected val cc: ControllerComponents)(implicit exec: ExecutionContext) extends AbstractController(cc) { diff --git a/search-api/search-service/conf/application.conf b/search-api/search-service/conf/application.conf index a803688db..3b5967ce1 100644 --- a/search-api/search-service/conf/application.conf +++ b/search-api/search-service/conf/application.conf @@ -280,7 +280,7 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB 
-schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" content.relatedBoards.properties=["channel","board","subject","medium"] diff --git a/search-api/search-service/pom.xml b/search-api/search-service/pom.xml index 10a883e4a..f2073377b 100644 --- a/search-api/search-service/pom.xml +++ b/search-api/search-service/pom.xml @@ -155,7 +155,7 @@ org.jacoco jacoco-maven-plugin - 0.8.5 + 0.8.8 default-prepare-agent diff --git a/taxonomy-api/api-tests/Collections/Framework API(s).postman_collection.json b/taxonomy-api/api-tests/Collections/Framework API(s).postman_collection.json new file mode 100644 index 000000000..078758705 --- /dev/null +++ b/taxonomy-api/api-tests/Collections/Framework API(s).postman_collection.json @@ -0,0 +1,4507 @@ +{ + "info": { + "_postman_id": "c7518aae-df91-4f6e-818b-31f6d9d2780b", + "name": "Framework API(s)", + "description": "The Framework API resources is basically a supporting data structure which serves as a template \n- It can be used in places where the structure remains the same but only the usage changes as per the requirement.\n- These resources are used to create, update, read, search, the objects like channels, frameworks, catergories, terms\n- The Hierarchy is as: Framework-->Category-->Terms\n - Category: An abstract classification of the terms partitioned according to requirement\n - Terms: Refers to a keyword or a phrase to descibe a Concept\n- [TestCases](https://www.getpostman.com/collections/10a84f4c27c0b45f690b)", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "channel/v1", + "item": [ + { + "name": "Create Channel", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": 
"Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"name\": \"Channel Name\",\n \"code\": \"channel-id\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "create" + ] + }, + "description": "This API is used for creating a channel \n - The endpoint for **Create Channel** is `/channel/v1/create`\n - It points to knowledge-mw-service - `/v1/channel/create`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Create channel\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require bearer authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"name\": \"Channel Name\",\n \"code\": \"channel-id\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.channel.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T08:27:27.810Z\",\n \"params\": {\n \"resmsgid\": \"3357da20-3de6-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"3353bb70-3de6-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"channel-id\",\n \"node_id\": \"channel-id\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"Create Channel\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter or providing some wrong request.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require bearer authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"name\": \"Channel Name\",\n \"code\": \"channel-id\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.channel.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T08:26:15.914Z\",\n \"params\": {\n \"resmsgid\": \"087d64a0-3de6-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"CONSTRAINT_VALIDATION_FAILED\",\n \"errmsg\": \"Object already exists with identifier: channel-id\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require bearer authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"name\": \"Channel Name\",\n \"code\": \"channel-id\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "create" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Get a Channel", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/channel/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to get" + } + ] + }, + "description": "This API is associated with fetching a channel by a specific channel ID\n - The endpoint for **Get a Channel** is `/channel/v1/read/{ID}`\n - It points to knowledge-mw-service - `/v1/channel/read/{ID}` \n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation. **Channel Read** operation was successfully executed. 
\\n Please refer for different additional [metadata](https://github.com/project-sunbird/knowledge-platform/blob/release-3.5.0/schemas/content/1.0/schema.json)", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/channel/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to get" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.channel.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-14T08:33:50ZZ\",\n \"params\": {\n \"resmsgid\": \"02c742d2-57e1-4441-aa31-0ce339c3917b\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"channel\": {\n \"identifier\": \"channel-405\",\n \"lastStatusChangedOn\": \"2020-12-14T08:27:49.490+0000\",\n \"code\": \"channel-405\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"assetAdditionalCategories\": [],\n \"autoCreateBatch\": \"Enabled\",\n \"languageCode\": [],\n \"suggested_frameworks\": [\n {\n 
\"identifier\": \"NCF\",\n \"code\": \"NCF\",\n \"name\": \"NCF\",\n \"objectType\": \"Framework\"\n }\n ],\n \"createdOn\": \"2020-12-14T08:27:49.490+0000\",\n \"objectType\": \"Channel\",\n \"versionKey\": \"1607934825088\",\n \"collectionPrimaryCategories\": [\n \"Content Playlist\",\n \"Course\",\n \"Digital Textbook\",\n \"Explanation Content\"\n ],\n \"contentPrimaryCategories\": [\n \"Course Assessment\",\n \"eTextbook\",\n \"Explanation Content\",\n \"Learning Resource\",\n \"Practice Question Set\",\n \"Teacher Resource\"\n ],\n \"name\": \"Channel without Default License\",\n \"lastUpdatedOn\": \"2020-12-14T08:33:45.088+0000\",\n \"defaultCourseFramework\": \"TPD\",\n \"collectionAdditionalCategories\": [\n \"Textbook\",\n \"Lesson Plan\",\n \"TV Lesson\"\n ],\n \"assetPrimaryCategories\": [\n \"Asset\",\n \"CertAsset\",\n \"Certificate Template\"\n ],\n \"contentAdditionalCategories\": [\n \"Classroom Teaching Video\",\n \"Concept Map\",\n \"Curiosity Question Set\",\n \"Experiential Resource\",\n \"Explanation Video\",\n \"Focus Spot\",\n \"Learning Outcome Definition\",\n \"Lesson Plan\",\n \"Marking Scheme Rubric\",\n \"Pedagogy Flow\",\n \"Previous Board Exam Papers\",\n \"TV Lesson\",\n \"Textbook\"\n ],\n \"status\": \"Live\",\n \"defaultFramework\": \"NCF\"\n }\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND. The **Channel Read** operation failed ! The possible reason for failure is that you may have passed invalid channel id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/channel/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to get" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.channel.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-14T08:36:08ZZ\",\n \"params\": {\n \"resmsgid\": \"e54ff3af-4026-420e-9412-0fcd3eba1674\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: channel-407\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/channel/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to get" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Update Channel", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"description\": \"Updated channel description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to be updated" + } + ] + }, + "description": "This API is associated with updating a specific channel\n - The endpoint for **Update Channel** is `/channel/v1/update/{ID}`\n - It points to knowledge-mw-service - `/v1/channel/update/{ID}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! 
Successful operation.\"Update channel by ID\" operation was successfully executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"description\": \"Updated channel description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to be updated" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.channel.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T08:29:24.724Z\",\n \"params\": {\n \"resmsgid\": \"79078340-3de6-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"78fef7c0-3de6-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"channel-405\",\n \"node_id\": \"channel-405\"\n }\n}" + }, + { + "name": "BAD REQUEST. The **Update Channel\" operation failed ! 
The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"description\": \"Updated channel description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to be updated" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"ts\": 79777367,\n \"id\": \"Lorem in dolore ex ullamco\",\n \"params\": {\n \"msgid\": \"in mollit commodo\",\n \"resmsgid\": \"fugiat deserunt Lorem\",\n \"status\": \"failed\",\n \"err\": \"ut dolor et repreh\",\n \"errmsg\": \"ut enim \"\n },\n \"ver\": \"reprehenderit laboris\",\n \"responseCode\": \"proident qui minim\",\n \"result\": {}\n}" + }, + { + "name": "RESOURCE NOT FOUND. The \"update channel by ID\" operation failed ! 
The possible reason for failure is that you may have passed invalid channel id.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"description\": \"Updated channel description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to be updated" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.channel.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T08:30:44.219Z\",\n \"params\": {\n \"resmsgid\": \"a8697cb0-3de6-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: channel-406\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! 
These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"channel\": {\n \"description\": \"Updated channel description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/channel/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "channel", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) Unique identifier of the channel to be updated" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + } + ] + }, + { + "name": "framework/v1", + "item": [ + { + "name": "framework", + "item": [ + { + "name": "Create a Framework", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { 
+ "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"name\": \"Framework Name\",\n \"code\": \"dummy_framework\",\n \"channels\": [\n {\n \"identifier\": \"in.ekstep\"\n }\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "create" + ] + }, + "description": "This API is associated with creating a new framework on the Sunbird Platform.\n - The endpoint for **Create a Framework** is `/framework/v1/create`\n - It points to knowledge-mw-service - `/v1/framework/create`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"create framework\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"name\": \"Framework Name\",\n \"code\": \"dummy_framework\",\n \"channels\": [\n {\n \"identifier\": \"in.ekstep\"\n }\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T09:49:06.794Z\",\n \"params\": {\n \"resmsgid\": \"9b5d7ca0-3df1-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"9b4a90e0-3df1-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"dummy_framework\",\n \"versionKey\": \"1607939346678\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"create framework\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. 
You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"name\": \"Framework Name\",\n \"code\": \"dummy_framework\",\n \"channels\": [\n {\n \"identifier\": \"in.ekstep\"\n }\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T09:50:45.249Z\",\n \"params\": {\n \"resmsgid\": \"d60c8710-3df1-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"CONSTRAINT_VALIDATION_FAILED\",\n \"errmsg\": \"Object already exists with identifier: dummy_framework\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"name\": \"Framework Name\",\n \"code\": \"dummy_framework\",\n \"channels\": [\n {\n \"identifier\": \"in.ekstep\"\n }\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "create" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Get a Framework", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be fetched" + } + ] + }, + "description": "This API is associated with fetching a specific framework\n - The endpoint for **Get a Framework** is `/framework/v1/read/{ID}`\n - It points to knowledge-mw-service - `/v1/framework/read/{ID}`\n - The fields marked with an asterisk (*) are mandatory. 
They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"read framework\" operation was successfully executed.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be fetched" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:51:24ZZ\",\n \"params\": {\n \"resmsgid\": \"28f10a2a-ce6c-4dbe-a733-4c193013e84b\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"framework\": {\n \"identifier\": \"dummy_framework\",\n \"code\": \"Dumy framework updated\",\n \"name\": \"Framework Name\",\n \"description\": \"Dumy framework updated\",\n \"categories\": [\n {\n \"identifier\": \"dummy_framework_subject\",\n \"code\": \"subject\",\n \"terms\": [\n {\n \"identifier\": \"dummy_framework_subject_english\",\n \"code\": \"english\",\n \"translations\": null,\n \"name\": \"English\",\n \"description\": \"English\",\n \"index\": 1,\n \"category\": \"subject\",\n \"status\": \"Live\"\n }\n ],\n \"translations\": null,\n \"name\": \"Subject\",\n \"description\": \"Updated description\",\n \"index\": 1,\n \"status\": \"Live\"\n },\n {\n \"identifier\": \"dummy_framework_medium\",\n \"code\": \"medium\",\n \"translations\": null,\n \"name\": \"Medium\",\n \"description\": \"Medium\",\n \"index\": 2,\n \"status\": \"Live\"\n }\n ],\n \"type\": 
\"K-12\",\n \"objectType\": \"Framework\"\n }\n }\n}" + }, + { + "name": "BAD REQUEST. The \"read framework\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be fetched" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"ts\": 87966182,\n \"id\": \"laboris q\",\n \"params\": {\n \"msgid\": \"do est non incididunt\",\n \"resmsgid\": \"sint Excepteur sunt\",\n \"status\": \"successful\",\n \"err\": \"nisi magna eu\",\n \"errmsg\": \"velit\"\n },\n \"ver\": \"cupidatat exercitation elit\",\n \"responseCode\": \"do et cillum\",\n \"result\": {}\n}" + }, + { + "name": "RESOURCE NOT FOUND. The \"read framework\" operation failed ! 
The possible reason for failure is that you may have passed invalid framework id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be fetched" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:00:12.540Z\",\n \"params\": {\n \"resmsgid\": \"282e43c0-3df3-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_DATA_NOT_FOUND\",\n \"errmsg\": \"Data not found with id : dummy_framework1\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/read/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "read", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be fetched" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Update Framework", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"description\": \"Dumy framework updated\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be updated" + } + ] + }, + "description": "This API is associated with updating a framework\n - The endpoint for **Update Framework** is 
`/framework/v1/update/{ID}`\n - It points to knowledge-mw-service - `/v1/framework/update/{ID}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"update framework\" operation was successfully executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"description\": \"Dumy framework updated\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be updated" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T09:57:20.317Z\",\n \"params\": {\n 
\"resmsgid\": \"c1871ed0-3df2-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"c17ff2e0-3df2-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"dummy_framework\",\n \"versionKey\": \"1607939840281\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"update framework\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"description\": \"Dumy framework updated\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be updated" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"ts\": 24700756,\n \"id\": \"ut culpa\",\n \"params\": {\n \"msgid\": \"esse consequat\",\n \"resmsgid\": \"commodo aliqua magna Excepteur\",\n \"status\": \"failed\",\n \"err\": \"dolor laborum in ullamco\",\n \"errmsg\": \"laborum laboris sunt\"\n },\n \"ver\": \"laboris ut qu\",\n \"responseCode\": \"exercitation adipisicing in aliqua\",\n \"result\": {}\n}" + }, + { + "name": "RESOURCE NOT FOUND. The \"update framework\" operation failed ! 
The possible reason for failure is that you may have passed invalid framework id.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"description\": \"Dumy framework updated\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be updated" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T09:57:50.352Z\",\n \"params\": {\n \"resmsgid\": \"d36e1900-3df2-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_FRAMEWORK_NOT_FOUND\",\n \"errmsg\": \"Framework Not Found With Id : dummy_framework1\"\n },\n 
\"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json\n - multipart/form-data\n - application/x-www-form-urlencoded", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"description\": \"Dumy framework updated\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/update/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "update", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be updated" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "List Existing Frameworks", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + 
"value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {\n \"status\": \"Live\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "list" + ] + }, + "description": "This API is associated with listing of all existing frameworks\n - The endpoint for **List Existing Frameworks** is `/framework/v1/list`\n - It points to knowledge-mw-service - `/v1/framework/list`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"list framework\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {\n \"status\": \"Live\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "list" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.list\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:02:06.928Z\",\n \"params\": {\n \"resmsgid\": \"6c5c7d00-3df3-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"6c38ef70-3df3-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"frameworks\": [\n {\n \"owner\": \"in.ekstep\",\n \"identifier\": \"NCERT\",\n \"code\": \"NCERT\",\n \"apoc_json\": \"{\\\"batch\\\": true}\",\n \"consumerId\": \"a6654129-b58d-4dd8-9cf2-f8f3c2f458bc\",\n \"channel\": \"in.ekstep\",\n \"description\": \"NCERT \",\n \"type\": \"K-12\",\n \"createdOn\": \"2018-01-18T17:24:50.850+0000\",\n \"apoc_text\": \"APOC\",\n \"versionKey\": \"1525253065583\",\n \"channels\": [\n {\n \"identifier\": \"in.ekstep\",\n \"name\": \"Ekstep\",\n \"objectType\": \"Channel\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"Channel for in.ekstep\",\n \"status\": \"Live\"\n }\n ],\n \"appId\": \"ekstep_portal\",\n \"name\": \"NCERT\",\n \"lastUpdatedOn\": \"2018-05-02T09:24:25.583+0000\",\n \"categories\": [\n {\n \"identifier\": \"ncert_board\",\n \"name\": \"Boards\",\n \"objectType\": \"CategoryInstance\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"\",\n \"index\": 5,\n \"status\": \"Retired\"\n }\n ],\n \"status\": \"Live\",\n \"apoc_num\": 1\n }\n ],\n \"count\": 1\n }\n}" + }, + { + 
"name": "BAD REQUEST. The \"list framework\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {\n \"status\": \"Live\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "list" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.list\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T18:07:11.141Z\",\n \"params\": {\n \"resmsgid\": \"2fd2f550-3e37-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_SEARCH_REQUEST\",\n \"errmsg\": \"Invalid Search Request\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {\n \"status\": \"Live\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/list", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "list" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Publish Framework", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + }, + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": 
"{{baseUrl}}/framework/v1/publish/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "publish", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be published" + } + ] + }, + "description": "This API is associated with publishing a framework\n - The endpoint for **Publish Framework** is `/framework/v1/publish/{ID}`\n - It points to knowledge-mw-service - `/v1/framework/publish/{ID}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Publish framework\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/publish/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "publish", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be published" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.publish\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:07:54.648Z\",\n \"params\": {\n \"resmsgid\": \"3b9e5980-3df4-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"3b9cabd0-3df4-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"publishStatus\": \"Publish Operation for Framework Id 'dummy_framework' Started Successfully!\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"Publish Framework\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/publish/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "publish", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be published" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.publish\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:08:19.872Z\",\n \"params\": {\n \"resmsgid\": \"4aa73a00-3df4-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_FRAMEOWRK_ID\",\n \"errmsg\": \"Invalid Framework Id. Framework doesn't exist.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/publish/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "publish", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be published" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Copy Framework", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + }, + { + 
"key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"code\": \"NCERTCOPY11\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/copy/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "copy", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be copied" + } + ] + }, + "description": "This API is associated with copying a framework\n - The endpoint for **Copy Framework** is `/framework/v1/copy/{ID}`\n - It points to knowledge-mw-service - `v1/framework/copy/{ID}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Copy framework\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"code\": \"NCERTCOPY11\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/copy/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "copy", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be copied" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.copy\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:46:44.287Z\",\n \"params\": {\n \"resmsgid\": \"a8310cf0-3df9-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"a6b2aa50-3df9-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"NCERTCOPY11\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"Copy Framework\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"code\": \"NCERTCOPY11\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/copy/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "copy", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be copied" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.copy\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:47:22.578Z\",\n \"params\": {\n \"resmsgid\": \"bf03cb20-3df9-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_FRAMEWORK_EXISTS\",\n \"errmsg\": \"Framework with code: NCERTCOPY11, already exists.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "RESOURCE NOT FOUND. The \"Copy Framework\" operation failed ! 
The possible reason for failure is that you may have missed invlalid framework id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"code\": \"NCERTCOPY11\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/copy/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "copy", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be copied" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.framework.copy\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T10:47:47.865Z\",\n \"params\": {\n \"resmsgid\": \"ce164890-3df9-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_DATA_NOT_FOUND\",\n \"errmsg\": \"Data not found with id : NCERT101100\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER 
ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + }, + { + "description": "(Required) It is the Unique Id to identify the root organisation to which the user belongs", + "key": "X-Channel-Id", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"framework\": {\n \"code\": \"NCERTCOPY11\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/copy/:ID", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "copy", + ":ID" + ], + "variable": [ + { + "key": "ID", + "value": "Ut enim offici", + "description": "(Required) The identifier of the framework to be copied" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + } + ] + }, + { + "name": "category", + "item": [ + { + "name": "Create Category", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + 
"value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"name\": \"Subject\",\n \"code\": \"subject\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/create?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) The code of the framework under which category has to be created" + } + ] + }, + "description": "This API is associated with creating a category under a framework\n - The endpoint for **Create Category** is `/framework/v1/category/create`\n - It points to knowledge-mw-service - `/v1/framework/category/create`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"create category\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"name\": \"Subject\",\n \"code\": \"subject\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/create?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:32:01.754Z\",\n \"params\": {\n \"resmsgid\": \"0a10dba0-3e43-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"0a05df20-3e43-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"dummy_framework_medium\",\n \"versionKey\": \"1607974321700\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"create category\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"name\": \"Subject\",\n \"code\": \"subject\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/create?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:30:50.114Z\",\n \"params\": {\n \"resmsgid\": \"df5d7620-3e42-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"CONSTRAINT_VALIDATION_FAILED\",\n \"errmsg\": \"Object already exists with identifier: dummy_framework_subject\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"name\": \"Subject\",\n \"code\": \"subject\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/create?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Fetch Category", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/category/read/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Name of the framework under which category exists. 
Its an abstract data structure which serves as a template for categories and terms" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be fetched" + } + ] + }, + "description": "This API is associated with fetching a category on Sunbird Platform\n - The endpoint for **Fetch Category** is `/framework/v1/category/read/{code}`\n - It points to knowledge-mw-service - `/v1/framework/category/read/{code}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"fetch category\" operation was successfully executed.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/category/read/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be fetched" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:34:41.261Z\",\n \"params\": {\n \"resmsgid\": \"6923b1d0-3e43-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"691e3390-3e43-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"category\": {\n \"identifier\": \"dummy_framework_subject\",\n \"lastStatusChangedOn\": \"2020-12-14T19:30:32.392+0000\",\n \"code\": \"subject\",\n \"frameworks\": [\n {\n \"identifier\": \"dummy_framework\",\n \"name\": \"Framework Name\",\n \"objectType\": \"Framework\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"Dumy framework updated\",\n \"status\": \"Live\"\n }\n ],\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"channel\": \"in.ekstep\",\n \"description\": \"Subject\",\n \"domains\": [],\n \"createdOn\": \"2020-12-14T19:30:32.392+0000\",\n \"versionKey\": \"1607974232392\",\n \"terms\": [],\n \"appId\": \"@ignore@\",\n \"name\": \"Subject\",\n \"lastUpdatedOn\": \"2020-12-14T19:30:32.392+0000\",\n \"status\": \"Live\"\n }\n }\n}" + }, + { + "name": "BAD REQUEST. The \"fetch category\" operation failed ! 
The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/category/read/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be fetched" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:35:21.540Z\",\n \"params\": {\n \"resmsgid\": \"8125c840-3e43-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_FRAMEWORK_ID\",\n \"errmsg\": \"Invalid FrameworkId: dummy_framework1 for Categoryinstance \"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/category/read/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be fetched" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Update Category", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + 
"raw": "{{baseUrl}}/framework/v1/category/update/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "update", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Represents the framework to which the category belongs" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be updated" + } + ] + }, + "description": "This API is associated with updating a category in an existing framework\n - The endpoint for **Update Category** is `/framework/v1/category/update/{code}`\n - It points to knowledge-mw-service - `/v1/framework/category/update/{code}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"update category\" operation was successfully executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/update/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "update", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be updated" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:37:49.082Z\",\n \"params\": {\n \"resmsgid\": \"d916e7a0-3e43-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"d90f4680-3e43-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"dummy_framework_subject\",\n \"versionKey\": \"1607974669043\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"update category\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. 
Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/update/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "update", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be updated" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:38:29.662Z\",\n \"params\": {\n \"resmsgid\": \"f146ebe0-3e43-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_FRAMEWORK_ID\",\n \"errmsg\": \"Invalid FrameworkId: dummy_framework1 for Categoryinstance \"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"category\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/update/:code?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "update", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the category to be updated" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Search Categories", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + 
], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {}\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/search?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Represents the framework in which category can be searched" + } + ] + }, + "description": "This API is associated with searching a category\n - The endpoint for **Search Categories** is `/framework/v1/category/search`\n - It points to knowledge-mw-service - `/v1/framework/category/search`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"search category\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {}\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/search?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:40:23.864Z\",\n \"params\": {\n \"resmsgid\": \"3558c380-3e44-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"3555b640-3e44-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"count\": 2,\n \"categories\": [\n {\n \"identifier\": \"dummy_framework_subject\",\n \"lastStatusChangedOn\": \"2020-12-14T19:30:32.392+0000\",\n \"code\": \"subject\",\n \"frameworks\": [\n {\n \"identifier\": \"dummy_framework\",\n \"name\": \"Framework Name\",\n \"objectType\": \"Framework\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"Dumy framework updated\",\n \"status\": \"Live\"\n }\n ],\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"channel\": \"in.ekstep\",\n \"description\": \"Updated description\",\n \"domains\": [],\n \"createdOn\": \"2020-12-14T19:30:32.392+0000\",\n \"versionKey\": \"1607974669043\",\n \"terms\": [],\n \"appId\": \"dev.sunbird.portal\",\n \"name\": \"Subject\",\n \"lastUpdatedOn\": \"2020-12-14T19:37:49.043+0000\",\n \"status\": \"Live\"\n },\n {\n \"identifier\": \"dummy_framework_medium\",\n \"lastStatusChangedOn\": \"2020-12-14T19:32:01.700+0000\",\n \"code\": \"medium\",\n 
\"frameworks\": [\n {\n \"identifier\": \"dummy_framework\",\n \"name\": \"Framework Name\",\n \"objectType\": \"Framework\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"Dumy framework updated\",\n \"status\": \"Live\"\n }\n ],\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"channel\": \"in.ekstep\",\n \"description\": \"Medium\",\n \"domains\": [],\n \"createdOn\": \"2020-12-14T19:32:01.700+0000\",\n \"versionKey\": \"1607974321700\",\n \"terms\": [],\n \"appId\": \"@ignore@\",\n \"name\": \"Medium\",\n \"lastUpdatedOn\": \"2020-12-14T19:32:01.700+0000\",\n \"status\": \"Live\"\n }\n ]\n }\n}" + }, + { + "name": "BAD REQUEST. The \"search category\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {}\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/search?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.category.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:41:28.846Z\",\n \"params\": {\n \"resmsgid\": \"5c143ae0-3e44-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_FRAMEWORK_ID\",\n \"errmsg\": \"Invalid FrameworkId: dummy_framework1 for Categoryinstance \"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"search\": {}\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/category/search?framework=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "category", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + } + ] + }, + { + "name": "term", + "item": [ + { + "name": "Create Term", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"code\": \"english\",\n \"name\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/create?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Represents the framework in which the term is to be created" + }, + { + "key": "category", + "value": "Ut enim offici", + "description": "(Required) Represents the category in which the term is to be created" + } + ] + }, + "description": "This API is associated with creating a 
term under a framework category\n - The endpoint for **Create Term** is `/framework/v1/term/create`\n - It points to knowledge-mw-service - `/v1/framework/term/create`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"create term\" operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"code\": \"english\",\n \"name\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/create?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:44:02.348Z\",\n \"params\": {\n \"resmsgid\": \"b792c6c0-3e44-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"b783f9b0-3e44-11eb-9ff3-67c41480f6aa\",\n \"status\": 
\"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": [\n \"dummy_framework_subject_english\"\n ]\n }\n}" + }, + { + "name": "BAD REQUEST. The \"create term\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"code\": \"english\",\n \"name\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/create?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:44:49.263Z\",\n \"params\": {\n \"resmsgid\": \"d3896ff0-3e44-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_CATEGORY_ID\",\n \"errmsg\": 
\"Please provide valid category.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"code\": \"english\",\n \"name\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/create?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "create" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Fetch Term", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" 
+ } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/term/read/:code?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Represents the framework id in which the term can be fetched" + }, + { + "key": "category", + "value": "Ut enim offici", + "description": "(Required) Represents the category in which the term can be fetched" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the term to be fetched" + } + ] + }, + "description": "This API is associated with fetching the available terms under a specific category and a framework on Sunbird Platform.\n - The endpoint for **Fetch Term** is `/framework/v1/term/read/{code}`\n - It points to knowledge-mw-service - `/v1/framework/term/read/{code}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Fetch term\" operation was successfully executed.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/term/read/:code?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the term to be fetched" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-17T07:24:51.258Z\",\n \"params\": {\n \"resmsgid\": \"f380c1a0-4038-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"f358c740-4038-11eb-9b0c-abcfbdf41bc3\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"term\": {\n \"associations\": [],\n \"identifier\": \"dummy_framework_subject_english\",\n \"lastStatusChangedOn\": \"2020-12-14T19:44:02.260+0000\",\n \"code\": \"english\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"channel\": \"in.ekstep\",\n \"description\": \"English\",\n \"createdOn\": \"2020-12-14T19:44:02.260+0000\",\n \"versionKey\": \"1607975299669\",\n \"children\": [],\n \"appId\": \"dev.sunbird.portal\",\n \"name\": \"English\",\n \"lastUpdatedOn\": \"2020-12-14T19:48:19.669+0000\",\n \"categories\": [\n {\n \"identifier\": \"dummy_framework_subject\",\n \"name\": \"Subject\",\n \"objectType\": \"CategoryInstance\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"Updated description\",\n \"status\": \"Live\"\n }\n ],\n \"category\": \"subject\",\n \"status\": \"Live\"\n }\n }\n}" + }, + { + "name": "BAD REQUEST. 
The \"Fetch term\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/term/read/:code?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the term to be fetched" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-17T07:54:24.246Z\",\n \"params\": {\n \"resmsgid\": \"14494160-403d-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_CATEGORY_ID\",\n \"errmsg\": \"Please provide valid category.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! 
These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "url": { + "raw": "{{baseUrl}}/framework/v1/term/read/:code?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "read", + ":code" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ], + "variable": [ + { + "key": "code", + "value": "Ut enim offici", + "description": "(Required) The name/code of the term to be fetched" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Update Term", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n 
\"request\": {\n \"term\": {\n \"name\": \"English\",\n \"description\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/update/{{code}}?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "update", + "{{code}}" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Represents the framework in which the term is to be updated" + }, + { + "key": "category", + "value": "Ut enim offici", + "description": "(Required) Represents the category in which the term is to be updated" + } + ] + }, + "description": "This API is associated with updating a term under a category in a framework on Sunbird Platform\n - The endpoint for **Update Term** is `/framework/v1/term/update/{code}`\n - It points to knowledge-mw-service - `/v1/framework/term/update/{code}`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"update term\" operation was successfully executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.'", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"name\": \"English\",\n \"description\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/update{{code}}?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "update{{code}}" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:48:19.754Z\",\n \"params\": {\n \"resmsgid\": \"50ffd0a0-3e45-11eb-9ff3-67c41480f6aa\",\n \"msgid\": \"50f04040-3e45-11eb-9ff3-67c41480f6aa\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"node_id\": \"dummy_framework_subject_english\",\n \"versionKey\": \"1607975299669\"\n }\n}" + }, + { + "name": "BAD REQUEST. The \"update term\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. 
You will receive the authorization key. Specify the key received, here.'", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"name\": \"English\",\n \"description\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/update{{code}}?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "update{{code}}" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-14T19:49:03.927Z\",\n \"params\": {\n \"resmsgid\": \"6b541470-3e45-11eb-9ff3-67c41480f6aa\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_CATEGORY_ID\",\n \"errmsg\": \"Please provide valid category.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource\n Possible media types can be:- \n - application/json", + "key": "Content-Type", + "value": "Ut enim offici" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. 
You will receive the authorization key. Specify the key received, here.'", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"term\": {\n \"name\": \"English\",\n \"description\": \"English\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/update{{code}}?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "update{{code}}" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + }, + { + "name": "Search Term", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {}\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/search?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici", + "description": "(Required) Represents the framework in which the term is to be searched" + }, + { + "key": "category", + "value": "Ut enim offici", + "description": "(Required) Represents the category in which the term is to be 
searched" + } + ] + }, + "description": "This API is associated with searching the existing terms under a category in a framework on Sunbird Platform\n - The endpoint for **Search Term** is `/framework/v1/term/search`\n - It points to knowledge-mw-service - `/v1/framework/term/search`\n - The fields marked with an asterisk (*) are mandatory. They cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Search term operation was successfully executed.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {}\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/search?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-17T08:12:44.816Z\",\n \"params\": {\n \"resmsgid\": \"a446e900-403f-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": \"a4447800-403f-11eb-9b0c-abcfbdf41bc3\",\n \"status\": \"successful\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"terms\": [\n {\n \"associations\": [],\n \"identifier\": 
\"dummy_framework_subject_english\",\n \"lastStatusChangedOn\": \"2020-12-14T19:44:02.260+0000\",\n \"code\": \"english\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"channel\": \"in.ekstep\",\n \"description\": \"English\",\n \"createdOn\": \"2020-12-14T19:44:02.260+0000\",\n \"versionKey\": \"1607975299669\",\n \"children\": [],\n \"appId\": \"dev.sunbird.portal\",\n \"name\": \"English\",\n \"lastUpdatedOn\": \"2020-12-14T19:48:19.669+0000\",\n \"categories\": [\n {\n \"identifier\": \"dummy_framework_subject\",\n \"name\": \"Subject\",\n \"objectType\": \"CategoryInstance\",\n \"relation\": \"hasSequenceMember\",\n \"description\": \"Updated description\",\n \"status\": \"Live\"\n }\n ],\n \"category\": \"subject\",\n \"status\": \"Live\"\n }\n ],\n \"count\": 1\n }\n}" + }, + { + "name": "BAD REQUEST. The \"search term\" operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {}\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/search?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.term.search\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-17T08:13:50.811Z\",\n \"params\": {\n \"resmsgid\": \"cb9cf2b0-403f-11eb-9b0c-abcfbdf41bc3\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_INVALID_CATEGORY_ID\",\n \"errmsg\": \"Please provide valid category.\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "Ut enim offici" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {}\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/framework/v1/term/search?framework=Ut enim offici&category=Ut enim offici", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "framework", + "v1", + "term", + "search" + ], + "query": [ + { + "key": "framework", + "value": "Ut enim offici" + }, + { + "key": "category", + "value": "Ut enim offici" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "text", + "header": [ + { + "key": "Content-Type", + "value": "text/plain" + } + ], + "cookie": [], + "body": "" + } + ] + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": "https://staging.open-sunbird.org/api", + "type": "string" + }, + { + "key": "code", + "value": "Ut enim offici", + "type": "any", + "description": "(Required) The name/code of the term to be update" + } + ] +} \ No newline at end of file diff --git a/taxonomy-api/api-tests/Collections/Object Category API(s).postman_collection.json b/taxonomy-api/api-tests/Collections/Object Category API(s).postman_collection.json new file mode 100644 index 000000000..2acc96161 --- /dev/null +++ b/taxonomy-api/api-tests/Collections/Object Category API(s).postman_collection.json @@ -0,0 +1,1854 @@ +{ + "info": { + "_postman_id": "768d5a4c-2ca3-4918-a1ce-7851b72803d9", + "name": "Object Category API(s)", + "description": "The Object Category Definition API resources mainly allows you to set the rules for creating different type of objects for a particular category. For creating the definition for each object category, an object category has to be created. Each endpoint serves a different purpose, and thus based on your requirement you can do the following: \n- The APIs include CRUD (Create, Update, Read and Delete) operations. 
\n- You have to create a master category before setting the rules for the same.\n\n\nContact Support:\n Email: info@sunbird.org", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "object/category", + "item": [ + { + "name": "v1", + "item": [ + { + "name": "Create Object Category", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"name\": \"Explanation Content\",\n \"description\": \"Content that can be used to Explain\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "create" + ] + }, + "description": "This API is associated with creating Object Category on the Sunbird Platform.\n - The endpoint for **Create Object Category** is `/object/category/v1/create`\n - It points to taxonomy-service - `/object/category/v4/create`\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. 
The **Create Object Category** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n- application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"name\": \"Explanation Content\",\n \"description\": \"Content that can be used to Explain\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.create\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T20:24:45ZZ\",\n \"params\": {\n \"resmsgid\": \"3be02c4b-3324-41a3-afd8-60f6be0584d2\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"obj-cat:classroom-teaching-video\"\n }\n}" + }, + { + "name": "BAD REQUEST. The **Create Object Category** operation failed. 
You may have missed input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n- application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"name\": \"Explanation Content\",\n \"description\": \"Content that can be used to Explain\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:50:29.786Z\",\n \"params\": {\n \"resmsgid\": \"c169a7a0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"ERR_NAME_SET_AS_IDENTIFIER\",\n \"errmsg\": \"Name will be set as identifier\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {}\n}" + }, + { + "name": "INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. 
If the problem persists contact us at info@sunbird.org.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n- application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"name\": \"Explanation Content\",\n \"description\": \"Content that can be used to Explain\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "create" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Update Object Category", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + 
"key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/update/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "update", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Id To the Request URL" + } + ] + }, + "description": "This API is associated with updating object category on the Sunbird Platform .\n - **_/Update/_** endpoint executes the \"Update Object Category\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Object Category** is `/object/category/v1/update/{ObjectCategory_Id}`\n - It points to taxonomy-service - `/object/category/v4/update/{ObjectCategory_Id}`\n - {...}refers to a section of review endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation. Object Category update operation was successfuly executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/update/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "update", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2020-12-10T20:26:07ZZ\",\n \"params\": {\n \"resmsgid\": \"80aa9310-b749-411c-a13b-8d9f25af389f\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"obj-cat:classroom-teaching-video\"\n }\n}" + }, + { + "name": "BAD REQUEST. The 'Update Object Category ' operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter. ", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/update/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "update", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2020-12-10T20:27:01ZZ\",\n \"params\": {\n \"resmsgid\": \"87eb99c6-bc10-406a-a6fe-13651f546ce1\",\n \"msgid\": null,\n \"err\": \"ERROR_RESTRICTED_PROP\",\n \"status\": \"failed\",\n \"errmsg\": \"Properties in list [identifier, name, status] are not allowed in request\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,** Object Category Update** operation failed !The possible reason for failure is that you may have provided wrong object category id.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/update/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "update", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.update\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T20:16:23ZZ\",\n \"params\": {\n \"resmsgid\": \"34d3161a-aaa8-417e-9cbd-d77bad54441c\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: obj-cat:classroom-teaching-video1\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategory\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/v1/update/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "update", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.update\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Read ObjectCategory", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": 
"in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/v1/read/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "read", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategory Id To the Request URL" + } + ] + }, + "description": "This API is associated with viewing and reading out the master object category on the Sunbird Platform .\n - The **_/read/{ObjectCategory_Id}_** endpoint executes request for fetching the contents to be viewed and read.\n - The endpoint for **Read Object Category** is `/object/category/v1/read/{ObjectCategory_Id}`\n - It points to taxonomy-service - `/object/category/v4/read/{ObjectCategory_Id}`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate id.\n - You need to provide a valid object category id value in {ObjectCategory Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Read Object Category\" operation was successfully executed. For all possible metadata, please [refer](https://github.com/project-sunbird/knowledge-platform/blob/release-3.5.0/schemas/objectcategory/1.0/schema.json)", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource. 
Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/v1/read/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "read", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategory Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T09:32:09ZZ\",\n \"params\": {\n \"resmsgid\": \"fa4ebf30-bb78-4448-9636-ee2a27bf75e5\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"objectCategory\": {\n \"identifier\": \"obj-cat:checking-account\",\n \"lastStatusChangedOn\": \"2020-12-17T09:09:55.687+0000\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"name\": \"Checking Account\",\n \"channel\": \"in.ekstep\",\n \"lastUpdatedOn\": \"2020-12-17T09:14:56.660+0000\",\n \"description\": \"Textbook Unit\",\n \"languageCode\": [],\n \"createdOn\": \"2020-12-17T09:09:55.687+0000\",\n \"objectType\": \"ObjectCategory\",\n \"versionKey\": \"1608196496660\",\n \"status\": \"Live\"\n }\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**Object Category Read** operation failed !The possible reason for failure is that you may have provided wrong object category id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource. 
Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/v1/read/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "read", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategory Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.content.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T20:37:58.223Z\",\n \"params\": {\n \"resmsgid\": \"96a71df0-3b27-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_1131697204035993601314\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource. 
Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/v1/read/:ObjectCategory_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "v1", + "read", + ":ObjectCategory_Id" + ], + "variable": [ + { + "key": "ObjectCategory_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategory Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + } + ] + }, + { + "name": "definition/v1", + "item": [ + { + "name": "read", + "item": [ + { + "name": "Get Object Category Definition", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"name\": \"learning resource\",\n \"objectType\": \"Content\",\n \"channel\": \"013085024460783616158023\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + 
"definition", + "v1", + "read" + ] + }, + "description": "This API is associated with creating Object Category Definition on the Sunbird Platform.\n - The endpoint for **Create Object Category Definition** is `/object/category/definition/v1/read`\n - It points to taxonomy-service - `/object/category/definition/v4/read`\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. The **Create Object Category Definition** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"name\": \"learning resource\",\n \"objectType\": \"Content\",\n \"channel\": \"013085024460783616158023\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-14T14:01:36ZZ\",\n \"params\": {\n \"resmsgid\": \"8c94cbfa-7bb9-4d81-97f5-6cd1bdbe0ed6\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"objectCategoryDefinition\": {\n \"identifier\": \"obj-cat:learning-resource_content_013085024460783616158023\",\n \"objectMetadata\": {\n \"config\": {},\n \"schema\": {}\n },\n \"languageCode\": [],\n \"forms\": {}\n }\n }\n}" + }, + { + "name": "BAD REQUEST. The **Get Object Category Definition** operation failed. You may have missed input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"name\": \"learning resource\",\n \"objectType\": \"Content\",\n \"channel\": \"013085024460783616158023\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T19:40:33ZZ\",\n \"params\": {\n \"resmsgid\": \"e1d9d400-bbda-483f-8184-0f506a3649b4\",\n \"msgid\": null,\n \"err\": \"ERR_INVALID_REQUEST\",\n \"status\": \"failed\",\n \"errmsg\": \"Please provide required properties!\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**ObjectCategoryDefinition Read** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"name\": \"learning resource\",\n \"objectType\": \"Content\",\n \"channel\": \"013085024460783616158023\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read" + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T20:37:58.223Z\",\n \"params\": {\n \"resmsgid\": \"96a71df0-3b27-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_1131697204035993601314\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. If the problem persists contact us at info@sunbird.org.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"name\": \"learning resource\",\n \"objectType\": \"Content\",\n \"channel\": \"013085024460783616158023\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Read ObjectCategoryDefinition", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "GET", + "header": [ + { + "key": "Accept", + "value": "application/json" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read/:ObjectCategoryDefinition_Id?fields=sit", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read", + ":ObjectCategoryDefinition_Id" + ], + "query": [ + { + "key": "fields", + "value": "sit", + "description": "List of specific properties to be fetched." 
+ } + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategoryDefinition Id To the Request URL" + } + ] + }, + "description": "This API is associated with viewing and reading out the master object category on the Sunbird Platform .\n - The **_/read/{objectCategoryDefinition_Id}_** endpoint executes request for fetching the contents to be viewed and read.\n - The endpoint for **Update Object Category Definition** is `/object/category/definition/v1/read/{ID}`\n - It points to taxonomy-service - `/object/category/definition/v4/read/{ID}`\n - {...}refers to a section of reject endpoint URL path that needs to be replaced by appropriate Id.\n - You need to provide a valid object category definition Id value in {objectCategoryDefinition_Id} field of API URL.\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation.\"Read content\" operation was successfully executed. 
For all possible metadata, please [refer](https://github.com/project-sunbird/knowledge-platform/blob/release-3.5.0/schemas/objectcategorydefinition/1.0/schema.json)", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read/:ObjectCategoryDefinition_Id?fields=sit", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read", + ":ObjectCategoryDefinition_Id" + ], + "query": [ + { + "key": "fields", + "value": "sit" + } + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategoryDefinition Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.read\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T13:25:16ZZ\",\n \"params\": {\n \"resmsgid\": \"e61aee2f-8099-4978-a624-6605360218e1\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"objectCategoryDefinition\": {\n \"identifier\": \"obj-cat:checking-account\",\n \"lastStatusChangedOn\": \"2020-12-17T09:09:55.687+0000\",\n \"consumerId\": \"7411b6bd-89f3-40ec-98d1-229dc64ce77d\",\n \"name\": \"Checking Account\",\n \"channel\": \"in.ekstep\",\n \"lastUpdatedOn\": \"2020-12-17T11:15:37.740+0000\",\n \"description\": \"Textbook Unit\",\n \"createdOn\": \"2020-12-17T09:09:55.687+0000\",\n \"objectType\": \"ObjectCategory\",\n \"versionKey\": 
\"1608203737740\",\n \"status\": \"Live\",\n \"objectMetadata\": {\n \"config\": {},\n \"schema\": {}\n },\n \"languageCode\": [],\n \"forms\": {}\n }\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,**ObjectCategoryDefinition Read** operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read/:ObjectCategoryDefinition_Id?fields=sit", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read", + ":ObjectCategoryDefinition_Id" + ], + "query": [ + { + "key": "fields", + "value": "sit" + } + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategoryDefinition Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.read\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T20:37:58.223Z\",\n \"params\": {\n \"resmsgid\": \"96a71df0-3b27-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": \"NOT_FOUND\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. | [Invalid Node Id.]: do_1131697204035993601314\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. 
In the meantime, try refreshing.", + "originalRequest": { + "method": "GET", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "The Content Type entity is the media type of the resource.Possible media types can be:- \n - Application/json", + "key": "Content-Type", + "value": "sit" + } + ], + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/read/:ObjectCategoryDefinition_Id?fields=sit", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "read", + ":ObjectCategoryDefinition_Id" + ], + "query": [ + { + "key": "fields", + "value": "sit" + } + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid ObjectCategoryDefinition Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + } + ] + }, + { + "name": "Create Object Category Definition", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n 
\"objectCategoryDefinition\": {\n \"categoryId\": \"obj-cat:practice-question-set\",\n \"targetObjectType\": \"Content\",\n \"objectMetadata\": {\n \"config\": {},\n \"schema\": {}\n }\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "create" + ] + }, + "description": "This API is associated with creating Object Category Definition on the Sunbird Platform.\n - The endpoint for **Create Object Category Definition** is `/object/category/definition/v1/create`\n - It points to taxonomy-service - `/object/category/definition/v4/create`\n - It is mandatory to provide values for parameters marked with *. \n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "SUCCESS. The **Create Object Category Definition** operation was successful!", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"categoryId\": \"obj-cat:practice-question-set\",\n \"targetObjectType\": \"Content\",\n \"objectMetadata\": {\n \"config\": {},\n \"schema\": {}\n }\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "create" + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.create\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-10T20:24:45ZZ\",\n \"params\": {\n \"resmsgid\": \"3be02c4b-3324-41a3-afd8-60f6be0584d2\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"obj-cat:explanation-content_content_all\"\n }\n}" + }, + { + "name": "BAD REQUEST. The **Create Object Category Definition** operation failed. You may have missed input for a mandatory parameter.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"categoryId\": \"obj-cat:practice-question-set\",\n \"targetObjectType\": \"Content\",\n \"objectMetadata\": {\n \"config\": {},\n \"schema\": {}\n }\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "create" + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.create\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T06:44:49ZZ\",\n \"params\": {\n \"resmsgid\": \"c4fa577f-6e15-4991-b251-94f7b2a897bf\",\n \"msgid\": null,\n \"err\": \"CLIENT_ERROR\",\n \"status\": \"failed\",\n \"errmsg\": \"Invalid request\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": [\n \"Invalid Props are : [xyz]\"\n ]\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR. We track these errors automatically and try to set it right at the earliest. Try refreshing the page. If the problem persists contact us at info@sunbird.org.", + "originalRequest": { + "method": "POST", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource. Possible media types can be:\n - application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) To make use of the API, you require authorization. Raise a request to the administrator for the use of the API. You will receive the authorization key. 
Specify the key received, here.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"objectCategoryDefinition\": {\n \"categoryId\": \"obj-cat:practice-question-set\",\n \"targetObjectType\": \"Content\",\n \"objectMetadata\": {\n \"config\": {},\n \"schema\": {}\n }\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/create", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "create" + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": \"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + }, + { + "name": "Update Object Category Definition", + "request": { + "auth": { + "type": "apikey", + "apikey": [ + { + "key": "key", + "value": "Authorization", + "type": "string" + }, + { + "key": "value", + "value": "{{apiKey}}", + "type": "string" + }, + { + "key": "in", + "value": "header", + "type": "string" + } + ] + }, + "method": "PATCH", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + }, + { + "key": "Accept", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/update/:ObjectCategoryDefinition_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + 
"definition", + "v1", + "update", + ":ObjectCategoryDefinition_Id" + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Definition Id To the Request URL" + } + ] + }, + "description": "This API is associated with updating object category on the Sunbird Platform .\n - **_/Update/_** endpoint executes the \"Update Object Category Definition\" request based on parameters provided as metadata in the request body.\n - The endpoint for **Update Object Category Definition** is `/object/category/definition/v1/update/{ID}`\n - It points to taxonomy-service - `/object/category/definition/v4/update/{ID}`\n - {...}refers to a section of update endpoint URL path that needs to be replaced by appropriate Id.\n - It is mandatory to provide values for parameters marked with *.\n - Mandatory fields cannot be null or empty." + }, + "response": [ + { + "name": "OK ! Successful operation. Object Category Definition update operation was successfully executed.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. 
Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/update/:ObjectCategoryDefinition_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "update", + ":ObjectCategoryDefinition_Id" + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Definition Id To the Request URL" + } + ] + } + }, + "status": "OK", + "code": 200, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.update\",\n \"ver\": \"4.0\",\n \"ts\": \"2020-12-10T20:26:07ZZ\",\n \"params\": {\n \"resmsgid\": \"80aa9310-b749-411c-a13b-8d9f25af389f\",\n \"msgid\": null,\n \"err\": null,\n \"status\": \"successful\",\n \"errmsg\": null\n },\n \"responseCode\": \"OK\",\n \"result\": {\n \"identifier\": \"obj-cat:explanation-content_content_all\"\n }\n}" + }, + { + "name": "BAD REQUEST. The 'Update Object Category Definition ' operation failed ! The possible reason for failure is that you may have missed providing input for a mandatory parameter. 
", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/update/:ObjectCategoryDefinition_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "update", + ":ObjectCategoryDefinition_Id" + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Definition Id To the Request URL" + } + ] + } + }, + "status": "Bad Request", + "code": 400, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.update\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-17T11:15:46ZZ\",\n \"params\": {\n \"resmsgid\": \"d2ed46be-6846-42e4-b761-c8acdff5954a\",\n \"msgid\": null,\n \"err\": \"ERROR_RESTRICTED_PROP\",\n \"status\": \"failed\",\n \"errmsg\": \"Properties in list [identifier, categoryId, targetObjectType, channel, status, objectType] are not allowed in request\"\n },\n \"responseCode\": \"CLIENT_ERROR\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "RESOURCE NOT FOUND,** Object Category Definition Update** 
operation failed !The possible reason for failure is that you may have provided wrong content id.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/update/:ObjectCategoryDefinition_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "update", + ":ObjectCategoryDefinition_Id" + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Definition Id To the Request URL" + } + ] + } + }, + "status": "Not Found", + "code": 404, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.definition.update\",\n \"ver\": \"3.0\",\n \"ts\": \"2020-12-12T00:31:16ZZ\",\n \"params\": {\n \"resmsgid\": \"c29ffd8b-d105-4eeb-a875-deaeae2bf5b8\",\n \"msgid\": null,\n \"err\": \"NOT_FOUND\",\n \"status\": \"failed\",\n \"errmsg\": \"Error! Node(s) doesn't Exists. 
| [Invalid Node Id.]: do_21317015414162227213604\"\n },\n \"responseCode\": \"RESOURCE_NOT_FOUND\",\n \"result\": {\n \"messages\": null\n }\n}" + }, + { + "name": "INTERNAL SERVER ERROR ! Looks like something went wrong! These errors are tracked automatically, but if the problem persists feel free to contact us. In the meantime, try refreshing.", + "originalRequest": { + "method": "PATCH", + "header": [ + { + "description": "Added as a part of security scheme: apikey", + "key": "Authorization", + "value": "" + }, + { + "description": "(Required) The Content Type entity is the media type of the resource.Possible media types can be:\n - Application/json", + "key": "Content-Type", + "value": "sit" + }, + { + "description": "(Required) All content APIs require authorization for use. Specify the authorization key received from the administrator when placing the request for use of the API.", + "key": "Authorization", + "value": "sit" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"request\": {\n \"content\": {\n \"description\": \"Updated description\"\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{baseUrl}}/object/category/definition/v1/update/:ObjectCategoryDefinition_Id", + "host": [ + "{{baseUrl}}" + ], + "path": [ + "object", + "category", + "definition", + "v1", + "update", + ":ObjectCategoryDefinition_Id" + ], + "variable": [ + { + "key": "ObjectCategoryDefinition_Id", + "value": "sit", + "description": "(Required) Please append a valid Object Category Definition Id To the Request URL" + } + ] + } + }, + "status": "Internal Server Error", + "code": 500, + "_postman_previewlanguage": "json", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "cookie": [], + "body": "{\n \"id\": \"api.object.category.create\",\n \"ver\": \"1.0\",\n \"ts\": \"2020-12-10T08:51:51.647Z\",\n \"params\": {\n \"resmsgid\": \"f234a6f0-3ac4-11eb-b0a2-8d5c9f561887\",\n \"msgid\": null,\n \"status\": 
\"failed\",\n \"err\": null,\n \"errmsg\": null\n },\n \"responseCode\": \"SERVER_ERROR\",\n \"result\": {}\n}" + } + ] + } + ] + } + ] + } + ], + "variable": [ + { + "key": "baseUrl", + "value": "https://staging.sunbirded.org/api", + "type": "string" + } + ] +} \ No newline at end of file diff --git a/taxonomy-api/pom.xml b/taxonomy-api/pom.xml index f0c74ceaf..021ae730a 100644 --- a/taxonomy-api/pom.xml +++ b/taxonomy-api/pom.xml @@ -19,7 +19,7 @@ UTF-8 UTF-8 - 2.11 + 2.12 diff --git a/taxonomy-api/taxonomy-actors/pom.xml b/taxonomy-api/taxonomy-actors/pom.xml index e4fb38295..f37c10c7f 100644 --- a/taxonomy-api/taxonomy-actors/pom.xml +++ b/taxonomy-api/taxonomy-actors/pom.xml @@ -23,7 +23,7 @@ org.sunbird - graph-engine_2.11 + graph-engine_2.12 1.0-SNAPSHOT jar diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala new file mode 100644 index 000000000..a1e04ec79 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala @@ -0,0 +1,68 @@ +package org.sunbird.actors + +import org.apache.commons.lang3.StringUtils +import java.util +import javax.inject.Inject +import org.sunbird.actor.core.BaseActor +import org.sunbird.graph.utils.NodeUtil +import org.sunbird.common.dto.{Request, Response, ResponseHandler} +import org.sunbird.common.exception.ClientException +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.nodes.DataNode +import org.sunbird.utils.{Constants, RequestUtil} +import org.sunbird.mangers.CategoryManager +import org.sunbird.cache.impl.RedisCache + +import scala.concurrent.{ExecutionContext, Future} + +class CategoryActor @Inject()(implicit oec: OntologyEngineContext) extends BaseActor { + implicit val ec: ExecutionContext = getContext().dispatcher + + override def onReceive(request: Request): Future[Response] = { + request.getOperation match { + case 
Constants.CREATE_CATEGORY => create(request) + case Constants.READ_CATEGORY => read(request) + case Constants.UPDATE_CATEGORY => update(request) + case Constants.RETIRE_CATEGORY => retire(request) + case _ => ERROR(request.getOperation) + } + } + + @throws[Exception] + private def create(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + val code = request.getRequest.getOrDefault(Constants.CODE, "").asInstanceOf[String] + if (!request.getRequest.containsKey("code")) throw new ClientException("ERR_CATEGORY_CODE_REQUIRED", "Unique code is mandatory for category") + request.getRequest.put(Constants.IDENTIFIER, code) + RedisCache.delete("masterCategories") + CategoryManager.validateTranslationMap(request) + DataNode.create(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.NODE_ID, node.getIdentifier) + }) + } + + private def read(request: Request): Future[Response] = { + DataNode.read(request).map(node => { + val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, null, request.getContext.get("schemaName").asInstanceOf[String], request.getContext.get("version").asInstanceOf[String]) + ResponseHandler.OK.put("category", metadata) + }) + } + + private def update(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + if (request.getRequest.containsKey(Constants.CODE)) throw new ClientException("ERR_CATEGORY_UPDATE", "code updation is not allowed.") + RedisCache.delete("masterCategories") + CategoryManager.validateTranslationMap(request) + DataNode.update(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.NODE_ID, node.getIdentifier) + }) + } + + private def retire(request: Request): Future[Response] = { + request.getRequest.put("status", "Retired") + RedisCache.delete("masterCategories") + DataNode.update(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, 
node.getIdentifier).put(Constants.NODE_ID, node.getIdentifier) + }) + } +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala new file mode 100644 index 000000000..6833c37f7 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala @@ -0,0 +1,148 @@ +package org.sunbird.actors + + +import org.apache.commons.lang3.StringUtils +import org.sunbird.actor.core.BaseActor +import org.sunbird.common.Slug +import org.sunbird.common.dto.{Request, Response, ResponseHandler} +import org.sunbird.common.exception.ClientException +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.dac.enums.RelationTypes +import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.nodes.DataNode +import org.sunbird.graph.utils.NodeUtil +import org.sunbird.utils.{Constants, RequestUtil} + +import java.util +import java.util.Map +import javax.inject.Inject +import scala.concurrent.{ExecutionContext, Future} +import scala.collection.JavaConverters._ + +class CategoryInstanceActor @Inject()(implicit oec: OntologyEngineContext) extends BaseActor { + implicit val ec: ExecutionContext = getContext().dispatcher + + override def onReceive(request: Request): Future[Response] = { + request.getOperation match { + case Constants.CREATE_CATEGORY_INSTANCE => create(request) + case Constants.READ_CATEGORY_INSTANCE => read(request) + case Constants.UPDATE_CATEGORY_INSTANCE => update(request) + case Constants.RETIRE_CATEGORY_INSTANCE => retire(request) + case _ => ERROR(request.getOperation) + } + } + + private def create(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + val frameworkId = request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String] + val code = request.getRequest.getOrDefault(Constants.CODE, 
"").asInstanceOf[String] + if (frameworkId.isEmpty()) throw new ClientException("ERR_INVALID_FRAMEWORK_ID", s"Invalid FrameworkId: '${frameworkId}' for Categoryinstance ") + if (!request.getRequest.containsKey(Constants.CODE)) throw new ClientException("ERR_CATEGORY_CODE_REQUIRED", "Unique code is mandatory for categoryInstance") + val getFrameworkReq = new Request() + getFrameworkReq.setContext(new util.HashMap[String, AnyRef]() {{ + putAll(request.getContext) + }}) + getFrameworkReq.getContext.put(Constants.SCHEMA_NAME, Constants.FRAMEWORK_SCHEMA_NAME) + getFrameworkReq.getContext.put(Constants.VERSION, Constants.FRAMEWORK_SCHEMA_VERSION) + getFrameworkReq.put("disableCache", Option(true)) + getFrameworkReq.put(Constants.IDENTIFIER, frameworkId) + DataNode.read(getFrameworkReq).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, frameworkId)) { + validateCategoryObject(request).map(catNode => { + request.getRequest.put(Constants.IDENTIFIER, generateIdentifier(frameworkId, catNode.getIdentifier)) + val frameworkList = new util.ArrayList[Map[String, AnyRef]] + val relationMap = new util.HashMap[String, AnyRef] + relationMap.put("identifier", frameworkId) + relationMap.put("index", getCategoryIndex(node)) + frameworkList.add(relationMap) + request.put("frameworks", frameworkList) + DataNode.create(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier) + .put(Constants.VERSION_KEY, node.getMetadata.get("versionKey")) + }) + }).flatMap(f => f) + } else throw new ClientException("ERR_INVALID_FRAMEWORK_ID", s"Invalid FrameworkId: '${frameworkId}' for Categoryinstance ") + }).flatMap(f => f) + } + + private def getCategoryIndex(node: Node): Integer = { + val indexList = (node.getOutRelations.asScala ++ node.getInRelations.asScala).filter(r => (StringUtils.equals(r.getRelationType,RelationTypes.SEQUENCE_MEMBERSHIP.relationName()) && StringUtils.equals(r.getStartNodeId, node.getIdentifier))) + 
.map(relation => { + relation.getMetadata.getOrDefault("IL_SEQUENCE_INDEX",1.asInstanceOf[Number]).asInstanceOf[Number].intValue() + }) + if (indexList.nonEmpty) indexList.max + 1 else 1 + } + + private def read(request: Request): Future[Response] = { + validateCategoryInstanceObject(request).map(node => { + val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, null, request.getContext.get("schemaName").asInstanceOf[String], request.getContext.get("version").asInstanceOf[String]) + ResponseHandler.OK.put("categoryInstance", metadata) + } ) + } + + private def update(request: Request): Future[Response] = { + val categoryId = request.getContext.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String]; + RequestUtil.restrictProperties(request) + validateCategoryInstanceObject(request) + request.getContext.put(Constants.IDENTIFIER, generateIdentifier(request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String], categoryId)) + DataNode.update(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.VERSION_KEY, node.getMetadata.get("versionKey")) + }) + } + + private def retire(request: Request): Future[Response] = { + validateCategoryInstanceObject(request) + request.getContext.put(Constants.IDENTIFIER, generateIdentifier(request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String], request.getRequest.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String])) + request.getRequest.put("status", "Retired") + DataNode.update(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.VERSION_KEY, node.getMetadata.get("versionKey")) + }) + } + + private def validateCategoryInstanceObject(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext) = { + val frameworkId = request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String] + val categoryId = request.getRequest.getOrDefault(Constants.CATEGORY, 
"").asInstanceOf[String] + if (frameworkId.isEmpty()) throw new ClientException("ERR_INVALID_FRAMEWORK_ID", s"Invalid FrameworkId: '${frameworkId}' for CategoryInstance ") + if (categoryId.isEmpty()) throw new ClientException("ERR_INVALID_CATEGORY_ID", s"Invalid CategoryId: '${categoryId}' for categoryInstance") + val categoryInstanceId = generateIdentifier(frameworkId, categoryId) + val getCategoryReq = new Request() + getCategoryReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getCategoryReq.getContext.put(Constants.SCHEMA_NAME, Constants.CATEGORY_INSTANCE_SCHEMA_NAME) + getCategoryReq.getContext.put(Constants.VERSION, Constants.CATEGORY_INSTANCE_SCHEMA_VERSION) + getCategoryReq.put(Constants.IDENTIFIER, categoryInstanceId) + DataNode.read(getCategoryReq)(oec, ec).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, categoryInstanceId)) node + else throw new ClientException("ERR_CATEGORY_NOT_FOUND/ ERR_FRAMEWORK_NOT_FOUND", s"Given channel/framework is not related to given category") + })(ec) + } + + private def validateCategoryObject(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext) = { + val code = request.getRequest.getOrDefault(Constants.CODE, "").asInstanceOf[String] + if (code.isEmpty()) throw new ClientException("ERR_INVALID_CODE", s"Invalid Code: '${code}' for category") + val getCategoryReq = new Request() + getCategoryReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getCategoryReq.getContext.put(Constants.SCHEMA_NAME, Constants.CATEGORY_SCHEMA_NAME) + getCategoryReq.getContext.put(Constants.VERSION, Constants.CATEGORY_SCHEMA_VERSION) + getCategoryReq.put(Constants.IDENTIFIER, code) + DataNode.read(getCategoryReq)(oec, ec).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, code)) node + else + throw new ClientException("ERR_CATEGORY_NOT_FOUND", s"Given category 
does not belong to master category data") + })(ec) + } + + private def generateIdentifier(scopeId: String, code: String): String = { + var id: String = null + if (StringUtils.isNotBlank(scopeId)) id = Slug.makeSlug(scopeId + "_" + code) + id + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/FrameworkActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/FrameworkActor.scala new file mode 100644 index 000000000..68ff79bea --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/FrameworkActor.scala @@ -0,0 +1,168 @@ +package org.sunbird.actors + +import org.apache.commons.lang3.StringUtils +import org.sunbird.actor.core.BaseActor +import org.sunbird.common.Slug +import org.sunbird.common.dto.{Request, Response, ResponseHandler} +import org.sunbird.common.exception.ClientException +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.dac.model.{Node, SubGraph} +import org.sunbird.graph.nodes.DataNode +import org.sunbird.graph.path.DataSubGraph +import org.sunbird.graph.utils.{NodeUtil, ScalaJsonUtils} +import org.sunbird.mangers.FrameworkManager +import org.sunbird.utils.{CategoryCache, FrameworkCache} +import org.sunbird.utils.{Constants, RequestUtil} + +import java.util +import javax.inject.Inject +import scala.collection.JavaConverters._ +import scala.concurrent.{ExecutionContext, Future} +import scala.collection.JavaConversions.mapAsJavaMap + +class FrameworkActor @Inject()(implicit oec: OntologyEngineContext) extends BaseActor { + + implicit val ec: ExecutionContext = getContext().dispatcher + + override def onReceive(request: Request): Future[Response] = { + request.getOperation match { + case Constants.CREATE_FRAMEWORK => create(request) + case Constants.READ_FRAMEWORK => read(request) + case Constants.UPDATE_FRAMEWORK => update(request) + case Constants.RETIRE_FRAMEWORK => retire(request) + case Constants.PUBLISH_FRAMEWORK => 
publish(request) + case Constants.COPY_FRAMEWORK => copy(request) + case _ => ERROR(request.getOperation) + } + } + + + @throws[Exception] + private def create(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + val code = request.getRequest.getOrDefault(Constants.CODE, "").asInstanceOf[String] + val channel = request.getRequest.getOrDefault(Constants.CHANNEL, "").asInstanceOf[String] + if (StringUtils.isNotBlank(code) && StringUtils.isNotBlank(channel)) { + request.getRequest.put(Constants.IDENTIFIER, code) + val getChannelReq = new Request() + getChannelReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getChannelReq.getContext.put(Constants.SCHEMA_NAME, Constants.CHANNEL_SCHEMA_NAME) + getChannelReq.getContext.put(Constants.VERSION, Constants.CHANNEL_SCHEMA_VERSION) + getChannelReq.put(Constants.IDENTIFIER, channel) + DataNode.read(getChannelReq).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, channel)) { + DataNode.create(request).map(frameNode => { + ResponseHandler.OK.put(Constants.NODE_ID, frameNode.getIdentifier).put("versionKey", frameNode.getMetadata.get("versionKey")) + }) + } else throw new ClientException("ERR_INVALID_CHANNEL_ID", "Please provide valid channel identifier") + }).flatMap(f => f) + } else throw new ClientException("ERR_INVALID_REQUEST", "Invalid Request. 
Please Provide Required Properties!") + + } + + + @throws[Exception] + private def read(request: Request): Future[Response] = { + val frameworkId = request.get("identifier").asInstanceOf[String] + val returnCategories: java.util.List[String] = seqAsJavaListConverter(request.get("categories").asInstanceOf[String].split(",").filter(category => StringUtils.isNotBlank(category) && !StringUtils.equalsIgnoreCase(category, "null"))).asJava + request.getRequest.put("categories", returnCategories) + if (StringUtils.isNotBlank(frameworkId)) { + val framework = FrameworkCache.get(frameworkId, returnCategories) + if(framework != null){ + Future { + ResponseHandler.OK.put(Constants.FRAMEWORK, framework) + } + } else { + val frameworkData: Future[Map[String, AnyRef]] = FrameworkManager.getFrameworkHierarchy(request) + frameworkData.map(framework => { + if (framework.isEmpty) { + DataNode.read(request).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, frameworkId)) { + val framework = NodeUtil.serialize(node, null, request.getContext.get(Constants.SCHEMA_NAME).asInstanceOf[String], request.getContext.get(Constants.VERSION).asInstanceOf[String]) + ResponseHandler.OK.put(Constants.FRAMEWORK, framework) + } else throw new ClientException("ERR_INVALID_REQUEST", "Invalid Request. Please Provide Required Properties!") + }) + } else { + Future { + val filterFrameworkData = FrameworkManager.filterFrameworkCategories(framework, returnCategories) + FrameworkCache.save(filterFrameworkData, returnCategories) + ResponseHandler.OK.put(Constants.FRAMEWORK, filterFrameworkData.asJava) + } + } + }).flatMap(f => f) + } + } else throw new ClientException("ERR_INVALID_REQUEST", "Invalid Request. 
Please Provide Required Properties!") + } + + @throws[Exception] + private def update(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + DataNode.update(request).map(node => { + ResponseHandler.OK.put("node_id", node.getIdentifier).put("versionKey", node.getMetadata.get("versionKey")) + }) + } + + @throws[Exception] + private def retire(request: Request): Future[Response] = { + request.getRequest.put("status", "Retired") + DataNode.update(request).map(node => { + ResponseHandler.OK.put("node_id", node.getIdentifier).put("identifier", node.getIdentifier) + }) + } + + + @throws[Exception] + private def publish(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + val frameworkId = request.getRequest.getOrDefault(Constants.IDENTIFIER, "").asInstanceOf[String] + val channel = request.getRequest.getOrDefault(Constants.CHANNEL, "").asInstanceOf[String] + val getChannelReq = new Request() + getChannelReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getChannelReq.getContext.put(Constants.SCHEMA_NAME, Constants.CHANNEL_SCHEMA_NAME) + getChannelReq.getContext.put(Constants.VERSION, Constants.CHANNEL_SCHEMA_VERSION) + getChannelReq.put(Constants.IDENTIFIER, channel) + DataNode.read(getChannelReq).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, channel)) { + val name = node.getMetadata.getOrDefault("name", "").asInstanceOf[String] + val description = node.getMetadata.getOrDefault("description", "").asInstanceOf[String] + request.getRequest.putAll(Map("name" -> name, "description" -> description).asJava) + if(StringUtils.isNotBlank(frameworkId)){ + val getFrameworkReq = new Request() + getFrameworkReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getFrameworkReq.getContext.put(Constants.SCHEMA_NAME, Constants.FRAMEWORK_SCHEMA_NAME) + getFrameworkReq.getContext.put(Constants.VERSION, 
Constants.FRAMEWORK_SCHEMA_VERSION) + getFrameworkReq.put(Constants.IDENTIFIER, frameworkId) + val subGraph: Future[SubGraph] = DataSubGraph.read(request) + subGraph.map(data => { + val frameworkHierarchy = FrameworkManager.getCompleteMetadata(frameworkId, data) + CategoryCache.setFramework(frameworkId, frameworkHierarchy) + val req = new Request(request) + req.put("hierarchy", ScalaJsonUtils.serialize(frameworkHierarchy)) + req.put("identifier", frameworkId) + oec.graphService.saveExternalProps(req) + ResponseHandler.OK.put(Constants.PUBLISH_STATUS, s"Publish Event for Framework Id '${node.getIdentifier}' is pushed Successfully!") + }) + } else throw new ClientException("ERR_INVALID_FRAMEWORK_ID", "Please provide valid framework identifier") + } else throw new ClientException("ERR_INVALID_CHANNEL_ID", "Please provide valid channel identifier") + }).flatMap(f => f) + } + + //TODO: + private def copy(request: Request): Future[Response] = { + RequestUtil.restrictProperties(request) + FrameworkManager.copyHierarchy(request) + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala new file mode 100644 index 000000000..f98917614 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala @@ -0,0 +1,163 @@ +package org.sunbird.actors + +import org.apache.commons.lang3.StringUtils +import org.sunbird.actor.core.BaseActor +import org.sunbird.cache.impl.RedisCache +import org.sunbird.common.{Platform, Slug} +import org.sunbird.common.dto.{Request, Response, ResponseHandler} +import org.sunbird.common.exception.ClientException +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.dac.enums.RelationTypes +import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.nodes.DataNode +import org.sunbird.graph.utils.NodeUtil +import org.sunbird.mangers.CategoryManager 
+import org.sunbird.utils.{Constants, RequestUtil} + +import java.util +import java.util.{ArrayList, List, Map} +import javax.inject.Inject +import scala.collection.JavaConverters._ +import scala.collection.JavaConverters.asScalaBufferConverter +import scala.collection.immutable.HashMap +import scala.concurrent.{ExecutionContext, Future} + +class TermActor @Inject()(implicit oec: OntologyEngineContext) extends BaseActor { + implicit val ec: ExecutionContext = getContext().dispatcher + + private final val TERM_CREATION_LIMIT: Int = if (Platform.config.hasPath("framework.max_term_creation_limit")) Platform.config.getInt("framework.max_term_creation_limit") else 200 + override def onReceive(request: Request): Future[Response] = { + request.getOperation match { + case Constants.CREATE_TERM => create(request) + case Constants.READ_TERM => read(request) + case Constants.UPDATE_TERM => update(request) + case Constants.RETIRE_TERM => retire(request) + case _ => ERROR(request.getOperation) + } + } + + @throws[Exception] + private def create(request: Request): Future[Response] = { + // val requestList: util.List[util.Map[String, AnyRef]] = getRequestData(request) + // if (TERM_CREATION_LIMIT < requestList.size) throw new ClientException("ERR_INVALID_TERM_REQUEST", "No. 
of request exceeded max limit of " + TERM_CREATION_LIMIT) + RequestUtil.restrictProperties(request) + val frameworkId = request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String] + val CategoryData = validateCategoryInstance(request) + val categoryId = generateIdentifier(frameworkId, request.getRequest.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String]) + CategoryData.map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, categoryId)) { + val categoryList = new util.ArrayList[Map[String, AnyRef]] + val relationMap = new util.HashMap[String, AnyRef] + relationMap.put("identifier", categoryId) + relationMap.put("index", getIndex(node)) + categoryList.add(relationMap) + request.put("categories", categoryList) + + request.getRequest.put(Constants.IDENTIFIER, generateIdentifier(categoryId, request.getRequest.getOrDefault(Constants.CODE, "").asInstanceOf[String])) + DataNode.create(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.NODE_ID, node.getIdentifier) + }) + } else throw new ClientException("ERR_INVALID_CATEGORY_ID", s"Please provide valid category") + }).flatMap(f => f) + } + + private def getIndex(node: Node): Integer = { + val indexList = (node.getOutRelations.asScala ++ node.getInRelations.asScala).filter(r => (StringUtils.equals(r.getRelationType, RelationTypes.SEQUENCE_MEMBERSHIP.relationName()) && StringUtils.equals(r.getStartNodeId, node.getIdentifier))) + .map(relation => { + relation.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).asInstanceOf[Number].intValue() + }) + if (indexList.nonEmpty) indexList.max + 1 else 1 + } + + private def read(request: Request): Future[Response] = { + validateCategoryInstance(request) + validateTerm(request).map(node => { + val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, null, request.getContext.get("schemaName").asInstanceOf[String], 
request.getContext.get("version").asInstanceOf[String]) + ResponseHandler.OK.put("term", metadata) + }) + } + + private def update(request: Request): Future[Response] = { + val termId = request.getContext.getOrDefault(Constants.TERM, "").asInstanceOf[String]; + val frameworkId = request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String] + RequestUtil.restrictProperties(request) + val categoryId = generateIdentifier(frameworkId, request.getRequest.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String]) + validateCategoryInstance(request) + request.getContext.put(Constants.IDENTIFIER, generateIdentifier(categoryId, termId)) + DataNode.update(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.VERSION_KEY, node.getMetadata.get("versionKey")) + }) + } + + private def retire(request: Request): Future[Response] = { + val termId = request.getContext.getOrDefault(Constants.TERM, "").asInstanceOf[String]; + val frameworkId = request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String] + val categoryId = generateIdentifier(frameworkId, request.getRequest.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String]) + validateCategoryInstance(request) + request.getContext.put(Constants.IDENTIFIER, generateIdentifier(categoryId, termId)) + request.getRequest.put("status", "Retired") + DataNode.update(request).map(node => { + ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.VERSION_KEY, node.getMetadata.get("versionKey")) + }) + } + + private def validateTerm(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext) = { + val termId = request.getRequest.getOrDefault(Constants.TERM, "").asInstanceOf[String] + if (termId.isEmpty()) throw new ClientException("ERR_INVALID_TERM_ID", s"Invalid TermId: '${termId}' for Term") + val categoryInstanceId = generateIdentifier(request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String], 
request.getRequest.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String]) + val getTermReq = new Request() + getTermReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getTermReq.getContext.put(Constants.SCHEMA_NAME, Constants.TERM_SCHEMA_NAME) + getTermReq.getContext.put(Constants.VERSION, Constants.TERM_SCHEMA_VERSION) + getTermReq.put(Constants.IDENTIFIER, generateIdentifier(categoryInstanceId, termId)) + DataNode.read(getTermReq)(oec, ec).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, generateIdentifier(categoryInstanceId, termId))) node + else throw new ClientException("ERR_CHANNEL_NOT_FOUND/ ERR_FRAMEWORK_NOT_FOUND", s"Given channel/framework is not related to given category") + })(ec) + } + + private def validateCategoryInstance(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext) = { + val frameworkId = request.getRequest.getOrDefault(Constants.FRAMEWORK, "").asInstanceOf[String] + val categoryId = request.getRequest.getOrDefault(Constants.CATEGORY, "").asInstanceOf[String] + if (frameworkId.isEmpty()) throw new ClientException("ERR_INVALID_FRAMEWORK_ID", s"Invalid FrameworkId: '${frameworkId}' for Term ") + if (categoryId.isEmpty()) throw new ClientException("ERR_INVALID_CATEGORY_ID", s"Invalid CategoryId: '${categoryId}' for Term") + val categoryInstanceId = generateIdentifier(frameworkId, categoryId) + val getCategoryInstanceReq = new Request() + getCategoryInstanceReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getCategoryInstanceReq.getContext.put(Constants.SCHEMA_NAME, Constants.CATEGORY_INSTANCE_SCHEMA_NAME) + getCategoryInstanceReq.getContext.put(Constants.VERSION, Constants.CATEGORY_INSTANCE_SCHEMA_VERSION) + getCategoryInstanceReq.put(Constants.IDENTIFIER, categoryInstanceId) + DataNode.read(getCategoryInstanceReq)(oec, ec).map(node => { + if (null != node && 
StringUtils.equalsAnyIgnoreCase(node.getIdentifier, categoryInstanceId)) node + else throw new ClientException("ERR_CHANNEL_NOT_FOUND/ ERR_FRAMEWORK_NOT_FOUND", s"Given channel/framework is not related to given category") + })(ec) + } + + private def getRequestData(request: Request): util.List[util.Map[String, AnyRef]] = { + val req = request.getRequest.get(request.getObjectType.toLowerCase()) + req match { + case req: util.List[util.Map[String, AnyRef]] => req + case req: util.Map[String, AnyRef] => new util.ArrayList[util.Map[String, AnyRef]]() { + { + add(req) + } + } + case _ => throw new ClientException("ERR_INVALID_TERM", "Invalid Request! Please Provide Valid Request.") + } + } + + private def generateIdentifier(scopeId: String, code: String): String = { + var id: String = null + if (StringUtils.isNotBlank(scopeId)) id = Slug.makeSlug(scopeId + "_" + code) + id + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/CategoryManager.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/CategoryManager.scala new file mode 100644 index 000000000..5ff58aa96 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/CategoryManager.scala @@ -0,0 +1,27 @@ +package org.sunbird.mangers + +import com.twitter.util.Config.intoOption +import org.sunbird.common.Platform +import org.sunbird.common.dto.Request +import org.sunbird.common.exception.{ClientException, ServerException} +import org.sunbird.graph.nodes.DataNode +import org.sunbird.utils.Constants + +import java.util +import java.util.{Map, Optional} +import scala.collection.JavaConverters.mapAsScalaMapConverter +import scala.collection.mutable.ListBuffer + +object CategoryManager { + + def validateTranslationMap(request: Request) = { + val translations: util.Map[String, AnyRef] = Optional.ofNullable(request.get("translations").asInstanceOf[util.HashMap[String, AnyRef]]).orElse(new util.HashMap[String, AnyRef]()) + 
if (translations.isEmpty) request.getRequest.remove("translations") + else { + val languageCodes = Platform.getStringList("platform.language.codes", new util.ArrayList[String]()) + if (translations.asScala.exists(entry => !languageCodes.contains(entry._1))) + throw new ClientException("ERR_INVALID_LANGUAGE_CODE", "Please Provide Valid Language Code For translations. Valid Language Codes are : " + languageCodes) + } + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala new file mode 100644 index 000000000..4b29a30dc --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala @@ -0,0 +1,266 @@ +package org.sunbird.mangers + +import java.util +import com.twitter.util.Config.intoOption +import org.apache.commons.lang3.StringUtils +import org.sunbird.common.{JsonUtils, Platform, Slug} +import org.sunbird.common.dto.{Request, Response, ResponseHandler} +import org.sunbird.common.exception.{ClientException, ServerException} +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.dac.enums.RelationTypes +import org.sunbird.graph.dac.model.{Node, Relation, SubGraph} +import org.sunbird.graph.nodes.DataNode +import org.sunbird.graph.path.DataSubGraph +import org.sunbird.graph.schema.{DefinitionNode, ObjectCategoryDefinition} +import org.sunbird.graph.utils.NodeUtil +import org.sunbird.graph.utils.NodeUtil.{convertJsonProperties, handleKeyNames} + +import java.util +import java.util.Collections +import java.util.concurrent.{CompletionException, Executors} +import scala.collection.JavaConverters +import scala.collection.JavaConverters._ +import scala.collection.JavaConversions._ +import scala.concurrent.{ExecutionContext, Future} +import org.sunbird.utils.{CategoryCache, Constants, FrameworkCache} + +object FrameworkManager { + private val 
languageCodes = Platform.getStringList("platform.language.codes", new util.ArrayList[String]()) + val schemaVersion: String = "1.0" + def validateTranslationMap(request: Request) = { + val translations: util.Map[String, AnyRef] = request.getOrElse("translations", "").asInstanceOf[util.HashMap[String, AnyRef]] + if (translations.isEmpty) request.getRequest.remove("translations") + else { + if (translations.asScala.exists(entry => !languageCodes.contains(entry._1))) + throw new ClientException("ERR_INVALID_LANGUAGE_CODE", "Please Provide Valid Language Code For translations. Valid Language Codes are : " + languageCodes) + } + } + + def filterFrameworkCategories(framework: util.Map[String, AnyRef], categoryNames: util.List[String]): Map[String, AnyRef] = { + val categories = framework.getOrDefault("categories", new util.ArrayList[util.Map[String, AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]] + if (!categories.isEmpty && !categoryNames.isEmpty) { + val filteredCategories = categories.filter(category => { + val name = category.get("name").asInstanceOf[String] + categoryNames.contains(name.toLowerCase()) + }).toList.asJava + val filteredData = framework.-("categories") ++ Map("categories" -> filteredCategories) + val finalCategories = removeAssociations(filteredData.toMap, categoryNames) + (filteredData.-("categories") ++ Map("categories" -> finalCategories)).toMap + } else { + framework.toMap + } + } + + private def removeAssociations(responseMap: Map[String, AnyRef], returnCategories: java.util.List[String]): util.List[util.Map[String, AnyRef]] = { + val categories = responseMap.getOrDefault("categories", new util.ArrayList[util.Map[String, AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]] + categories.map( category => { + removeTermAssociations(category.getOrDefault("terms", new util.ArrayList[util.Map[String, AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]], returnCategories) + }) + categories + } + + private def 
removeTermAssociations(terms: util.List[util.Map[String, AnyRef]], returnCategories: java.util.List[String]): Unit = { + terms.map(term => { + val associations = term.getOrDefault("associations", new util.ArrayList[util.Map[String, AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]] + if (associations.nonEmpty) { + val filteredAssociations = associations.filter(p => p != null && returnCategories.contains(p.get("category"))) + term.put("associations", filteredAssociations) + if (filteredAssociations.isEmpty) + term.remove("associations") + removeTermAssociations(term.getOrDefault("children", new util.ArrayList[util.Map[String, AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]], returnCategories) + } + }) + } + + def getCompleteMetadata(id: String, subGraph: SubGraph)(implicit oec: OntologyEngineContext, ec: ExecutionContext): util.Map[String, AnyRef] = { + val nodes = subGraph.getNodes + val relations = subGraph.getRelations + val node = nodes.get(id) + val metadata = node.getMetadata + val objectType = node.getObjectType.toLowerCase().replace("image", "") + val channel = node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String] + val definition: ObjectCategoryDefinition = DefinitionNode.getObjectCategoryDefinition("", objectType, channel) + val jsonProps = DefinitionNode.fetchJsonProps(node.getGraphId, schemaVersion, objectType, definition) + val updatedMetadata: util.Map[String, AnyRef] = metadata.entrySet().asScala.filter(entry => null != entry.getValue) + .map((entry: util.Map.Entry[String, AnyRef]) => handleKeyNames(entry, null) -> convertJsonProperties(entry, jsonProps)).toMap ++ + Map("objectType" -> node.getObjectType, "identifier" -> node.getIdentifier, "languageCode" -> NodeUtil.getLanguageCodes(node)) + + val relationDef = DefinitionNode.getRelationDefinitionMap(node.getGraphId, schemaVersion, objectType, definition) + val outRelations = relations.filter((rel: Relation) => { + StringUtils.equals(rel.getStartNodeId.toString(), 
node.getIdentifier) + }).sortBy((rel: Relation) => rel.getMetadata.get("IL_SEQUENCE_INDEX").asInstanceOf[Long])(Ordering.Long).toList + + val relMetadata = getRelationAsMetadata(relationDef, outRelations, "out") + val childHierarchy = relMetadata.map(x => (x._1, x._2.map(a => { + val identifier = a.getOrElse("identifier", "") + val childNode = nodes.get(identifier) + val index = a.getOrElse("index", 1).asInstanceOf[Number] + val metaData = (childNode.getMetadata ++ Map("index" -> index)).asJava + childNode.setMetadata(metaData) + getCompleteMetadata(childNode.getIdentifier, subGraph) + }).toList.asJava)) + (updatedMetadata ++ childHierarchy).asJava + } + + private def getRelationAsMetadata(definitionMap: Map[String, AnyRef], relationMap: util.List[Relation], direction: String) = { + relationMap.asScala.map(rel => + { + val endObjectType = rel.getEndNodeObjectType.replace("Image", "") + val relKey: String = rel.getRelationType + "_" + direction + "_" + endObjectType + if (definitionMap.containsKey(relKey)) { + val relData =Map[String, Object]("identifier" -> rel.getEndNodeId.replace(".img", ""), + "name"-> rel.getEndNodeName, + "objectType"-> endObjectType, + "relation"-> rel.getRelationType, + "KEY" -> definitionMap.getOrDefault(relKey, "").asInstanceOf[String] + ) ++ rel.getMetadata.asScala + val indexMap = if(rel.getRelationType.equals("hasSequenceMember")) Map("index" -> rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX",1.asInstanceOf[Number]).asInstanceOf[Number]) else Map() + relData ++ indexMap + } else Map[String, Object]() + }).filter(x => x.nonEmpty) + .groupBy(x => x.getOrDefault("KEY", "").asInstanceOf[String]) + .map(x => (x._1, (x._2.toList.map(x => { + x.-("KEY") + x.-("IL_SEQUENCE_INDEX") + })).asJava )) + } + + def getFrameworkHierarchy(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Map[String, AnyRef]] = { + val req = new Request(request) + req.put("identifier", request.get("identifier")) + val graph_id = 
req.getContext.getOrDefault("graph_id", "domain").asInstanceOf[String] + val schemaName = req.getContext.getOrDefault("schemaName", "framework").asInstanceOf[String] + val schemaVersion = req.getContext.getOrDefault("schemaVersion", "1.0").asInstanceOf[String] + val externalProps = DefinitionNode.getExternalProps(graph_id, schemaVersion, schemaName) + + val responseFuture = oec.graphService.readExternalProps(request, externalProps) + responseFuture.map(response => { + if (!ResponseHandler.checkError(response)) { + val hierarchyString = response.getResult.toMap.getOrDefault("hierarchy", "").asInstanceOf[String] + if (StringUtils.isNotEmpty(hierarchyString)) { + Future(JsonUtils.deserialize(hierarchyString, classOf[java.util.Map[String, AnyRef]]).toMap) + } else + Future(Map[String, AnyRef]()) + } else if (ResponseHandler.checkError(response) && response.getResponseCode.code() == 404) + Future(Map[String, AnyRef]()) + else + throw new ServerException("ERR_WHILE_FETCHING_HIERARCHY_FROM_CASSANDRA", "Error while fetching hierarchy from cassandra") + }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } + } + + def copyHierarchy(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Response] = { + val frameworkId = request.getRequest.getOrDefault(Constants.IDENTIFIER, "").asInstanceOf[String] + val code = request.getRequest.getOrDefault(Constants.CODE, "").asInstanceOf[String] + if (StringUtils.isBlank(code)) + throw new ClientException("ERR_FRAMEWORK_CODE_REQUIRED", "Unique code is mandatory for framework copy") + + if (StringUtils.equals(frameworkId, code)) + throw new ClientException("ERR_FRAMEWORKID_CODE_MATCHES", "FrameworkId and code should not be same.") + + val getFrameworkReq = new Request() + getFrameworkReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getFrameworkReq.getContext.put(Constants.SCHEMA_NAME, Constants.FRAMEWORK_SCHEMA_NAME) + 
getFrameworkReq.getContext.put(Constants.VERSION, Constants.FRAMEWORK_SCHEMA_VERSION) + getFrameworkReq.getContext.put("frameworkId", code) + copyRelationHierarchy(getFrameworkReq, frameworkId, code) + } + + private def copyRelationHierarchy(request: Request, oldId: String, newId: String)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Response] = { + request.put(Constants.IDENTIFIER, oldId) + DataNode.read(request).map(node => { + val schemaName = request.getContext.getOrDefault("schemaName", "framework").asInstanceOf[String] + val schemaVersion = request.getContext.getOrDefault("schemaVersion", "1.0").asInstanceOf[String] + val objectType = node.getObjectType.toLowerCase().replace("image", "") + val channel = node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String] + val definition: ObjectCategoryDefinition = DefinitionNode.getObjectCategoryDefinition("", objectType, channel) + val relationDef = DefinitionNode.getRelationDefinitionMap(node.getGraphId, schemaVersion, objectType, definition) + val frameworkId = request.getContext.getOrDefault("frameworkId", "").asInstanceOf[String] + val outRelations = node.getOutRelations.filter((rel: Relation) => { + StringUtils.equals(rel.getStartNodeId.toString(), node.getIdentifier) + }).toList + + node.setInRelations(null) + node.setOutRelations(null) + val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, new util.ArrayList(), schemaName, schemaVersion) + val requestMap = request.getRequest + if(metadata.get("framework").asInstanceOf[String] != null){ + metadata.put("framework", frameworkId) + } + metadata.putAll(requestMap) + + val req = getRequestMap(request, metadata, newId, relationDef) + DataNode.create(req).map(copiedNode => { + outRelations.map(rel => { + if(!rel.getMetadata.isEmpty){ + val endObjectType = rel.getEndNodeObjectType.replace("Image", "") + val StartObjectType = rel.getStartNodeObjectType.replace("Image", "") + val relKey: String = rel.getRelationType + "_out_" + 
endObjectType + var endNodeId = rel.getEndNodeId() + endNodeId = endNodeId.replaceFirst(oldId.toLowerCase(), newId.toLowerCase()) + if (relationDef.containsKey(relKey)) { + val relReq = new Request(request) + relReq.getContext.put(Constants.SCHEMA_NAME, rel.getEndNodeObjectType) + relReq.getContext.put(Constants.VERSION, schemaVersion) + relReq.getContext.put("frameworkId", frameworkId) + relReq.put("disableCache", Option(true)) + + val inRelKey: String = rel.getRelationType + "_in_" + StartObjectType + val relationMap: util.Map[String, Object] = new util.HashMap[String, Object]() + relationMap.put("identifier", newId) + val index: Integer = rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).asInstanceOf[Number].intValue() + relationMap.put("index", index) + relationMap.put("KEY", inRelKey) + relReq.getContext.put("relationMap", relationMap) + + copyRelationHierarchy(relReq, rel.getEndNodeId, endNodeId) + } + } + }) + ResponseHandler.OK.put("node_id", frameworkId) + }) + }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } + } + + private def getRequestMap(request: Request, metadata: util.Map[String, AnyRef], objectId: String, relationDef: Map[String, AnyRef]): Request = { + val req = new Request(request) + req.setRequest(metadata) + req.put("identifier", objectId) + req.put("code", objectId) + var relMap = request.getContext.getOrDefault("relationMap", new util.HashMap[String, Object]()).asInstanceOf[util.Map[String, Object]] + if (!relMap.isEmpty) { + val relKey = relMap.getOrDefault("KEY", "").asInstanceOf[String] + relMap = relMap.toMap.-("KEY") + if (!relationDef.getOrDefault(relKey, "").asInstanceOf[String].isEmpty) { + val tempArr = new util.ArrayList[util.Map[String, Object]]() + tempArr.add(relMap) + req.put(relationDef.getOrDefault(relKey, "").asInstanceOf[String], tempArr) + } + } + req.getContext.remove("relationMap") + req + } + + def validateChannel(request: Request)(implicit oec: 
OntologyEngineContext, ec: ExecutionContext) = { + val channel = request.getRequest.getOrDefault(Constants.CHANNEL, "").asInstanceOf[String] + if (channel.isEmpty()) throw new ClientException("ERR_INVALID_CHANNEL_ID", "Please provide valid channel identifier") + val getChannelReq = new Request() + getChannelReq.setContext(new util.HashMap[String, AnyRef]() { + { + putAll(request.getContext) + } + }) + getChannelReq.getContext.put(Constants.SCHEMA_NAME, Constants.CHANNEL_SCHEMA_NAME) + getChannelReq.getContext.put(Constants.VERSION, Constants.CHANNEL_SCHEMA_VERSION) + getChannelReq.put(Constants.IDENTIFIER, channel) + DataNode.read(getChannelReq)(oec, ec).map(node => { + if (null != node && StringUtils.equalsAnyIgnoreCase(node.getIdentifier, channel)) node + else + throw new ClientException("ERR_INVALID_CHANNEL_ID", "Please provide valid channel identifier") + })(ec) + } + +} diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/CategoryCache.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/CategoryCache.scala new file mode 100644 index 000000000..8420ee93d --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/CategoryCache.scala @@ -0,0 +1,55 @@ +package org.sunbird.utils + +import java.util +import org.sunbird.cache.impl.RedisCache +import org.sunbird.telemetry.logger.TelemetryManager + +import java.util.ArrayList +import java.util.stream.Collectors +import scala.collection.JavaConverters._ +import scala.collection.JavaConversions._ +import scala.collection.mutable.ListBuffer + + +object CategoryCache{ + def getTerms(framework: String, category: String): List[String] = { + val key = getKey(framework, category) + RedisCache.getList(key) + } + + def setFramework(id: String, framework: util.Map[String, AnyRef]): Unit = { + if (null != framework && !framework.isEmpty) { + val categories = framework.getOrDefault("categories", new util.ArrayList[util.Map[String, 
AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]].toList + categories.map(category => { + val catName = category.get("code").asInstanceOf[String] + val terms = getTerms(category, "terms") + if (terms.nonEmpty) { + val key = getKey(id, catName) + TelemetryManager.info("Setting framework category cache with key: " + key) + RedisCache.saveList(key, terms) + } + }) + } + } + + private def getKey(framework: String, category: String) = "cat_" + framework + category + + private def getTerms(category: util.Map[String, AnyRef], key: String): List[String] = { + val returnTerms = new util.ArrayList[String] + if (category != null && category.nonEmpty) { + val terms = category.getOrDefault(key, new util.ArrayList[util.Map[String, AnyRef]]).asInstanceOf[util.List[util.Map[String, AnyRef]]].toList + if (terms != null && terms.nonEmpty) { + for (term <- terms) { + val termName = term.getOrElse("name", "").asInstanceOf[String] + if (termName != null && termName.trim.nonEmpty) { + returnTerms += termName + val childTerms = getTerms(term, "associations") + if (childTerms.nonEmpty) + returnTerms ++= childTerms + } + } + } + } + returnTerms.toList + } +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/Constants.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/Constants.scala index 29f875655..4ca9b554a 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/Constants.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/Constants.scala @@ -24,4 +24,47 @@ object Constants { val CHANNEL = "channel"; val TARGET_OBJECT_TYPE = "targetObjectType" val OBJECT_CATEGORY_DEFINITION: String = "objectCategoryDefinition" + + // Constants For Framework + val FRAMEWORK: String = "framework"; + val CREATE_FRAMEWORK: String = "createFramework"; + val READ_FRAMEWORK: String = "readFramework" + val UPDATE_FRAMEWORK: String = "updateFramework"; + val RETIRE_FRAMEWORK: String = 
"retireFramework"; + val COPY_FRAMEWORK: String = "copyFramework"; + val PUBLISH_FRAMEWORK: String = "publishFramework"; + val FRAMEWORK_SCHEMA_VERSION = "1.0"; + val FRAMEWORK_SCHEMA_NAME="framework" + val CATEGORIES: String = "categories" + val ROOT_ID: String = "rootId" + val CHANNEL_SCHEMA_NAME="channel" + val CHANNEL_SCHEMA_VERSION = "1.0"; + val PUBLISH_STATUS: String = "publishStatus" + + val CATEGORY: String = "category"; + val CREATE_CATEGORY: String = "createCategory"; + val READ_CATEGORY: String = "readCategory" + val UPDATE_CATEGORY: String = "updateCategory" + val RETIRE_CATEGORY: String = "retireCategory" + val CATEGORY_SCHEMA_VERSION = "1.0"; + val CATEGORY_SCHEMA_NAME = "category" + val CODE: String = "code"; + + val CREATE_CATEGORY_INSTANCE: String = "createCategoryInstance"; + val READ_CATEGORY_INSTANCE: String = "readCategoryInstance" + val UPDATE_CATEGORY_INSTANCE: String = "updateCategoryInstance" + val RETIRE_CATEGORY_INSTANCE: String = "retireCategoryInstance" + val CATEGORY_INSTANCE_SCHEMA_VERSION: String = "1.0"; + val CATEGORY_INSTANCE_SCHEMA_NAME: String = "CategoryInstance" + val VERSION_KEY: String = "versionKey" + + val TERM: String = "term"; + val CREATE_TERM: String = "createTerm"; + val READ_TERM: String = "readTerm" + val UPDATE_TERM: String = "updateTerm" + val RETIRE_TERM: String = "retireTerm" + val TERM_SCHEMA_VERSION: String = "1.0"; + val TERM_SCHEMA_NAME: String = "term" + + } diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/FrameworkCache.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/FrameworkCache.scala new file mode 100644 index 000000000..658fb99e4 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/utils/FrameworkCache.scala @@ -0,0 +1,65 @@ +package org.sunbird.utils + +import com.fasterxml.jackson.core.`type`.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import org.apache.commons.collections4.{CollectionUtils, MapUtils} 
+import org.apache.commons.lang3.StringUtils +import org.sunbird.cache.impl.RedisCache +import org.sunbird.common.{JsonUtils, Platform} +import org.sunbird.graph.util.ScalaJsonUtil +import org.sunbird.graph.utils.ScalaJsonUtils + +import java.util +import java.util.Collections +import scala.collection.JavaConverters._ +import scala.collection.JavaConversions.{asJavaCollection, asScalaBuffer} +import scala.collection.JavaConverters.seqAsJavaListConverter + +object FrameworkCache{ + + private val cacheTtl: Int = if (Platform.config.hasPath("framework.cache.ttl")) Platform.config.getInt("framework.cache.ttl") else 86400 + protected var cacheEnabled: Boolean = if (Platform.config.hasPath("framework.cache.read")) Platform.config.getBoolean("framework.cache.read") else false + private val CACHE_PREFIX: String = "fw_" + protected var mapper: ObjectMapper = new ObjectMapper + + + protected def getFwCacheKey(identifier: String, categoryNames: util.List[String]): String = { + Collections.sort(categoryNames) + CACHE_PREFIX + identifier.toLowerCase + "_" + categoryNames.map(_.toLowerCase).mkString("_") + } + + def get(id: String, returnCategories: util.List[String]): util.Map[String, Object] = { + if (cacheEnabled) { + if (returnCategories.nonEmpty) { + val categories = new util.ArrayList[String](returnCategories) + Collections.sort(categories) + val cachedCategories: String = RedisCache.get(getFwCacheKey(id, categories)) + if (StringUtils.isNotBlank(cachedCategories)) + return JsonUtils.deserialize(cachedCategories, classOf[util.Map[String, Object]]) + } else { + val frameworkMetadata: String = RedisCache.get(id) + if (StringUtils.isNotBlank(frameworkMetadata)) + return JsonUtils.deserialize(frameworkMetadata, classOf[util.Map[String, Object]]) + } + } + null + } + + + def save(framework: Map[String, AnyRef], categoryNames: util.List[String]): Unit = { + val identifier = framework.getOrElse("identifier", "").asInstanceOf[String] + if (cacheEnabled && !framework.isEmpty && 
StringUtils.isNotBlank(identifier) && categoryNames.nonEmpty) { + val categories = new util.ArrayList[String](categoryNames) + Collections.sort(categories) + val key: String = getFwCacheKey(identifier, categories) + RedisCache.set(key, ScalaJsonUtil.serialize(framework), cacheTtl) + } + } + + def delete(id: String): Unit = { + if (StringUtils.isNotBlank(id)) { + RedisCache.deleteByPattern(CACHE_PREFIX + id + "_*") + } + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-actors/src/test/resources/application.conf b/taxonomy-api/taxonomy-actors/src/test/resources/application.conf index 57efe9812..100c86b76 100644 --- a/taxonomy-api/taxonomy-actors/src/test/resources/application.conf +++ b/taxonomy-api/taxonomy-actors/src/test/resources/application.conf @@ -384,7 +384,7 @@ framework.cache.read=false # Max size(width/height) of thumbnail in pixels max.thumbnail.size.pixels=150 -schema.base_path="../../schemas/" +schema.base_path = "../../schemas" diff --git a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/BaseSpec.scala b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/BaseSpec.scala index 3c7b95d98..745c4d3eb 100644 --- a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/BaseSpec.scala +++ b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/BaseSpec.scala @@ -46,4 +46,34 @@ class BaseSpec extends FlatSpec with Matchers { }) util.Arrays.asList(node) } + + def getFrameworkNode(): util.List[Node] = { + val node = new Node() + node.setIdentifier("NCF") + node.setNodeType("DATA_NODE") + node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "NCF") + put("objectType", "Framework") + put("name", "NCF") + } + }) + util.Arrays.asList(node) + } + + def getCategoryInstanceNode(): util.List[Node] = { + val node = new Node() + node.setIdentifier("ncf_board") + node.setNodeType("DATA_NODE") + node.setObjectType("CategoryInstance") + 
node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board") + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + util.Arrays.asList(node) + } } diff --git a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryActorTest.scala b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryActorTest.scala new file mode 100644 index 000000000..913b85174 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryActorTest.scala @@ -0,0 +1,219 @@ +package org.sunbird.actors + +import java.util + +import akka.actor.Props +import org.scalamock.scalatest.MockFactory +import org.sunbird.common.dto.Request +import org.sunbird.graph.{GraphService, OntologyEngineContext} +import org.sunbird.graph.dac.model.{Node, SearchCriteria} +import org.sunbird.utils.Constants + +import scala.collection.JavaConversions.mapAsJavaMap +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global + +class CategoryActorTest extends BaseSpec with MockFactory{ + + "CategoryActor" should "return failed response for 'unknown' operation" in { + implicit val oec: OntologyEngineContext = new OntologyEngineContext + testUnknownOperation(Props(new CategoryActor()), getCategoryRequest()) + } + + it should "return success response for 'createCategory' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node("domain", "DATA_NODE", "Category") + node.setIdentifier("state") + node.setObjectType("Category") + (graphDB.addNode(_: String, _: Node)).expects(*, *).returns(Future(node)) + + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getCategoryRequest() + 
request.getRequest.put("name", "State") + request.getRequest.put("code", "state") + request.getRequest.put("orgIdFieldName", "stateIds") + request.getRequest.put("targetIdFieldName", "targetStateIds") + request.getRequest.put("searchIdFieldName", "se_stateIds") + request.getRequest.put("searchLabelFieldName", "se_states") + request.setOperation(Constants.CREATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("successful".equals(response.getParams.getStatus)) + assert(response.get(Constants.IDENTIFIER).equals("state")) + assert(response.get(Constants.NODE_ID).equals("state")) + } + + it should "throw exception if status sent in request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val request = getCategoryRequest() + request.getRequest.put("name", "category_test") + request.getRequest.put("code", "category_test") + request.getRequest.put("status", "Live") + request.setOperation(Constants.CREATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if invalid translations sent in request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + val translations = new java.util.HashMap[String, String]() + translations.put("sta", "trnm") + val request = getCategoryRequest() + request.getRequest.put("name", "category_test") + request.getRequest.put("code", "category_test") + request.getRequest.put("translations", translations) + request.setOperation(Constants.CREATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it 
should "throw exception if null values sent in request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + val request = getCategoryRequest() + request.getRequest.put("name", "") + request.getRequest.put("code", "") + request.setOperation(Constants.CREATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if no nodes are present " in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val translations = new java.util.HashMap[String, String]() + val request = getCategoryRequest() + request.getRequest.put("name", "category_test") + request.getRequest.put("code", "category_test") + request.getRequest.put("translations", translations) + request.setOperation(Constants.CREATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if code not sent in request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val request = getCategoryRequest() + request.getRequest.put("name", "category_test") + request.setOperation(Constants.CREATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "return success response for 'readCategory'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB) + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, 
*).returns(Future(node)) + val request = getCategoryRequest() + request.getContext.put("identifier", "category_test") + request.putAll(mapAsJavaMap(Map("identifier" -> "category_test"))) + request.setOperation("readCategory") + val response = callActor(request, Props(new CategoryActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "return success response for updateCategory" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(getValidNode())) + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getCategoryRequest() + request.putAll(mapAsJavaMap(Map("description" -> "test desc"))) + request.setOperation(Constants.UPDATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "throw an exception if identifier is sent in update request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getCategoryRequest() + request.putAll(mapAsJavaMap(Map("description" -> "test desc", "identifier"-> "category_test"))) + request.setOperation(Constants.UPDATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw an exception if code is sent in update request" in { + implicit val oec: OntologyEngineContext = 
mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getCategoryRequest() + request.putAll(mapAsJavaMap(Map("description" -> "test desc", "code" -> "category_test"))) + request.setOperation(Constants.UPDATE_CATEGORY) + val response = callActor(request, Props(new CategoryActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "return success response for 'retireCategory' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + node.setObjectType("Category") + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)) + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(node)) + + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getCategoryRequest() + request.getContext.put("identifier", "category_test"); + request.getRequest.put("identifier", "category_test") + request.setOperation("retireCategory") + val response = callActor(request, Props(new CategoryActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + private def getCategoryRequest(): Request = { + val request = new Request() + request.setContext(new util.HashMap[String, AnyRef]() { + { + put("graph_id", "domain") + put("version", "1.0") + put("objectType", "Category") + put("schemaName", "category") + + } + }) + request.setObjectType("Category") + request + } + + private def getValidNode(): Node = { + val node = new Node() + node.setIdentifier("category_test") + node.setNodeType("DATA_NODE") + node.setObjectType("Category") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + 
put("identifier", "category_test") + put("objectType", "Category") + put("name", "category_test") + put("code", "category_test") + put("orgIdFieldName", "stateIds") + put("targetIdFieldName", "targetStateIds") + put("searchIdFieldName", "se_stateIds") + put("searchLabelFieldName", "se_states") + } + }) + node + } +} diff --git a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryInstanceActorTest.scala b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryInstanceActorTest.scala new file mode 100644 index 000000000..6ee90251a --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/CategoryInstanceActorTest.scala @@ -0,0 +1,364 @@ +package org.sunbird.actors + +import java.util +import akka.actor.Props +import org.apache.commons.lang3.StringUtils +import org.scalamock.scalatest.MockFactory +import org.sunbird.common.dto.{Request, Response} +import org.sunbird.common.exception.{ResourceNotFoundException, ResponseCode} +import org.sunbird.graph.common.enums.GraphDACParams +import org.sunbird.graph.{GraphService, OntologyEngineContext} +import org.sunbird.graph.dac.model.{Node, SearchCriteria} +import org.sunbird.utils.Constants +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global +import scala.collection.JavaConversions.mapAsJavaMap + +class CategoryInstanceActorTest extends BaseSpec with MockFactory { + + "CategoryInstanceActor" should "return failed response for 'unknown' operation" in { + implicit val oec: OntologyEngineContext = new OntologyEngineContext + testUnknownOperation(Props(new CategoryInstanceActor()), getCategoryInstanceRequest()) + } + + it should "create a CategoryInstance node and store it in neo4j" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("NCF") + 
node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "NCF"); + put("objectType", "Framework") + put("name", "NCF") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "NCF", *, *).returns(Future(node)).anyNumberOfTimes() + val nodes: util.List[Node] = getFrameworkNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val categoryNode = new Node() + categoryNode.setIdentifier("board") + categoryNode.setObjectType("Category") + categoryNode.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "board"); + put("objectType", "Category") + put("name", "board") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "board", *, *).returns(Future(categoryNode)).anyNumberOfTimes() + val categoryNodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(categoryNodes)).anyNumberOfTimes() + + (graphDB.addNode(_: String, _: Node)).expects(*, *).returns(Future(getCategoryInstanceOfNode())) + val loopResult: util.Map[String, Object] = new util.HashMap[String, Object]() + loopResult.put(GraphDACParams.loop.name, new java.lang.Boolean(false)) + (graphDB.checkCyclicLoop _).expects(*, *, *, *).returns(loopResult).anyNumberOfTimes() + (graphDB.createRelation _).expects(*, *).returns(Future(new Response())) + + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF","code" -> "board" ,"name" -> "Board"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("successful".equals(response.getParams.getStatus)) + assert(response.get(Constants.IDENTIFIER) != null) + assert(response.get("versionKey") != null) + } + + it should "throw error if category does not 
belong to master category" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("NCF") + node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "NCF"); + put("objectType", "Framework") + put("name", "NCF") + } + }) + + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "code" -> "board", "name" -> "Board"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw error if code value is empty " in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("NCF") + node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "NCF"); + put("objectType", "Framework") + put("name", "NCF") + } + }) + + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "code" -> "", "name" -> "Board"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throws exception if identifier is empty" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val 
graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("") + node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", ""); + put("objectType", "Framework") + put("name", "NCF") + } + }) + + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "code" -> "board", "name" -> "Board"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if frameworkId is not sent in request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("code" -> "board", "name" -> "Board"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + + } + + it should "throw exception if frameworkId is null " in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "", "code" -> "board", "name" -> "Board"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + + } + + it should "throw exception if node id is empty for 
'readCategoryInstance'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB) + val node = new Node() + node.setIdentifier("") + node.setNodeType("DATA_NODE") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "") + put("objectType", "CategoryInstance") + put("name", "Board") + put("code", "board") + put("description", "Board") + put("versionKey", "1234") + + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)) + val request = getCategoryInstanceRequest() + request.getContext.put("identifier", "ncf_board") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "name" -> "Board", "description" -> "Board", "code" -> "board", "identifier" -> "ncf_board", "channel" -> "sunbird", "category" -> "board"))) + request.setOperation("readCategoryInstance") + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if code is not sent in request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "name" -> "Board", "frameworks" -> "[{identifier=NCF_TEST1}]}]"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + + } + + it should "throw exception if status is sent in createCategoryInstance request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService 
_).expects().returns(graphDB).anyNumberOfTimes() + + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF","code" -> "board","status" -> "Live", "name" -> "Board", "frameworks" -> "[{identifier=NCF_TEST1}]}]"))) + request.setOperation(Constants.CREATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert(response.getResponseCode == ResponseCode.CLIENT_ERROR) + assert(response.getParams.getErr == "ERROR_RESTRICTED_PROP") + assert(response.getParams.getErrmsg == "Properties in list [status] are not allowed in request") + } + + it should "return success response for 'readCategoryInstance'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB) + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)) + val request = getCategoryInstanceRequest() + request.getContext.put("identifier", "ncf_board") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF","name" -> "Board", "description" -> "Board", "code" -> "board","identifier" -> "ncf_board", "channel" ->"sunbird","category" -> "board"))) + request.setOperation("readCategoryInstance") + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "throw exception if cateogry has null values for 'readCategoryInstance'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val request = getCategoryInstanceRequest() + request.getContext.put("identifier", "ncf_board") + request.putAll(mapAsJavaMap(Map("framework" -> "ncf", "name" -> "Board", "description" -> "Board", "code" -> "board", "identifier" -> "ncf_board", "channel" -> "sunbird", "category" -> ""))) + request.setOperation("readCategoryInstance") + val response = 
callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if frameworkId has null values for 'readCategoryInstance'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val request = getCategoryInstanceRequest() + request.getContext.put("identifier", "ncf_board") + request.putAll(mapAsJavaMap(Map("framework" -> "", "name" -> "Board", "description" -> "Board", "code" -> "board", "identifier" -> "ncf_board", "channel" -> "sunbird", "category" -> "board"))) + request.setOperation("readCategoryInstance") + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "return success response for updateCategoryInstance" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(getValidNode())) + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF","name" -> "Board", "description" -> "Board", "code" -> "board", "channel" ->"sunbird","category" -> "board"))) + request.setOperation(Constants.UPDATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("successful".equals(response.getParams.getStatus)) + assert(response.get(Constants.IDENTIFIER) != null) + assert(response.get(Constants.VERSION_KEY) != null) + 
assert(response.get(Constants.IDENTIFIER).equals("ncf_board")) + } + + it should "throw exception if identifier is sent in updateCategoryInstance request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getCategoryInstanceRequest() + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "name" -> "Board", "identifier"->"ncf_board", "description" -> "Board", "code" -> "board", "channel" -> "sunbird", "category" -> "board"))) + request.setOperation(Constants.UPDATE_CATEGORY_INSTANCE) + val response = callActor(request, Props(new CategoryInstanceActor())) + assert(response.getResponseCode == ResponseCode.CLIENT_ERROR) + assert(response.getParams.getErr == "ERROR_RESTRICTED_PROP") + assert(response.getParams.getErrmsg == "Properties in list [identifier] are not allowed in request") + } + + it should "return success response for retireCategoryInstance" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(getValidNode())) + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + val request = getCategoryInstanceRequest() + request.getContext.put("identifier", "ncf_board") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF","name" -> "Board", "description" -> "Board", "code" -> "board", "channel" ->"sunbird","category" -> "board"))) + request.putAll(mapAsJavaMap(Map("identifier" -> "ncf_board"))) + 
request.setOperation("retireCategoryInstance") + val response = callActor(request, Props(new CategoryInstanceActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + + private def getCategoryInstanceOfNode(): Node = { + val node = new Node() + node.setIdentifier("ncf_board") + node.setNodeType("DATA_NODE") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board") + put("framework", "NCF") + put("objectType", "CategoryInstance") + put("name", "board") + put("code", "board") + put("versionKey", "12345") + } + }) + node + } + + + + private def getCategoryInstanceRequest(): Request = { + val request = new Request() + request.setContext(getContext()) + request + } + + private def getContext(): util.Map[String, AnyRef] = new util.HashMap[String, AnyRef]() { + { + put("graph_id", "domain") + put("version", "1.0") + put("objectType", "CategoryInstance") + put("schemaName", "CategoryInstance") + } + } + + + + private def getValidNode(): Node = { + val node = new Node() + node.setIdentifier("ncf_board") + node.setNodeType("DATA_NODE") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "Board") + put("objectType", "CategoryInstance") + put("name", "Board") + put("code", "board") + put("description", "Board") + put("versionKey", "1234") + + } + }) + node + } + + +} diff --git a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/FrameworkActorTest.scala b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/FrameworkActorTest.scala new file mode 100644 index 000000000..b88877144 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/FrameworkActorTest.scala @@ -0,0 +1,315 @@ +package org.sunbird.actors + +import java.util +import akka.actor.Props +import org.scalamock.scalatest.MockFactory +import org.sunbird.common.dto.{Request, Response, ResponseParams} +import 
org.sunbird.common.exception.ResponseCode +import org.sunbird.graph.{GraphService, OntologyEngineContext} +import org.sunbird.graph.dac.model.{Node, Relation, SearchCriteria, SubGraph} +import org.sunbird.utils.Constants + +import scala.collection.JavaConversions.mapAsJavaMap +import scala.collection.immutable.List +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global + +class FrameworkActorTest extends BaseSpec with MockFactory { + + "FrameworkActor" should "return failed response for 'unknown' operation" in { + implicit val oec: OntologyEngineContext = new OntologyEngineContext + testUnknownOperation(Props(new FrameworkActor()), getFrameworkRequest()) + } + + + it should "return success response for 'createFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node("domain", "DATA_NODE", "Channel") + node.setIdentifier("channel_test") + node.setObjectType("Channel") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "channel_test"); + put("objectType", "Channel") + put("name", "Channel") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val nodes: util.List[Node] = getFrameworkNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + (graphDB.addNode(_: String, _: Node)).expects(*, *).returns(Future(getFrameworkOfNode())) + + val request = getFrameworkRequest() + request.putAll(mapAsJavaMap(Map("name" ->"framework_test", "code"-> "framework_test", "description" -> "desc_test", "channel"->"channel_test"))) + request.setOperation(Constants.CREATE_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + 
assert("successful".equals(response.getParams.getStatus)) + assert(response.get(Constants.NODE_ID).equals("framework_test")) + } + + it should "throw exception if code is sent empty for 'createFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getFrameworkRequest() + request.putAll(mapAsJavaMap(Map("name" -> "framework_test", "code" -> "", "description" -> "desc_test", "channel" -> "channel_test"))) + request.setOperation(Constants.CREATE_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if channel is not sent in the request for 'createFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val request = getFrameworkRequest() + request.putAll(mapAsJavaMap(Map("name" -> "framework_test", "code" -> "framework_test", "description" -> "desc_test"))) + request.setOperation(Constants.CREATE_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if empty channel identifier is sent in the request 'createFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node("domain", "DATA_NODE", "Channel") + node.setIdentifier("") + node.setObjectType("Channel") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "channel_test"); + put("objectType", "Channel") + put("name", "Channel") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: 
Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val request = getFrameworkRequest() + request.putAll(mapAsJavaMap(Map("name" -> "framework_test", "code" -> "framework_test", "description" -> "desc_test", "channel" -> "channel_test"))) + request.setOperation(Constants.CREATE_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "return success response for updateFramework" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(getValidNode())) + val nodes: util.List[Node] = getFrameworkNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getFrameworkRequest() + request.putAll(mapAsJavaMap(Map("description" -> "test desc"))) + request.setOperation(Constants.UPDATE_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "return success response for 'retireFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + node.setObjectType("Framework") + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)) + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(node)) + + val nodes: util.List[Node] = getFrameworkNode() + 
(graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getFrameworkRequest() + request.getContext.put("identifier", "framework_test"); + request.getRequest.put("identifier", "framework_test") + request.setOperation("retireFramework") + val response = callActor(request, Props(new FrameworkActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "return success response for 'copyFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getFrameworkOfNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.addNode(_: String, _: Node)).expects(*, *).returns(Future(node)).anyNumberOfTimes() + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getFramwrokRequest() + request.putAll(mapAsJavaMap(Map("identifier" -> "NCF", + "createdBy" -> "username_1", + "code" -> "NCF_COPY"))) + request.setOperation(Constants.COPY_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("successful".equals(response.getParams.getStatus)) + assert(response.getResult.containsKey("node_id")) + } + + it should "throw exception if code not sent in the request 'copyFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getFrameworkOfNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.addNode(_: String, _: Node)).expects(*, 
*).returns(Future(node)).anyNumberOfTimes() + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getFramwrokRequest() + request.putAll(mapAsJavaMap(Map("identifier" -> "NCF"))) + request.setOperation(Constants.COPY_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("failed".equals(response.getParams.getStatus)) + assert("ERR_FRAMEWORK_CODE_REQUIRED".equals(response.getParams.getErr)) + } + + it should "throw exception if code & identifier values same in the request 'copyFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getFrameworkOfNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.addNode(_: String, _: Node)).expects(*, *).returns(Future(node)).anyNumberOfTimes() + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getFramwrokRequest() + request.putAll(mapAsJavaMap(Map("identifier" -> "NCF", "code" -> "NCF"))) + request.setOperation(Constants.COPY_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("failed".equals(response.getParams.getStatus)) + assert("ERR_FRAMEWORKID_CODE_MATCHES".equals(response.getParams.getErr)) + } + + it should "return success response for 'publishFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node("domain", "DATA_NODE", "Channel") + node.setIdentifier("sunbird") + 
node.setObjectType("Channel") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "sunbird"); + put("objectType", "Channel") + put("name", "Channel") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val subGraph = getSubGraphData() + (graphDB.getSubGraph(_: String, _: String, _: Int)).expects(*, *, *).returns(Future(subGraph)).anyNumberOfTimes() + (graphDB.saveExternalProps(_: Request)).expects(*).returns(Future(getSuccessfulResponse())).anyNumberOfTimes + + val request = getFramwrokRequest() + request.getContext.put("identifier", "framework_test") + request.putAll(mapAsJavaMap(Map("identifier" -> "framework_test", "channel" -> "sunbird"))) + request.setOperation(Constants.PUBLISH_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "return success response for 'readFramework' operation" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + (graphDB.readExternalProps(_: Request, _: List[String])).expects(*, *).returns(Future(new Response())) + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + + val request = getFramwrokRequest() + request.getContext.put("identifier", "framework_test") + request.putAll(mapAsJavaMap(Map("identifier" -> "framework_test", "channel" -> "sunbird", "categories" -> ""))) + request.setOperation(Constants.READ_FRAMEWORK) + val response = callActor(request, Props(new FrameworkActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + + private def getFrameworkOfNode(): Node = { + val node = new Node() + node.setIdentifier("framework_test") + 
node.setNodeType("DATA_NODE") + node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "framework_test") + put("objectType", "Framework") + put("name", "framework_test") + put("code", "framework_test") + put("X-Channel-Id", "channel_test") + } + }) + node + } + + private def getValidNode(): Node = { + val node = new Node() + node.setIdentifier("framework_test") + node.setGraphId("domain") + node.setNodeType("DATA_NODE") + node.setObjectType("Framework") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("code", "framework_test") + put("objectType", "Framework") + put("name", "framework_test") + put("channel", "sunbird") + } + }) + node + } + + private def getFrameworkRequest(): Request = { + val request = new Request() + request.setContext(getContext()) + request + } + + private def getContext(): util.Map[String, AnyRef] = new util.HashMap[String, AnyRef]() { + { + put("graph_id", "domain") + put("version", "1.0") + put("objectType", "Framework") + put("schemaName", "framework") + + } + } + + private def getFramwrokRequest(): Request = { + val request = new Request() + request.setContext(new util.HashMap[String, AnyRef]() { + { + put("graph_id", "domain") + put("version", "1.0") + put("objectType", "Framework") + put("schemaName", "framework") + put("channel", "sunbird") + } + }) + request.setObjectType("Framework") + request + } + + def getSubGraphData(): SubGraph = { + val nodeMap: Map[String, Node] = Map("framework_test" -> getValidNode()) + val relationsList: util.List[Relation] = new util.ArrayList[Relation]() + val subGraphFData = new SubGraph(nodeMap, relationsList) + subGraphFData + } + + def getSuccessfulResponse(): Response = { + val response = new Response + response.setVer("3.0") + val responseParams = new ResponseParams + responseParams.setStatus("successful") + response.setParams(responseParams) + response.setResponseCode(ResponseCode.OK) + response + } + +} diff --git 
a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/TermActorTest.scala b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/TermActorTest.scala new file mode 100644 index 000000000..1281fc036 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/actors/TermActorTest.scala @@ -0,0 +1,350 @@ +package org.sunbird.actors + +import java.util +import akka.actor.Props +import org.scalamock.scalatest.MockFactory +import org.sunbird.common.dto.{Request, Response} +import org.sunbird.common.exception.ResponseCode +import org.sunbird.graph.common.enums.GraphDACParams +import org.sunbird.graph.{GraphService, OntologyEngineContext} +import org.sunbird.graph.dac.model.{Node, SearchCriteria} +import org.sunbird.utils.Constants + +import scala.collection.JavaConversions.mapAsJavaMap +import scala.concurrent.Future +import scala.concurrent.ExecutionContext.Implicits.global + +class TermActorTest extends BaseSpec with MockFactory{ + + "TermActor" should "return failed response for 'unknown' operation" in { + implicit val oec: OntologyEngineContext = new OntologyEngineContext + testUnknownOperation(Props(new TermActor()), getTermRequest()) + } + + it should "create a Term node and store it in neo4j" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("ncf_board") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board"); + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val nodes: util.List[Node] = getCategoryInstanceNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, 
*).returns(Future(nodes)).anyNumberOfTimes() + (graphDB.addNode(_: String, _: Node)).expects(*, *).returns(Future(getTermOfNode())) + val loopResult: util.Map[String, Object] = new util.HashMap[String, Object]() + loopResult.put(GraphDACParams.loop.name, new java.lang.Boolean(false)) + (graphDB.checkCyclicLoop _).expects(*, *, *, *).returns(loopResult).anyNumberOfTimes() + (graphDB.createRelation _).expects(*, *).returns(Future(new Response())) + + val request = getTermRequest() + request.putAll(mapAsJavaMap(Map("code"->"class1", "name"->"Class1", "description"->"Class1", "framework"->"NCF", "category"->"board", "channel"->"sunbird", "categories"-> "[{identifier=ncf_board}]", "identifier"->"ncf_board_class1"))) + request.setOperation(Constants.CREATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("successful".equals(response.getParams.getStatus)) + assert(response.get(Constants.IDENTIFIER) != null) + assert(response.get(Constants.IDENTIFIER).equals("ncf_board_class1")) + assert(response.get(Constants.NODE_ID).equals("ncf_board_class1")) + } + + it should "throw exception if categoryId and identifier are same" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("ncf_board") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board"); + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val request = getTermRequest() + request.putAll(mapAsJavaMap(Map("code"->"class1", "name"->"Class1", "description"->"Class1", "framework"->"NCF", "category"->"board", "channel"->"sunbird", "categories"-> "[{identifier=ncf_board}]", 
"identifier"->"ncf_board_class1"))) + request.setOperation(Constants.CREATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if identifier is null" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", ""); + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val request = getTermRequest() + request.putAll(mapAsJavaMap(Map("code" -> "class1", "name" -> "Class1", "description" -> "Class1", "framework" -> "NCF", "category" -> "board", "channel" -> "sunbird", "categories" -> "[{identifier=ncf_board}]", "identifier" -> "ncf_board_class1"))) + request.setOperation(Constants.CREATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if categoryId is null" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("ncf_board") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board"); + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val request = getTermRequest() + request.putAll(mapAsJavaMap(Map("code" -> 
"class1", "name" -> "Class1", "description" -> "Class1", "framework" -> "NCF", "category" ->"", "channel" -> "sunbird", "categories" -> "[{identifier=ncf_board}]", "identifier" -> "ncf_board_class1"))) + request.setOperation(Constants.CREATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if frameworkId is null" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("ncf_board") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board"); + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + val request = getTermRequest() + request.putAll(mapAsJavaMap(Map("code" -> "class1", "name" -> "Class1", "description" -> "Class1", "framework" -> "", "category" -> "board", "channel" -> "sunbird", "categories" -> "[{identifier=ncf_board}]", "identifier" -> "ncf_board_class1"))) + request.setOperation(Constants.CREATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if code is not sent in the request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = new Node() + node.setIdentifier("ncf_board") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board"); + put("objectType", "CategoryInstance") + put("name", "ncf_board") + } + }) + + 
(graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + + val request = getTermRequest() + request.putAll(mapAsJavaMap(Map("code" -> "", "name" -> "Class1", "description" -> "Class1", "framework" -> "NCF", "category" -> "board", "channel" -> "sunbird", "categories" -> "[{identifier=ncf_board}]", "identifier" -> "ncf_board_class1"))) + request.setOperation(Constants.CREATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert(response.getResponseCode == ResponseCode.CLIENT_ERROR) + assert(response.getParams.getErr == "CLIENT_ERROR") + assert(response.getParams.getErrmsg == "Validation Errors") + } + + it should "return success response for 'readTerm'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val categoryInstanceNode = getCategoryInstanceOfNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "ncf_board", *, *).returns(Future(categoryInstanceNode)) + val termNode = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "ncf_board_class1", *, *).returns(Future(termNode)) + val request = getTermRequest() + request.getContext.put("identifier", "ncf_board_class1") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "term" -> "class1", "category" -> "board", "channel" -> "sunbird"))) + request.setOperation(Constants.READ_TERM) + val response = callActor(request, Props(new TermActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "throw exception if identifier is empty for 'readTerm'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val categoryInstanceNode = 
getCategoryInstanceOfNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "ncf_board", *, *).returns(Future(categoryInstanceNode)) + val node = new Node() + node.setIdentifier("") + node.setGraphId("domain") + node.setNodeType("DATA_NODE") + node.setObjectType("Term") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("code", "ncf_board_class1") + put("objectType", "Term") + put("name", "ncf_board_class1") + put("channel", "sunbird") + put("category", "ncf_board") + } + }) + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "ncf_board_class1", *, *).returns(Future(node)) + val request = getTermRequest() + request.getContext.put("identifier", "ncf_board_class1") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "term" -> "class1", "category" -> "board", "channel" -> "sunbird"))) + request.setOperation(Constants.READ_TERM) + val response = callActor(request, Props(new TermActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + it should "throw exception if termId is empty for 'readTerm'" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val categoryInstanceNode = getCategoryInstanceOfNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, "ncf_board", *, *).returns(Future(categoryInstanceNode)) + val request = getTermRequest() + request.getContext.put("identifier", "ncf_board_class1") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "term" -> "", "category" -> "board", "channel" -> "sunbird"))) + request.setOperation(Constants.READ_TERM) + val response = callActor(request, Props(new TermActor())) + assert("failed".equals(response.getParams.getStatus)) + } + + + it should "return success response for 'updateTerm'" in { + implicit val oec: OntologyEngineContext = 
mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(getValidNode())) + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + + val request = getTermRequest() + request.getContext.put(Constants.IDENTIFIER, "ncf_board_class1") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "name" -> "Board", "description" -> "Board", "code" -> "board", "term" ->"class1" ,"channel" -> "sunbird", "category" -> "board"))) + request.setOperation(Constants.UPDATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("successful".equals(response.getParams.getStatus)) + } + + it should "throw exception if identifier is sent in updateTerm request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + + val request = getTermRequest() + request.getContext.put(Constants.IDENTIFIER, "ncf_board_class1") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "identifier"->"ncf_board_class1","name" -> "Board", "description" -> "Board", "code" -> "board", "term" -> "class1", "channel" -> "sunbird", "category" -> "board"))) + request.setOperation(Constants.UPDATE_TERM) + val response = callActor(request, Props(new TermActor())) + assert(response.getResponseCode == ResponseCode.CLIENT_ERROR) + assert(response.getParams.getErr == "ERROR_RESTRICTED_PROP") + assert(response.getParams.getErrmsg == "Properties in list [identifier] are not allowed in request") + } + + it should "return success response 
for 'retireTerm' " in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val graphDB = mock[GraphService] + (oec.graphService _).expects().returns(graphDB).anyNumberOfTimes() + val node = getValidNode() + (graphDB.getNodeByUniqueId(_: String, _: String, _: Boolean, _: Request)).expects(*, *, *, *).returns(Future(node)).anyNumberOfTimes() + (graphDB.upsertNode(_: String, _: Node, _: Request)).expects(*, *, *).returns(Future(getValidNode())) + val nodes: util.List[Node] = getCategoryNode() + (graphDB.getNodeByUniqueIds(_: String, _: SearchCriteria)).expects(*, *).returns(Future(nodes)).anyNumberOfTimes() + val request = getTermRequest() + request.getContext.put("identifier", "ncf_board_class1") + request.putAll(mapAsJavaMap(Map("framework" -> "NCF", "name" -> "Board", "description" -> "Board", "code" -> "board", "term" ->"class1" ,"channel" -> "sunbird", "category" -> "board"))) + request.putAll(mapAsJavaMap(Map("identifier" -> "ncf_board_class1"))) + request.setOperation(Constants.RETIRE_TERM) + val response = callActor(request, Props(new TermActor())) + assert("successful".equals(response.getParams.getStatus)) + } + private def getValidNode(): Node = { + val node = new Node() + node.setIdentifier("ncf_board_class1") + node.setGraphId("domain") + node.setNodeType("DATA_NODE") + node.setObjectType("Term") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("code", "ncf_board_class1") + put("objectType", "Term") + put("name", "ncf_board_class1") + put("channel", "sunbird") + put("category", "ncf_board") + } + }) + node + } + + private def getTermOfNode(): Node = { + val node = new Node() + node.setIdentifier("ncf_board_class1") + node.setNodeType("DATA_NODE") + node.setObjectType("Term") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board_class1") + put("framework", "NCF") + put("category", "board") + put("objectType", "Term") + put("name", "Class1") + put("code", "class1") + 
put("versionKey", "12345") + } + }) + node + } + private def getTermRequest(): Request = { + val request = new Request() + request.setContext(new util.HashMap[String, AnyRef]() { + { + put("graph_id", "domain") + put("version", "1.0") + put("objectType", "Term") + put("schemaName", "term") + + } + }) + request.setObjectType("Term") + request + } + + private def getCategoryInstanceOfNode(): Node = { + val node = new Node() + node.setIdentifier("ncf_board") + node.setNodeType("DATA_NODE") + node.setObjectType("CategoryInstance") + node.setMetadata(new util.HashMap[String, AnyRef]() { + { + put("identifier", "ncf_board") + put("framework", "NCF") + put("objectType", "CategoryInstance") + put("name", "board") + put("code", "board") + put("versionKey", "12345") + } + }) + node + } +} diff --git a/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/utils/RequestUtilTest.scala b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/utils/RequestUtilTest.scala new file mode 100644 index 000000000..c99d3a025 --- /dev/null +++ b/taxonomy-api/taxonomy-actors/src/test/scala/org/sunbird/utils/RequestUtilTest.scala @@ -0,0 +1,34 @@ +package org.sunbird.util + +import java.util + +import org.scalamock.scalatest.AsyncMockFactory +import org.scalatest.{FlatSpec, Matchers} +import org.sunbird.common.dto.Request +import org.sunbird.common.exception.ClientException +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.utils.RequestUtil + + +class RequestUtilTest extends FlatSpec with Matchers with AsyncMockFactory { + + + it should "throw clientException for invalid request" in { + implicit val oec: OntologyEngineContext = mock[OntologyEngineContext] + val exception = intercept[ClientException] { + val context = new util.HashMap[String, AnyRef](){{ + put("graphId", "domain") + put("version", "1.0") + put("schemaName", "content") + put("objectType", "Content") + }} + val request = new Request() + request.setContext(context) + request.setOperation("create") + 
request.put("status", "Live") + RequestUtil.restrictProperties(request) + } + exception.getErrCode shouldEqual "ERROR_RESTRICTED_PROP" + } + +} diff --git a/taxonomy-api/taxonomy-service/app/controllers/BaseController.scala b/taxonomy-api/taxonomy-service/app/controllers/BaseController.scala index 47670af40..07271a486 100644 --- a/taxonomy-api/taxonomy-service/app/controllers/BaseController.scala +++ b/taxonomy-api/taxonomy-service/app/controllers/BaseController.scala @@ -10,7 +10,7 @@ import org.sunbird.common.exception.ResponseCode import play.api.mvc._ import utils.JavaJsonUtils -import scala.collection.JavaConversions._ + import scala.collection.JavaConverters._ import scala.collection.mutable import scala.concurrent.{ExecutionContext, Future} diff --git a/taxonomy-api/taxonomy-service/app/controllers/v3/CategoryController.scala b/taxonomy-api/taxonomy-service/app/controllers/v3/CategoryController.scala new file mode 100644 index 000000000..c238d5494 --- /dev/null +++ b/taxonomy-api/taxonomy-service/app/controllers/v3/CategoryController.scala @@ -0,0 +1,57 @@ +package controllers.v3 + +import akka.actor.{ActorRef, ActorSystem} + +import scala.concurrent.{ExecutionContext, Future} +import controllers.BaseController + +import javax.inject.{Inject, Named, Singleton} +import org.sunbird.utils.Constants +import play.api.mvc.ControllerComponents +import utils.{ActorNames, ApiId, JavaJsonUtils} + +class CategoryController @Inject()(@Named(ActorNames.CATEGORY_ACTOR) categoryActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem)(implicit exec: ExecutionContext) extends BaseController(cc) { + + val objectType = "Category" + def createCategory() = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val category = body.getOrDefault(Constants.CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + category.putAll(headers) + val categoryRequest = getRequest(category, headers, 
Constants.CREATE_CATEGORY) + setRequestContext(categoryRequest, Constants.CATEGORY_SCHEMA_VERSION, objectType, Constants.CATEGORY_SCHEMA_NAME) + getResult(ApiId.CREATE_CATEGORY, categoryActor, categoryRequest) + } + + def readCategory(identifier: String) = Action.async { implicit request => + val headers = commonHeaders() + val category = new java.util.HashMap().asInstanceOf[java.util.Map[String, Object]] + category.put(Constants.IDENTIFIER, identifier) + category.putAll(headers) + val readCategoryRequest = getRequest(category, headers, Constants.READ_CATEGORY) + setRequestContext(readCategoryRequest, Constants.CATEGORY_SCHEMA_VERSION, objectType, Constants.CATEGORY_SCHEMA_NAME) + getResult(ApiId.READ_CATEGORY, categoryActor, readCategoryRequest) + } + + def updateCategory(identifier: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val category = body.getOrDefault(Constants.CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + category.putAll(headers) + val categoryRequest = getRequest(category, headers, Constants.UPDATE_CATEGORY) + setRequestContext(categoryRequest, Constants.CATEGORY_SCHEMA_VERSION, objectType, Constants.CATEGORY_SCHEMA_NAME) + categoryRequest.getContext.put("identifier", identifier); + getResult(ApiId.UPDATE_CATEGORY, categoryActor, categoryRequest) + } + + def retireCategory(identifier: String) = Action.async { implicit request => + val headers = commonHeaders() + val category = new java.util.HashMap().asInstanceOf[java.util.Map[String, Object]] + category.put(Constants.IDENTIFIER, identifier) + category.putAll(headers) + val categoryRequest = getRequest(category, headers, Constants.RETIRE_CATEGORY) + setRequestContext(categoryRequest, Constants.CATEGORY_SCHEMA_VERSION, objectType, Constants.CATEGORY_SCHEMA_NAME) + categoryRequest.getContext.put("identifier", identifier); + getResult(ApiId.RETIRE_CATEGORY, categoryActor, categoryRequest) + } +} \ No newline at 
end of file diff --git a/taxonomy-api/taxonomy-service/app/controllers/v3/CategoryInstanceController.scala b/taxonomy-api/taxonomy-service/app/controllers/v3/CategoryInstanceController.scala new file mode 100644 index 000000000..deb7e6584 --- /dev/null +++ b/taxonomy-api/taxonomy-service/app/controllers/v3/CategoryInstanceController.scala @@ -0,0 +1,67 @@ +package controllers.v3 + +import akka.actor.{ActorRef, ActorSystem} +import controllers.BaseController +import org.apache.commons.lang3.StringUtils +import org.sunbird.common.exception.ClientException +import org.sunbird.utils.Constants +import play.api.mvc.ControllerComponents +import utils.{ActorNames, ApiId} + +import javax.inject.{Inject, Named} +import scala.collection.JavaConverters.asJavaIterableConverter +import scala.concurrent.ExecutionContext + +class CategoryInstanceController @Inject()(@Named(ActorNames.CATEGORY_INSTANCE_ACTOR) categoryInstanceActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem)(implicit exec: ExecutionContext) extends BaseController(cc) { + + val objectType = "CategoryInstance" + def createCategoryInstance(framework: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val categoryInstance = body.getOrDefault(Constants.CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryInstance.put(Constants.FRAMEWORK, framework) + categoryInstance.putAll(headers) + val categoryRequest = getRequest(categoryInstance, headers, Constants.CREATE_CATEGORY_INSTANCE) + setRequestContext(categoryRequest, Constants.CATEGORY_INSTANCE_SCHEMA_VERSION, objectType, Constants.CATEGORY_INSTANCE_SCHEMA_NAME) + getResult(ApiId.CREATE_CATEGORY_INSTANCE, categoryInstanceActor, categoryRequest) + } + + def readCategoryInstance(category: String, framework: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val categoryInstance = 
body.getOrDefault(Constants.CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryInstance.put(Constants.CATEGORY, category) + categoryInstance.put(Constants.FRAMEWORK, framework) + categoryInstance.putAll(headers) + val readCategoryRequest = getRequest(categoryInstance, headers, Constants.READ_CATEGORY_INSTANCE) + setRequestContext(readCategoryRequest, Constants.CATEGORY_INSTANCE_SCHEMA_VERSION, objectType, Constants.CATEGORY_INSTANCE_SCHEMA_NAME) + getResult(ApiId.READ_CATEGORY_INSTANCE, categoryInstanceActor, readCategoryRequest) + } + + def updateCategoryInstance(category: String, framework: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val categoryInstance = body.getOrDefault(Constants.CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryInstance.put(Constants.CATEGORY, category) + categoryInstance.put(Constants.FRAMEWORK, framework) + categoryInstance.putAll(headers) + val categoryInstanceRequest = getRequest(categoryInstance, headers, Constants.UPDATE_CATEGORY_INSTANCE) + setRequestContext(categoryInstanceRequest, Constants.CATEGORY_INSTANCE_SCHEMA_VERSION, objectType, Constants.CATEGORY_INSTANCE_SCHEMA_NAME) + categoryInstanceRequest.getContext.put(Constants.CATEGORY, category) + getResult(ApiId.UPDATE_CATEGORY_INSTANCE, categoryInstanceActor, categoryInstanceRequest) + } + + def retireCategoryInstance(category: String, framework: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val categoryInstance = body.getOrDefault(Constants.CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryInstance.put(Constants.CATEGORY, category) + categoryInstance.put(Constants.FRAMEWORK, framework) + categoryInstance.putAll(headers) + val categoryInstanceRequest = getRequest(categoryInstance, headers, Constants.RETIRE_CATEGORY_INSTANCE) + 
setRequestContext(categoryInstanceRequest, Constants.CATEGORY_INSTANCE_SCHEMA_VERSION, objectType, Constants.CATEGORY_INSTANCE_SCHEMA_NAME) + categoryInstanceRequest.getContext.put(Constants.CATEGORY, category) + getResult(ApiId.RETIRE_CATEGORY_INSTANCE, categoryInstanceActor, categoryInstanceRequest) + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkController.scala b/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkController.scala index 361403b1c..e86c43763 100644 --- a/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkController.scala +++ b/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkController.scala @@ -1,37 +1,60 @@ package controllers.v3 -import scala.concurrent.{ExecutionContext, Future} +import akka.actor.{ActorRef, ActorSystem} +import com.google.inject.Singleton import controllers.BaseController -import javax.inject.{Inject, Singleton} -import org.sunbird.common.dto.ResponseHandler +import javax.inject.{Inject, Named} +import org.sunbird.utils.Constants import play.api.mvc.ControllerComponents -import utils.JavaJsonUtils +import utils.{ActorNames, ApiId, JavaJsonUtils} +import scala.collection.JavaConverters._ +import scala.concurrent.{ExecutionContext,Future} +import org.sunbird.common.dto.ResponseHandler @Singleton -class FrameworkController @Inject()(cc: ControllerComponents)(implicit exec: ExecutionContext) extends BaseController(cc) { - - def createFramework() = Action.async { implicit request => - val result = ResponseHandler.OK() - val response = JavaJsonUtils.serialize(result) - Future(Ok(response).as("application/json")) +class FrameworkController @Inject()(@Named(ActorNames.FRAMEWORK_ACTOR) frameworkActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem)(implicit exec: ExecutionContext) extends BaseController(cc) { + + val objectType = "Framework" + def createFramework()= Action.async { implicit request => + val headers = commonHeaders() + val 
body = requestBody() + val framework = body.getOrDefault(Constants.FRAMEWORK, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + framework.putAll(headers) + val frameworkRequest = getRequest(framework, headers, Constants.CREATE_FRAMEWORK) + setRequestContext(frameworkRequest, Constants.FRAMEWORK_SCHEMA_VERSION, objectType, Constants.FRAMEWORK_SCHEMA_NAME) + getResult(ApiId.CREATE_FRAMEWORK, frameworkActor, frameworkRequest) } - def readFramework(identifier: String, categories: Option[String]) = Action.async { implicit request => - val result = ResponseHandler.OK() - val response = JavaJsonUtils.serialize(result) - Future(Ok(response).as("application/json")) + def readFramework(identifier: String, fields: Option[String], categories: Option[String]) = Action.async { implicit request => + val headers = commonHeaders() + val framework = new java.util.HashMap().asInstanceOf[java.util.Map[String, Object]] + framework.putAll(headers) + framework.putAll(Map(Constants.IDENTIFIER -> identifier, Constants.CATEGORIES -> categories.getOrElse("")).asJava ) + val readRequest = getRequest(framework, headers, "readFramework") + setRequestContext(readRequest, Constants.FRAMEWORK_SCHEMA_VERSION, objectType, Constants.FRAMEWORK_SCHEMA_NAME) + getResult(ApiId.READ_FRAMEWORK, frameworkActor, readRequest) } def retire(identifier: String) = Action.async { implicit request => - val result = ResponseHandler.OK() - val response = JavaJsonUtils.serialize(result) - Future(Ok(response).as("application/json")) + val headers = commonHeaders() + val body = requestBody() + val framework = body.getOrDefault(Constants.FRAMEWORK, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + framework.putAll(headers) + val frameworkRequest = getRequest(framework, headers, Constants.RETIRE_FRAMEWORK) + setRequestContext(frameworkRequest, Constants.FRAMEWORK_SCHEMA_VERSION, objectType, Constants.FRAMEWORK_SCHEMA_NAME) + frameworkRequest.getContext.put(Constants.IDENTIFIER, 
identifier) + getResult(ApiId.RETIRE_FRAMEWORK, frameworkActor, frameworkRequest) } def updateFramework(identifier: String) = Action.async { implicit request => - val result = ResponseHandler.OK() - val response = JavaJsonUtils.serialize(result) - Future(Ok(response).as("application/json")) + val headers = commonHeaders() + val body = requestBody() + val framework = body.getOrDefault(Constants.FRAMEWORK, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + framework.putAll(headers) + val frameworkRequest = getRequest(framework, headers, Constants.UPDATE_FRAMEWORK) + setRequestContext(frameworkRequest, Constants.FRAMEWORK_SCHEMA_VERSION, objectType, Constants.FRAMEWORK_SCHEMA_NAME) + frameworkRequest.getContext.put(Constants.IDENTIFIER, identifier) + getResult(ApiId.UPDATE_FRAMEWORK, frameworkActor, frameworkRequest) } def listFramework() = Action.async { implicit request => @@ -41,14 +64,25 @@ class FrameworkController @Inject()(cc: ControllerComponents)(implicit exec: Exe } def copyFramework(identifier: String) = Action.async { implicit request => - val result = ResponseHandler.OK() - val response = JavaJsonUtils.serialize(result) - Future(Ok(response).as("application/json")) + val headers = commonHeaders() + val body = requestBody() + val framework = body.getOrDefault(Constants.FRAMEWORK, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + framework.putAll(headers) + framework.putAll(Map("identifier" -> identifier ).asJava) + val frameworkRequest = getRequest(framework, headers, Constants.COPY_FRAMEWORK) + setRequestContext(frameworkRequest, Constants.FRAMEWORK_SCHEMA_VERSION, objectType, Constants.FRAMEWORK_SCHEMA_NAME) + getResult(ApiId.COPY_FRAMEWORK, frameworkActor, frameworkRequest) } def publish(identifier: String) = Action.async { implicit request => - val result = ResponseHandler.OK() - val response = JavaJsonUtils.serialize(result) - Future(Ok(response).as("application/json")) + val headers = commonHeaders() + val 
body = requestBody() + val framework = body.getOrDefault(Constants.FRAMEWORK, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + framework.putAll(headers) + framework.putAll(Map("identifier" -> identifier).asJava) + val frameworkRequest = getRequest(framework, headers, Constants.PUBLISH_FRAMEWORK) + setRequestContext(frameworkRequest, Constants.FRAMEWORK_SCHEMA_VERSION, objectType, Constants.FRAMEWORK_SCHEMA_NAME) + getResult(ApiId.PUBLISH_FRAMEWORK, frameworkActor, frameworkRequest) + } } diff --git a/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkTermController.scala b/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkTermController.scala new file mode 100644 index 000000000..4422d1f3f --- /dev/null +++ b/taxonomy-api/taxonomy-service/app/controllers/v3/FrameworkTermController.scala @@ -0,0 +1,68 @@ +package controllers.v3 + +import akka.actor.{ActorRef, ActorSystem} +import controllers.BaseController +import org.sunbird.utils.Constants +import play.api.mvc.ControllerComponents +import utils.{ActorNames, ApiId} + +import javax.inject.{Inject, Named} +import scala.concurrent.ExecutionContext + +class FrameworkTermController @Inject()(@Named(ActorNames.TERM_ACTOR) termActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem)(implicit exec: ExecutionContext) extends BaseController(cc) { + + val objectType = "Term" + def createFrameworkTerm(framework: String, category: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val term = body.getOrDefault(Constants.TERM, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + term.put(Constants.FRAMEWORK, framework) + term.put(Constants.CATEGORY, category) + term.putAll(headers) + val termRequest = getRequest(term, headers, Constants.CREATE_TERM) + setRequestContext(termRequest, Constants.TERM_SCHEMA_VERSION, objectType, Constants.TERM_SCHEMA_NAME) + getResult(ApiId.CREATE_TERM, termActor, termRequest) + } 
+ + def readFrameworkTerm(termId: String, framework: String, category: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val term = body.getOrDefault(Constants.TERM, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + term.put(Constants.TERM, termId) + term.put(Constants.CATEGORY, category) + term.put(Constants.FRAMEWORK, framework) + term.putAll(headers) + val readTermRequest = getRequest(term, headers, Constants.READ_TERM) + setRequestContext(readTermRequest, Constants.TERM_SCHEMA_VERSION, objectType, Constants.TERM_SCHEMA_NAME) + getResult(ApiId.READ_TERM, termActor, readTermRequest) + } + + def updateFrameworkTerm(termId: String, framework: String, category: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val term = body.getOrDefault(Constants.TERM, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + term.put(Constants.TERM, termId) + term.put(Constants.CATEGORY, category) + term.put(Constants.FRAMEWORK, framework) + term.putAll(headers) + val termRequest = getRequest(term, headers, Constants.UPDATE_TERM) + setRequestContext(termRequest, Constants.TERM_SCHEMA_VERSION, objectType, Constants.TERM_SCHEMA_NAME) + termRequest.getContext.put(Constants.TERM, termId) + getResult(ApiId.UPDATE_TERM, termActor, termRequest) + } + + def retireFrameworkTerm(termId: String, framework: String, category: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val term = body.getOrDefault(Constants.TERM, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + term.put(Constants.TERM, termId) + term.put(Constants.CATEGORY, category) + term.put(Constants.FRAMEWORK, framework) + term.putAll(headers) + val termRequest = getRequest(term, headers, Constants.RETIRE_TERM) + setRequestContext(termRequest, Constants.TERM_SCHEMA_VERSION, objectType, 
Constants.TERM_SCHEMA_NAME) + termRequest.getContext.put(Constants.TERM, termId) + getResult(ApiId.RETIRE_TERM, termActor, termRequest) + } + +} \ No newline at end of file diff --git a/taxonomy-api/taxonomy-service/app/modules/TaxonomyModule.scala b/taxonomy-api/taxonomy-service/app/modules/TaxonomyModule.scala index 25f2cd660..300db2f5a 100644 --- a/taxonomy-api/taxonomy-service/app/modules/TaxonomyModule.scala +++ b/taxonomy-api/taxonomy-service/app/modules/TaxonomyModule.scala @@ -1,7 +1,7 @@ package modules import com.google.inject.AbstractModule -import org.sunbird.actors.{HealthActor, ObjectCategoryActor, ObjectCategoryDefinitionActor} +import org.sunbird.actors.{CategoryActor, CategoryInstanceActor, HealthActor, ObjectCategoryActor, ObjectCategoryDefinitionActor, FrameworkActor, TermActor} import play.libs.akka.AkkaGuiceSupport import utils.ActorNames @@ -12,6 +12,10 @@ class TaxonomyModule extends AbstractModule with AkkaGuiceSupport { bindActor(classOf[HealthActor], ActorNames.HEALTH_ACTOR) bindActor(classOf[ObjectCategoryActor], ActorNames.OBJECT_CATEGORY_ACTOR) bindActor(classOf[ObjectCategoryDefinitionActor], ActorNames.OBJECT_CATEGORY_DEFINITION_ACTOR) + bindActor(classOf[FrameworkActor], ActorNames.FRAMEWORK_ACTOR) + bindActor(classOf[CategoryActor], ActorNames.CATEGORY_ACTOR) + bindActor(classOf[CategoryInstanceActor], ActorNames.CATEGORY_INSTANCE_ACTOR) + bindActor(classOf[TermActor], ActorNames.TERM_ACTOR) println("Initialized application actors for taxonomy service") } } diff --git a/taxonomy-api/taxonomy-service/app/utils/ActorNames.scala b/taxonomy-api/taxonomy-service/app/utils/ActorNames.scala index 0d3a17695..213a10897 100644 --- a/taxonomy-api/taxonomy-service/app/utils/ActorNames.scala +++ b/taxonomy-api/taxonomy-service/app/utils/ActorNames.scala @@ -5,5 +5,9 @@ object ActorNames { final val HEALTH_ACTOR = "healthActor" final val OBJECT_CATEGORY_ACTOR = "objectCategoryActor" final val OBJECT_CATEGORY_DEFINITION_ACTOR = 
"objectCategoryDefinitionActor" + final val FRAMEWORK_ACTOR = "frameworkActor" + final val CATEGORY_ACTOR = "categoryActor" + final val CATEGORY_INSTANCE_ACTOR = "categoryInstanceActor" + final val TERM_ACTOR = "TermActor" } diff --git a/taxonomy-api/taxonomy-service/app/utils/ApiId.scala b/taxonomy-api/taxonomy-service/app/utils/ApiId.scala index 2947f8da6..96a0c1f70 100644 --- a/taxonomy-api/taxonomy-service/app/utils/ApiId.scala +++ b/taxonomy-api/taxonomy-service/app/utils/ApiId.scala @@ -9,5 +9,25 @@ object ApiId { final val CREATE_OBJECT_CATEGORY_DEFINITION = "api.object.category.definition.create" final val READ_OBJECT_CATEGORY_DEFINITION = "api.object.category.definition.read" final val UPDATE_OBJECT_CATEGORY_DEFINITION = "api.object.category.definition.update" + final val CREATE_FRAMEWORK = "api.taxonomy.framework.create" + final val READ_FRAMEWORK = "api.taxonomy.framework.read" + final val UPDATE_FRAMEWORK = "api.taxonomy.framework.update" + final val RETIRE_FRAMEWORK = "api.taxonomy.framework.retire" + final val COPY_FRAMEWORK = "api.taxonomy.framework.copy" + final val PUBLISH_FRAMEWORK = "api.taxonomy.framework.publish" + final val CREATE_CATEGORY = "api.taxonomy.category.create" + final val READ_CATEGORY = "api.taxonomy.category.read" + final val UPDATE_CATEGORY = "api.taxonomy.category.update" + final val RETIRE_CATEGORY = "api.taxonomy.category.retire" + + final val CREATE_CATEGORY_INSTANCE = "api.taxonomy.category.instance.create" + final val READ_CATEGORY_INSTANCE = "api.taxonomy.category.instance.read" + final val UPDATE_CATEGORY_INSTANCE = "api.taxonomy.category.instance.update" + final val RETIRE_CATEGORY_INSTANCE = "api.taxonomy.category.instance.retire" + + final val CREATE_TERM = "api.taxonomy.term.create" + final val READ_TERM = "api.taxonomy.term.read" + final val UPDATE_TERM = "api.taxonomy.term.update" + final val RETIRE_TERM = "api.taxonomy.term.retire" } diff --git a/taxonomy-api/taxonomy-service/conf/application.conf 
b/taxonomy-api/taxonomy-service/conf/application.conf index 11b9db9b1..f4237f5ef 100644 --- a/taxonomy-api/taxonomy-service/conf/application.conf +++ b/taxonomy-api/taxonomy-service/conf/application.conf @@ -161,7 +161,7 @@ play.server.netty { # If you run Play on Linux, you can use Netty's native socket transport # for higher performance with less garbage. - transport = "native" + transport = "jdk" } ## WS (HTTP Client) @@ -280,7 +280,7 @@ play.filters { play.http.parser.maxMemoryBuffer = 50MB akka.http.parsing.max-content-length = 50MB -schema.base_path = "../../schemas/" +schema.base_path="../../schemas/" # Graph Configuration graph.dir=/data/testingGraphDB @@ -316,5 +316,17 @@ languageCode { } platform.language.codes=["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd"] -objectcategorydefinition.keyspace=category_store - +objectcategorydefinition.keyspace=dev_category_store + +cloudstorage.metadata.replace_absolute_path=false +cloudstorage.relative_path_prefix= "CONTENT_STORAGE_BASE_PATH" +cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" +cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net"] +cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"] +cloud_storage_container="sunbird-content-dev" + +framework.keyspace="dev_hierarchy_store" +framework.hierarchy.table="framework_hierarchy" +framework.categories_cached=["subject", "medium", "gradeLevel", "board"] +framework.cache.ttl=86400 +framework.cache.read=true \ No newline at end of file diff --git a/taxonomy-api/taxonomy-service/conf/routes b/taxonomy-api/taxonomy-service/conf/routes index 4a1c21232..32fb3f509 100644 --- a/taxonomy-api/taxonomy-service/conf/routes +++ b/taxonomy-api/taxonomy-service/conf/routes @@ -5,7 +5,7 @@ GET /health controllers.HealthController.health # Framework API's POST /framework/v3/create 
controllers.v3.FrameworkController.createFramework -GET /framework/v3/read/:identifier controllers.v3.FrameworkController.readFramework(identifier:String, categories: Option[String]) +GET /framework/v3/read/:identifier controllers.v3.FrameworkController.readFramework(identifier:String, fields: Option[String],categories: Option[String]) PATCH /framework/v3/update/:identifier controllers.v3.FrameworkController.updateFramework(identifier:String) POST /framework/v3/list controllers.v3.FrameworkController.listFramework DELETE /framework/v3/retire/:identifier controllers.v3.FrameworkController.retire(identifier:String) @@ -13,10 +13,22 @@ POST /framework/v3/copy/:identifier controllers.v3.Framewo POST /framework/v3/publish/:identifier controllers.v3.FrameworkController.publish(identifier:String) # Category API's +POST /framework/v3/category/master/create controllers.v3.CategoryController.createCategory +GET /framework/v3/category/master/read/:identifier controllers.v3.CategoryController.readCategory(identifier:String) +PATCH /framework/v3/category/master/update/:identifier controllers.v3.CategoryController.updateCategory(identifier:String) +DELETE /framework/v3/category/master/retire/:identifier controllers.v3.CategoryController.retireCategory(identifier:String) +# Framework Category API's +POST /framework/v3/category/create controllers.v3.CategoryInstanceController.createCategoryInstance(framework: String) +GET /framework/v3/category/read/:category controllers.v3.CategoryInstanceController.readCategoryInstance(category:String, framework: String) +PATCH /framework/v3/category/update/:category controllers.v3.CategoryInstanceController.updateCategoryInstance(category:String, framework: String) +DELETE /framework/v3/category/retire/:category controllers.v3.CategoryInstanceController.retireCategoryInstance(category:String, framework: String) # Term API's - +POST /framework/v3/term/create controllers.v3.FrameworkTermController.createFrameworkTerm(framework: String, category: 
String) +GET /framework/v3/term/read/:termId controllers.v3.FrameworkTermController.readFrameworkTerm(termId: String, framework: String, category: String) +PATCH /framework/v3/term/update/:termId controllers.v3.FrameworkTermController.updateFrameworkTerm(termId: String, framework: String, category: String) +DELETE /framework/v3/term/retire/:termId controllers.v3.FrameworkTermController.retireFrameworkTerm(termId: String, framework: String, category: String) # Object Category API's POST /object/category/v4/create controllers.v4.ObjectCategoryController.create diff --git a/taxonomy-api/taxonomy-service/test/controllers/v3/CategoryInstanceSpec.scala b/taxonomy-api/taxonomy-service/test/controllers/v3/CategoryInstanceSpec.scala new file mode 100644 index 000000000..75e33015d --- /dev/null +++ b/taxonomy-api/taxonomy-service/test/controllers/v3/CategoryInstanceSpec.scala @@ -0,0 +1,39 @@ +package controllers.v3 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.FakeRequest +import play.api.test.Helpers._ +@RunWith(classOf[JUnitRunner]) +class CategoryInstanceSpec extends BaseSpec { + "CategoryInstanceController" should { + + val controller = app.injector.instanceOf[CategoryInstanceController] + + "return success response for create api" in { + val result = controller.createCategoryInstance("NCF")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read api" in { + val result = controller.readCategoryInstance("board","NCF")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for update api" in { + val result = controller.updateCategoryInstance("board","NCF")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for retire api" in { + val result = controller.retireCategoryInstance("board","NCF")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + 
} + } + +} diff --git a/taxonomy-api/taxonomy-service/test/controllers/v3/CategorySpec.scala b/taxonomy-api/taxonomy-service/test/controllers/v3/CategorySpec.scala new file mode 100644 index 000000000..c917c9140 --- /dev/null +++ b/taxonomy-api/taxonomy-service/test/controllers/v3/CategorySpec.scala @@ -0,0 +1,39 @@ +package controllers.v3 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.FakeRequest +import play.api.test.Helpers._ +@RunWith(classOf[JUnitRunner]) +class CategorySpec extends BaseSpec { + "CategoryController" should { + + val controller = app.injector.instanceOf[CategoryController] + + "return success response for create api" in { + val result = controller.createCategory()(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read api" in { + val result = controller.readCategory("test_content_all")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for update api" in { + val result = controller.updateCategory("test_content_all")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for retire api" in { + val result = controller.retireCategory("test_content_all")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + } + +} diff --git a/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkSpec.scala b/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkSpec.scala index 94e4f9f62..4ad3ae2d4 100644 --- a/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkSpec.scala +++ b/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkSpec.scala @@ -35,7 +35,7 @@ class FrameworkSpec extends BaseSpec { "return success response for read framework API" in { val controller = app.injector.instanceOf[controllers.v3.FrameworkController] - val result = controller.readFramework("do_123", Option(""))(FakeRequest()) + val 
result = controller.readFramework("do_123", Option(""),Option("") )(FakeRequest()) isOK(result) status(result) must equalTo(OK) } diff --git a/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkTermSpec.scala b/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkTermSpec.scala new file mode 100644 index 000000000..83ca478c6 --- /dev/null +++ b/taxonomy-api/taxonomy-service/test/controllers/v3/FrameworkTermSpec.scala @@ -0,0 +1,45 @@ +package controllers.v3 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.FakeRequest +import play.api.test.Helpers.{OK, status} +import play.api.test.Helpers._ + +@RunWith(classOf[JUnitRunner]) +class FrameworkTermSpec extends BaseSpec { + + "FrameworkTerm Controller " should { + + "return success response for create frameworkTerm API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkTermController] + val result = controller.createFrameworkTerm("NCF","board")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for update frameworkTerm API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkTermController] + val result = controller.updateFrameworkTerm("class1","NCF","board")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for retire frameworkTerm API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkTermController] + val result = controller.retireFrameworkTerm("class1","NCF","board")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkTermController] + val result = controller.readFrameworkTerm("class1","NCF","board" )(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + + } +} diff --git 
a/taxonomy-api/taxonomy-service/test/modules/TestModule.scala b/taxonomy-api/taxonomy-service/test/modules/TestModule.scala index 71a40937b..228710c39 100644 --- a/taxonomy-api/taxonomy-service/test/modules/TestModule.scala +++ b/taxonomy-api/taxonomy-service/test/modules/TestModule.scala @@ -14,6 +14,10 @@ class TestModule extends AbstractModule with AkkaGuiceSupport { bindActor(classOf[TestActor], ActorNames.HEALTH_ACTOR) bindActor(classOf[TestActor], ActorNames.OBJECT_CATEGORY_ACTOR) bindActor(classOf[TestActor], ActorNames.OBJECT_CATEGORY_DEFINITION_ACTOR) + bindActor(classOf[TestActor], ActorNames.FRAMEWORK_ACTOR) + bindActor(classOf[TestActor], ActorNames.CATEGORY_ACTOR) + bindActor(classOf[TestActor], ActorNames.CATEGORY_INSTANCE_ACTOR) + bindActor(classOf[TestActor], ActorNames.TERM_ACTOR) println("Test Module is initialized...") } } diff --git a/taxonomy-service-sbt/app/controllers/BaseController.scala b/taxonomy-service-sbt/app/controllers/BaseController.scala new file mode 100644 index 000000000..07271a486 --- /dev/null +++ b/taxonomy-service-sbt/app/controllers/BaseController.scala @@ -0,0 +1,75 @@ +package controllers + +import java.util.UUID + +import akka.actor.ActorRef +import akka.pattern.Patterns +import org.sunbird.common.DateUtils +import org.sunbird.common.dto.{Response, ResponseHandler} +import org.sunbird.common.exception.ResponseCode +import play.api.mvc._ +import utils.JavaJsonUtils + + +import scala.collection.JavaConverters._ +import scala.collection.mutable +import scala.concurrent.{ExecutionContext, Future} + +abstract class BaseController(protected val cc: ControllerComponents)(implicit exec: ExecutionContext) extends AbstractController(cc) { + + def requestBody()(implicit request: Request[AnyContent]) = { + val body = request.body.asJson.getOrElse("{}").toString + JavaJsonUtils.deserialize[java.util.Map[String, Object]](body).getOrDefault("request", new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + } + + def 
commonHeaders()(implicit request: Request[AnyContent]): java.util.Map[String, Object] = { + val customHeaders = Map("x-channel-id" -> "channel", "X-Consumer-ID" -> "consumerId", "X-App-Id" -> "appId") + customHeaders.map(ch => { + val value = request.headers.get(ch._1) + if (value.isDefined && !value.isEmpty) { + collection.mutable.HashMap[String, Object](ch._2 -> value.get).asJava + } else { + collection.mutable.HashMap[String, Object]().asJava + } + }).reduce((a, b) => { + a.putAll(b) + return a + }) + } + + def getRequest(input: java.util.Map[String, AnyRef], context: java.util.Map[String, AnyRef], operation: String): org.sunbird.common.dto.Request = { + new org.sunbird.common.dto.Request(context, input, operation, null); + } + + def getResult(apiId: String, actor: ActorRef, request: org.sunbird.common.dto.Request) : Future[Result] = { + val future = Patterns.ask(actor, request, 30000) recoverWith {case e: Exception => Future(ResponseHandler.getErrorResponse(e))} + future.map(f => { + val result = f.asInstanceOf[Response] + result.setId(apiId) + setResponseEnvelope(result) + val response = JavaJsonUtils.serialize(result); + result.getResponseCode match { + case ResponseCode.OK => Ok(response).as("application/json") + case ResponseCode.CLIENT_ERROR => BadRequest(response).as("application/json") + case ResponseCode.RESOURCE_NOT_FOUND => NotFound(response).as("application/json") + case _ => play.api.mvc.Results.InternalServerError(response).as("application/json") + } + }) + } + + def setResponseEnvelope(response: Response) = { + response.setTs(DateUtils.formatCurrentDate("yyyy-MM-dd'T'HH:mm:ss'Z'XXX")) + response.getParams.setResmsgid(UUID.randomUUID().toString) + } + + def setRequestContext(request:org.sunbird.common.dto.Request, version: String, objectType: String, schemaName: String): Unit = { + var contextMap: java.util.Map[String, AnyRef] = new java.util.HashMap[String, AnyRef](){{ + put("graph_id", "domain") + put("version" , version) + put("objectType" , 
objectType) + put("schemaName", schemaName) + }}; + request.setObjectType(objectType); + request.setContext(contextMap) + } +} diff --git a/taxonomy-service-sbt/app/controllers/HealthController.scala b/taxonomy-service-sbt/app/controllers/HealthController.scala new file mode 100644 index 000000000..2c6749650 --- /dev/null +++ b/taxonomy-service-sbt/app/controllers/HealthController.scala @@ -0,0 +1,23 @@ +package controllers + +import akka.actor.{ActorRef, ActorSystem} + +import javax.inject._ +import play.api.mvc._ +import handlers.SignalHandler +import utils.{ActorNames, ApiId} + +import scala.concurrent.{ExecutionContext, Future} + +class HealthController @Inject()(@Named(ActorNames.HEALTH_ACTOR) healthActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem, signalHandler: SignalHandler)(implicit ec: ExecutionContext) extends BaseController(cc) { + + def health() = Action.async { implicit request => + if (signalHandler.isShuttingDown) { + Future { + ServiceUnavailable + } + } else { + getResult(ApiId.APPLICATION_HEALTH, healthActor, new org.sunbird.common.dto.Request()) + } + } +} diff --git a/taxonomy-service-sbt/app/controllers/v3/FrameworkController.scala b/taxonomy-service-sbt/app/controllers/v3/FrameworkController.scala new file mode 100644 index 000000000..361403b1c --- /dev/null +++ b/taxonomy-service-sbt/app/controllers/v3/FrameworkController.scala @@ -0,0 +1,54 @@ +package controllers.v3 + +import scala.concurrent.{ExecutionContext, Future} +import controllers.BaseController +import javax.inject.{Inject, Singleton} +import org.sunbird.common.dto.ResponseHandler +import play.api.mvc.ControllerComponents +import utils.JavaJsonUtils + +@Singleton +class FrameworkController @Inject()(cc: ControllerComponents)(implicit exec: ExecutionContext) extends BaseController(cc) { + + def createFramework() = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + 
Future(Ok(response).as("application/json")) + } + + def readFramework(identifier: String, categories: Option[String]) = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + Future(Ok(response).as("application/json")) + } + + def retire(identifier: String) = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + Future(Ok(response).as("application/json")) + } + + def updateFramework(identifier: String) = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + Future(Ok(response).as("application/json")) + } + + def listFramework() = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + Future(Ok(response).as("application/json")) + } + + def copyFramework(identifier: String) = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + Future(Ok(response).as("application/json")) + } + + def publish(identifier: String) = Action.async { implicit request => + val result = ResponseHandler.OK() + val response = JavaJsonUtils.serialize(result) + Future(Ok(response).as("application/json")) + } +} diff --git a/taxonomy-service-sbt/app/controllers/v4/ObjectCategoryController.scala b/taxonomy-service-sbt/app/controllers/v4/ObjectCategoryController.scala new file mode 100644 index 000000000..415f0c423 --- /dev/null +++ b/taxonomy-service-sbt/app/controllers/v4/ObjectCategoryController.scala @@ -0,0 +1,49 @@ +package controllers.v4 + +import akka.actor.{ActorRef, ActorSystem} +import com.google.inject.Singleton +import controllers.BaseController +import javax.inject.{Inject, Named} +import org.sunbird.utils.Constants +import play.api.mvc.ControllerComponents +import utils.{ActorNames, ApiId} + +import scala.collection.JavaConverters._ +import 
scala.concurrent.ExecutionContext + +@Singleton +class ObjectCategoryController @Inject()(@Named(ActorNames.OBJECT_CATEGORY_ACTOR) objectCategoryActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem)(implicit exec: ExecutionContext) extends BaseController(cc) { + + val objectType = "ObjectCategory" + + def create() = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val category = body.getOrDefault(Constants.OBJECT_CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + category.putAll(headers) + val categoryRequest = getRequest(category, headers, Constants.CREATE_OBJECT_CATEGORY) + setRequestContext(categoryRequest, Constants.OBJECT_CATEGORY_SCHEMA_VERSION, objectType, Constants.OBJECT_CATEGORY_SCHEMA_NAME) + getResult(ApiId.CREATE_OBJECT_CATEGORY, objectCategoryActor, categoryRequest) + } + + def read(identifier: String, fields: Option[String]) = Action.async { implicit request => + val headers = commonHeaders() + val category = new java.util.HashMap().asInstanceOf[java.util.Map[String, Object]] + category.putAll(headers) + category.putAll(Map(Constants.IDENTIFIER -> identifier, Constants.FIELDS -> fields.getOrElse("")).asJava) + val categoryRequest = getRequest(category, headers, Constants.READ_OBJECT_CATEGORY) + setRequestContext(categoryRequest, Constants.OBJECT_CATEGORY_SCHEMA_VERSION, objectType, Constants.OBJECT_CATEGORY_SCHEMA_NAME) + getResult(ApiId.READ_OBJECT_CATEGORY, objectCategoryActor, categoryRequest) + } + + def update(identifier: String) = Action.async { implicit request => + val headers = commonHeaders() + val body = requestBody() + val category = body.getOrDefault(Constants.OBJECT_CATEGORY, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + category.putAll(headers) + val categoryRequest = getRequest(category, headers, Constants.UPDATE_OBJECT_CATEGORY) + setRequestContext(categoryRequest, Constants.OBJECT_CATEGORY_SCHEMA_VERSION, objectType, 
Constants.OBJECT_CATEGORY_SCHEMA_NAME) + categoryRequest.getContext.put(Constants.IDENTIFIER, identifier) + getResult(ApiId.UPDATE_OBJECT_CATEGORY, objectCategoryActor, categoryRequest) + } +} diff --git a/taxonomy-service-sbt/app/controllers/v4/ObjectCategoryDefinitionController.scala b/taxonomy-service-sbt/app/controllers/v4/ObjectCategoryDefinitionController.scala new file mode 100644 index 000000000..630464ee2 --- /dev/null +++ b/taxonomy-service-sbt/app/controllers/v4/ObjectCategoryDefinitionController.scala @@ -0,0 +1,69 @@ +package controllers.v4 + +import akka.actor.{ActorRef, ActorSystem} +import controllers.BaseController +import javax.inject.{Inject, Named} +import org.apache.commons.lang3.StringUtils +import org.sunbird.common.exception.ClientException +import org.sunbird.utils.Constants +import play.api.mvc.ControllerComponents +import utils.{ActorNames, ApiId} + +import scala.collection.JavaConverters._ +import scala.concurrent.ExecutionContext + +class ObjectCategoryDefinitionController @Inject()(@Named(ActorNames.OBJECT_CATEGORY_DEFINITION_ACTOR) objCategoryDefinitionActor: ActorRef, cc: ControllerComponents, actorSystem: ActorSystem)(implicit exec: ExecutionContext) extends BaseController(cc) { + + val OBJECT_TYPE = "ObjectCategoryDefinition" + val OBJECT_CATEGORY_DEFINITION = "objectCategoryDefinition" + val SCHEMA_NAME: String = "objectcategorydefinition" + val SCHEMA_VERSION = "1.0" + + def create() = Action.async { implicit request => + val headers = commonHeaders() + headers.remove("channel") + val body = requestBody() + val categoryDefinition = body.getOrDefault(OBJECT_CATEGORY_DEFINITION, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryDefinition.putAll(headers) + val categoryDefinitionReq = getRequest(categoryDefinition, headers, Constants.CREATE_OBJECT_CATEGORY_DEFINITION) + setRequestContext(categoryDefinitionReq, SCHEMA_VERSION, OBJECT_TYPE, SCHEMA_NAME) + getResult(ApiId.CREATE_OBJECT_CATEGORY_DEFINITION, 
objCategoryDefinitionActor, categoryDefinitionReq) + } + + def read(identifier: String, fields: Option[String]) = Action.async { implicit request => + val headers = commonHeaders() + val categoryDefinition = new java.util.HashMap().asInstanceOf[java.util.Map[String, Object]] + categoryDefinition.putAll(headers) + categoryDefinition.putAll(Map(Constants.IDENTIFIER -> identifier, Constants.FIELDS -> fields.getOrElse("")).asJava) + val categoryDefinitionReq = getRequest(categoryDefinition, headers, Constants.READ_OBJECT_CATEGORY_DEFINITION) + setRequestContext(categoryDefinitionReq, SCHEMA_VERSION, OBJECT_TYPE, SCHEMA_NAME) + getResult(ApiId.READ_OBJECT_CATEGORY_DEFINITION, objCategoryDefinitionActor, categoryDefinitionReq) + } + + def update(identifier: String) = Action.async { implicit request => + val headers = commonHeaders() + headers.remove("channel") + val body = requestBody() + val categoryDefinition = body.getOrDefault(OBJECT_CATEGORY_DEFINITION, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryDefinition.putAll(headers) + val categoryDefinitionReq = getRequest(categoryDefinition, headers, Constants.UPDATE_OBJECT_CATEGORY_DEFINITION) + setRequestContext(categoryDefinitionReq, SCHEMA_VERSION, OBJECT_TYPE, SCHEMA_NAME) + categoryDefinitionReq.getContext.put(Constants.IDENTIFIER, identifier) + getResult(ApiId.UPDATE_OBJECT_CATEGORY_DEFINITION, objCategoryDefinitionActor, categoryDefinitionReq) + } + + def readCategoryDefinition(fields: Option[String]) = Action.async { implicit request => + val headers = commonHeaders() + headers.remove("channel") + val body = requestBody() + val categoryDefinition = body.getOrDefault(OBJECT_CATEGORY_DEFINITION, new java.util.HashMap()).asInstanceOf[java.util.Map[String, Object]] + categoryDefinition.putAll(headers) + categoryDefinition.put(Constants.FIELDS, fields.getOrElse("")) + categoryDefinition.put("REQ_METHOD", request.method) + val categoryDefinitionReq = getRequest(categoryDefinition, 
headers, Constants.READ_OBJECT_CATEGORY_DEFINITION) + setRequestContext(categoryDefinitionReq, SCHEMA_VERSION, OBJECT_TYPE, SCHEMA_NAME) + getResult(ApiId.READ_OBJECT_CATEGORY_DEFINITION, objCategoryDefinitionActor, categoryDefinitionReq) + } + + +} diff --git a/taxonomy-service-sbt/app/filters/AccessLogFilter.scala b/taxonomy-service-sbt/app/filters/AccessLogFilter.scala new file mode 100644 index 000000000..aad9f8419 --- /dev/null +++ b/taxonomy-service-sbt/app/filters/AccessLogFilter.scala @@ -0,0 +1,45 @@ +package filters + +import akka.util.ByteString +import javax.inject.Inject +import org.sunbird.telemetry.util.TelemetryAccessEventUtil +import play.api.Logging +import play.api.libs.streams.Accumulator +import play.api.mvc._ + +import scala.concurrent.ExecutionContext +import scala.collection.JavaConverters._ + +class AccessLogFilter @Inject() (implicit ec: ExecutionContext) extends EssentialFilter with Logging { + + val xHeaderNames = Map("x-session-id" -> "X-Session-ID", "X-Consumer-ID" -> "x-consumer-id", "x-device-id" -> "X-Device-ID", "x-app-id" -> "APP_ID", "x-authenticated-userid" -> "X-Authenticated-Userid", "x-channel-id" -> "X-Channel-Id") + + def apply(nextFilter: EssentialAction) = new EssentialAction { + def apply(requestHeader: RequestHeader) = { + + val startTime = System.currentTimeMillis + + val accumulator: Accumulator[ByteString, Result] = nextFilter(requestHeader) + + accumulator.map { result => + val endTime = System.currentTimeMillis + val requestTime = endTime - startTime + + val path = requestHeader.uri + if(!path.contains("/health")){ + val headers = requestHeader.headers.headers.groupBy(_._1).mapValues(_.map(_._2)) + val appHeaders = headers.filter(header => xHeaderNames.keySet.contains(header._1.toLowerCase)) + .map(entry => (xHeaderNames.get(entry._1.toLowerCase()).get, entry._2.head)) + val otherDetails = Map[String, Any]("StartTime" -> startTime, "env" -> "assessment", + "RemoteAddress" -> requestHeader.remoteAddress, + 
"ContentLength" -> result.body.contentLength.getOrElse(0), + "Status" -> result.header.status, "Protocol" -> "http", + "path" -> path, + "Method" -> requestHeader.method.toString) + TelemetryAccessEventUtil.writeTelemetryEventLog((otherDetails ++ appHeaders).asInstanceOf[Map[String, AnyRef]].asJava) + } + result.withHeaders("Request-Time" -> requestTime.toString) + } + } + } + } \ No newline at end of file diff --git a/taxonomy-service-sbt/app/handlers/SignalHandler.scala b/taxonomy-service-sbt/app/handlers/SignalHandler.scala new file mode 100644 index 000000000..4cad301c1 --- /dev/null +++ b/taxonomy-service-sbt/app/handlers/SignalHandler.scala @@ -0,0 +1,33 @@ +package handlers + +import java.util.concurrent.TimeUnit + +import akka.actor.ActorSystem +import javax.inject.{Inject, Singleton} +import org.slf4j.LoggerFactory +import play.api.inject.DefaultApplicationLifecycle +import sun.misc.Signal + +import scala.concurrent.duration.Duration + +@Singleton +class SignalHandler @Inject()(implicit actorSystem: ActorSystem, lifecycle: DefaultApplicationLifecycle) { + val LOG = LoggerFactory.getLogger(classOf[SignalHandler]) + val STOP_DELAY = Duration.create(30, TimeUnit.SECONDS) + var isShuttingDown = false + + println("Initializing SignalHandler...") + Signal.handle(new Signal("TERM"), new sun.misc.SignalHandler() { + override def handle(signal: Signal): Unit = { + // $COVERAGE-OFF$ Disabling scoverage as this code is impossible to test + isShuttingDown = true + println("Termination required, swallowing SIGTERM to allow current requests to finish. : " + System.currentTimeMillis()) + actorSystem.scheduler.scheduleOnce(STOP_DELAY)(() => { + println("ApplicationLifecycle stop triggered... 
: " + System.currentTimeMillis()) + lifecycle.stop() + })(actorSystem.dispatcher) + // $COVERAGE-ON + } + }) +} + diff --git a/taxonomy-service-sbt/app/modules/TaxonomyModule.scala b/taxonomy-service-sbt/app/modules/TaxonomyModule.scala new file mode 100644 index 000000000..25f2cd660 --- /dev/null +++ b/taxonomy-service-sbt/app/modules/TaxonomyModule.scala @@ -0,0 +1,17 @@ +package modules + +import com.google.inject.AbstractModule +import org.sunbird.actors.{HealthActor, ObjectCategoryActor, ObjectCategoryDefinitionActor} +import play.libs.akka.AkkaGuiceSupport +import utils.ActorNames + +class TaxonomyModule extends AbstractModule with AkkaGuiceSupport { + + override def configure() = { + super.configure() + bindActor(classOf[HealthActor], ActorNames.HEALTH_ACTOR) + bindActor(classOf[ObjectCategoryActor], ActorNames.OBJECT_CATEGORY_ACTOR) + bindActor(classOf[ObjectCategoryDefinitionActor], ActorNames.OBJECT_CATEGORY_DEFINITION_ACTOR) + println("Initialized application actors for taxonomy service") + } +} diff --git a/taxonomy-service-sbt/app/utils/ActorNames.scala b/taxonomy-service-sbt/app/utils/ActorNames.scala new file mode 100644 index 000000000..0d3a17695 --- /dev/null +++ b/taxonomy-service-sbt/app/utils/ActorNames.scala @@ -0,0 +1,9 @@ +package utils + +object ActorNames { + + final val HEALTH_ACTOR = "healthActor" + final val OBJECT_CATEGORY_ACTOR = "objectCategoryActor" + final val OBJECT_CATEGORY_DEFINITION_ACTOR = "objectCategoryDefinitionActor" + +} diff --git a/taxonomy-service-sbt/app/utils/ApiId.scala b/taxonomy-service-sbt/app/utils/ApiId.scala new file mode 100644 index 000000000..2947f8da6 --- /dev/null +++ b/taxonomy-service-sbt/app/utils/ApiId.scala @@ -0,0 +1,13 @@ +package utils + +object ApiId { + + final val APPLICATION_HEALTH = "api.taxonomy.service.health" + final val CREATE_OBJECT_CATEGORY = "api.object.category.create" + final val READ_OBJECT_CATEGORY = "api.object.category.read" + final val UPDATE_OBJECT_CATEGORY = 
"api.object.category.update" + final val CREATE_OBJECT_CATEGORY_DEFINITION = "api.object.category.definition.create" + final val READ_OBJECT_CATEGORY_DEFINITION = "api.object.category.definition.read" + final val UPDATE_OBJECT_CATEGORY_DEFINITION = "api.object.category.definition.update" + +} diff --git a/taxonomy-service-sbt/app/utils/ItemSetOperations.scala b/taxonomy-service-sbt/app/utils/ItemSetOperations.scala new file mode 100644 index 000000000..4f3fcaa5b --- /dev/null +++ b/taxonomy-service-sbt/app/utils/ItemSetOperations.scala @@ -0,0 +1,5 @@ +package utils + +object ItemSetOperations extends Enumeration { + val createItemSet, readItemSet, updateItemSet, reviewItemSet, retireItemSet = Value +} diff --git a/taxonomy-service-sbt/app/utils/JavaJsonUtils.scala b/taxonomy-service-sbt/app/utils/JavaJsonUtils.scala new file mode 100644 index 000000000..2093c2e33 --- /dev/null +++ b/taxonomy-service-sbt/app/utils/JavaJsonUtils.scala @@ -0,0 +1,38 @@ +package utils + +import java.lang.reflect.{ParameterizedType, Type} + +import com.fasterxml.jackson.core.`type`.TypeReference +import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper} + +object JavaJsonUtils { + + @transient val mapper = new ObjectMapper(); + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); +// mapper.configure(SerializationFeature.WRITE_NULL_MAP_VALUES, false); +// mapper.setSerializationInclusion(Include.NON_NULL); + + @throws(classOf[Exception]) + def serialize(obj: AnyRef): String = { + mapper.writeValueAsString(obj); + } + + @throws(classOf[Exception]) + def deserialize[T: Manifest](value: String): T = mapper.readValue(value, typeReference[T]); + + private[this] def typeReference[T: Manifest] = new TypeReference[T] { + override def getType = typeFromManifest(manifest[T]) + } + + + private[this] def typeFromManifest(m: Manifest[_]): Type = { + if (m.typeArguments.isEmpty) { m.runtimeClass } + // $COVERAGE-OFF$Disabling scoverage as this code is 
impossible to test + else new ParameterizedType { + def getRawType = m.runtimeClass + def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray + def getOwnerType = null + } + // $COVERAGE-ON$ + } +} diff --git a/taxonomy-service-sbt/build.sbt b/taxonomy-service-sbt/build.sbt new file mode 100644 index 000000000..57d79a760 --- /dev/null +++ b/taxonomy-service-sbt/build.sbt @@ -0,0 +1,30 @@ +import sbt.Keys._ +import play.sbt.PlaySettings + +lazy val root = (project in file(".")) + .enablePlugins(PlayScala, PlayNettyServer) + .disablePlugins(PlayAkkaHttpServer) + .settings( + name := "taxonomy-service-sbt", + version := "1.0-SNAPSHOT", + scalaVersion := "2.12.8", + javacOptions ++= Seq("-source", "11", "-target", "11"), + libraryDependencies ++= Seq( + guice, + "org.joda" % "joda-convert" % "2.1.2", + "net.logstash.logback" % "logstash-logback-encoder" % "5.2", + "org.sunbird" % "taxonomy-actors" % "1.0-SNAPSHOT", + "io.lemonlabs" %% "scala-uri" % "1.4.10", + "net.codingwell" %% "scala-guice" % "4.2.5", + "com.typesafe.play" %% "play-specs2" % "2.7.9", + "org.scalatestplus.play" %% "scalatestplus-play" % "4.0.3" % Test + ) + ) + .settings( + libraryDependencies += ("org.sunbird" % "taxonomy-actors" % "1.0-SNAPSHOT") + .exclude("com.typesafe.akka","akka-actor_2.11") + .exclude("org.scala-lang.modules","scala-java8-compat_2.11") + .exclude("org.scala-lang.modules","scala-parser-combinators_2.11") + .exclude("com.typesafe.akka","akka-slf4j_2.11") + ) + resolvers += "Local Maven Repository" at "file:///"+Path.userHome+"/.m2/repository" diff --git a/taxonomy-service-sbt/conf/application.conf b/taxonomy-service-sbt/conf/application.conf new file mode 100644 index 000000000..b67ddee9a --- /dev/null +++ b/taxonomy-service-sbt/conf/application.conf @@ -0,0 +1,339 @@ +include "secure" + +# db connections = ((physical_core_count * 2) + effective_spindle_count) +fixedConnectionPool = 5 + +repository.dispatcher { + executor = "thread-pool-executor" + throughput 
= 1 + thread-pool-executor { + fixed-pool-size = ${fixedConnectionPool} + } +} + +# This is the main configuration file for the application. +# https://www.playframework.com/documentation/latest/ConfigFile +# ~~~~~ +# Play uses HOCON as its configuration file format. HOCON has a number +# of advantages over other config formats, but there are two things that +# can be used when modifying settings. +# +# You can include other configuration files in this main application.conf file: +#include "extra-config.conf" +# +# You can declare variables and substitute for them: +#mykey = ${some.value} +# +# And if an environment variable exists when there is no other substitution, then +# HOCON will fall back to substituting environment variable: +#mykey = ${JAVA_HOME} + +## Akka +# https://www.playframework.com/documentation/latest/ScalaAkka#Configuration +# https://www.playframework.com/documentation/latest/JavaAkka#Configuration +# ~~~~~ +# Play uses Akka internally and exposes Akka Streams and actors in Websockets and +# other streaming HTTP responses. +akka { + # "akka.log-config-on-start" is extraordinarly useful because it log the complete + # configuration at INFO level, including defaults and overrides, so it s worth + # putting at the very top. + # + # Put the following in your conf/logback.xml file: + # + # + # + # And then uncomment this line to debug the configuration. + # + #log-config-on-start = true +} + +## Secret key +# http://www.playframework.com/documentation/latest/ApplicationSecret +# ~~~~~ +# The secret key is used to sign Play's session cookie. +# This must be changed for production, but we don't recommend you change it in this file. +play.http.secret.key = a-long-secret-to-calm-the-rage-of-the-entropy-gods + +## Modules +# https://www.playframework.com/documentation/latest/Modules +# ~~~~~ +# Control which modules are loaded when Play starts. Note that modules are +# the replacement for "GlobalSettings", which are deprecated in 2.5.x. 
+# Please see https://www.playframework.com/documentation/latest/GlobalSettings +# for more information. +# +# You can also extend Play functionality by using one of the publically available +# Play modules: https://playframework.com/documentation/latest/ModuleDirectory +play.modules { + # By default, Play will load any class called Module that is defined + # in the root package (the "app" directory), or you can define them + # explicitly below. + # If there are any built-in modules that you want to enable, you can list them here. + #enabled += my.application.Module + + # If there are any built-in modules that you want to disable, you can list them here. + #disabled += "" + enabled += modules.TaxonomyModule +} + +## IDE +# https://www.playframework.com/documentation/latest/IDE +# ~~~~~ +# Depending on your IDE, you can add a hyperlink for errors that will jump you +# directly to the code location in the IDE in dev mode. The following line makes +# use of the IntelliJ IDEA REST interface: +#play.editor="http://localhost:63342/api/file/?file=%s&line=%s" + +## Internationalisation +# https://www.playframework.com/documentation/latest/JavaI18N +# https://www.playframework.com/documentation/latest/ScalaI18N +# ~~~~~ +# Play comes with its own i18n settings, which allow the user's preferred language +# to map through to internal messages, or allow the language to be stored in a cookie. +play.i18n { + # The application languages + langs = [ "en" ] + + # Whether the language cookie should be secure or not + #langCookieSecure = true + + # Whether the HTTP only attribute of the cookie should be set to true + #langCookieHttpOnly = true +} + +## Play HTTP settings +# ~~~~~ +play.http { + ## Router + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # Define the Router object to use for this application. 
+ # This router will be looked up first when the application is starting up, + # so make sure this is the entry point. + # Furthermore, it's assumed your route file is named properly. + # So for an application router like `my.application.Router`, + # you may need to define a router file `conf/my.application.routes`. + # Default to Routes in the root package (aka "apps" folder) (and conf/routes) + #router = my.application.Router + + ## Action Creator + # https://www.playframework.com/documentation/latest/JavaActionCreator + # ~~~~~ + #actionCreator = null + + ## ErrorHandler + # https://www.playframework.com/documentation/latest/JavaRouting + # https://www.playframework.com/documentation/latest/ScalaRouting + # ~~~~~ + # If null, will attempt to load a class called ErrorHandler in the root package, + #errorHandler = null + + ## Session & Flash + # https://www.playframework.com/documentation/latest/JavaSessionFlash + # https://www.playframework.com/documentation/latest/ScalaSessionFlash + # ~~~~~ + session { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. + #httpOnly = true + + # Sets the max-age field of the cookie to 5 minutes. + # NOTE: this only sets when the browser will discard the cookie. Play will consider any + # cookie value with a valid signature to be a valid session forever. To implement a server side session timeout, + # you need to put a timestamp in the session and check it at regular intervals to possibly expire it. + #maxAge = 300 + + # Sets the domain on the session cookie. + #domain = "example.com" + } + + flash { + # Sets the cookie to be sent only over HTTPS. + #secure = true + + # Sets the cookie to be accessed only by the server. 
+ #httpOnly = true + } +} + +play.http.parser.maxDiskBuffer = 10MB +parsers.anyContent.maxLength = 10MB + +play.server.provider = play.core.server.NettyServerProvider + +## Netty Provider +# https://www.playframework.com/documentation/latest/SettingsNetty +# ~~~~~ +play.server.netty { + # Whether the Netty wire should be logged + log.wire = true + + # If you run Play on Linux, you can use Netty's native socket transport + # for higher performance with less garbage. + transport = "native" +} + +## WS (HTTP Client) +# https://www.playframework.com/documentation/latest/ScalaWS#Configuring-WS +# ~~~~~ +# The HTTP client primarily used for REST APIs. The default client can be +# configured directly, but you can also create different client instances +# with customized settings. You must enable this by adding to build.sbt: +# +# libraryDependencies += ws // or javaWs if using java +# +play.ws { + # Sets HTTP requests not to follow 302 requests + #followRedirects = false + + # Sets the maximum number of open HTTP connections for the client. + #ahc.maxConnectionsTotal = 50 + + ## WS SSL + # https://www.playframework.com/documentation/latest/WsSSL + # ~~~~~ + ssl { + # Configuring HTTPS with Play WS does not require programming. You can + # set up both trustManager and keyManager for mutual authentication, and + # turn on JSSE debugging in development with a reload. + #debug.handshake = true + #trustManager = { + # stores = [ + # { type = "JKS", path = "exampletrust.jks" } + # ] + #} + } +} + +## Cache +# https://www.playframework.com/documentation/latest/JavaCache +# https://www.playframework.com/documentation/latest/ScalaCache +# ~~~~~ +# Play comes with an integrated cache API that can reduce the operational +# overhead of repeated requests. 
You must enable this by adding to build.sbt: +# +# libraryDependencies += cache +# +play.cache { + # If you want to bind several caches, you can bind the individually + #bindCaches = ["db-cache", "user-cache", "session-cache"] +} + +## Filter Configuration +# https://www.playframework.com/documentation/latest/Filters +# ~~~~~ +# There are a number of built-in filters that can be enabled and configured +# to give Play greater security. +# +play.filters { + + # Enabled filters are run automatically against Play. + # CSRFFilter, AllowedHostFilters, and SecurityHeadersFilters are enabled by default. + enabled = [filters.AccessLogFilter] + + # Disabled filters remove elements from the enabled list. + # disabled += filters.CSRFFilter + + + ## CORS filter configuration + # https://www.playframework.com/documentation/latest/CorsFilter + # ~~~~~ + # CORS is a protocol that allows web applications to make requests from the browser + # across different domains. + # NOTE: You MUST apply the CORS configuration before the CSRF filter, as CSRF has + # dependencies on CORS settings. + cors { + # Filter paths by a whitelist of path prefixes + #pathPrefixes = ["/some/path", ...] + + # The allowed origins. If null, all origins are allowed. + #allowedOrigins = ["http://www.example.com"] + + # The allowed HTTP methods. If null, all methods are allowed + #allowedHttpMethods = ["GET", "POST"] + } + + ## Security headers filter configuration + # https://www.playframework.com/documentation/latest/SecurityHeaders + # ~~~~~ + # Defines security headers that prevent XSS attacks. + # If enabled, then all options are set to the below configuration by default: + headers { + # The X-Frame-Options header. If null, the header is not set. + #frameOptions = "DENY" + + # The X-XSS-Protection header. If null, the header is not set. + #xssProtection = "1; mode=block" + + # The X-Content-Type-Options header. If null, the header is not set. 
+ #contentTypeOptions = "nosniff" + + # The X-Permitted-Cross-Domain-Policies header. If null, the header is not set. + #permittedCrossDomainPolicies = "master-only" + + # The Content-Security-Policy header. If null, the header is not set. + #contentSecurityPolicy = "default-src 'self'" + } + + ## Allowed hosts filter configuration + # https://www.playframework.com/documentation/latest/AllowedHostsFilter + # ~~~~~ + # Play provides a filter that lets you configure which hosts can access your application. + # This is useful to prevent cache poisoning attacks. + hosts { + # Allow requests to example.com, its subdomains, and localhost:9000. + #allowed = [".example.com", "localhost:9000"] + } +} + +play.http.parser.maxMemoryBuffer = 50MB +akka.http.parsing.max-content-length = 50MB + +schema.base_path = "../../schemas/" + +# Graph Configuration +graph.dir=/data/testingGraphDB +akka.request_timeout=30 +environment.id=10000000 +graph.ids=["domain"] +graph.passport.key.base=31b6fd1c4d64e745c867e61a45edc34a +route.domain="bolt://localhost:7687" +route.bolt.write.domain="bolt://localhost:7687" +route.bolt.read.domain="bolt://localhost:7687" +route.bolt.comment.domain="bolt://localhost:7687" +route.all="bolt://localhost:7687" +route.bolt.write.all="bolt://localhost:7687" +route.bolt.read.all="bolt://localhost:7687" +route.bolt.comment.all="bolt://localhost:7687" + +# Cassandra Configuration +cassandra.lp.connection="127.0.0.1:9042" +cassandra.lpa.connection="127.0.0.1:9042" + + +languageCode { + assamese : "as" + bengali : "bn" + english : "en" + gujarati : "gu" + hindi : "hi" + kannada : "ka" + marathi : "mr" + odia : "or" + tamil : "ta" + telugu : "te" +} + +platform.language.codes=["as","bn","en","gu","hi","hoc","jun","ka","mai","mr","unx","or","san","sat","ta","te","urd"] +objectcategorydefinition.keyspace=dev_category_store + +cloudstorage.metadata.replace_absolute_path=false +cloudstorage.relative_path_prefix= "CONTENT_STORAGE_BASE_PATH" 
+cloudstorage.read_base_path="https://sunbirddev.blob.core.windows.net" +cloudstorage.write_base_path=["https://sunbirddev.blob.core.windows.net"] +cloudstorage.metadata.list=["appIcon","posterImage","artifactUrl","downloadUrl","variants","previewUrl","pdfUrl", "streamingUrl", "toc_url"] +cloud_storage_container="sunbird-content-dev" diff --git a/taxonomy-service-sbt/conf/logback.xml b/taxonomy-service-sbt/conf/logback.xml new file mode 100644 index 000000000..ee6900e17 --- /dev/null +++ b/taxonomy-service-sbt/conf/logback.xml @@ -0,0 +1,28 @@ + + + + + + + + + + %d %msg%n + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/taxonomy-service-sbt/conf/routes b/taxonomy-service-sbt/conf/routes new file mode 100644 index 000000000..4a1c21232 --- /dev/null +++ b/taxonomy-service-sbt/conf/routes @@ -0,0 +1,30 @@ +# Routes +# This file defines all application routes (Higher priority routes first) +# ~~~~ +GET /health controllers.HealthController.health + +# Framework API's +POST /framework/v3/create controllers.v3.FrameworkController.createFramework +GET /framework/v3/read/:identifier controllers.v3.FrameworkController.readFramework(identifier:String, categories: Option[String]) +PATCH /framework/v3/update/:identifier controllers.v3.FrameworkController.updateFramework(identifier:String) +POST /framework/v3/list controllers.v3.FrameworkController.listFramework +DELETE /framework/v3/retire/:identifier controllers.v3.FrameworkController.retire(identifier:String) +POST /framework/v3/copy/:identifier controllers.v3.FrameworkController.copyFramework(identifier:String) +POST /framework/v3/publish/:identifier controllers.v3.FrameworkController.publish(identifier:String) + +# Category API's + + +# Term API's + + +# Object Category API's +POST /object/category/v4/create controllers.v4.ObjectCategoryController.create +PATCH /object/category/v4/update/:identifier controllers.v4.ObjectCategoryController.update(identifier:String) +GET 
/object/category/v4/read/:identifier controllers.v4.ObjectCategoryController.read(identifier:String, fields:Option[String]) + +# ObjectCategoryDefinition API's +POST /object/category/definition/v4/create controllers.v4.ObjectCategoryDefinitionController.create +GET /object/category/definition/v4/read/:identifier controllers.v4.ObjectCategoryDefinitionController.read(identifier:String, fields:Option[String]) +PATCH /object/category/definition/v4/update/:identifier controllers.v4.ObjectCategoryDefinitionController.update(identifier:String) +POST /object/category/definition/v4/read controllers.v4.ObjectCategoryDefinitionController.readCategoryDefinition(fields:Option[String]) \ No newline at end of file diff --git a/taxonomy-service-sbt/project/build.properties b/taxonomy-service-sbt/project/build.properties new file mode 100644 index 000000000..c8fcab543 --- /dev/null +++ b/taxonomy-service-sbt/project/build.properties @@ -0,0 +1 @@ +sbt.version=1.6.2 diff --git a/taxonomy-service-sbt/project/plugins.sbt b/taxonomy-service-sbt/project/plugins.sbt new file mode 100644 index 000000000..a027c82c9 --- /dev/null +++ b/taxonomy-service-sbt/project/plugins.sbt @@ -0,0 +1,12 @@ +// The Play plugin +addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.7.9") + +// sbt-paradox, used for documentation +addSbtPlugin("com.lightbend.paradox" % "sbt-paradox" % "0.4.4") + +// Load testing tool: +// http://gatling.io/docs/2.2.2/extensions/sbt_plugin.html +addSbtPlugin("io.gatling" % "gatling-sbt" % "3.0.0") + +// Scala formatting: "sbt scalafmt" +addSbtPlugin("com.lucidchart" % "sbt-scalafmt" % "1.15") diff --git a/taxonomy-service-sbt/test/controllers/base/BaseSpec.scala b/taxonomy-service-sbt/test/controllers/base/BaseSpec.scala new file mode 100644 index 000000000..443b98a17 --- /dev/null +++ b/taxonomy-service-sbt/test/controllers/base/BaseSpec.scala @@ -0,0 +1,38 @@ +package controllers.base + +import com.typesafe.config.ConfigFactory +import modules.TestModule +import 
org.specs2.mutable.Specification +import play.api.inject.guice.GuiceApplicationBuilder +import play.api.libs.json.Json +import play.api.mvc.Result +import play.api.test.Helpers.{POST, contentAsString, contentType, defaultAwaitTimeout, route, status, _} +import play.api.test.{FakeHeaders, FakeRequest} + +import scala.concurrent.Future + +class BaseSpec extends Specification { + implicit val app = new GuiceApplicationBuilder() + .disable(classOf[modules.TaxonomyModule]) + .bindings(new TestModule) + .build + implicit val config = ConfigFactory.load() + + def post(apiURL: String, request: String, h: FakeHeaders = FakeHeaders(Seq())) + : Future[Result] = { + val headers = h.add(("content-type", "application/json")) + route(app, FakeRequest(POST, apiURL, headers, Json.toJson(Json.parse(request)))).get + } + + def isOK(response: Future[Result]) { + status(response) must equalTo(OK) + contentType(response) must beSome.which(_ == "application/json") + contentAsString(response) must contain(""""status":"successful"""") + } + + def hasClientError(response: Future[Result]) { + status(response) must equalTo(BAD_REQUEST) + contentType(response) must beSome.which(_ == "application/json") + contentAsString(response) must contain(""""err":"CLIENT_ERROR","status":"failed"""") + } +} diff --git a/taxonomy-service-sbt/test/controllers/v3/FrameworkSpec.scala b/taxonomy-service-sbt/test/controllers/v3/FrameworkSpec.scala new file mode 100644 index 000000000..94e4f9f62 --- /dev/null +++ b/taxonomy-service-sbt/test/controllers/v3/FrameworkSpec.scala @@ -0,0 +1,65 @@ +package controllers.v3 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.FakeRequest +import play.api.test.Helpers.{OK, status} +import play.api.test.Helpers._ + +@RunWith(classOf[JUnitRunner]) +class FrameworkSpec extends BaseSpec { + + "Framework Controller " should { + + "return success response for create framework API" in { + val controller = 
app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.createFramework()(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for update framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.updateFramework("do_123")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for retire framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.retire("do_123")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.readFramework("do_123", Option(""))(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for list framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.listFramework()(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for copy framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.copyFramework("do_123")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for publish framework API" in { + val controller = app.injector.instanceOf[controllers.v3.FrameworkController] + val result = controller.publish("do_123")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + } +} diff --git a/taxonomy-service-sbt/test/controllers/v3/HealthControllerSpec.scala b/taxonomy-service-sbt/test/controllers/v3/HealthControllerSpec.scala new file mode 100644 index 000000000..49f351a62 --- /dev/null +++ b/taxonomy-service-sbt/test/controllers/v3/HealthControllerSpec.scala @@ 
-0,0 +1,18 @@ +package controllers.v3 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.{FakeRequest, Helpers} +import play.api.test.Helpers.{OK, status} + +@RunWith(classOf[JUnitRunner]) +class HealthControllerSpec extends BaseSpec { + + "return api health status report - successful response" in { + val controller = app.injector.instanceOf[controllers.HealthController] + val result = controller.health()(FakeRequest()) + isOK(result) + status(result)(Helpers.defaultAwaitTimeout) must equalTo(OK) + } +} diff --git a/taxonomy-service-sbt/test/controllers/v4/ObjectCategoryDefinitionSpec.scala b/taxonomy-service-sbt/test/controllers/v4/ObjectCategoryDefinitionSpec.scala new file mode 100644 index 000000000..c88a693de --- /dev/null +++ b/taxonomy-service-sbt/test/controllers/v4/ObjectCategoryDefinitionSpec.scala @@ -0,0 +1,40 @@ +package controllers.v4 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.FakeRequest +import play.api.test.Helpers._ + +@RunWith(classOf[JUnitRunner]) +class ObjectCategoryDefinitionSpec extends BaseSpec { + + "ObjectCategoryDefinitionController" should { + + val controller = app.injector.instanceOf[ObjectCategoryDefinitionController] + + "return success response for create api" in { + val result = controller.create()(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read api" in { + val result = controller.read("test_content_all", None)(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for update api" in { + val result = controller.update("test_content_all")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read category api" in { + val result = controller.readCategoryDefinition(None)(FakeRequest()) + isOK(result) + status(result) must 
equalTo(OK) + } + } +} diff --git a/taxonomy-service-sbt/test/controllers/v4/ObjectCategorySpec.scala b/taxonomy-service-sbt/test/controllers/v4/ObjectCategorySpec.scala new file mode 100644 index 000000000..dc0440b24 --- /dev/null +++ b/taxonomy-service-sbt/test/controllers/v4/ObjectCategorySpec.scala @@ -0,0 +1,37 @@ +package controllers.v4 + +import controllers.base.BaseSpec +import org.junit.runner.RunWith +import org.specs2.runner.JUnitRunner +import play.api.test.FakeRequest +import play.api.test.Helpers.{OK, status, _} + + +@RunWith(classOf[JUnitRunner]) +class ObjectCategorySpec extends BaseSpec { + + "Category Controller " should { + + val controller = app.injector.instanceOf[ObjectCategoryController] + + "return success response for create API" in { + val result = controller.create()(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for update API" in { + val result = controller.update("obj-cat:test")(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + "return success response for read API" in { + val result = controller.read("obj-cat:test", None)(FakeRequest()) + isOK(result) + status(result) must equalTo(OK) + } + + } +} + diff --git a/taxonomy-service-sbt/test/modules/TestModule.scala b/taxonomy-service-sbt/test/modules/TestModule.scala new file mode 100644 index 000000000..71a40937b --- /dev/null +++ b/taxonomy-service-sbt/test/modules/TestModule.scala @@ -0,0 +1,28 @@ +package modules + +import com.google.inject.AbstractModule +import org.sunbird.actor.core.BaseActor +import org.sunbird.actors.ObjectCategoryActor +import org.sunbird.common.dto.{Request, Response, ResponseHandler} +import play.libs.akka.AkkaGuiceSupport +import utils.ActorNames + +import scala.concurrent.{ExecutionContext, Future} + +class TestModule extends AbstractModule with AkkaGuiceSupport { + override def configure(): Unit = { + bindActor(classOf[TestActor], ActorNames.HEALTH_ACTOR) + 
bindActor(classOf[TestActor], ActorNames.OBJECT_CATEGORY_ACTOR) + bindActor(classOf[TestActor], ActorNames.OBJECT_CATEGORY_DEFINITION_ACTOR) + println("Test Module is initialized...") + } +} + +class TestActor extends BaseActor { + + implicit val ec: ExecutionContext = getContext().dispatcher + + override def onReceive(request: Request): Future[Response] = { + Future(ResponseHandler.OK) + } +} diff --git a/vmsetup.sh b/vmsetup.sh index 83b01344c..acbdc056d 100644 --- a/vmsetup.sh +++ b/vmsetup.sh @@ -26,4 +26,4 @@ find ./ -type f -name "*.java" -print0 | xargs -0 sed -i -e 's/\/data\//~\//g' mvn scoverage:report JAVA_REPORT_PATHS=`find /home/circleci/project -iname jacoco.xml | awk 'BEGIN { RS = "" ; FS = "\n"; OFS = ","}{$1=$1; print $0}'` -mvn verify sonar:sonar -Dsonar.projectKey=project-sunbird_knowledge-platform -Dsonar.organization=project-sunbird -Dsonar.host.url=https://sonarcloud.io -Dsonar.coverage.exclusions=**/CustomProblemHandler.java -Dsonar.scala.coverage.reportPaths=/home/circleci/project/content-api/hierarchy-manager/target/scoverage.xml,/home/circleci/project/content-api/content-service/target/scoverage.xml,/home/circleci/project/target/scoverage.xml,/home/circleci/project/ontology-engine/graph-engine_2.11/target/scoverage.xml,/home/circleci/project/ontology-engine/parseq/target/scoverage.xml -Dsonar.coverage.jacoco.xmlReportPaths=${JAVA_REPORT_PATHS} +mvn verify sonar:sonar -Dsonar.projectKey=project-sunbird_knowledge-platform -Dsonar.organization=project-sunbird -Dsonar.host.url=https://sonarcloud.io -Dsonar.coverage.exclusions=**/CustomProblemHandler.java -Dsonar.scala.coverage.reportPaths=/home/circleci/project/content-api/hierarchy-manager/target/scoverage.xml,/home/circleci/project/content-api/content-service/target/scoverage.xml,/home/circleci/project/target/scoverage.xml,/home/circleci/project/ontology-engine/graph-engine_2.12/target/scoverage.xml,/home/circleci/project/ontology-engine/parseq/target/scoverage.xml 
-Dsonar.coverage.jacoco.xmlReportPaths=${JAVA_REPORT_PATHS} From 96390d49cc4ee1d4b408aa0626e1792e35cd98c3 Mon Sep 17 00:00:00 2001 From: Aiman Sharief Date: Wed, 13 Sep 2023 19:10:05 +0530 Subject: [PATCH 02/20] Issue #KN fix: Updated the mvn command to build for content service --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1e4e3d577..3770d2313 100644 --- a/README.md +++ b/README.md @@ -166,7 +166,7 @@ kafka-topics.sh --create --zookeeper zookeeper:2181 --replication-factor 1 --par 1. Go to the path: /knowledge-platform and run the below maven command to build the application. ```shell -mvn clean install -DskipTests +mvn clean install -DskipTests -DCLOUD_STORE_GROUP_ID=org.sunbird -DCLOUD_STORE_ARTIFACT_ID=cloud-store-sdk_2.12 -DCLOUD_STORE_VERSION=1.4.6 ``` 2. Go to the path: /knowledge-platform/content-api/content-service and run the below maven command to run the netty server. ```shell From 56ce3e5087a2c0618a1b65f21b5acd1d7f6f0945 Mon Sep 17 00:00:00 2001 From: aimansharief Date: Sun, 19 May 2024 14:14:04 +0530 Subject: [PATCH 03/20] Issue #KN-1041 fix: updated code to create vertices and edges --- .../sunbird/content/actors/ContentActor.scala | 18 +- .../sunbird/content/actors/EventActor.scala | 11 +- .../org/sunbird/graph/GraphService.scala | 20 +- .../org/sunbird/graph/util/CSPMetaUtil.scala | 9 +- ontology-engine/graph-dac-api/pom.xml | 15 + .../org/sunbird/graph/dac/model/Edges.java | 300 ++++++++++++++++++ .../org/sunbird/graph/dac/model/Vertex.java | 180 +++++++++++ ontology-engine/graph-dac/pom.xml | 140 ++++++++ .../src/conf/janusgraph-inmemory.properties | 35 ++ .../src/conf/remote-graph.properties | 3 + .../graph-dac/src/conf/remote-object.yaml | 5 + .../service/operation/EdgeOperations.scala | 55 ++++ .../service/operation/GremlinOperations.scala | 145 +++++++++ .../service/util/JanusConnectionUtil.scala | 32 ++ .../org/sunbird/graph/nodes/DataNode.scala | 45 ++- 
.../sunbird/graph/schema/DefinitionNode.scala | 56 +++- .../sunbird/graph/schema/IDefinition.scala | 6 +- .../schema/validator/BaseDefinitionNode.scala | 54 +++- .../schema/validator/SchemaValidator.scala | 14 +- ontology-engine/pom.xml | 1 + .../sunbird/actors/ObjectCategoryActor.scala | 2 +- 21 files changed, 1133 insertions(+), 13 deletions(-) create mode 100644 ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java create mode 100644 ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java create mode 100644 ontology-engine/graph-dac/pom.xml create mode 100644 ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties create mode 100644 ontology-engine/graph-dac/src/conf/remote-graph.properties create mode 100644 ontology-engine/graph-dac/src/conf/remote-object.yaml create mode 100644 ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala create mode 100644 ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala create mode 100644 ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala index 837810741..d0943406e 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala @@ -15,7 +15,7 @@ import org.sunbird.content.review.mgr.ReviewManager import org.sunbird.content.upload.mgr.UploadManager import org.sunbird.content.util._ import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.NodeUtil import 
org.sunbird.managers.HierarchyManager @@ -66,7 +66,7 @@ class ContentActor @Inject() (implicit oec: OntologyEngineContext, ss: StorageSe def create(request: Request): Future[Response] = { populateDefaultersForCreation(request) RequestUtil.restrictProperties(request) - DataNode.create(request, dataModifier).map(node => { + DataNode.creates(request, vertexDataModifier).map(node => { ResponseHandler.OK.put(ContentConstants.IDENTIFIER, node.getIdentifier).put("node_id", node.getIdentifier) .put("versionKey", node.getMetadata.get("versionKey")) }) @@ -296,6 +296,20 @@ class ContentActor @Inject() (implicit oec: OntologyEngineContext, ss: StorageSe node } + def vertexDataModifier(vertex: Vertex): Vertex = { + if (vertex.getMetadata.containsKey("trackable") && + vertex.getMetadata.getOrDefault("trackable", new java.util.HashMap[String, AnyRef]).asInstanceOf[java.util.Map[String, AnyRef]].containsKey("enabled") && + "Yes".equalsIgnoreCase(vertex.getMetadata.getOrDefault("trackable", new java.util.HashMap[String, AnyRef]).asInstanceOf[java.util.Map[String, AnyRef]].getOrDefault("enabled", "").asInstanceOf[String])) { + vertex.getMetadata.put("contentType", "Course") + } + + //TODO: Below fix to be reviewed when the fix for null to Stringify in ExternalStore.scala is implemented + if (vertex.getExternalData != null && vertex.getExternalData.containsKey("relational_metadata") && vertex.getExternalData.get("relational_metadata") == null) { + vertex.getExternalData.put("relational_metadata", "{}") + } + vertex + } + def getImportConfig(): ImportConfig = { val requiredProps = Platform.getStringList("import.required_props", java.util.Arrays.asList("name", "code", "mimeType", "contentType", "artifactUrl", "framework")).asScala.toList val validStages = Platform.getStringList("import.valid_stages", java.util.Arrays.asList("create", "upload", "review", "publish")).asScala.toList diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/actors/EventActor.scala 
b/content-api/content-actors/src/main/scala/org/sunbird/content/actors/EventActor.scala index a1d672b9b..718ed8b0f 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/actors/EventActor.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/actors/EventActor.scala @@ -6,7 +6,7 @@ import org.sunbird.common.dto.{Request, Response, ResponseHandler} import org.sunbird.common.exception.{ClientException, ResponseCode} import org.sunbird.content.util.ContentConstants import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.{Node, Relation} +import org.sunbird.graph.dac.model.{Node, Relation, Vertex} import org.sunbird.graph.nodes.DataNode import java.util @@ -79,4 +79,13 @@ class EventActor @Inject()(implicit oec: OntologyEngineContext, ss: StorageServi node } + override def vertexDataModifier(vertex: Vertex): Vertex = { + if (vertex.getMetadata.containsKey("trackable") && + vertex.getMetadata.getOrDefault("trackable", new java.util.HashMap[String, AnyRef]).asInstanceOf[java.util.Map[String, AnyRef]].containsKey("enabled") && + "Yes".equalsIgnoreCase(vertex.getMetadata.getOrDefault("trackable", new java.util.HashMap[String, AnyRef]).asInstanceOf[java.util.Map[String, AnyRef]].getOrDefault("enabled", "").asInstanceOf[String])) { + vertex.getMetadata.put("contentType", "Event") + } + vertex + } + } \ No newline at end of file diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala index e090a8591..6cfdc0652 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala @@ -3,15 +3,19 @@ package org.sunbird.graph import org.sunbird.common.Platform import org.sunbird.common.dto.{Property, Request, Response, ResponseHandler} import 
org.sunbird.common.exception.ResponseCode -import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph} +import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph, Vertex} import org.sunbird.graph.external.ExternalPropsManager import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} import org.sunbird.graph.util.CSPMetaUtil +import org.sunbird.janus.service.operation.{EdgeOperations, GremlinOperations} import java.lang import scala.concurrent.{ExecutionContext, Future} class GraphService { + + val gremlinOps = new GremlinOperations() + val edgeOps = new EdgeOperations() implicit val ec: ExecutionContext = ExecutionContext.global val isrRelativePathEnabled: lang.Boolean = Platform.getBoolean("cloudstorage.metadata.replace_absolute_path", false) @@ -23,6 +27,15 @@ class GraphService { NodeAsyncOperations.addNode(graphId, node).map(resNode => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) } + def addVertex(graphId: String, node: Vertex): Future[Vertex] = { + if (isrRelativePathEnabled) { + val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) + node.setMetadata(metadata) + } + gremlinOps.addNode(graphId, node).map(resNode => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) + + } + def upsertNode(graphId: String, node: Node, request: Request): Future[Node] = { if(isrRelativePathEnabled) { val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) @@ -107,6 +120,11 @@ class GraphService { GraphAsyncOperations.createRelation(graphId, relationMap) } + def createEdges(graphId: String, relationMap: java.util.List[java.util.Map[String, AnyRef]]) = { + edgeOps.createEdges(graphId, relationMap) + } + + def getSubGraph(graphId: String, nodeId: String, depth: Int): Future[SubGraph] = { GraphAsyncOperations.getSubGraph(graphId, nodeId, depth) } diff --git 
a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala index c8c4b51d3..d8771da22 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/util/CSPMetaUtil.scala @@ -1,13 +1,12 @@ package org.sunbird.graph.util import java.util - import org.apache.commons.collections4.MapUtils import org.apache.commons.lang3.StringUtils import org.slf4j.LoggerFactory import org.sunbird.common.dto.Property import org.sunbird.common.{JsonUtils, Platform} -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import scala.collection.JavaConverters._ import scala.collection.immutable.Map @@ -43,6 +42,12 @@ object CSPMetaUtil { node } + def updateAbsolutePath(vertex: Vertex): Vertex = { + val metadata = updateAbsolutePath(vertex.getMetadata) + vertex.setMetadata(metadata) + vertex + } + def updateAbsolutePath(nodes: java.util.List[Node]): java.util.List[Node] = { nodes.asScala.toList.map(node => { updateAbsolutePath(node) diff --git a/ontology-engine/graph-dac-api/pom.xml b/ontology-engine/graph-dac-api/pom.xml index 0c51b8f68..2bb1e628a 100644 --- a/ontology-engine/graph-dac-api/pom.xml +++ b/ontology-engine/graph-dac-api/pom.xml @@ -86,6 +86,21 @@ 1.17.6 test
+ + org.apache.tinkerpop + gremlin-driver + 3.7.2 + + + org.janusgraph + janusgraph-core + 1.0.0 + + + org.janusgraph + janusgraph-inmemory + 1.0.0 +
diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java new file mode 100644 index 000000000..72eefe38b --- /dev/null +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java @@ -0,0 +1,300 @@ +package org.sunbird.graph.dac.model; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import org.apache.commons.collections4.MapUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.tinkerpop.gremlin.structure.Edge; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.neo4j.graphdb.Node; +import org.sunbird.common.exception.ServerException; +import org.sunbird.graph.common.enums.SystemProperties; +import org.sunbird.graph.dac.enums.GraphDACErrorCodes; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class Edges implements Serializable { + + private static final long serialVersionUID = -7207054262120122453L; + private String id; + private String graphId; + private String relationType; + private String startNodeId; + private String endNodeId; + private String startNodeName; + private String endNodeName; + private String startNodeType; + private String endNodeType; + private String startNodeObjectType; + private String endNodeObjectType; + private Map metadata; + private Map startNodeMetadata; + private Map endNodeMetadata; + + public Edges() { + + } + + public Edges(String startNodeId, String relationType, String endNodeId) { + this.startNodeId = startNodeId; + this.endNodeId = endNodeId; + this.relationType = relationType; + } + + public Edges(String graphId, Edge edge) { + if (null == edge) + throw new ServerException(GraphDACErrorCodes.ERR_GRAPH_NULL_DB_REL.name(), + "Failed to create relation object. 
Relation from database is null."); + this.graphId = graphId; + + Vertex startNode = edge.outVertex(); + Vertex endNode = edge.inVertex(); + this.startNodeId = startNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + this.endNodeId = endNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + this.startNodeName = getName(startNode); + this.endNodeName = getName(endNode); + this.startNodeType = getNodeType(startNode); + this.endNodeType = getNodeType(endNode); + this.startNodeObjectType = getObjectType(startNode); + this.endNodeObjectType = getObjectType(endNode); + this.relationType = edge.label(); + this.metadata = new HashMap(); + this.startNodeMetadata = getNodeMetadata(edge.outVertex()); + this.endNodeMetadata = getNodeMetadata(edge.inVertex()); + edge.keys().forEach(key -> this.metadata.put(key, edge.value(key))); + } + + public Edges(String graphId, Edge edge, Map startNodeMap, Map endNodeMap) { + if (null == edge) + throw new ServerException(GraphDACErrorCodes.ERR_GRAPH_NULL_DB_REL.name(), + "Failed to create relation object. 
Relation from database is null."); + this.id = edge.id().toString(); + this.graphId = graphId; + Vertex startNode = startNodeMap.get(edge.outVertex().id()); + Vertex endNode = endNodeMap.get(edge.inVertex().id()); + this.startNodeId = startNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + this.endNodeId = endNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + this.startNodeName = getName(startNode); + this.endNodeName = getName(endNode); + this.startNodeType = getNodeType(startNode); + this.endNodeType = getNodeType(endNode); + this.startNodeObjectType = getObjectType(startNode); + this.endNodeObjectType = getObjectType(endNode); + this.relationType = edge.label(); + this.metadata = new HashMap(); + this.startNodeMetadata = getNodeMetadata(startNode); + this.endNodeMetadata = getNodeMetadata(endNode); + edge.keys().forEach(key -> { + Object value = edge.value(key); + if(null != value){ + if (value instanceof List) { + List list = (List) value; + if (!list.isEmpty()) { + Object obj = list.get(0); + if (obj instanceof String) { + this.metadata.put(key, list.toArray(new String[0])); + } else if (obj instanceof Number) { + this.metadata.put(key, list.toArray(new Number[0])); + } else if (obj instanceof Boolean) { + this.metadata.put(key, list.toArray(new Boolean[0])); + } else { + this.metadata.put(key, list.toArray(new Object[0])); + } + } + } else + this.metadata.put(key, value); + } + }); + } + + + private String getName(Node node) { + String name = (String) node.getProperty("name", null); + if (StringUtils.isBlank(name)) { + name = (String) node.getProperty("title", null); + if (StringUtils.isBlank(name)) { + name = (String) node.getProperty(SystemProperties.IL_FUNC_OBJECT_TYPE.name(), null); + if (StringUtils.isBlank(name)) + name = (String) node.getProperty(SystemProperties.IL_SYS_NODE_TYPE.name(), null); + } + } + return name; + } + + private String getName(Vertex node) { + String name = 
node.property("name").isPresent() ? node.property("name").value().toString() : null; + if (StringUtils.isBlank(name)) { + name = node.property("title").isPresent() ? node.property("title").value().toString() : null; + if (StringUtils.isBlank(name)) { + name = node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).value().toString() : null; + if (StringUtils.isBlank(name)) + name = node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).value().toString() : null; + } + } + return name; + } + + + private String getNodeType(Vertex node) { + return node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).value().toString() : null; + } + + private String getObjectType(Vertex node) { + return node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).value().toString() : null; + } + + private Map getNodeMetadata(Vertex vertex) { + Map metadata = new HashMap<>(); + if (vertex != null) { + vertex.keys().forEach(key -> { + Object value = vertex.value(key); + if (value instanceof List) { + List list = (List) value; + if (!list.isEmpty()) { + Object firstElement = list.get(0); + if (firstElement instanceof String) { + metadata.put(key, list.toArray(new String[0])); + } else if (firstElement instanceof Number) { + metadata.put(key, list.toArray(new Number[0])); + } else if (firstElement instanceof Boolean) { + metadata.put(key, list.toArray(new Boolean[0])); + } else { + metadata.put(key, list.toArray(new Object[0])); + } + } + } else { + metadata.put(key, value); + } + }); + } + return metadata; + } + + public String getRelationType() { + return relationType; + } + + public void setRelationType(String relationType) { + this.relationType = relationType; + } + + public String getStartNodeId() { 
+ return startNodeId; + } + + public void setStartNodeId(String startNodeId) { + this.startNodeId = startNodeId; + } + + public String getEndNodeId() { + return endNodeId; + } + + public void setEndNodeId(String endNodeId) { + this.endNodeId = endNodeId; + } + + public Map getMetadata() { + if (!MapUtils.isEmpty(metadata)) + return metadata; + else + return new HashMap(); + } + + public Edges updateMetadata(Map metadata) { + if (!MapUtils.isEmpty(metadata)) + this.metadata = metadata; + return this; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public String getGraphId() { + return graphId; + } + + public void setGraphId(String graphId) { + this.graphId = graphId; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getStartNodeName() { + return startNodeName; + } + + public void setStartNodeName(String startNodeName) { + this.startNodeName = startNodeName; + } + + public String getEndNodeName() { + return endNodeName; + } + + public void setEndNodeName(String endNodeName) { + this.endNodeName = endNodeName; + } + + public String getStartNodeType() { + return startNodeType; + } + + public void setStartNodeType(String startNodeType) { + this.startNodeType = startNodeType; + } + + public String getEndNodeType() { + return endNodeType; + } + + public void setEndNodeType(String endNodeType) { + this.endNodeType = endNodeType; + } + + public String getStartNodeObjectType() { + return startNodeObjectType; + } + + public void setStartNodeObjectType(String startNodeObjectType) { + this.startNodeObjectType = startNodeObjectType; + } + + public String getEndNodeObjectType() { + return endNodeObjectType; + } + + public void setEndNodeObjectType(String endNodeObjectType) { + this.endNodeObjectType = endNodeObjectType; + } + + @JsonIgnore + public Map getStartNodeMetadata() { + return startNodeMetadata; + } + + @JsonIgnore + public void setStartNodeMetadata(Map 
startNodeMetadata) { + this.startNodeMetadata = startNodeMetadata; + } + + @JsonIgnore + public Map getEndNodeMetadata() { + return endNodeMetadata; + } + + @JsonIgnore + public void setEndNodeMetadata(Map endNodeMetadata) { + this.endNodeMetadata = endNodeMetadata; + } + +} diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java new file mode 100644 index 000000000..5d99cf387 --- /dev/null +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java @@ -0,0 +1,180 @@ +package org.sunbird.graph.dac.model; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.sunbird.graph.common.enums.SystemProperties; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class Vertex implements Serializable { + + private static final long serialVersionUID = 252337826576516976L; + + private long id; + private String graphId; + private String identifier; + private String nodeType; + private String objectType; + private Map metadata; + private List outRelations; + private List inRelations; + private List addedRelations; + private List deletedRelations; + private Map relationNodes; + private Map externalData; + + public Vertex() { + addedRelations = new ArrayList<>(); + deletedRelations = new ArrayList<>(); + } + + public Vertex(String identifier, String nodeType, String objectType) { + this.identifier = identifier; + this.nodeType = nodeType; + this.objectType = objectType; + addedRelations = new ArrayList<>(); + deletedRelations = new ArrayList<>(); + } + + public Vertex(String graphId, Map metadata) { + this.graphId = graphId; + this.metadata = metadata; + if (null != metadata && !metadata.isEmpty()) { + if (null != 
metadata.get(SystemProperties.IL_UNIQUE_ID.name())) + this.identifier = metadata.get(SystemProperties.IL_UNIQUE_ID.name()).toString(); + if (null != metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name())) + this.nodeType = metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name()).toString(); + if (null != metadata.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name())) + this.objectType = metadata.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).toString(); + } + addedRelations = new ArrayList<>(); + deletedRelations = new ArrayList<>(); + } + + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + @JsonIgnore + public String getGraphId() { + return graphId; + } + + public void setGraphId(String graphId) { + this.graphId = graphId; + } + + public String getIdentifier() { + if (StringUtils.isBlank(identifier) && null != metadata) + this.identifier = (String) metadata.get(SystemProperties.IL_UNIQUE_ID.name()); + return identifier; + } + + public void setIdentifier(String identifier) { + this.identifier = identifier; + } + + public String getNodeType() { + if (StringUtils.isBlank(nodeType) && null != metadata) + this.nodeType = (String) metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name()); + return nodeType; + } + + public void setNodeType(String nodeType) { + this.nodeType = nodeType; + } + + public String getObjectType() { + if (StringUtils.isBlank(objectType) && null != metadata) + this.objectType = (String) metadata.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name()); + return objectType; + } + + public void setObjectType(String objectType) { + this.objectType = objectType; + } + + public Map getMetadata() { + return metadata; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public List getOutRelations() { + if (!CollectionUtils.isEmpty(outRelations)) + return outRelations; + else return new ArrayList<>(); + } + + public void setOutRelations(List outRelations) { + this.outRelations = 
outRelations; + } + + public List getInRelations() { + if (!CollectionUtils.isEmpty(inRelations)) + return inRelations; + else return new ArrayList<>(); + } + + public void setInRelations(List inRelations) { + this.inRelations = inRelations; + } + + public List getAddedRelations() { + return addedRelations; + } + + public void setAddedRelations(List addedRelations) { + if(CollectionUtils.isEmpty(this.addedRelations)) + this.addedRelations = new ArrayList<>(); + this.addedRelations.addAll(addedRelations); + } + + public List getDeletedRelations() { + return deletedRelations; + } + + public void setDeletedRelations(List deletedRelations) { + this.deletedRelations = deletedRelations; + } + + public Map getExternalData() { + return externalData; + } + + public Map getRelationNodes() { + return relationNodes; + } + + public void setRelationNodes(Map relationNodes) { + this.relationNodes = relationNodes; + } + + public void setExternalData(Map externalData) { + this.externalData = externalData; + } + + public Vertex getNode() { + return (Vertex) this; + } + + public Vertex getRelationNode(String identifier) { + return relationNodes.get(identifier); + } + + public String getArtifactUrl() { + return (String) this.metadata.getOrDefault("artifactUrl", ""); + } + +} diff --git a/ontology-engine/graph-dac/pom.xml b/ontology-engine/graph-dac/pom.xml new file mode 100644 index 000000000..c311d6e19 --- /dev/null +++ b/ontology-engine/graph-dac/pom.xml @@ -0,0 +1,140 @@ + + 4.0.0 + + ontology-engine + org.sunbird + 1.0-SNAPSHOT + + graph-dac + + + org.scala-lang + scala-library + ${scala.version} + + + org.sunbird + graph-dac-api + + + org.apache.commons + commons-lang3 + + + 1.0-SNAPSHOT + jar + + + org.sunbird + graph-common + 1.0-SNAPSHOT + jar + + + org.apache.commons + commons-lang3 + 3.3.2 + + + io.netty + netty-codec + 4.1.68.Final + + + org.scala-lang.modules + scala-java8-compat_${scala.maj.version} + 0.9.0 + + + org.powermock + powermock-api-mockito + 1.7.4 + test + + + 
org.powermock + powermock-module-junit4 + 1.7.4 + test + + + org.testcontainers + testcontainers + 1.17.6 + test + + + org.apache.tinkerpop + gremlin-driver + 3.7.2 + + + org.janusgraph + janusgraph-core + 1.0.0 + + + org.janusgraph + janusgraph-inmemory + 1.0.0 + + + + + src/main/scala + + + net.alchim31.maven + scala-maven-plugin + 3.2.2 + + ${scala.version} + false + + + + scala-compile-first + process-resources + + add-source + compile + + + + scala-test-compile + process-test-resources + + testCompile + + + + + + org.scalatest + scalatest-maven-plugin + 2.0.0 + + + test + test + + test + + + + + + org.scoverage + scoverage-maven-plugin + ${scoverage.plugin.version} + + ${scala.version} + true + true + + + + + + \ No newline at end of file diff --git a/ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties b/ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties new file mode 100644 index 000000000..affaf236e --- /dev/null +++ b/ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties @@ -0,0 +1,35 @@ +# Copyright 2020 JanusGraph Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# JanusGraph configuration sample: in-memory +# +# This file connects to an in-memory storage backend + +# The implementation of graph factory that will be used by gremlin server +# +# Default: org.janusgraph.core.JanusGraphFactory +# Data Type: String +# Mutability: LOCAL +gremlin.graph=org.janusgraph.core.JanusGraphFactory + +# The primary persistence provider used by JanusGraph. This is required. +# It should be set one of JanusGraph's built-in shorthand names for its +# standard storage backends (shorthands: berkeleyje, cql, hbase, inmemory, +# scylla) or to the full package and classname of a custom/third-party +# StoreManager implementation. +# +# Default: (no default value) +# Data Type: String +# Mutability: LOCAL +storage.backend=inmemory diff --git a/ontology-engine/graph-dac/src/conf/remote-graph.properties b/ontology-engine/graph-dac/src/conf/remote-graph.properties new file mode 100644 index 000000000..eef10dbd9 --- /dev/null +++ b/ontology-engine/graph-dac/src/conf/remote-graph.properties @@ -0,0 +1,3 @@ +gremlin.remote.remoteConnectionClass=org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection +gremlin.remote.driver.clusterFile=/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/conf/remote-object.yaml +gremlin.remote.driver.sourceName=g \ No newline at end of file diff --git a/ontology-engine/graph-dac/src/conf/remote-object.yaml b/ontology-engine/graph-dac/src/conf/remote-object.yaml new file mode 100644 index 000000000..8aecad8d0 --- /dev/null +++ b/ontology-engine/graph-dac/src/conf/remote-object.yaml @@ -0,0 +1,5 @@ +hosts: [localhost] +port: 8182 +serializer: { + className: org.apache.tinkerpop.gremlin.util.ser.GraphBinaryMessageSerializerV1, + config: { ioRegistries: [org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry] }} \ No newline at end of file diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala 
b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala new file mode 100644 index 000000000..6766830e4 --- /dev/null +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala @@ -0,0 +1,55 @@ +package org.sunbird.janus.service.operation + +import org.apache.commons.collections4.CollectionUtils +import org.apache.commons.lang3.StringUtils +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource +import org.apache.tinkerpop.gremlin.structure.{Edge, Vertex} +import org.janusgraph.core.JanusGraph +import org.sunbird.common.dto.{Response, ResponseHandler} +import org.sunbird.common.exception.ClientException +import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} +import org.sunbird.janus.service.util.JanusConnectionUtil + +import java.util +import scala.collection.JavaConverters.asScalaBufferConverter +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Future + +class EdgeOperations { + + val graphConnection = new JanusConnectionUtil + def createEdges(graphId: String, relationData: util.List[util.Map[String, AnyRef]]): Future[Response] = { + Future{ + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") + if (CollectionUtils.isEmpty(relationData)) + throw new ClientException(DACErrorCodeConstants.INVALID_RELATION.name, + DACErrorMessageConstants.INVALID_NODE + " | [Create Relation Operation Failed.]") + + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGts + val graph: JanusGraph = graphConnection.getGraph + + createBulkRelations(g, graphId, relationData) + ResponseHandler.OK() + } + } + + def createBulkRelations(g: GraphTraversalSource, graphId: String, relationData: util.List[util.Map[String, AnyRef]]): Unit = 
{ + for (row <- relationData.asScala) { + val startNodeId = row.get("startNodeId").toString + val endNodeId = row.get("endNodeId").toString + val relation = row.get("relation").toString + val relMetadata = row.get("relMetadata").asInstanceOf[Map[String, AnyRef]] + + val startNode: Vertex = g.V().hasLabel(graphId).has("IL_UNIQUE_ID", startNodeId).next() + val endNode: Vertex = g.V().hasLabel(graphId).has("IL_UNIQUE_ID", endNodeId).next() + + val edge: Edge = startNode.addEdge(relation, endNode) + for (key <- relMetadata.keySet) { + edge.property(key, relMetadata.get(key).toString) + } + } + } +} diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala new file mode 100644 index 000000000..3017bdf65 --- /dev/null +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala @@ -0,0 +1,145 @@ +package org.sunbird.janus.service.operation + +import org.apache.commons.lang3.{BooleanUtils, StringUtils} +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.valueMap +import org.janusgraph.core.JanusGraph +import org.sunbird.common.exception.ClientException +import org.sunbird.common.{DateUtils, JsonUtils} +import org.sunbird.graph.common.Identifier +import org.sunbird.graph.common.enums.{AuditProperties, GraphDACParams, SystemProperties} +import org.sunbird.graph.dac.model.{Node, Vertex} +import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} +import org.sunbird.janus.service.util.JanusConnectionUtil +import org.sunbird.telemetry.logger.TelemetryManager + +import java.util +import scala.collection.convert.ImplicitConversions.`map AsScala` +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Future + +class 
GremlinOperations { + + val graphConnection = new JanusConnectionUtil + def addNode(graphId: String, node: Vertex): Future[Vertex] = { Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") + + if (null == node) + throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, + DACErrorMessageConstants.INVALID_NODE + " | [Create Node Operation Failed.]") + + val parameterMap = new util.HashMap[String, AnyRef] + parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put(GraphDACParams.node.name, setPrimitiveData(node.getMetadata)) + prepareMap(parameterMap) + + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGts + val graph: JanusGraph = graphConnection.getGraph + + val vertex = g.addV(node.getGraphId) + val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] + + finalMap.foreach { case (key, value) => vertex.property(key, value) } + vertex.as("ee").next() + + val retrieveVertex = g.V().select("ee").by(valueMap()).next() + println(" vertex details found !" 
+ retrieveVertex) + + node.setGraphId("domain") + node.setIdentifier("do_12332409i") + node.getMetadata.put(GraphDACParams.versionKey.name, "1023535325") + + node + } + + } + + def prepareMap(parameterMap: util.Map[String, AnyRef]) = { + if(null != parameterMap){ + val graphId = parameterMap.getOrDefault("graphId","").asInstanceOf[String] + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Create Node' Query Generation Failed.]") + + val node: Node = parameterMap.get(GraphDACParams.node.name).asInstanceOf[Node] + if (null == node) + throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, + DACErrorMessageConstants.INVALID_NODE + " | [Create Node Query Generation Failed.]") + + val date: String = DateUtils.formatCurrentDate + + val mpMap :util.Map[String, AnyRef] = getMetadataCypherQueryMap(node) + val spMap :util.Map[String, AnyRef] = getSystemPropertyMap(node, date) + val apMap :util.Map[String, AnyRef] = getAuditPropertyMap(node, date, false) + val vpMap :util.Map[String, AnyRef] = getVersionPropertyMap(node, date) + + parameterMap.put(GraphDACParams.paramValueMap.name, mpMap) + parameterMap.put(GraphDACParams.paramValueMap.name, spMap) + parameterMap.put(GraphDACParams.paramValueMap.name, apMap) + parameterMap.put(GraphDACParams.paramValueMap.name, vpMap) + + println("parameterMap ->"+parameterMap) + } + } + + def getMetadataCypherQueryMap(node: Node): util.Map[String, AnyRef] = { + val metadataPropertyMap = new util.HashMap[String, AnyRef] + if (null != node && null != node.getMetadata && !node.getMetadata.isEmpty) { + node.getMetadata.foreach { case (key, value) => metadataPropertyMap.put(key, value) } + } + metadataPropertyMap + } + def getSystemPropertyMap(node: Node, date: String): util.Map[String, AnyRef] = { + val systemPropertyMap = new util.HashMap[String, AnyRef] + if (null != node && StringUtils.isNotBlank(date)) { + if 
(StringUtils.isBlank(node.getIdentifier)) + node.setIdentifier(Identifier.getIdentifier(node.getGraphId, Identifier.getUniqueIdFromTimestamp)) + systemPropertyMap.put(SystemProperties.IL_UNIQUE_ID.name, node.getIdentifier) + systemPropertyMap.put(SystemProperties.IL_SYS_NODE_TYPE.name, node.getNodeType) + systemPropertyMap.put(SystemProperties.IL_FUNC_OBJECT_TYPE.name, node.getObjectType) + } + systemPropertyMap + } + + def getAuditPropertyMap(node: Node, date: String, isUpdateOnly: Boolean):util.Map[String, AnyRef] = { + val auditPropertyMap = new util.HashMap[String, AnyRef] + if(null != node && StringUtils.isNotBlank(date)) { + if (BooleanUtils.isFalse(isUpdateOnly)) { + auditPropertyMap.put(AuditProperties.createdOn.name, + if (node.getMetadata.containsKey(AuditProperties.createdOn.name)) + node.getMetadata.get(AuditProperties.createdOn.name) + else date) + } + if (null != node.getMetadata && null == node.getMetadata.get(GraphDACParams.SYS_INTERNAL_LAST_UPDATED_ON.name)) + auditPropertyMap.put(AuditProperties.lastUpdatedOn.name, date) + } + auditPropertyMap + } + + def getVersionPropertyMap(node: Node, date: String): util.Map[String, AnyRef] = { + val versionPropertyMap = new util.HashMap[String, AnyRef] + if (null != node && StringUtils.isNotBlank(date)) + versionPropertyMap.put(GraphDACParams.versionKey.name, DateUtils.parse(date).getTime.toString) + versionPropertyMap + } + + def setPrimitiveData(metadata: util.Map[String, AnyRef]): util.Map[String, AnyRef] = { + metadata.forEach((key, value) => { + try { + value match { + case v: util.Map[String, AnyRef] => metadata.put(key, JsonUtils.serialize(v)) + case v: util.List[util.Map[String, AnyRef]] if (!v.isEmpty && v.isInstanceOf[util.Map[String, AnyRef]]) => metadata.put(key, JsonUtils.serialize(v)) + case _ => + } + } catch { + case e: Exception => TelemetryManager.error(s"Exception Occurred While Processing Primitive Data Types | Exception is : ${e.getMessage}", e) + } + }) + metadata + } + + +} diff --git 
a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala new file mode 100644 index 000000000..56fe5bb4a --- /dev/null +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala @@ -0,0 +1,32 @@ +package org.sunbird.janus.service.util + +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource +import org.janusgraph.core.{JanusGraph, JanusGraphFactory} +class JanusConnectionUtil { + + var g: GraphTraversalSource = _ + var graph: JanusGraph = _ + + @throws[Exception] + def initialiseGraphClient(): Unit = { + if (null == g) g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/conf/remote-graph.properties") + if (null == graph) graph = JanusGraphFactory.open("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties") + + println("GraphTraversalSource: " + g) + println("graph: " + graph) + } + + @throws[Exception] + def getGts: GraphTraversalSource = g + + @throws[Exception] + def getGraph: JanusGraph = graph + + @throws[Exception] + def closeClient(): Unit = { + g.close() + graph.close() + } + +} diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala index 1cd022a11..da9d3df0e 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala @@ -10,7 +10,7 @@ import org.sunbird.common.dto.{Request, Response} import org.sunbird.common.exception.{ClientException, ErrorCodes, ResponseCode} import 
org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.enums.SystemProperties -import org.sunbird.graph.dac.model.{Filter, MetadataCriterion, Node, Relation, SearchConditions, SearchCriteria} +import org.sunbird.graph.dac.model.{Edges, Vertex, Filter, MetadataCriterion, Node, Relation, SearchConditions, SearchCriteria} import org.sunbird.graph.schema.{DefinitionDTO, DefinitionFactory, DefinitionNode} import org.sunbird.parseq.Task @@ -36,6 +36,19 @@ object DataNode { }).flatMap(f => f) } + @throws[Exception] + def creates(request: Request, dataModifier: (Vertex) => Vertex = defaultVertexDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + DefinitionNode.validates(request).map(vertex => { + val response = oec.graphService.addVertex(request.graphId, dataModifier(vertex)) + response.map(vertex => DefinitionNode.postProcessor(request, vertex)).map(result => { + val futureList = Task.parallel[Response]( + saveExternalProperties(vertex.getIdentifier, vertex.getExternalData, request.getContext, request.getObjectType), + createEdges(request.graphId, vertex, request.getContext)) + futureList.map(list => result) + }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } + }).flatMap(f => f) + } + @throws[Exception] def update(request: Request, dataModifier: (Node) => Node = defaultDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { val identifier: String = request.getContext.get("identifier").asInstanceOf[String] @@ -141,6 +154,15 @@ object DataNode { } } + private def createEdges(graphId: String, node: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { + val edges: util.List[Edges] = node.getAddedRelations + if (CollectionUtils.isNotEmpty(edges)) { + oec.graphService.createEdges(graphId, getEdgesMap(edges)) + } else { + Future(new Response) + } + } + private def 
populateExternalProperties(fields: List[String], node: Node, request: Request, externalProps: List[String])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { if(StringUtils.equalsIgnoreCase(request.get("mode").asInstanceOf[String], "edit")) request.put("identifier", node.getIdentifier) @@ -185,11 +207,32 @@ object DataNode { } list } + + private def getEdgesMap(edges: util.List[Edges]): java.util.List[util.Map[String, AnyRef]] = { + val list = new util.ArrayList[util.Map[String, AnyRef]] + for (edge <- edges) { + if ((StringUtils.isNotBlank(edge.getStartNodeId) && StringUtils.isNotBlank(edge.getEndNodeId)) && StringUtils.isNotBlank(edge.getRelationType)) { + val map = new util.HashMap[String, AnyRef] + map.put("startNodeId", edge.getStartNodeId) + map.put("endNodeId", edge.getEndNodeId) + map.put("relation", edge.getRelationType) + if (MapUtils.isNotEmpty(edge.getMetadata)) map.put("relMetadata", edge.getMetadata) + else map.put("relMetadata", new util.HashMap[String, AnyRef]()) + list.add(map) + } + else throw new ClientException("ERR_INVALID_RELATION_OBJECT", "Invalid Relation Object Found.") + } + list + } private def defaultDataModifier(node: Node) = { node } + private def defaultVertexDataModifier(vertex: Vertex) = { + vertex + } + @throws[Exception] def systemUpdate(request: Request, nodeList: util.List[Node], hierarchyKey: String, hierarchyFunc: Option[Request => Future[Response]] = None)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { val data: util.Map[String, AnyRef] = request.getRequest diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala index 7471d5635..1cb859586 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala +++ 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala @@ -10,7 +10,7 @@ import org.sunbird.cache.impl.RedisCache import org.sunbird.common.JsonUtils import org.sunbird.common.dto.Request import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.{Node, Relation} +import org.sunbird.graph.dac.model.{Node, Relation, Vertex} import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ @@ -31,6 +31,19 @@ object DefinitionNode { definition.validate(inputNode, "create", setDefaultValue) recoverWith { case e: CompletionException => throw e.getCause } } + def validates(request: Request, setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(request.getRequest.getOrDefault("primaryCategory", "").asInstanceOf[String], + schemaName, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, objectCategoryDefinition) + definition.validateRequest(request) + val inputNode = definition.getVertex(request.getRequest) + updateEdgeMetadata(inputNode) + definition.validates(inputNode, "create", setDefaultValue) recoverWith { case e: CompletionException => throw e.getCause } + } + def getExternalProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { val definition = DefinitionFactory.getDefinition(graphId, schemaName, version, ocd) definition.getExternalProps() @@ -146,6 +159,31 @@ 
object DefinitionNode { node } + def postProcessor(request: Request, vertex: Vertex)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Vertex = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val primaryCategory: String = if (null != vertex.getMetadata) vertex.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String] else "" + val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(primaryCategory, schemaName, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) + val categoryDefinition = DefinitionFactory.getDefinition(graphId, schemaName, version, objectCategoryDefinition) + val edgeKey = categoryDefinition.getEdgeKey() + if (null != edgeKey && !edgeKey.isEmpty) { + val metadata = vertex.getMetadata + val cacheKey = "edge_" + request.getObjectType.toLowerCase() + val data = metadata.containsKey(edgeKey) match { + case true => List[String](metadata.get(edgeKey).asInstanceOf[String]) + case _ => List[String]() + } + if (!data.isEmpty) { + metadata.get("status") match { + case "Live" => RedisCache.addToList(cacheKey, data) + case "Retired" => RedisCache.removeFromList(cacheKey, data) + } + } + } + vertex + } + private def setRelationship(dbNode: Node, inputNode: Node, dbRels: util.Map[String, util.List[Relation]]): Unit = { var addRels: util.List[Relation] = new util.ArrayList[Relation]() var delRels: util.List[Relation] = new util.ArrayList[Relation]() @@ -206,6 +244,22 @@ object DefinitionNode { node.setAddedRelations(rels) } + def updateEdgeMetadata(vertex: Vertex): Unit = { + var relOcr = new util.HashMap[String, Integer]() + val rels = vertex.getAddedRelations + for (rel <- rels) { + val relKey = rel.getStartNodeObjectType + rel.getRelationType + rel.getEndNodeObjectType + if 
(relOcr.containsKey(relKey)) + relOcr.put(relKey, relOcr.get(relKey) + 1) + else relOcr.put(relKey, 1) + if (relKey.contains("hasSequenceMember")) { + val index = if (rel.getMetadata.containsKey("index")) rel.getMetadata.get("index").asInstanceOf[Integer] else relOcr.get(relKey) + rel.setMetadata(Map[String, AnyRef]("IL_SEQUENCE_INDEX" -> index).asJava) + } else rel.setMetadata(new util.HashMap[String, AnyRef]()) + } + vertex.setAddedRelations(rels) + } + def resetJsonProperties(node: Node, graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): Node = { val jsonPropList = fetchJsonProps(graphId, version, schemaName, ocd) if (!jsonPropList.isEmpty) { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala index b173f0fe2..06e867d30 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala @@ -1,7 +1,7 @@ package org.sunbird.graph.schema import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.schema.{ISchemaValidator, SchemaValidatorFactory} import scala.concurrent.{ExecutionContext, Future} @@ -12,9 +12,13 @@ abstract class IDefinition(graphId: String, schemaName: String, version: String def getNode(input: java.util.Map[String, AnyRef]): Node + def getVertex(input: java.util.Map[String, AnyRef]): Vertex @throws[Exception] def validate(node: Node, operation: String = "update", setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] + @throws[Exception] + def validates(vertex: Vertex, operation: String = "update", 
setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] + @throws[Exception] def getNode(identifier: String, operation: String = "read", mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala index 10a11200d..8430dd0da 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala @@ -1,14 +1,13 @@ package org.sunbird.graph.schema.validator import java.util - import org.apache.commons.collections4.{CollectionUtils, MapUtils} import org.apache.commons.lang3.StringUtils import org.sunbird.common.dto.Request import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.Identifier import org.sunbird.graph.dac.enums.SystemNodeTypes -import org.sunbird.graph.dac.model.{Node, Relation} +import org.sunbird.graph.dac.model.{Edges, Node, Relation, Vertex} import org.sunbird.graph.schema.{IDefinition, ObjectCategoryDefinition} import scala.collection.JavaConverters._ @@ -53,11 +52,33 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = node } + override def getVertex(input: java.util.Map[String, Object]): Vertex = { + val result = schemaValidator.getStructuredData(input) + val node = new Vertex(graphId, result.getMetadata) + val objectType = schemaValidator.getConfig.getString("objectType") + node.setNodeType(SystemNodeTypes.DATA_NODE.name) + node.setObjectType(objectType) + node.setIdentifier(input.getOrDefault("identifier", Identifier.getIdentifier(graphId, 
Identifier.getUniqueIdFromTimestamp)).asInstanceOf[String]) + input.remove("identifier") + setEdges(node, result.getRelations) + if (CollectionUtils.isNotEmpty(node.getInRelations)) node.setAddedRelations(node.getInRelations) + if (CollectionUtils.isNotEmpty(node.getOutRelations)) node.setAddedRelations(node.getOutRelations) + node.setExternalData(result.getExternalData) + node + } + @throws[Exception] override def validate(node: Node, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { Future{node} } + @throws[Exception] + override def validates(vertex: Vertex, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { + Future { + vertex + } + } + override def getNode(identifier: String, operation: String, mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { val request: Request = new Request() val node: Future[Node] = oec.graphService.getNodeByUniqueId(graphId, identifier, false, request) @@ -92,4 +113,33 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = node.setOutRelations(outRelations) } } + + protected def setEdges(vertex: Vertex, edges: java.util.Map[String, AnyRef]): Unit = { + if (MapUtils.isNotEmpty(edges)) { + def getEdges(schema: Map[String, AnyRef], direction: String): List[Edges] = { + edges.asScala.filterKeys(key => schema.keySet.contains(key)) + .flatten(entry => { + val relSchema = schema.get(entry._1).get.asInstanceOf[java.util.Map[String, AnyRef]].asScala + val relData = entry._2.asInstanceOf[java.util.List[java.util.Map[String, AnyRef]]] + relData.asScala.map(r => { + val relation = { + if (StringUtils.equalsAnyIgnoreCase("out", direction)) { + new Edges(vertex.getIdentifier, relSchema.get("type").get.asInstanceOf[String], r.get("identifier").asInstanceOf[String]) + 
.updateMetadata((r.asScala - "identifier").asJava) + } else { + new Edges(r.get("identifier").asInstanceOf[String], relSchema.get("type").get.asInstanceOf[String], vertex.getIdentifier) + .updateMetadata((r.asScala - "identifier").asJava) + } + } + relation + }) + }).toList + } + + val inRelations = getEdges(inRelationsSchema, "in").asJava + vertex.setInRelations(inRelations) + val outRelations = getEdges(outRelationsSchema, "out").asJava + vertex.setOutRelations(outRelations) + } + } } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala index 6eab97c08..397ca3f15 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala @@ -1,7 +1,7 @@ package org.sunbird.graph.schema.validator import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.schema.IDefinition import scala.concurrent.{ExecutionContext, Future} @@ -19,4 +19,16 @@ trait SchemaValidator extends IDefinition { super.validate(node, operation) } + + @throws[Exception] + abstract override def validates(vertex: Vertex, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { + if (setDefaultValue) { + val result = schemaValidator.validate(vertex.getMetadata) + if (setDefaultValue && operation.equalsIgnoreCase("create")) { + vertex.setMetadata(result.getMetadata) + } + } + + super.validates(vertex, operation) + } } diff --git a/ontology-engine/pom.xml b/ontology-engine/pom.xml index 3dc8826f6..59e194dd1 100644 --- a/ontology-engine/pom.xml +++ b/ontology-engine/pom.xml @@ -13,6 +13,7 @@ graph-common 
graph-dac-api + graph-dac graph-core_2.12 graph-engine_2.12 parseq diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala index 4b91c9df9..35eda167e 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala @@ -35,7 +35,7 @@ class ObjectCategoryActor @Inject()(implicit oec: OntologyEngineContext) extends RequestUtil.restrictProperties(request) if (!request.getRequest.containsKey(Constants.NAME)) throw new ClientException("ERR_NAME_SET_AS_IDENTIFIER", "name will be set as identifier") request.getRequest.put(Constants.IDENTIFIER, Constants.CATEGORY_PREFIX + Slug.makeSlug(request.getRequest.get(Constants.NAME).asInstanceOf[String])) - DataNode.create(request).map(node => { + DataNode.creates(request).map(node => { ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier) }) } From 97119907c372bae78007762de3aa926e601cb868 Mon Sep 17 00:00:00 2001 From: aimansharief Date: Tue, 21 May 2024 11:45:23 +0530 Subject: [PATCH 04/20] Issue #KN-1041 fix: updated code for Gremlin Operations --- content-api/content-actors/pom.xml | 13 +++++ ontology-engine/graph-core_2.12/pom.xml | 12 +++++ .../conf/janusgraph-inmemory.properties | 0 .../{src => }/conf/remote-graph.properties | 2 +- .../{src => }/conf/remote-object.yaml | 0 ontology-engine/graph-dac/pom.xml | 14 +++++- .../service/operation/GremlinOperations.scala | 48 ++++++++++--------- .../service/util/JanusConnectionUtil.scala | 18 +++++-- 8 files changed, 76 insertions(+), 31 deletions(-) rename ontology-engine/graph-dac/{src => }/conf/janusgraph-inmemory.properties (100%) rename ontology-engine/graph-dac/{src => }/conf/remote-graph.properties (72%) rename ontology-engine/graph-dac/{src => }/conf/remote-object.yaml (100%) diff --git 
a/content-api/content-actors/pom.xml b/content-api/content-actors/pom.xml index 45e1e286a..c0f084a7b 100644 --- a/content-api/content-actors/pom.xml +++ b/content-api/content-actors/pom.xml @@ -93,6 +93,19 @@ 2.5.22 test + + + org.sunbird + graph-dac + + + org.apache.commons + commons-lang3 + + + 1.0-SNAPSHOT + jar + diff --git a/ontology-engine/graph-core_2.12/pom.xml b/ontology-engine/graph-core_2.12/pom.xml index 729c430e4..2f20c92b4 100644 --- a/ontology-engine/graph-core_2.12/pom.xml +++ b/ontology-engine/graph-core_2.12/pom.xml @@ -28,6 +28,18 @@ 1.0-SNAPSHOT jar + + org.sunbird + graph-dac + + + org.apache.commons + commons-lang3 + + + 1.0-SNAPSHOT + jar + org.sunbird schema-validator diff --git a/ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties b/ontology-engine/graph-dac/conf/janusgraph-inmemory.properties similarity index 100% rename from ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties rename to ontology-engine/graph-dac/conf/janusgraph-inmemory.properties diff --git a/ontology-engine/graph-dac/src/conf/remote-graph.properties b/ontology-engine/graph-dac/conf/remote-graph.properties similarity index 72% rename from ontology-engine/graph-dac/src/conf/remote-graph.properties rename to ontology-engine/graph-dac/conf/remote-graph.properties index eef10dbd9..1af1295f4 100644 --- a/ontology-engine/graph-dac/src/conf/remote-graph.properties +++ b/ontology-engine/graph-dac/conf/remote-graph.properties @@ -1,3 +1,3 @@ gremlin.remote.remoteConnectionClass=org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection -gremlin.remote.driver.clusterFile=/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/conf/remote-object.yaml +gremlin.remote.driver.clusterFile=/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/conf/remote-object.yaml gremlin.remote.driver.sourceName=g \ No newline at end of file diff --git a/ontology-engine/graph-dac/src/conf/remote-object.yaml 
b/ontology-engine/graph-dac/conf/remote-object.yaml similarity index 100% rename from ontology-engine/graph-dac/src/conf/remote-object.yaml rename to ontology-engine/graph-dac/conf/remote-object.yaml diff --git a/ontology-engine/graph-dac/pom.xml b/ontology-engine/graph-dac/pom.xml index c311d6e19..bc56d8228 100644 --- a/ontology-engine/graph-dac/pom.xml +++ b/ontology-engine/graph-dac/pom.xml @@ -64,6 +64,16 @@ 1.17.6 test + + org.janusgraph + janusgraph-driver + 1.0.0 + + + org.janusgraph + janusgraph-core + 1.0.0 + org.apache.tinkerpop gremlin-driver @@ -71,12 +81,12 @@ org.janusgraph - janusgraph-core + janusgraph-inmemory 1.0.0 org.janusgraph - janusgraph-inmemory + janusgraph-cql 1.0.0 diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala index 3017bdf65..f1ce2ce95 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala @@ -8,7 +8,7 @@ import org.sunbird.common.exception.ClientException import org.sunbird.common.{DateUtils, JsonUtils} import org.sunbird.graph.common.Identifier import org.sunbird.graph.common.enums.{AuditProperties, GraphDACParams, SystemProperties} -import org.sunbird.graph.dac.model.{Node, Vertex} +import org.sunbird.graph.dac.model.Vertex import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} import org.sunbird.janus.service.util.JanusConnectionUtil import org.sunbird.telemetry.logger.TelemetryManager @@ -21,60 +21,61 @@ import scala.concurrent.Future class GremlinOperations { val graphConnection = new JanusConnectionUtil - def addNode(graphId: String, node: Vertex): Future[Vertex] = { Future { + def addNode(graphId: String, vertex: Vertex): Future[Vertex] = { Future { if 
(StringUtils.isBlank(graphId)) throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") - if (null == node) + if (null == vertex) throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, DACErrorMessageConstants.INVALID_NODE + " | [Create Node Operation Failed.]") val parameterMap = new util.HashMap[String, AnyRef] parameterMap.put(GraphDACParams.graphId.name, graphId) - parameterMap.put(GraphDACParams.node.name, setPrimitiveData(node.getMetadata)) + parameterMap.put("vertex", setPrimitiveData(vertex)) prepareMap(parameterMap) graphConnection.initialiseGraphClient() val g: GraphTraversalSource = graphConnection.getGts val graph: JanusGraph = graphConnection.getGraph - val vertex = g.addV(node.getGraphId) + val newVertex = g.addV(vertex.getGraphId) val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] - finalMap.foreach { case (key, value) => vertex.property(key, value) } - vertex.as("ee").next() + finalMap.foreach { case (key, value) => newVertex.property(key, value) } + newVertex.as("ee").next() val retrieveVertex = g.V().select("ee").by(valueMap()).next() println(" vertex details found !" 
+ retrieveVertex) - node.setGraphId("domain") - node.setIdentifier("do_12332409i") - node.getMetadata.put(GraphDACParams.versionKey.name, "1023535325") + vertex.setGraphId("domain") + vertex.setIdentifier("do_12332409i") + vertex.getMetadata.put(GraphDACParams.versionKey.name, "1023535325") - node + vertex } } def prepareMap(parameterMap: util.Map[String, AnyRef]) = { if(null != parameterMap){ + println("parameterMap ==>"+parameterMap) val graphId = parameterMap.getOrDefault("graphId","").asInstanceOf[String] if (StringUtils.isBlank(graphId)) throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Create Node' Query Generation Failed.]") - val node: Node = parameterMap.get(GraphDACParams.node.name).asInstanceOf[Node] - if (null == node) + val vertex = parameterMap.getOrDefault("vertex", null).asInstanceOf[Vertex] + if (null == vertex) throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, DACErrorMessageConstants.INVALID_NODE + " | [Create Node Query Generation Failed.]") val date: String = DateUtils.formatCurrentDate - val mpMap :util.Map[String, AnyRef] = getMetadataCypherQueryMap(node) - val spMap :util.Map[String, AnyRef] = getSystemPropertyMap(node, date) - val apMap :util.Map[String, AnyRef] = getAuditPropertyMap(node, date, false) - val vpMap :util.Map[String, AnyRef] = getVersionPropertyMap(node, date) + val mpMap :util.Map[String, AnyRef] = getMetadataCypherQueryMap(vertex) + val spMap :util.Map[String, AnyRef] = getSystemPropertyMap(vertex, date) + val apMap :util.Map[String, AnyRef] = getAuditPropertyMap(vertex, date, false) + val vpMap :util.Map[String, AnyRef] = getVersionPropertyMap(vertex, date) parameterMap.put(GraphDACParams.paramValueMap.name, mpMap) parameterMap.put(GraphDACParams.paramValueMap.name, spMap) @@ -85,14 +86,14 @@ class GremlinOperations { } } - def getMetadataCypherQueryMap(node: Node): util.Map[String, AnyRef] = { + def getMetadataCypherQueryMap(node: 
Vertex): util.Map[String, AnyRef] = { val metadataPropertyMap = new util.HashMap[String, AnyRef] if (null != node && null != node.getMetadata && !node.getMetadata.isEmpty) { node.getMetadata.foreach { case (key, value) => metadataPropertyMap.put(key, value) } } metadataPropertyMap } - def getSystemPropertyMap(node: Node, date: String): util.Map[String, AnyRef] = { + def getSystemPropertyMap(node: Vertex, date: String): util.Map[String, AnyRef] = { val systemPropertyMap = new util.HashMap[String, AnyRef] if (null != node && StringUtils.isNotBlank(date)) { if (StringUtils.isBlank(node.getIdentifier)) @@ -104,7 +105,7 @@ class GremlinOperations { systemPropertyMap } - def getAuditPropertyMap(node: Node, date: String, isUpdateOnly: Boolean):util.Map[String, AnyRef] = { + def getAuditPropertyMap(node: Vertex, date: String, isUpdateOnly: Boolean):util.Map[String, AnyRef] = { val auditPropertyMap = new util.HashMap[String, AnyRef] if(null != node && StringUtils.isNotBlank(date)) { if (BooleanUtils.isFalse(isUpdateOnly)) { @@ -119,14 +120,15 @@ class GremlinOperations { auditPropertyMap } - def getVersionPropertyMap(node: Node, date: String): util.Map[String, AnyRef] = { + def getVersionPropertyMap(node: Vertex, date: String): util.Map[String, AnyRef] = { val versionPropertyMap = new util.HashMap[String, AnyRef] if (null != node && StringUtils.isNotBlank(date)) versionPropertyMap.put(GraphDACParams.versionKey.name, DateUtils.parse(date).getTime.toString) versionPropertyMap } - def setPrimitiveData(metadata: util.Map[String, AnyRef]): util.Map[String, AnyRef] = { + def setPrimitiveData(vertex: Vertex): Vertex = { + val metadata: util.Map[String, AnyRef] = vertex.getMetadata metadata.forEach((key, value) => { try { value match { @@ -138,7 +140,7 @@ class GremlinOperations { case e: Exception => TelemetryManager.error(s"Exception Occurred While Processing Primitive Data Types | Exception is : ${e.getMessage}", e) } }) - metadata + vertex } diff --git 
a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala index 56fe5bb4a..790b7afaf 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala @@ -3,6 +3,7 @@ package org.sunbird.janus.service.util import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource import org.janusgraph.core.{JanusGraph, JanusGraphFactory} +import org.sunbird.telemetry.logger.TelemetryManager class JanusConnectionUtil { var g: GraphTraversalSource = _ @@ -10,11 +11,18 @@ class JanusConnectionUtil { @throws[Exception] def initialiseGraphClient(): Unit = { - if (null == g) g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/conf/remote-graph.properties") - if (null == graph) graph = JanusGraphFactory.open("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/conf/janusgraph-inmemory.properties") - - println("GraphTraversalSource: " + g) - println("graph: " + graph) + try { + if (null == g) g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/conf/remote-graph.properties") + if (null == graph) graph = JanusGraphFactory.open("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/conf/janusgraph-inmemory.properties") + + println("GraphTraversalSource: " + g) + println("graph: " + graph) + } + catch { + case e: Exception => + TelemetryManager.log("JanusConnectionUtil --> Exception: " + e.getCause) + e.printStackTrace() + } } @throws[Exception] From e82cb6357aebf541aeb120df81d6fcd8d0a69e00 Mon Sep 17 00:00:00 2001 From: aimansharief 
Date: Wed, 22 May 2024 11:03:51 +0530 Subject: [PATCH 05/20] Issue #KN-1041 fix: Added fix for the vertex creation --- .../org/sunbird/graph/GraphService.scala | 2 +- ontology-engine/graph-dac/pom.xml | 10 ++-- .../main}/conf/janusgraph-inmemory.properties | 0 .../main}/conf/remote-graph.properties | 2 +- .../main/conf/remote-objects.yaml} | 0 .../service/operation/GremlinOperations.scala | 57 +++++++++---------- .../service/util/JanusConnectionUtil.scala | 19 +++++-- 7 files changed, 50 insertions(+), 40 deletions(-) rename ontology-engine/graph-dac/{ => src/main}/conf/janusgraph-inmemory.properties (100%) rename ontology-engine/graph-dac/{ => src/main}/conf/remote-graph.properties (71%) rename ontology-engine/graph-dac/{conf/remote-object.yaml => src/main/conf/remote-objects.yaml} (100%) diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala index 6cfdc0652..58b8a66ed 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala @@ -32,7 +32,7 @@ class GraphService { val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) node.setMetadata(metadata) } - gremlinOps.addNode(graphId, node).map(resNode => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) + gremlinOps.addVertex(graphId, node).map(resNode => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) } diff --git a/ontology-engine/graph-dac/pom.xml b/ontology-engine/graph-dac/pom.xml index bc56d8228..c1e8a963e 100644 --- a/ontology-engine/graph-dac/pom.xml +++ b/ontology-engine/graph-dac/pom.xml @@ -31,11 +31,6 @@ 1.0-SNAPSHOT jar - - org.apache.commons - commons-lang3 - 3.3.2 - io.netty netty-codec @@ -89,6 +84,11 @@ janusgraph-cql 1.0.0 + + org.apache.commons + commons-text + 1.9 + diff --git 
a/ontology-engine/graph-dac/conf/janusgraph-inmemory.properties b/ontology-engine/graph-dac/src/main/conf/janusgraph-inmemory.properties similarity index 100% rename from ontology-engine/graph-dac/conf/janusgraph-inmemory.properties rename to ontology-engine/graph-dac/src/main/conf/janusgraph-inmemory.properties diff --git a/ontology-engine/graph-dac/conf/remote-graph.properties b/ontology-engine/graph-dac/src/main/conf/remote-graph.properties similarity index 71% rename from ontology-engine/graph-dac/conf/remote-graph.properties rename to ontology-engine/graph-dac/src/main/conf/remote-graph.properties index 1af1295f4..74b4d6c0c 100644 --- a/ontology-engine/graph-dac/conf/remote-graph.properties +++ b/ontology-engine/graph-dac/src/main/conf/remote-graph.properties @@ -1,3 +1,3 @@ gremlin.remote.remoteConnectionClass=org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection -gremlin.remote.driver.clusterFile=/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/conf/remote-object.yaml +gremlin.remote.driver.clusterFile=/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/main/conf/remote-objects.yaml gremlin.remote.driver.sourceName=g \ No newline at end of file diff --git a/ontology-engine/graph-dac/conf/remote-object.yaml b/ontology-engine/graph-dac/src/main/conf/remote-objects.yaml similarity index 100% rename from ontology-engine/graph-dac/conf/remote-object.yaml rename to ontology-engine/graph-dac/src/main/conf/remote-objects.yaml diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala index f1ce2ce95..f26d57f40 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala @@ -1,7 +1,8 
@@ package org.sunbird.janus.service.operation import org.apache.commons.lang3.{BooleanUtils, StringUtils} -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource +import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.{GraphTraversal, GraphTraversalSource} import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.valueMap import org.janusgraph.core.JanusGraph import org.sunbird.common.exception.ClientException @@ -21,40 +22,38 @@ import scala.concurrent.Future class GremlinOperations { val graphConnection = new JanusConnectionUtil - def addNode(graphId: String, vertex: Vertex): Future[Vertex] = { Future { - if (StringUtils.isBlank(graphId)) - throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, - DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") - - if (null == vertex) - throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, - DACErrorMessageConstants.INVALID_NODE + " | [Create Node Operation Failed.]") - - val parameterMap = new util.HashMap[String, AnyRef] - parameterMap.put(GraphDACParams.graphId.name, graphId) - parameterMap.put("vertex", setPrimitiveData(vertex)) - prepareMap(parameterMap) - - graphConnection.initialiseGraphClient() - val g: GraphTraversalSource = graphConnection.getGts - val graph: JanusGraph = graphConnection.getGraph + def addVertex(graphId: String, vertex: Vertex): Future[Vertex] = { + Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") - val newVertex = g.addV(vertex.getGraphId) - val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] + if (null == vertex) + throw new 
ClientException(DACErrorCodeConstants.INVALID_NODE.name, + DACErrorMessageConstants.INVALID_NODE + " | [Create Node Operation Failed.]") - finalMap.foreach { case (key, value) => newVertex.property(key, value) } - newVertex.as("ee").next() + val parameterMap = new util.HashMap[String, AnyRef] + parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put("vertex", setPrimitiveData(vertex)) + prepareMap(parameterMap) - val retrieveVertex = g.V().select("ee").by(valueMap()).next() - println(" vertex details found !" + retrieveVertex) + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGts + val graph: JanusGraph = graphConnection.getGraph - vertex.setGraphId("domain") - vertex.setIdentifier("do_12332409i") - vertex.getMetadata.put(GraphDACParams.versionKey.name, "1023535325") + val newVertex = g.addV(vertex.getGraphId) + val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] - vertex - } + finalMap.foreach { case (key, value) => newVertex.property(key, value) } + newVertex.as("ee").next() + val retrieveVertex: Vertex = g.V().select("ee").by(valueMap()).next() + println(" vertex details found !" 
+ retrieveVertex) + vertex.setGraphId("domain") + vertex.setIdentifier("do_12332409i") + vertex.getMetadata.put(GraphDACParams.versionKey.name, "1023535325") + vertex + } } def prepareMap(parameterMap: util.Map[String, AnyRef]) = { diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala index 790b7afaf..1472a9a98 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala @@ -1,19 +1,30 @@ package org.sunbird.janus.service.util +import org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection +import org.apache.tinkerpop.gremlin.driver.{Client, Cluster} import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource +import org.apache.tinkerpop.gremlin.structure.io.binary.TypeSerializerRegistry +import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph +import org.apache.tinkerpop.gremlin.util.ser.GraphBinaryMessageSerializerV1 import org.janusgraph.core.{JanusGraph, JanusGraphFactory} +import org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry import org.sunbird.telemetry.logger.TelemetryManager + +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.Future class JanusConnectionUtil { - var g: GraphTraversalSource = _ - var graph: JanusGraph = _ + var g: GraphTraversalSource = null + var graph: JanusGraph = null + + @throws[Exception] def initialiseGraphClient(): Unit = { try { - if (null == g) g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/conf/remote-graph.properties") - if (null == graph) graph = 
JanusGraphFactory.open("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/conf/janusgraph-inmemory.properties") + if (null == g) g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/main/conf/remote-graph.properties") + if (null == graph) graph = JanusGraphFactory.open("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/main/conf/janusgraph-inmemory.properties") println("GraphTraversalSource: " + g) println("graph: " + graph) From 82a85515d5ab211952489e711ab913f35755247a Mon Sep 17 00:00:00 2001 From: aimansharief Date: Wed, 22 May 2024 17:19:59 +0530 Subject: [PATCH 06/20] Issue #KN-1041 fix: Restructure code specific for JanusGraph --- .../sunbird/content/actors/ContentActor.scala | 3 +- .../org/sunbird/graph/GraphService.scala | 19 +- .../org/sunbird/graph/JanusGraphService.scala | 35 ++++ .../sunbird/graph/OntologyEngineContext.scala | 6 +- .../org/sunbird/graph/dac/model/Edges.java | 184 +++++++++--------- .../org/sunbird/graph/dac/model/Vertex.java | 94 ++++----- .../service/operation/EdgeOperations.scala | 14 +- ...perations.scala => VertexOperations.scala} | 35 ++-- .../service/util/JanusConnectionUtil.scala | 9 +- .../org/sunbird/graph/nodes/DataNode.scala | 43 +--- .../sunbird/graph/schema/DefinitionNode.scala | 8 +- .../sunbird/graph/schema/IDefinition.scala | 4 +- .../schema/validator/BaseDefinitionNode.scala | 24 +-- .../schema/validator/SchemaValidator.scala | 5 +- .../org/sunbird/graph/vertex/DataVertex.scala | 66 +++++++ .../sunbird/actors/ObjectCategoryActor.scala | 4 +- 16 files changed, 300 insertions(+), 253 deletions(-) create mode 100644 ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala rename ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/{GremlinOperations.scala => VertexOperations.scala} (87%) create mode 100644 
ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala diff --git a/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala b/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala index d0943406e..8151b7897 100644 --- a/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala +++ b/content-api/content-actors/src/main/scala/org/sunbird/content/actors/ContentActor.scala @@ -17,6 +17,7 @@ import org.sunbird.content.util._ import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.nodes.DataNode +import org.sunbird.graph.vertex.DataVertex import org.sunbird.graph.utils.NodeUtil import org.sunbird.managers.HierarchyManager import org.sunbird.managers.HierarchyManager.hierarchyPrefix @@ -66,7 +67,7 @@ class ContentActor @Inject() (implicit oec: OntologyEngineContext, ss: StorageSe def create(request: Request): Future[Response] = { populateDefaultersForCreation(request) RequestUtil.restrictProperties(request) - DataNode.creates(request, vertexDataModifier).map(node => { + DataVertex.create(request, vertexDataModifier).map(node => { ResponseHandler.OK.put(ContentConstants.IDENTIFIER, node.getIdentifier).put("node_id", node.getIdentifier) .put("versionKey", node.getMetadata.get("versionKey")) }) diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala index 58b8a66ed..412465d23 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/GraphService.scala @@ -3,19 +3,16 @@ package org.sunbird.graph import org.sunbird.common.Platform import org.sunbird.common.dto.{Property, Request, Response, ResponseHandler} import 
org.sunbird.common.exception.ResponseCode -import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph, Vertex} +import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph} import org.sunbird.graph.external.ExternalPropsManager import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} import org.sunbird.graph.util.CSPMetaUtil -import org.sunbird.janus.service.operation.{EdgeOperations, GremlinOperations} import java.lang import scala.concurrent.{ExecutionContext, Future} class GraphService { - val gremlinOps = new GremlinOperations() - val edgeOps = new EdgeOperations() implicit val ec: ExecutionContext = ExecutionContext.global val isrRelativePathEnabled: lang.Boolean = Platform.getBoolean("cloudstorage.metadata.replace_absolute_path", false) @@ -27,15 +24,6 @@ class GraphService { NodeAsyncOperations.addNode(graphId, node).map(resNode => if(isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) } - def addVertex(graphId: String, node: Vertex): Future[Vertex] = { - if (isrRelativePathEnabled) { - val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) - node.setMetadata(metadata) - } - gremlinOps.addVertex(graphId, node).map(resNode => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resNode) else resNode) - - } - def upsertNode(graphId: String, node: Node, request: Request): Future[Node] = { if(isrRelativePathEnabled) { val metadata = CSPMetaUtil.updateRelativePath(node.getMetadata) @@ -120,11 +108,6 @@ class GraphService { GraphAsyncOperations.createRelation(graphId, relationMap) } - def createEdges(graphId: String, relationMap: java.util.List[java.util.Map[String, AnyRef]]) = { - edgeOps.createEdges(graphId, relationMap) - } - - def getSubGraph(graphId: String, nodeId: String, depth: Int): Future[SubGraph] = { GraphAsyncOperations.getSubGraph(graphId, nodeId, depth) } diff --git 
a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala new file mode 100644 index 000000000..e5722ec43 --- /dev/null +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -0,0 +1,35 @@ +package org.sunbird.graph + +import org.sunbird.common.Platform +import org.sunbird.common.dto.{Property, Request, Response, ResponseHandler} +import org.sunbird.common.exception.ResponseCode +import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph, Vertex} +import org.sunbird.graph.external.ExternalPropsManager +import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} +import org.sunbird.graph.util.CSPMetaUtil +import org.sunbird.janus.service.operation.{EdgeOperations, VertexOperations} + +import java.lang +import scala.concurrent.{ExecutionContext, Future} +class JanusGraphService { + + private val VertexOperations = new VertexOperations() + private val EdgeOperations = new EdgeOperations() + implicit val ec: ExecutionContext = ExecutionContext.global + val isrRelativePathEnabled: lang.Boolean = Platform.getBoolean("cloudstorage.metadata.replace_absolute_path", false) + + + def addVertex(graphId: String, vertex: Vertex): Future[Vertex] = { + if (isrRelativePathEnabled) { + val metadata = CSPMetaUtil.updateRelativePath(vertex.getMetadata) + vertex.setMetadata(metadata) + } + VertexOperations.addVertex(graphId, vertex).map(resVertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resVertex) else resVertex) + + } + + def createEdges(graphId: String, edgeMap: java.util.List[java.util.Map[String, AnyRef]]) = { + EdgeOperations.createEdges(graphId, edgeMap) + } + +} diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala 
b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala index 167eff5ad..106410c66 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/OntologyEngineContext.scala @@ -10,11 +10,15 @@ class OntologyEngineContext { private val dialGraphDB = new DialGraphService private val hUtil = new HttpUtil private lazy val kfClient = new KafkaClient - + private lazy val janusGraphDB = new JanusGraphService def graphService = { graphDB } + def janusGraphService = { + janusGraphDB + } + def dialgraphService = { dialGraphDB } diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java index 72eefe38b..ff18bacbe 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java @@ -20,27 +20,27 @@ public class Edges implements Serializable { private static final long serialVersionUID = -7207054262120122453L; private String id; private String graphId; - private String relationType; - private String startNodeId; - private String endNodeId; - private String startNodeName; - private String endNodeName; - private String startNodeType; - private String endNodeType; - private String startNodeObjectType; - private String endNodeObjectType; + private String edgeType; + private String startVertexId; + private String endVertexId; + private String startVertexName; + private String endVertexName; + private String startVertexType; + private String endVertexType; + private String startVertexObjectType; + private String endVertexObjectType; private Map metadata; - private Map startNodeMetadata; - private Map endNodeMetadata; + private Map startVertexMetadata; + private Map endVertexMetadata; public 
Edges() { } - public Edges(String startNodeId, String relationType, String endNodeId) { - this.startNodeId = startNodeId; - this.endNodeId = endNodeId; - this.relationType = relationType; + public Edges(String startVertexId, String edgeType, String endVertexId) { + this.startVertexId = startVertexId; + this.endVertexId = endVertexId; + this.edgeType = edgeType; } public Edges(String graphId, Edge edge) { @@ -49,20 +49,20 @@ public Edges(String graphId, Edge edge) { "Failed to create relation object. Relation from database is null."); this.graphId = graphId; - Vertex startNode = edge.outVertex(); - Vertex endNode = edge.inVertex(); - this.startNodeId = startNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); - this.endNodeId = endNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); - this.startNodeName = getName(startNode); - this.endNodeName = getName(endNode); - this.startNodeType = getNodeType(startNode); - this.endNodeType = getNodeType(endNode); - this.startNodeObjectType = getObjectType(startNode); - this.endNodeObjectType = getObjectType(endNode); - this.relationType = edge.label(); + Vertex startVertex = edge.outVertex(); + Vertex endVertex = edge.inVertex(); + this.startVertexId = startVertex.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + this.endVertexId = endVertex.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + this.startVertexName = getName(startVertex); + this.endVertexName = getName(endVertex); + this.startVertexType = getVertexType(startVertex); + this.endVertexType = getVertexType(endVertex); + this.startVertexObjectType = getObjectType(startVertex); + this.endVertexObjectType = getObjectType(endVertex); + this.edgeType = edge.label(); this.metadata = new HashMap(); - this.startNodeMetadata = getNodeMetadata(edge.outVertex()); - this.endNodeMetadata = getNodeMetadata(edge.inVertex()); + this.startVertexMetadata = getNodeMetadata(edge.outVertex()); + 
this.endVertexMetadata = getNodeMetadata(edge.inVertex()); edge.keys().forEach(key -> this.metadata.put(key, edge.value(key))); } @@ -74,18 +74,18 @@ public Edges(String graphId, Edge edge, Map startNodeMap, Map(); - this.startNodeMetadata = getNodeMetadata(startNode); - this.endNodeMetadata = getNodeMetadata(endNode); + this.startVertexMetadata = getNodeMetadata(startNode); + this.endVertexMetadata = getNodeMetadata(endNode); edge.keys().forEach(key -> { Object value = edge.value(key); if(null != value){ @@ -123,26 +123,26 @@ private String getName(Node node) { return name; } - private String getName(Vertex node) { - String name = node.property("name").isPresent() ? node.property("name").value().toString() : null; + private String getName(Vertex vertex) { + String name = vertex.property("name").isPresent() ? vertex.property("name").value().toString() : null; if (StringUtils.isBlank(name)) { - name = node.property("title").isPresent() ? node.property("title").value().toString() : null; + name = vertex.property("title").isPresent() ? vertex.property("title").value().toString() : null; if (StringUtils.isBlank(name)) { - name = node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).value().toString() : null; + name = vertex.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).isPresent() ? vertex.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).value().toString() : null; if (StringUtils.isBlank(name)) - name = node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).value().toString() : null; + name = vertex.property(SystemProperties.IL_SYS_NODE_TYPE.name()).isPresent() ? vertex.property(SystemProperties.IL_SYS_NODE_TYPE.name()).value().toString() : null; } } return name; } - private String getNodeType(Vertex node) { - return node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).isPresent() ? 
node.property(SystemProperties.IL_SYS_NODE_TYPE.name()).value().toString() : null; + private String getVertexType(Vertex vertex) { + return vertex.property(SystemProperties.IL_SYS_NODE_TYPE.name()).isPresent() ? vertex.property(SystemProperties.IL_SYS_NODE_TYPE.name()).value().toString() : null; } - private String getObjectType(Vertex node) { - return node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).isPresent() ? node.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).value().toString() : null; + private String getObjectType(Vertex vertex) { + return vertex.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).isPresent() ? vertex.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).value().toString() : null; } private Map getNodeMetadata(Vertex vertex) { @@ -172,28 +172,28 @@ private Map getNodeMetadata(Vertex vertex) { return metadata; } - public String getRelationType() { - return relationType; + public String getEdgeType() { + return edgeType; } - public void setRelationType(String relationType) { - this.relationType = relationType; + public void setEdgeType(String edgeType) { + this.edgeType = edgeType; } - public String getStartNodeId() { - return startNodeId; + public String getStartVertexId() { + return startVertexId; } - public void setStartNodeId(String startNodeId) { - this.startNodeId = startNodeId; + public void setStartVertexId(String startVertexId) { + this.startVertexId = startVertexId; } - public String getEndNodeId() { - return endNodeId; + public String getEndVertexId() { + return endVertexId; } - public void setEndNodeId(String endNodeId) { - this.endNodeId = endNodeId; + public void setEndVertexId(String endVertexId) { + this.endVertexId = endVertexId; } public Map getMetadata() { @@ -229,72 +229,72 @@ public void setId(String id) { this.id = id; } - public String getStartNodeName() { - return startNodeName; + public String getStartVertexName() { + return startVertexName; } - public void setStartNodeName(String startNodeName) { - 
this.startNodeName = startNodeName; + public void setStartVertexName(String startVertexName) { + this.startVertexName = startVertexName; } - public String getEndNodeName() { - return endNodeName; + public String getEndVertexName() { + return endVertexName; } - public void setEndNodeName(String endNodeName) { - this.endNodeName = endNodeName; + public void setEndVertexName(String endVertexName) { + this.endVertexName = endVertexName; } - public String getStartNodeType() { - return startNodeType; + public String getStartVertexType() { + return startVertexType; } - public void setStartNodeType(String startNodeType) { - this.startNodeType = startNodeType; + public void setStartVertexType(String startVertexType) { + this.startVertexType = startVertexType; } - public String getEndNodeType() { - return endNodeType; + public String getEndVertexType() { + return endVertexType; } - public void setEndNodeType(String endNodeType) { - this.endNodeType = endNodeType; + public void setEndVertexType(String endVertexType) { + this.endVertexType = endVertexType; } - public String getStartNodeObjectType() { - return startNodeObjectType; + public String getStartVertexObjectType() { + return startVertexObjectType; } - public void setStartNodeObjectType(String startNodeObjectType) { - this.startNodeObjectType = startNodeObjectType; + public void setStartVertexObjectType(String startVertexObjectType) { + this.startVertexObjectType = startVertexObjectType; } - public String getEndNodeObjectType() { - return endNodeObjectType; + public String getEndVertexObjectType() { + return endVertexObjectType; } - public void setEndNodeObjectType(String endNodeObjectType) { - this.endNodeObjectType = endNodeObjectType; + public void setEndVertexObjectType(String endVertexObjectType) { + this.endVertexObjectType = endVertexObjectType; } @JsonIgnore - public Map getStartNodeMetadata() { - return startNodeMetadata; + public Map getStartVertexMetadata() { + return startVertexMetadata; } @JsonIgnore - 
public void setStartNodeMetadata(Map startNodeMetadata) { - this.startNodeMetadata = startNodeMetadata; + public void setStartVertexMetadata(Map startVertexMetadata) { + this.startVertexMetadata = startVertexMetadata; } @JsonIgnore - public Map getEndNodeMetadata() { - return endNodeMetadata; + public Map getEndVertexMetadata() { + return endVertexMetadata; } @JsonIgnore - public void setEndNodeMetadata(Map endNodeMetadata) { - this.endNodeMetadata = endNodeMetadata; + public void setEndVertexMetadata(Map endVertexMetadata) { + this.endVertexMetadata = endVertexMetadata; } } diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java index 5d99cf387..9b151691e 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Vertex.java @@ -17,27 +17,27 @@ public class Vertex implements Serializable { private long id; private String graphId; private String identifier; - private String nodeType; + private String vertexType; private String objectType; private Map metadata; - private List outRelations; - private List inRelations; - private List addedRelations; - private List deletedRelations; - private Map relationNodes; + private List outEdges; + private List inEdges; + private List addedEdges; + private List deletedEdges; + private Map edgeVertices; private Map externalData; public Vertex() { - addedRelations = new ArrayList<>(); - deletedRelations = new ArrayList<>(); + addedEdges = new ArrayList<>(); + deletedEdges = new ArrayList<>(); } - public Vertex(String identifier, String nodeType, String objectType) { + public Vertex(String identifier, String vertexType, String objectType) { this.identifier = identifier; - this.nodeType = nodeType; + this.vertexType = vertexType; this.objectType = objectType; - addedRelations = new 
ArrayList<>(); - deletedRelations = new ArrayList<>(); + addedEdges = new ArrayList<>(); + deletedEdges = new ArrayList<>(); } public Vertex(String graphId, Map metadata) { @@ -47,12 +47,12 @@ public Vertex(String graphId, Map metadata) { if (null != metadata.get(SystemProperties.IL_UNIQUE_ID.name())) this.identifier = metadata.get(SystemProperties.IL_UNIQUE_ID.name()).toString(); if (null != metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name())) - this.nodeType = metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name()).toString(); + this.vertexType = metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name()).toString(); if (null != metadata.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name())) this.objectType = metadata.get(SystemProperties.IL_FUNC_OBJECT_TYPE.name()).toString(); } - addedRelations = new ArrayList<>(); - deletedRelations = new ArrayList<>(); + addedEdges = new ArrayList<>(); + deletedEdges = new ArrayList<>(); } @@ -83,14 +83,14 @@ public void setIdentifier(String identifier) { this.identifier = identifier; } - public String getNodeType() { - if (StringUtils.isBlank(nodeType) && null != metadata) - this.nodeType = (String) metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name()); - return nodeType; + public String getVertexType() { + if (StringUtils.isBlank(vertexType) && null != metadata) + this.vertexType = (String) metadata.get(SystemProperties.IL_SYS_NODE_TYPE.name()); + return vertexType; } - public void setNodeType(String nodeType) { - this.nodeType = nodeType; + public void setVertexType(String vertexType) { + this.vertexType = vertexType; } public String getObjectType() { @@ -111,66 +111,66 @@ public void setMetadata(Map metadata) { this.metadata = metadata; } - public List getOutRelations() { - if (!CollectionUtils.isEmpty(outRelations)) - return outRelations; + public List getOutEdges() { + if (!CollectionUtils.isEmpty(outEdges)) + return outEdges; else return new ArrayList<>(); } - public void setOutRelations(List outRelations) { - 
this.outRelations = outRelations; + public void setOutEdges(List outEdges) { + this.outEdges = outEdges; } - public List getInRelations() { - if (!CollectionUtils.isEmpty(inRelations)) - return inRelations; + public List getInEdges() { + if (!CollectionUtils.isEmpty(inEdges)) + return inEdges; else return new ArrayList<>(); } - public void setInRelations(List inRelations) { - this.inRelations = inRelations; + public void setInEdges(List inEdges) { + this.inEdges = inEdges; } - public List getAddedRelations() { - return addedRelations; + public List getAddedEdges() { + return addedEdges; } - public void setAddedRelations(List addedRelations) { - if(CollectionUtils.isEmpty(this.addedRelations)) - this.addedRelations = new ArrayList<>(); - this.addedRelations.addAll(addedRelations); + public void setAddedEdges(List addedEdges) { + if(CollectionUtils.isEmpty(this.addedEdges)) + this.addedEdges = new ArrayList<>(); + this.addedEdges.addAll(addedEdges); } - public List getDeletedRelations() { - return deletedRelations; + public List getDeletedEdges() { + return deletedEdges; } - public void setDeletedRelations(List deletedRelations) { - this.deletedRelations = deletedRelations; + public void setDeletedEdges(List deletedEdges) { + this.deletedEdges = deletedEdges; } public Map getExternalData() { return externalData; } - public Map getRelationNodes() { - return relationNodes; + public Map getEdgeVertices() { + return edgeVertices; } - public void setRelationNodes(Map relationNodes) { - this.relationNodes = relationNodes; + public void setEdgeVertices(Map edgeVertices) { + this.edgeVertices = edgeVertices; } public void setExternalData(Map externalData) { this.externalData = externalData; } - public Vertex getNode() { + public Vertex getVertex() { return (Vertex) this; } public Vertex getRelationNode(String identifier) { - return relationNodes.get(identifier); + return edgeVertices.get(identifier); } public String getArtifactUrl() { diff --git 
a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala index 6766830e4..d7369bd62 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala @@ -18,26 +18,26 @@ import scala.concurrent.Future class EdgeOperations { val graphConnection = new JanusConnectionUtil - def createEdges(graphId: String, relationData: util.List[util.Map[String, AnyRef]]): Future[Response] = { + def createEdges(graphId: String, edgeData: util.List[util.Map[String, AnyRef]]): Future[Response] = { Future{ if (StringUtils.isBlank(graphId)) throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") - if (CollectionUtils.isEmpty(relationData)) + + if (CollectionUtils.isEmpty(edgeData)) throw new ClientException(DACErrorCodeConstants.INVALID_RELATION.name, DACErrorMessageConstants.INVALID_NODE + " | [Create Relation Operation Failed.]") graphConnection.initialiseGraphClient() - val g: GraphTraversalSource = graphConnection.getGts - val graph: JanusGraph = graphConnection.getGraph + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource - createBulkRelations(g, graphId, relationData) + createBulkRelations(g, graphId, edgeData) ResponseHandler.OK() } } - def createBulkRelations(g: GraphTraversalSource, graphId: String, relationData: util.List[util.Map[String, AnyRef]]): Unit = { - for (row <- relationData.asScala) { + def createBulkRelations(g: GraphTraversalSource, graphId: String, edgeData: util.List[util.Map[String, AnyRef]]): Unit = { + for (row <- edgeData.asScala) { val startNodeId = row.get("startNodeId").toString val endNodeId = row.get("endNodeId").toString val relation = 
row.get("relation").toString diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala similarity index 87% rename from ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala rename to ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index f26d57f40..fcfd7214d 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/GremlinOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -19,7 +19,7 @@ import scala.collection.convert.ImplicitConversions.`map AsScala` import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future -class GremlinOperations { +class VertexOperations { val graphConnection = new JanusConnectionUtil def addVertex(graphId: String, vertex: Vertex): Future[Vertex] = { @@ -38,33 +38,30 @@ class GremlinOperations { prepareMap(parameterMap) graphConnection.initialiseGraphClient() - val g: GraphTraversalSource = graphConnection.getGts - val graph: JanusGraph = graphConnection.getGraph + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource val newVertex = g.addV(vertex.getGraphId) val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] finalMap.foreach { case (key, value) => newVertex.property(key, value) } - newVertex.as("ee").next() - val retrieveVertex: Vertex = g.V().select("ee").by(valueMap()).next() - println(" vertex details found !" 
+ retrieveVertex) + val retrieveVertex = newVertex.elementMap().next() - vertex.setGraphId("domain") - vertex.setIdentifier("do_12332409i") - vertex.getMetadata.put(GraphDACParams.versionKey.name, "1023535325") + vertex.setGraphId(graphId) + vertex.setIdentifier(retrieveVertex.get("IL_UNIQUE_ID")) + vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get("versionKey")) vertex } } def prepareMap(parameterMap: util.Map[String, AnyRef]) = { - if(null != parameterMap){ - println("parameterMap ==>"+parameterMap) + if (null != parameterMap) { val graphId = parameterMap.getOrDefault("graphId","").asInstanceOf[String] + val vertex = parameterMap.getOrDefault("vertex", null).asInstanceOf[Vertex] + if (StringUtils.isBlank(graphId)) throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Create Node' Query Generation Failed.]") - val vertex = parameterMap.getOrDefault("vertex", null).asInstanceOf[Vertex] if (null == vertex) throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, DACErrorMessageConstants.INVALID_NODE + " | [Create Node Query Generation Failed.]") @@ -76,12 +73,13 @@ class GremlinOperations { val apMap :util.Map[String, AnyRef] = getAuditPropertyMap(vertex, date, false) val vpMap :util.Map[String, AnyRef] = getVersionPropertyMap(vertex, date) - parameterMap.put(GraphDACParams.paramValueMap.name, mpMap) - parameterMap.put(GraphDACParams.paramValueMap.name, spMap) - parameterMap.put(GraphDACParams.paramValueMap.name, apMap) - parameterMap.put(GraphDACParams.paramValueMap.name, vpMap) + val combinedMap: util.Map[String, AnyRef] = new util.HashMap[String, AnyRef] + combinedMap.putAll(mpMap) + combinedMap.putAll(spMap) + combinedMap.putAll(apMap) + combinedMap.putAll(vpMap) - println("parameterMap ->"+parameterMap) + parameterMap.put(GraphDACParams.paramValueMap.name, combinedMap) } } @@ -92,13 +90,14 @@ class GremlinOperations { } metadataPropertyMap } + def 
getSystemPropertyMap(node: Vertex, date: String): util.Map[String, AnyRef] = { val systemPropertyMap = new util.HashMap[String, AnyRef] if (null != node && StringUtils.isNotBlank(date)) { if (StringUtils.isBlank(node.getIdentifier)) node.setIdentifier(Identifier.getIdentifier(node.getGraphId, Identifier.getUniqueIdFromTimestamp)) systemPropertyMap.put(SystemProperties.IL_UNIQUE_ID.name, node.getIdentifier) - systemPropertyMap.put(SystemProperties.IL_SYS_NODE_TYPE.name, node.getNodeType) + systemPropertyMap.put(SystemProperties.IL_SYS_NODE_TYPE.name, node.getVertexType) systemPropertyMap.put(SystemProperties.IL_FUNC_OBJECT_TYPE.name, node.getObjectType) } systemPropertyMap diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala index 1472a9a98..f602d8528 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/util/JanusConnectionUtil.scala @@ -23,11 +23,12 @@ class JanusConnectionUtil { @throws[Exception] def initialiseGraphClient(): Unit = { try { - if (null == g) g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/main/conf/remote-graph.properties") + if (null == g) { + println("GraphTraversalSource: "+g) + g = traversal.withRemote("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/main/conf/remote-graph.properties") + } if (null == graph) graph = JanusGraphFactory.open("/Users/admin/Documents/workspace/knowledge-platform/ontology-engine/graph-dac/src/main/conf/janusgraph-inmemory.properties") - println("GraphTraversalSource: " + g) - println("graph: " + graph) } catch { case e: Exception => @@ -37,7 +38,7 @@ class JanusConnectionUtil { } @throws[Exception] - def getGts: GraphTraversalSource = g + 
def getGraphTraversalSource: GraphTraversalSource = g @throws[Exception] def getGraph: JanusGraph = graph diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala index da9d3df0e..3dc800a12 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala @@ -36,19 +36,6 @@ object DataNode { }).flatMap(f => f) } - @throws[Exception] - def creates(request: Request, dataModifier: (Vertex) => Vertex = defaultVertexDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { - DefinitionNode.validates(request).map(vertex => { - val response = oec.graphService.addVertex(request.graphId, dataModifier(vertex)) - response.map(vertex => DefinitionNode.postProcessor(request, vertex)).map(result => { - val futureList = Task.parallel[Response]( - saveExternalProperties(vertex.getIdentifier, vertex.getExternalData, request.getContext, request.getObjectType), - createEdges(request.graphId, vertex, request.getContext)) - futureList.map(list => result) - }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } - }).flatMap(f => f) - } - @throws[Exception] def update(request: Request, dataModifier: (Node) => Node = defaultDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { val identifier: String = request.getContext.get("identifier").asInstanceOf[String] @@ -125,7 +112,7 @@ object DataNode { oec.graphService.deleteNode(request.graphId, identifier, request) } - private def saveExternalProperties(identifier: String, externalProps: util.Map[String, AnyRef], context: util.Map[String, AnyRef], objectType: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { + def saveExternalProperties(identifier: 
String, externalProps: util.Map[String, AnyRef], context: util.Map[String, AnyRef], objectType: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { if (MapUtils.isNotEmpty(externalProps)) { externalProps.put("identifier", identifier) val request = new Request(context, externalProps, "", objectType) @@ -154,15 +141,6 @@ object DataNode { } } - private def createEdges(graphId: String, node: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { - val edges: util.List[Edges] = node.getAddedRelations - if (CollectionUtils.isNotEmpty(edges)) { - oec.graphService.createEdges(graphId, getEdgesMap(edges)) - } else { - Future(new Response) - } - } - private def populateExternalProperties(fields: List[String], node: Node, request: Request, externalProps: List[String])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { if(StringUtils.equalsIgnoreCase(request.get("mode").asInstanceOf[String], "edit")) request.put("identifier", node.getIdentifier) @@ -208,30 +186,11 @@ object DataNode { list } - private def getEdgesMap(edges: util.List[Edges]): java.util.List[util.Map[String, AnyRef]] = { - val list = new util.ArrayList[util.Map[String, AnyRef]] - for (edge <- edges) { - if ((StringUtils.isNotBlank(edge.getStartNodeId) && StringUtils.isNotBlank(edge.getEndNodeId)) && StringUtils.isNotBlank(edge.getRelationType)) { - val map = new util.HashMap[String, AnyRef] - map.put("startNodeId", edge.getStartNodeId) - map.put("endNodeId", edge.getEndNodeId) - map.put("relation", edge.getRelationType) - if (MapUtils.isNotEmpty(edge.getMetadata)) map.put("relMetadata", edge.getMetadata) - else map.put("relMetadata", new util.HashMap[String, AnyRef]()) - list.add(map) - } - else throw new ClientException("ERR_INVALID_RELATION_OBJECT", "Invalid Relation Object Found.") - } - list - } private def defaultDataModifier(node: Node) = { node } - private def 
defaultVertexDataModifier(vertex: Vertex) = { - vertex - } @throws[Exception] def systemUpdate(request: Request, nodeList: util.List[Node], hierarchyKey: String, hierarchyFunc: Option[Request => Future[Response]] = None)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala index 1cb859586..8cb35cf4a 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala @@ -41,7 +41,7 @@ object DefinitionNode { definition.validateRequest(request) val inputNode = definition.getVertex(request.getRequest) updateEdgeMetadata(inputNode) - definition.validates(inputNode, "create", setDefaultValue) recoverWith { case e: CompletionException => throw e.getCause } + definition.validateVertex(inputNode, "create", setDefaultValue) recoverWith { case e: CompletionException => throw e.getCause } } def getExternalProps(graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): List[String] = { @@ -246,9 +246,9 @@ object DefinitionNode { def updateEdgeMetadata(vertex: Vertex): Unit = { var relOcr = new util.HashMap[String, Integer]() - val rels = vertex.getAddedRelations + val rels = vertex.getAddedEdges for (rel <- rels) { - val relKey = rel.getStartNodeObjectType + rel.getRelationType + rel.getEndNodeObjectType + val relKey = rel.getStartVertexObjectType + rel.getEdgeType + rel.getEndVertexObjectType if (relOcr.containsKey(relKey)) relOcr.put(relKey, relOcr.get(relKey) + 1) else relOcr.put(relKey, 1) @@ -257,7 +257,7 @@ object DefinitionNode { rel.setMetadata(Map[String, AnyRef]("IL_SEQUENCE_INDEX" -> 
index).asJava) } else rel.setMetadata(new util.HashMap[String, AnyRef]()) } - vertex.setAddedRelations(rels) + vertex.setAddedEdges(rels) } def resetJsonProperties(node: Node, graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): Node = { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala index 06e867d30..ed7340309 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala @@ -15,10 +15,8 @@ abstract class IDefinition(graphId: String, schemaName: String, version: String def getVertex(input: java.util.Map[String, AnyRef]): Vertex @throws[Exception] def validate(node: Node, operation: String = "update", setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] - @throws[Exception] - def validates(vertex: Vertex, operation: String = "update", setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] - + def validateVertex(vertex: Vertex, operation: String = "update", setDefaultValue: Boolean = true)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] @throws[Exception] def getNode(identifier: String, operation: String = "read", mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala index 8430dd0da..cf7b20fcd 
100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala @@ -54,17 +54,17 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = override def getVertex(input: java.util.Map[String, Object]): Vertex = { val result = schemaValidator.getStructuredData(input) - val node = new Vertex(graphId, result.getMetadata) + val vertex = new Vertex(graphId, result.getMetadata) val objectType = schemaValidator.getConfig.getString("objectType") - node.setNodeType(SystemNodeTypes.DATA_NODE.name) - node.setObjectType(objectType) - node.setIdentifier(input.getOrDefault("identifier", Identifier.getIdentifier(graphId, Identifier.getUniqueIdFromTimestamp)).asInstanceOf[String]) + vertex.setVertexType(SystemNodeTypes.DATA_NODE.name) + vertex.setObjectType(objectType) + vertex.setIdentifier(input.getOrDefault("identifier", Identifier.getIdentifier(graphId, Identifier.getUniqueIdFromTimestamp)).asInstanceOf[String]) input.remove("identifier") - setEdges(node, result.getRelations) - if (CollectionUtils.isNotEmpty(node.getInRelations)) node.setAddedRelations(node.getInRelations) - if (CollectionUtils.isNotEmpty(node.getOutRelations)) node.setAddedRelations(node.getOutRelations) - node.setExternalData(result.getExternalData) - node + setEdges(vertex, result.getRelations) + if (CollectionUtils.isNotEmpty(vertex.getInEdges)) vertex.setAddedEdges(vertex.getInEdges) + if (CollectionUtils.isNotEmpty(vertex.getOutEdges)) vertex.setAddedEdges(vertex.getOutEdges) + vertex.setExternalData(result.getExternalData) + vertex } @throws[Exception] @@ -73,7 +73,7 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = } @throws[Exception] - override def validates(vertex: Vertex, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: 
OntologyEngineContext): Future[Vertex] = { + override def validateVertex(vertex: Vertex, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { Future { vertex } @@ -137,9 +137,9 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = } val inRelations = getEdges(inRelationsSchema, "in").asJava - vertex.setInRelations(inRelations) + vertex.setInEdges(inRelations) val outRelations = getEdges(outRelationsSchema, "out").asJava - vertex.setOutRelations(outRelations) + vertex.setOutEdges(outRelations) } } } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala index 397ca3f15..e803f286a 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/SchemaValidator.scala @@ -21,7 +21,7 @@ trait SchemaValidator extends IDefinition { } @throws[Exception] - abstract override def validates(vertex: Vertex, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { + abstract override def validateVertex(vertex: Vertex, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { if (setDefaultValue) { val result = schemaValidator.validate(vertex.getMetadata) if (setDefaultValue && operation.equalsIgnoreCase("create")) { @@ -29,6 +29,7 @@ trait SchemaValidator extends IDefinition { } } - super.validates(vertex, operation) + super.validateVertex(vertex, operation) } + } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala new file mode 100644 index 000000000..e3e4274ea --- /dev/null +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -0,0 +1,66 @@ +package org.sunbird.graph.vertex + +import java.util +import java.util.Optional +import java.util.concurrent.CompletionException +import org.apache.commons.collections4.{CollectionUtils, MapUtils} +import org.apache.commons.lang3.StringUtils +import org.sunbird.common.DateUtils +import org.sunbird.common.dto.{Request, Response} +import org.sunbird.common.exception.{ClientException, ErrorCodes, ResponseCode} +import org.sunbird.graph.OntologyEngineContext +import org.sunbird.graph.common.enums.SystemProperties +import org.sunbird.graph.dac.model.{Edges, Filter, MetadataCriterion, SearchConditions, SearchCriteria, Vertex} +import org.sunbird.graph.nodes.DataNode.saveExternalProperties +import org.sunbird.graph.schema.{DefinitionDTO, DefinitionFactory, DefinitionNode} +import org.sunbird.parseq.Task + +import scala.collection.convert.ImplicitConversions._ +import scala.collection.JavaConverters._ +import scala.concurrent.{ExecutionContext, Future} +object DataVertex { + + @throws[Exception] + def create(request: Request, dataModifier: (Vertex) => Vertex = defaultVertexDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + DefinitionNode.validates(request).map(vertex => { + val response = oec.janusGraphService.addVertex(request.graphId, dataModifier(vertex)) + response.map(vertex => DefinitionNode.postProcessor(request, vertex)).map(result => { + val futureList = Task.parallel[Response]( + saveExternalProperties(vertex.getIdentifier, vertex.getExternalData, request.getContext, request.getObjectType), + createEdges(request.graphId, vertex, request.getContext)) + futureList.map(list => result) + }).flatMap(f => f) recoverWith { case e: CompletionException => throw 
e.getCause } + }).flatMap(f => f) + } + + private def createEdges(graphId: String, vertex: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { + val edges: util.List[Edges] = vertex.getAddedEdges + if (CollectionUtils.isNotEmpty(edges)) { + oec.janusGraphService.createEdges(graphId, getEdgesMap(edges)) + } else { + Future(new Response) + } + } + + private def getEdgesMap(edges: util.List[Edges]): java.util.List[util.Map[String, AnyRef]] = { + val list = new util.ArrayList[util.Map[String, AnyRef]] + for (edge <- edges) { + if ((StringUtils.isNotBlank(edge.getStartVertexId) && StringUtils.isNotBlank(edge.getEndVertexId)) && StringUtils.isNotBlank(edge.getEdgeType)) { + val map = new util.HashMap[String, AnyRef] + map.put("startNodeId", edge.getStartVertexId) + map.put("endNodeId", edge.getEndVertexId) + map.put("relation", edge.getEdgeType) + if (MapUtils.isNotEmpty(edge.getMetadata)) map.put("relMetadata", edge.getMetadata) + else map.put("relMetadata", new util.HashMap[String, AnyRef]()) + list.add(map) + } + else throw new ClientException("ERR_INVALID_RELATION_OBJECT", "Invalid Relation Object Found.") + } + list + } + + private def defaultVertexDataModifier(vertex: Vertex) = { + vertex + } + +} diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala index 35eda167e..f3aa34258 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala @@ -1,7 +1,6 @@ package org.sunbird.actors import java.util - import javax.inject.Inject import org.apache.commons.lang3.StringUtils import org.sunbird.actor.core.BaseActor @@ -11,6 +10,7 @@ import org.sunbird.common.exception.ClientException import org.sunbird.graph.OntologyEngineContext 
import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.NodeUtil +import org.sunbird.graph.vertex.DataVertex import org.sunbird.utils.{Constants, RequestUtil} import scala.collection.JavaConverters @@ -35,7 +35,7 @@ class ObjectCategoryActor @Inject()(implicit oec: OntologyEngineContext) extends RequestUtil.restrictProperties(request) if (!request.getRequest.containsKey(Constants.NAME)) throw new ClientException("ERR_NAME_SET_AS_IDENTIFIER", "name will be set as identifier") request.getRequest.put(Constants.IDENTIFIER, Constants.CATEGORY_PREFIX + Slug.makeSlug(request.getRequest.get(Constants.NAME).asInstanceOf[String])) - DataNode.creates(request).map(node => { + DataVertex.create(request).map(node => { ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier) }) } From c908ece999db5bd8f4630384bed81dacd1488e17 Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Wed, 22 May 2024 17:35:52 +0530 Subject: [PATCH 07/20] Issue #KN-1041 fix: Unique key constraint violation error handling added --- .../service/operation/VertexOperations.scala | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index fcfd7214d..2db74ee41 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -5,7 +5,7 @@ import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.t import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.{GraphTraversal, GraphTraversalSource} import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.valueMap import org.janusgraph.core.JanusGraph -import org.sunbird.common.exception.ClientException +import 
org.sunbird.common.exception.{ClientException, ServerException} import org.sunbird.common.{DateUtils, JsonUtils} import org.sunbird.graph.common.Identifier import org.sunbird.graph.common.enums.{AuditProperties, GraphDACParams, SystemProperties} @@ -36,20 +36,32 @@ class VertexOperations { parameterMap.put(GraphDACParams.graphId.name, graphId) parameterMap.put("vertex", setPrimitiveData(vertex)) prepareMap(parameterMap) + try{ + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource - graphConnection.initialiseGraphClient() - val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + val newVertex = g.addV(vertex.getGraphId) + val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] - val newVertex = g.addV(vertex.getGraphId) - val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] + finalMap.foreach { case (key, value) => newVertex.property(key, value) } + val retrieveVertex = newVertex.elementMap().next() - finalMap.foreach { case (key, value) => newVertex.property(key, value) } - val retrieveVertex = newVertex.elementMap().next() - - vertex.setGraphId(graphId) - vertex.setIdentifier(retrieveVertex.get("IL_UNIQUE_ID")) - vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get("versionKey")) - vertex + vertex.setGraphId(graphId) + vertex.setIdentifier(retrieveVertex.get("IL_UNIQUE_ID")) + vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get("versionKey")) + vertex + } catch { + case e: Throwable => + e.getCause match { + case cause: org.apache.tinkerpop.gremlin.driver.exception.ResponseException => + throw new ClientException( + DACErrorCodeConstants.CONSTRAINT_VALIDATION_FAILED.name(), + DACErrorMessageConstants.CONSTRAINT_VALIDATION_FAILED + vertex.getIdentifier + ) + case 
cause => + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) + } + } } } From 4c55f1ec362cacdb1f811918abf1c83ffb4e61ea Mon Sep 17 00:00:00 2001 From: aimansharief Date: Thu, 23 May 2024 16:10:12 +0530 Subject: [PATCH 08/20] Issue #KN-1041 fix: Updated code to support the read vertex functionality --- .../org/sunbird/graph/JanusGraphService.scala | 8 +- .../org/sunbird/graph/dac/model/Edges.java | 36 ++--- .../org/sunbird/graph/dac/model/Vertex.java | 6 +- .../janus/dac/util/GremlinVertexUtil.scala | 74 ++++++++++ .../service/operation/SearchOperations.scala | 137 ++++++++++++++++++ 5 files changed, 233 insertions(+), 28 deletions(-) create mode 100644 ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala create mode 100644 ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index e5722ec43..4fd537433 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -7,7 +7,7 @@ import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph, Vertex} import org.sunbird.graph.external.ExternalPropsManager import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} import org.sunbird.graph.util.CSPMetaUtil -import org.sunbird.janus.service.operation.{EdgeOperations, VertexOperations} +import org.sunbird.janus.service.operation.{EdgeOperations, SearchOperations, VertexOperations} import java.lang import scala.concurrent.{ExecutionContext, Future} @@ -15,6 +15,8 @@ class JanusGraphService { 
private val VertexOperations = new VertexOperations() private val EdgeOperations = new EdgeOperations() + private val SearchOperations = new SearchOperations() + implicit val ec: ExecutionContext = ExecutionContext.global val isrRelativePathEnabled: lang.Boolean = Platform.getBoolean("cloudstorage.metadata.replace_absolute_path", false) @@ -32,4 +34,8 @@ class JanusGraphService { EdgeOperations.createEdges(graphId, edgeMap) } + def getNodeByUniqueId(graphId: String, vertexId: String, getTags: Boolean, request: Request): Future[Vertex] = { + SearchOperations.getNodeByUniqueId(graphId, vertexId, getTags, request).map(vertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(vertex) else vertex) + } + } diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java index ff18bacbe..b232a5c1d 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java @@ -18,7 +18,7 @@ public class Edges implements Serializable { private static final long serialVersionUID = -7207054262120122453L; - private String id; + private Object id; private String graphId; private String edgeType; private String startVertexId; @@ -49,10 +49,10 @@ public Edges(String graphId, Edge edge) { "Failed to create relation object. 
Relation from database is null."); this.graphId = graphId; - Vertex startVertex = edge.outVertex(); - Vertex endVertex = edge.inVertex(); - this.startVertexId = startVertex.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); - this.endVertexId = endVertex.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); + Vertex startVertex = edge.inVertex(); + Vertex endVertex = edge.outVertex(); + this.startVertexId = (String) startVertex.property(SystemProperties.IL_UNIQUE_ID.name()).value(); + this.endVertexId = (String) endVertex.property(SystemProperties.IL_UNIQUE_ID.name()).value(); this.startVertexName = getName(startVertex); this.endVertexName = getName(endVertex); this.startVertexType = getVertexType(startVertex); @@ -66,14 +66,15 @@ public Edges(String graphId, Edge edge) { edge.keys().forEach(key -> this.metadata.put(key, edge.value(key))); } - public Edges(String graphId, Edge edge, Map startNodeMap, Map endNodeMap) { + public Edges(String graphId, Edge edge, Map startNodeMap, Map endNodeMap) { if (null == edge) throw new ServerException(GraphDACErrorCodes.ERR_GRAPH_NULL_DB_REL.name(), "Failed to create relation object. 
Relation from database is null."); - this.id = edge.id().toString(); + this.id = edge.id(); this.graphId = graphId; - Vertex startNode = startNodeMap.get(edge.outVertex().id()); - Vertex endNode = endNodeMap.get(edge.inVertex().id()); + + Vertex startNode = (Vertex) startNodeMap.get(edge.inVertex().id()); + Vertex endNode = (Vertex) endNodeMap.get(edge.outVertex().id()); this.startVertexId = startNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); this.endVertexId = endNode.property(SystemProperties.IL_UNIQUE_ID.name()).value().toString(); this.startVertexName = getName(startNode); @@ -110,19 +111,6 @@ public Edges(String graphId, Edge edge, Map startNodeMap, Map metadata) { } - public long getId() { + public Object getId() { return id; } - public void setId(long id) { + public void setId(Object id) { this.id = id; } diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala new file mode 100644 index 000000000..e3bb42bd4 --- /dev/null +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala @@ -0,0 +1,74 @@ +package org.sunbird.janus.dac.util + +import org.apache.commons.lang3.StringUtils +import org.sunbird.graph.dac.model.{Vertex, Edges} +import org.sunbird.common.exception.ServerException +import org.sunbird.graph.common.enums.SystemProperties +import org.sunbird.graph.dac.enums.GraphDACErrorCodes +import java.util + +class GremlinVertexUtil { + + def getNode(graphId: String, gremlinVertex: org.apache.tinkerpop.gremlin.structure.Vertex, edgeMap: util.Map[Object, AnyRef], + startNodeMap: util.Map[Object, AnyRef], endNodeMap: util.Map[Object, AnyRef]): Vertex = { + + if (null == gremlinVertex) + throw new ServerException(GraphDACErrorCodes.ERR_GRAPH_NULL_DB_NODE.name(), + "Failed to create node object. 
Node from database is null.") + + val vertex: Vertex = new Vertex() + vertex.setGraphId(graphId) + vertex.setId(gremlinVertex.id()) + + val metadata = new util.HashMap[String, Object]() + gremlinVertex.keys().forEach { key => + val value = gremlinVertex.property(key).value() + if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_UNIQUE_ID.name())) + vertex.setIdentifier(value.asInstanceOf[String]) + else if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_SYS_NODE_TYPE.name())) + vertex.setVertexType(value.asInstanceOf[String]) + else if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_FUNC_OBJECT_TYPE.name())) + vertex.setObjectType(value.asInstanceOf[String]) + else { + if (null != value) { + if (value.isInstanceOf[util.List[_]]) { + val list = value.asInstanceOf[util.List[_]] + if (null != list && list.size() > 0) { + + val obj = list.get(0) + obj match { + case _: String => metadata.put(key, list.toArray(new Array[String](list.size()))) + case _: Number => metadata.put(key, list.toArray(new Array[Number](list.size()))) + case _: java.lang.Boolean => metadata.put(key, list.toArray(new Array[java.lang.Boolean](list.size()))) + case _ => metadata.put(key, list.toArray(new Array[AnyRef](list.size()))) + } + + } + } + } + else + metadata.put(key, value) + } + } + vertex.setMetadata(metadata) + + if (null != edgeMap && !edgeMap.isEmpty && null != startNodeMap && !startNodeMap.isEmpty && null != endNodeMap && !endNodeMap.isEmpty) { + val inEdges = new util.ArrayList[Edges]() + val outEdges = new util.ArrayList[Edges]() + + edgeMap.forEach { (id, rel) => + val edge = rel.asInstanceOf[org.apache.tinkerpop.gremlin.structure.Edge] + if (edge.inVertex().id() == gremlinVertex.id()) { + outEdges.add(new Edges(graphId, edge, startNodeMap, endNodeMap)) + } + if (edge.outVertex().id() == gremlinVertex.id()) { + inEdges.add(new Edges(graphId, edge, startNodeMap, endNodeMap)) + } + } + vertex.setInEdges(inEdges) + vertex.setOutEdges(outEdges) + } + + vertex + } 
+} diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala new file mode 100644 index 000000000..170e5156b --- /dev/null +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala @@ -0,0 +1,137 @@ +package org.sunbird.janus.service.operation + +import org.apache.commons.collections4.CollectionUtils +import org.apache.commons.lang3.StringUtils +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource +import org.sunbird.janus.dac.util.GremlinVertexUtil +import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__._ +import org.sunbird.graph.dac.model.Vertex +import org.apache.tinkerpop.gremlin.structure.Edge +import org.sunbird.common.dto.Request +import org.sunbird.common.exception.{ClientException, MiddlewareException, ResourceNotFoundException, ServerException} +import org.sunbird.graph.common.enums.GraphDACParams +import org.sunbird.graph.service.common.{CypherQueryConfigurationConstants, DACErrorCodeConstants, DACErrorMessageConstants} +import org.sunbird.janus.service.util.JanusConnectionUtil +import org.sunbird.telemetry.logger.TelemetryManager + +import java.util +import scala.concurrent.{Future, ExecutionContext} +import ExecutionContext.Implicits.global +class SearchOperations { + + val graphConnection = new JanusConnectionUtil + val gremlinVertexUtil = new GremlinVertexUtil + + def getNodeByUniqueId(graphId: String, vertexId: String, getTags: Boolean, request: Request): Future[Vertex] = { + Future { + TelemetryManager.log("Graph Id: " + graphId + "\nVertex Id: " + vertexId + "\nGet Tags:" + getTags) + + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Get Node By Unique Id' Operation Failed.]") + + if 
(StringUtils.isBlank(vertexId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | ['Get Node By Unique Id' Operation Failed.]") + + TelemetryManager.log("Driver Initialised. | [Graph Id: " + graphId + "]") + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + val parameterMap = new util.HashMap[String, AnyRef] + parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put(GraphDACParams.nodeId.name, vertexId) + parameterMap.put(GraphDACParams.getTags.name, getTags.asInstanceOf[java.lang.Boolean]) + parameterMap.put(GraphDACParams.request.name, request) + + val retrievedVertices = getVertexByUniqueId(parameterMap, g) + var newVertex: Vertex = null + if (CollectionUtils.isEmpty(retrievedVertices)) + throw new ResourceNotFoundException(DACErrorCodeConstants.NOT_FOUND.name, + DACErrorMessageConstants.NODE_NOT_FOUND + " | [Invalid Node Id.]: " + vertexId, vertexId) + + val vertexMap = new util.HashMap[Object, AnyRef] + val relationMap = new util.HashMap[Object, AnyRef] + val startNodeMap = new util.HashMap[Object, AnyRef] + val endNodeMap = new util.HashMap[Object, AnyRef] + + retrievedVertices.forEach { result => + if (null != result) + getRecordValues(result, vertexMap, relationMap, startNodeMap, endNodeMap) + } + + if (!vertexMap.isEmpty) { + val entry = vertexMap.entrySet().iterator().next() + newVertex = gremlinVertexUtil.getNode(graphId, entry.getValue.asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex], relationMap, startNodeMap, endNodeMap) + } + newVertex + } + + catch { + case ex: MiddlewareException => throw ex + case e: Throwable => + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name(), + DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) + } + } + } + + private def getVertexByUniqueId(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource): 
util.List[util.Map[String, AnyRef]] = { + try { + if (null != parameterMap) { + val graphId = parameterMap.getOrDefault(GraphDACParams.graphId.name, "").asInstanceOf[String] + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Get Node By Id' Query Generation Failed.]") + + val vertexId = parameterMap.get(GraphDACParams.nodeId.name).asInstanceOf[String] + if (StringUtils.isBlank(vertexId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | ['Get Node By Unique Id' Query Generation Failed.]") + + g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee").union(identity(), outE().as("r").inV().as("__endNode") + .select("ee", "r", "__endNode"), inE().as("r").outV().as("__startNode") + .select("ee", "r", "__startNode")).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] + + } + else new util.ArrayList[util.Map[String, AnyRef]] + } + catch { + case e :Exception => + throw new ServerException(DACErrorCodeConstants.SERVER_ERROR.name, "Error! Something went wrong while creating node object. 
", e.getCause); + } + } + + private def getRecordValues(result: util.Map[String, AnyRef], nodeMap :util.Map[Object, AnyRef], relationMap :util.Map[Object, AnyRef], startNodeMap :util.Map[Object, AnyRef], endNodeMap :util.Map[Object, AnyRef] ): Unit = { + if (null != nodeMap) { + val vertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_NODE_OBJECT) + if(null != vertexValue && vertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { + val gremlinVertex : org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] + nodeMap.put(gremlinVertex.id(), gremlinVertex) + } + } + if (null != relationMap) { + val edgeValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT) + if (null != edgeValue && edgeValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Edge]) { + val edge: org.apache.tinkerpop.gremlin.structure.Edge = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT).asInstanceOf[Edge] + nodeMap.put(edge.id(), edge) + } + } + if (null != startNodeMap) { + val startVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT) + if (null != startVertexValue && startVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { + val startVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] + nodeMap.put(startVertex.id(), startVertex) + } + } + if (null != endNodeMap) { + val endVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT) + if (null != endVertexValue && endVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { + val endVertex: org.apache.tinkerpop.gremlin.structure.Vertex = 
result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] + nodeMap.put(endVertex.id(), endVertex) + } + } + } + +} From a45c601420fde40434b9dd86489dd26ed6bcfa31 Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Thu, 23 May 2024 18:30:30 +0530 Subject: [PATCH 09/20] Read Vertex changes initial commit --- .../janus/dac/util/GremlinVertexUtil.scala | 4 +- .../service/operation/SearchOperations.scala | 38 +++- .../sunbird/graph/schema/DefinitionNode.scala | 8 + .../sunbird/graph/schema/IDefinition.scala | 2 + .../schema/validator/BaseDefinitionNode.scala | 34 ++-- .../schema/validator/VersioningNode.scala | 108 ++++++++++- .../org/sunbird/graph/utils/NodeUtil.scala | 167 +++++++++++++++++- .../org/sunbird/graph/vertex/DataVertex.scala | 142 +++++++++++++++ .../sunbird/actors/ObjectCategoryActor.scala | 4 +- 9 files changed, 481 insertions(+), 26 deletions(-) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala index e3bb42bd4..edf3d92f8 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala @@ -11,7 +11,7 @@ class GremlinVertexUtil { def getNode(graphId: String, gremlinVertex: org.apache.tinkerpop.gremlin.structure.Vertex, edgeMap: util.Map[Object, AnyRef], startNodeMap: util.Map[Object, AnyRef], endNodeMap: util.Map[Object, AnyRef]): Vertex = { - + println("gremlinVertex ", gremlinVertex) if (null == gremlinVertex) throw new ServerException(GraphDACErrorCodes.ERR_GRAPH_NULL_DB_NODE.name(), "Failed to create node object. 
Node from database is null.") @@ -22,7 +22,9 @@ class GremlinVertexUtil { val metadata = new util.HashMap[String, Object]() gremlinVertex.keys().forEach { key => + println("key ", key) val value = gremlinVertex.property(key).value() + println("value ", value) if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_UNIQUE_ID.name())) vertex.setIdentifier(value.asInstanceOf[String]) else if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_SYS_NODE_TYPE.name())) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala index 170e5156b..61c64c938 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala @@ -15,8 +15,9 @@ import org.sunbird.janus.service.util.JanusConnectionUtil import org.sunbird.telemetry.logger.TelemetryManager import java.util -import scala.concurrent.{Future, ExecutionContext} +import scala.concurrent.{ExecutionContext, Future} import ExecutionContext.Implicits.global +import scala.collection.JavaConverters.asScalaBufferConverter class SearchOperations { val graphConnection = new JanusConnectionUtil @@ -24,6 +25,7 @@ class SearchOperations { def getNodeByUniqueId(graphId: String, vertexId: String, getTags: Boolean, request: Request): Future[Vertex] = { Future { + println("IN getNodeByUniqueId operation") TelemetryManager.log("Graph Id: " + graphId + "\nVertex Id: " + vertexId + "\nGet Tags:" + getTags) if (StringUtils.isBlank(graphId)) @@ -38,7 +40,7 @@ class SearchOperations { try { graphConnection.initialiseGraphClient() val g: GraphTraversalSource = graphConnection.getGraphTraversalSource - + println("IN Try operation") val parameterMap = new util.HashMap[String, AnyRef] parameterMap.put(GraphDACParams.graphId.name, graphId) 
parameterMap.put(GraphDACParams.nodeId.name, vertexId) @@ -51,6 +53,8 @@ class SearchOperations { throw new ResourceNotFoundException(DACErrorCodeConstants.NOT_FOUND.name, DACErrorMessageConstants.NODE_NOT_FOUND + " | [Invalid Node Id.]: " + vertexId, vertexId) + println("retrievedVertices ", retrievedVertices) + val vertexMap = new util.HashMap[Object, AnyRef] val relationMap = new util.HashMap[Object, AnyRef] val startNodeMap = new util.HashMap[Object, AnyRef] @@ -58,19 +62,24 @@ class SearchOperations { retrievedVertices.forEach { result => if (null != result) + println("result === ", result) getRecordValues(result, vertexMap, relationMap, startNodeMap, endNodeMap) } + println("vertexMap === ", vertexMap) if (!vertexMap.isEmpty) { val entry = vertexMap.entrySet().iterator().next() newVertex = gremlinVertexUtil.getNode(graphId, entry.getValue.asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex], relationMap, startNodeMap, endNodeMap) } + println("newVertex === ", newVertex) newVertex } catch { case ex: MiddlewareException => throw ex case e: Throwable => + e.printStackTrace() + println("IN catch operation", e) throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name(), DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) } @@ -90,9 +99,21 @@ class SearchOperations { throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, DACErrorMessageConstants.INVALID_IDENTIFIER + " | ['Get Node By Unique Id' Query Generation Failed.]") - g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee").union(identity(), outE().as("r").inV().as("__endNode") - .select("ee", "r", "__endNode"), inE().as("r").outV().as("__startNode") - .select("ee", "r", "__startNode")).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] +// g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee").union(identity(), outE().as("r").inV().as("__endNode") +// .select("ee", "r", "__endNode"), inE().as("r").outV().as("__startNode") +// 
.select("ee", "r", "__startNode")).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] + + g.V().hasLabel("domain").has("IL_UNIQUE_ID", "obj-cat:explanation-content1").as("ee") + .project("ee", "r", "__startNode", "__endNode") + .by(identity()) + .by(bothE().elementMap().fold()) + .by(inE().outV().elementMap().fold()) + .by(outE().inV().elementMap().fold()) + .toList() + +// g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee").union(project("id", "label").by(id()).by(label()), outE().as("r").inV().as("__endNode") +// .select("ee", "r", "__endNode") .by(project("id", "label").by(id()).by(label())) .by(project("id", "label", "inVLabel", "outVLabel", "inV", "outV") .by(id()).by(label()).by(inV().label()).by(outV().label()).by(inV().id()).by(outV().id())) .by(project("id", "label").by(id()).by(label())), inE().as("r").outV().as("__startNode") +// .select("ee", "r", "__startNode") .by(project("id", "label").by(id()).by(label())) .by(project("id", "label", "inVLabel", "outVLabel", "inV", "outV") .by(id()).by(label()).by(inV().label()).by(outV().label()).by(inV().id()).by(outV().id())) .by(project("id", "label").by(id()).by(label())) ).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] } else new util.ArrayList[util.Map[String, AnyRef]] @@ -106,13 +127,18 @@ class SearchOperations { private def getRecordValues(result: util.Map[String, AnyRef], nodeMap :util.Map[Object, AnyRef], relationMap :util.Map[Object, AnyRef], startNodeMap :util.Map[Object, AnyRef], endNodeMap :util.Map[Object, AnyRef] ): Unit = { if (null != nodeMap) { val vertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_NODE_OBJECT) + println("vertexValue ", vertexValue) if(null != vertexValue && vertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val gremlinVertex : org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] + 
println("gremlinVertex ", gremlinVertex) nodeMap.put(gremlinVertex.id(), gremlinVertex) + } else { + println("In ELSE ", vertexValue) } } if (null != relationMap) { val edgeValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT) + println("edgeValue ", edgeValue) if (null != edgeValue && edgeValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Edge]) { val edge: org.apache.tinkerpop.gremlin.structure.Edge = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT).asInstanceOf[Edge] nodeMap.put(edge.id(), edge) @@ -120,6 +146,7 @@ class SearchOperations { } if (null != startNodeMap) { val startVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT) + println("startVertexValue ", startVertexValue) if (null != startVertexValue && startVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val startVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] nodeMap.put(startVertex.id(), startVertex) @@ -127,6 +154,7 @@ class SearchOperations { } if (null != endNodeMap) { val endVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT) + println("endVertexValue ", endVertexValue) if (null != endVertexValue && endVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val endVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] nodeMap.put(endVertex.id(), endVertex) diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala index 8cb35cf4a..d2e0c862f 100644 --- 
a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala @@ -90,6 +90,14 @@ object DefinitionNode { definition.getNode(request.get("identifier").asInstanceOf[String], "read", if (request.getRequest.containsKey("mode")) request.get("mode").asInstanceOf[String] else "read", None, disableCache) } + def getVertex(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val definition = DefinitionFactory.getDefinition(request.getContext.get("graph_id").asInstanceOf[String] + , schemaName, request.getContext.get("version").asInstanceOf[String]) + val disableCache: Option[Boolean] = if (request.getRequest.containsKey("disableCache")) request.get("disableCache").asInstanceOf[Option[Boolean]] else None + definition.getVertex(request.get("identifier").asInstanceOf[String], "read", if (request.getRequest.containsKey("mode")) request.get("mode").asInstanceOf[String] else "read", None, disableCache) + } + @throws[Exception] def validate(identifier: String, request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala index ed7340309..df36fa398 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/IDefinition.scala @@ -20,6 +20,8 @@ abstract class IDefinition(graphId: String, schemaName: String, version: String @throws[Exception] def getNode(identifier: String, operation: String = 
"read", mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] + def getVertex(identifier: String, operation: String = "read", mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] + def getSchemaName(): String ={ schemaName } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala index cf7b20fcd..8f1561025 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/BaseDefinitionNode.scala @@ -52,20 +52,20 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = node } - override def getVertex(input: java.util.Map[String, Object]): Vertex = { - val result = schemaValidator.getStructuredData(input) - val vertex = new Vertex(graphId, result.getMetadata) - val objectType = schemaValidator.getConfig.getString("objectType") - vertex.setVertexType(SystemNodeTypes.DATA_NODE.name) - vertex.setObjectType(objectType) - vertex.setIdentifier(input.getOrDefault("identifier", Identifier.getIdentifier(graphId, Identifier.getUniqueIdFromTimestamp)).asInstanceOf[String]) - input.remove("identifier") - setEdges(vertex, result.getRelations) - if (CollectionUtils.isNotEmpty(vertex.getInEdges)) vertex.setAddedEdges(vertex.getInEdges) - if (CollectionUtils.isNotEmpty(vertex.getOutEdges)) vertex.setAddedEdges(vertex.getOutEdges) - vertex.setExternalData(result.getExternalData) - vertex - } + override def getVertex(input: java.util.Map[String, Object]): Vertex = { + val result = schemaValidator.getStructuredData(input) + val 
vertex = new Vertex(graphId, result.getMetadata) + val objectType = schemaValidator.getConfig.getString("objectType") + vertex.setVertexType(SystemNodeTypes.DATA_NODE.name) + vertex.setObjectType(objectType) + vertex.setIdentifier(input.getOrDefault("identifier", Identifier.getIdentifier(graphId, Identifier.getUniqueIdFromTimestamp)).asInstanceOf[String]) + input.remove("identifier") + setEdges(vertex, result.getRelations) + if (CollectionUtils.isNotEmpty(vertex.getInEdges)) vertex.setAddedEdges(vertex.getInEdges) + if (CollectionUtils.isNotEmpty(vertex.getOutEdges)) vertex.setAddedEdges(vertex.getOutEdges) + vertex.setExternalData(result.getExternalData) + vertex + } @throws[Exception] override def validate(node: Node, operation: String, setDefaultValue: Boolean)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { @@ -85,6 +85,12 @@ class BaseDefinitionNode(graphId: String, schemaName: String, version: String = node } + override def getVertex(identifier: String, operation: String, mode: String, versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + val request: Request = new Request() + val vertex: Future[Vertex] = oec.janusGraphService.getNodeByUniqueId(graphId, identifier, false, request) + vertex + } + protected def setRelations(node: Node, relations: java.util.Map[String, AnyRef]): Unit = { if (MapUtils.isNotEmpty(relations)) { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala index 0e81b68f5..c378517f2 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/validator/VersioningNode.scala @@ -2,14 +2,13 @@ package 
org.sunbird.graph.schema.validator import java.util import java.util.concurrent.CompletionException - import org.sunbird.cache.impl.RedisCache import org.sunbird.common.{DateUtils, JsonUtils, Platform} import org.sunbird.common.dto.{Request, ResponseHandler} import org.sunbird.common.exception.ResourceNotFoundException import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.enums.AuditProperties -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.exception.GraphErrorCodes import org.sunbird.graph.external.ExternalPropsManager import org.sunbird.graph.schema.{DefinitionFactory, IDefinition} @@ -36,6 +35,14 @@ trait VersioningNode extends IDefinition { } } + abstract override def getVertex(identifier: String, operation: String, mode: String = "read", versioning: Option[String] = None, disableCache: Option[Boolean] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + operation match { + case "update" => getVertexToUpdate(identifier, versioning); + case "read" => getVertexToRead(identifier, mode, disableCache) + case _ => getVertexToRead(identifier, mode, disableCache) + } + } + private def getNodeToUpdate(identifier: String, versioning: Option[String] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { val nodeFuture: Future[Node] = super.getNode(identifier , "update", null) nodeFuture.map(node => { @@ -49,6 +56,23 @@ trait VersioningNode extends IDefinition { }).flatMap(f => f) } + private def getVertexToUpdate(identifier: String, versioning: Option[String] = None)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + val vertexFuture: Future[Vertex] = super.getVertex(identifier, "update", null) + vertexFuture.map(vertex => { + val versioningEnable = versioning.getOrElse({ + if (schemaValidator.getConfig.hasPath("version")) schemaValidator.getConfig.getString("version") else "disable" + 
}) + if (null == vertex) + throw new ResourceNotFoundException(GraphErrorCodes.ERR_INVALID_NODE.toString, "Node Not Found With Identifier : " + identifier) + else if ("enable".equalsIgnoreCase(versioningEnable)) + getEditableVertex(identifier, vertex) + else + Future { + vertex + } + }).flatMap(f => f) + } + private def getNodeToRead(identifier: String, mode: String, disableCache: Option[Boolean])(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node] = { if ("edit".equalsIgnoreCase(mode)) { val imageNode = super.getNode(identifier + IMAGE_SUFFIX, "read", mode) @@ -71,11 +95,39 @@ trait VersioningNode extends IDefinition { } } } + private def getVertexToRead(identifier: String, mode: String, disableCache: Option[Boolean])(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + println("IN getVertexToRead") + if ("edit".equalsIgnoreCase(mode)) { + val imageVertex = super.getVertex(identifier + IMAGE_SUFFIX, "read", mode) + imageVertex recoverWith { + case e: CompletionException => { + if (e.getCause.isInstanceOf[ResourceNotFoundException]) + super.getVertex(identifier, "read", mode) + else + throw e.getCause + } + } + } else { + if (disableCache.nonEmpty) { + if (disableCache.get) super.getVertex(identifier, "read", mode) + else getVertexFromCache(identifier) + } else { + val cacheKey = getSchemaName().toLowerCase() + ".cache.enable" + if (Platform.getBoolean(cacheKey, false)) getVertexFromCache(identifier) + else super.getVertex(identifier, "read", mode) + } + } + } private def getNodeFromCache(identifier: String)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Node]= { val ttl: Integer = if (Platform.config.hasPath(getSchemaName().toLowerCase() + ".cache.ttl")) Platform.config.getInt(getSchemaName().toLowerCase() + ".cache.ttl") else 86400 getCachedNode(identifier, ttl) } + private def getVertexFromCache(identifier: String)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + 
val ttl: Integer = if (Platform.config.hasPath(getSchemaName().toLowerCase() + ".cache.ttl")) Platform.config.getInt(getSchemaName().toLowerCase() + ".cache.ttl") else 86400 + getCachedVertex(identifier, ttl) + } + private def getEditableNode(identifier: String, node: Node)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Node] = { val status = node.getMetadata.get("status").asInstanceOf[String] if(statusList.contains(status)) { @@ -110,6 +162,42 @@ trait VersioningNode extends IDefinition { Future{node} } + private def getEditableVertex(identifier: String, vertex: Vertex)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { + val status = vertex.getMetadata.get("status").asInstanceOf[String] + if (statusList.contains(status)) { + val imageId = vertex.getIdentifier + IMAGE_SUFFIX + try { + val imageVertex = oec.janusGraphService.getNodeByUniqueId(vertex.getGraphId, imageId, false, new Request()) + imageVertex recoverWith { + case e: CompletionException => { + TelemetryManager.error("Exception occurred while fetching image node, may not be found", e.getCause) + if (e.getCause.isInstanceOf[ResourceNotFoundException]) { + vertex.setIdentifier(imageId) + vertex.setObjectType(vertex.getObjectType + IMAGE_OBJECT_SUFFIX) + vertex.getMetadata.put("status", "Draft") + vertex.getMetadata.put("prevStatus", status) + vertex.getMetadata.put(AuditProperties.lastStatusChangedOn.name, DateUtils.formatCurrentDate()) + oec.janusGraphService.addVertex(vertex.getGraphId, vertex).map(imgVertex => { + imgVertex.getMetadata.put("isImageNodeCreated", "yes"); + copyExternalProps(identifier, vertex.getGraphId, imgVertex.getObjectType.toLowerCase().replace("image", "")).map(response => { + if (!ResponseHandler.checkError(response)) { + if (null != response.getResult && !response.getResult.isEmpty) + imgVertex.setExternalData(response.getResult) + } + imgVertex + }) + }).flatMap(f => f) + } else + throw e.getCause + } + } + } + } else + Future { + 
vertex + } + } + private def copyExternalProps(identifier: String, graphId: String, schemaName: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext) = { val request = new Request() request.setContext(new util.HashMap[String, AnyRef](){{ @@ -144,6 +232,22 @@ trait VersioningNode extends IDefinition { }).flatMap(f => f) } + def getCachedVertex(identifier: String, ttl: Integer)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + val nodeStringFuture: Future[String] = RedisCache.getAsync(identifier, nodeCacheAsyncHandler, ttl) + nodeStringFuture.map(nodeString => { + if (null != nodeString && !nodeString.asInstanceOf[String].isEmpty) { + val nodeMap: util.Map[String, AnyRef] = JsonUtils.deserialize(nodeString.asInstanceOf[String], classOf[java.util.Map[String, AnyRef]]) + val vertex: Vertex = NodeUtil.deserializeVertex(nodeMap, getSchemaName(), schemaValidator.getConfig + .getAnyRef("relations").asInstanceOf[java.util.Map[String, AnyRef]]) + Future { + vertex + } + } else { + super.getVertex(identifier, "read", null) + } + }).flatMap(f => f) + } + private def nodeCacheAsyncHandler(objKey: String)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[String] = { super.getNode(objKey, "read", null).map(node => { if (List("Live", "Unlisted").contains(node.getMetadata.get("status").asInstanceOf[String])) { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala index dc0d53869..866bb3c86 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/utils/NodeUtil.scala @@ -1,7 +1,6 @@ package org.sunbird.graph.utils import java.util - import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.module.scala.DefaultScalaModule import 
org.apache.commons.collections4.{CollectionUtils, MapUtils} @@ -9,7 +8,7 @@ import org.apache.commons.lang3.StringUtils import org.sunbird.common.{JsonUtils, Platform} import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.enums.SystemProperties -import org.sunbird.graph.dac.model.{Node, Relation} +import org.sunbird.graph.dac.model.{Edges, Node, Relation, Vertex} import org.sunbird.graph.schema.{DefinitionNode, ObjectCategoryDefinition, ObjectCategoryDefinitionMap} import scala.collection.JavaConverters @@ -40,6 +39,26 @@ object NodeUtil { finalMetadata } + def serializeVertex(vertex: Vertex, fields: util.List[String], schemaName: String, schemaVersion: String, withoutRelations: Boolean = false)(implicit oec: OntologyEngineContext, ec: ExecutionContext): util.Map[String, AnyRef] = { + val metadataMap = vertex.getMetadata + val objectCategoryDefinition: ObjectCategoryDefinition = DefinitionNode.getObjectCategoryDefinition(vertex.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String], vertex.getObjectType.toLowerCase().replace("image", ""), vertex.getMetadata.getOrDefault("channel", "all").asInstanceOf[String]) + val jsonProps = DefinitionNode.fetchJsonProps(vertex.getGraphId, schemaVersion, vertex.getObjectType.toLowerCase().replace("image", ""), objectCategoryDefinition) + val updatedMetadataMap: util.Map[String, AnyRef] = metadataMap.entrySet().asScala.filter(entry => null != entry.getValue).map((entry: util.Map.Entry[String, AnyRef]) => handleKeyNames(entry, fields) -> convertJsonProperties(entry, jsonProps)).toMap.asJava + val definitionMap = DefinitionNode.getRelationDefinitionMap(vertex.getGraphId, schemaVersion, vertex.getObjectType.toLowerCase().replace("image", ""), objectCategoryDefinition).asJava + val finalMetadata = new util.HashMap[String, AnyRef]() + finalMetadata.put("objectType", vertex.getObjectType) + finalMetadata.putAll(updatedMetadataMap) + if (!withoutRelations) { + val relMap: util.Map[String, 
util.List[util.Map[String, AnyRef]]] = getRelationMap(vertex, updatedMetadataMap, definitionMap) + finalMetadata.putAll(relMap) + } + if (CollectionUtils.isNotEmpty(fields)) + finalMetadata.keySet.retainAll(fields) + finalMetadata.put("identifier", vertex.getIdentifier) + finalMetadata.put("languageCode", getLanguageCodes(vertex)) + finalMetadata + } + def setRelation(node: Node, nodeMap: util.Map[String, AnyRef], relationMap: util.Map[String, AnyRef]) = { val inRelations: util.List[Relation] = new util.ArrayList[Relation]() @@ -93,6 +112,66 @@ object NodeUtil { node.setOutRelations(outRelations) } + def setEdges(vertex: Vertex, nodeMap: util.Map[String, AnyRef], relationMap: util.Map[String, AnyRef]) = { + val inRelations: util.List[Edges] = new util.ArrayList[Edges]() + val outRelations: util.List[Edges] = new util.ArrayList[Edges]() + relationMap.asScala.foreach(entry => { + if (nodeMap.containsKey(entry._1) && null != nodeMap.get(entry._1) && !nodeMap.get(entry._1).asInstanceOf[util.List[util.Map[String, AnyRef]]].isEmpty) { + nodeMap.get(entry._1).asInstanceOf[util.List[util.Map[String, AnyRef]]].asScala.map(relMap => { + if ("in".equalsIgnoreCase(entry._2.asInstanceOf[util.Map[String, AnyRef]].get("direction").asInstanceOf[String])) { + val rel: Edges = new Edges(relMap.get("identifier").asInstanceOf[String], entry._2.asInstanceOf[util.Map[String, AnyRef]].get("type").asInstanceOf[String], vertex.getIdentifier) + rel.setStartVertexObjectType(relMap.get("objectType").asInstanceOf[String]) + rel.setEndVertexObjectType(vertex.getObjectType) + rel.setStartVertexName(relMap.get("name").asInstanceOf[String]) + rel.setStartVertexMetadata(new util.HashMap[String, AnyRef]() { + { + put("description", relMap.get("description")) + put("status", relMap.get("status")) + } + }) + if (null != relMap.get("index") && 0 < relMap.get("index").asInstanceOf[Integer]) { + rel.setMetadata(new util.HashMap[String, AnyRef]() { + { + put(SystemProperties.IL_SEQUENCE_INDEX.name(), 
relMap.get("index")) + } + }) + } + inRelations.add(rel) + } else { + val rel: Edges = new Edges(vertex.getIdentifier, entry._2.asInstanceOf[util.Map[String, AnyRef]].get("type").asInstanceOf[String], relMap.get("identifier").asInstanceOf[String]) + rel.setStartVertexObjectType(vertex.getObjectType) + rel.setEndVertexObjectType(relMap.get("objectType").asInstanceOf[String]) + rel.setStartVertexName(relMap.get("name").asInstanceOf[String]) + rel.setStartVertexMetadata(new util.HashMap[String, AnyRef]() { + { + put("description", relMap.get("description")) + put("status", relMap.get("status")) + } + }) + val index: Integer = { + if (null != relMap.get("index")) { + if (relMap.get("index").isInstanceOf[String]) { + Integer.parseInt(relMap.get("index").asInstanceOf[String]) + } else relMap.get("index").asInstanceOf[Number].intValue() + } else + null + } + if (null != index && 0 < index) { + rel.setMetadata(new util.HashMap[String, AnyRef]() { + { + put(SystemProperties.IL_SEQUENCE_INDEX.name(), relMap.get("index")) + } + }) + } + outRelations.add(rel) + } + }) + } + }) + vertex.setInEdges(inRelations) + vertex.setOutEdges(outRelations) + } + def deserialize(nodeMap: util.Map[String, AnyRef], schemaName: String, relationMap:util.Map[String, AnyRef]): Node = { val node: Node = new Node() if(MapUtils.isNotEmpty(nodeMap)) { @@ -109,6 +188,22 @@ object NodeUtil { node } + def deserializeVertex(nodeMap: util.Map[String, AnyRef], schemaName: String, relationMap: util.Map[String, AnyRef]): Vertex = { + val vertex: Vertex = new Vertex() + if (MapUtils.isNotEmpty(nodeMap)) { + vertex.setIdentifier(nodeMap.get("identifier").asInstanceOf[String]) + vertex.setObjectType(nodeMap.get("objectType").asInstanceOf[String]) + val filteredMetadata: util.Map[String, AnyRef] = new util.HashMap[String, AnyRef](JavaConverters.mapAsJavaMapConverter(nodeMap.asScala.filterNot(entry => relationMap.containsKey(entry._1)).toMap).asJava) + vertex.setMetadata(filteredMetadata) + setEdges(vertex, 
nodeMap, relationMap) + } + vertex.getMetadata.asScala.map(entry => { + if (entry._2.isInstanceOf[::[AnyRef]]) (entry._1 -> entry._2.asInstanceOf[::[AnyRef]].toArray.toList) + else entry + }) + vertex + } + def handleKeyNames(entry: util.Map.Entry[String, AnyRef], fields: util.List[String]) = { if(CollectionUtils.isEmpty(fields)) { @@ -142,6 +237,39 @@ object NodeUtil { } relMap } + + def getRelationMap(vertex: Vertex, updatedMetadataMap: util.Map[String, AnyRef], relationMap: util.Map[String, AnyRef]): util.Map[String, util.List[util.Map[String, AnyRef]]] = { + val inRelations: util.List[Edges] = { + if (CollectionUtils.isEmpty(vertex.getInEdges)) new util.ArrayList[Edges] else vertex.getInEdges + } + val outRelations: util.List[Edges] = { + if (CollectionUtils.isEmpty(vertex.getOutEdges)) new util.ArrayList[Edges] else vertex.getOutEdges + } + val relMap = new util.HashMap[String, util.List[util.Map[String, AnyRef]]] + for (rel <- inRelations.asScala) { + val relKey: String = rel.getEdgeType + "_in_" + rel.getStartVertexObjectType + if (relMap.containsKey(relationMap.get(relKey))) relMap.get(relationMap.get(relKey)).add(populateRelationMaps(rel, "in")) + else { + if (null != relationMap.get(relKey)) { + relMap.put(relationMap.get(relKey).asInstanceOf[String], new util.ArrayList[util.Map[String, AnyRef]]() { + add(populateRelationMaps(rel, "in")) + }) + } + } + } + for (rel <- outRelations.asScala) { + val relKey: String = rel.getEdgeType + "_out_" + rel.getEndVertexObjectType + if (relMap.containsKey(relationMap.get(relKey))) relMap.get(relationMap.get(relKey)).add(populateRelationMaps(rel, "out")) + else { + if (null != relationMap.get(relKey)) { + relMap.put(relationMap.get(relKey).asInstanceOf[String], new util.ArrayList[util.Map[String, AnyRef]]() { + add(populateRelationMaps(rel, "out")) + }) + } + } + } + relMap + } def convertJsonProperties(entry: util.Map.Entry[String, AnyRef], jsonProps: scala.List[String]) = { if(jsonProps.contains(entry.getKey)) { @@ 
-176,6 +304,26 @@ object NodeUtil { } } + def populateRelationMaps(rel: Edges, direction: String): util.Map[String, AnyRef] = { + if ("out".equalsIgnoreCase(direction)) { + val objectType = rel.getEndVertexObjectType.replace("Image", "") + val relData = Map("identifier" -> rel.getEndVertexId.replace(".img", ""), + "name" -> rel.getEndVertexName, + "objectType" -> objectType, + "relation" -> rel.getEdgeType) ++ relationObjectAttributes(objectType).map(key => (key -> rel.getEndVertexMetadata.get(key))).toMap + val indexMap = if (rel.getEdgeType.equals("hasSequenceMember")) Map("index" -> rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).asInstanceOf[Number]) else Map() + val completeRelData = relData ++ indexMap + mapAsJavaMap(completeRelData) + } else { + val objectType = rel.getStartVertexObjectType.replace("Image", "") + val relData = Map("identifier" -> rel.getStartVertexId.replace(".img", ""), + "name" -> rel.getStartVertexName, + "objectType" -> objectType, + "relation" -> rel.getEdgeType) ++ relationObjectAttributes(objectType).map(key => (key -> rel.getStartVertexMetadata.get(key))).toMap + mapAsJavaMap(relData) + } + } + def getLanguageCodes(node: Node): util.List[String] = { val value = node.getMetadata.get("language") val languages:util.List[String] = value match { @@ -191,6 +339,21 @@ object NodeUtil { } } + def getLanguageCodes(vertex: Vertex): util.List[String] = { + val value = vertex.getMetadata.get("language") + val languages: util.List[String] = value match { + case value: String => List(value).asJava + case value: util.List[String] => value + case value: Array[String] => value.filter((lng: String) => StringUtils.isNotBlank(lng)).toList.asJava + case _ => new util.ArrayList[String]() + } + if (CollectionUtils.isNotEmpty(languages)) { + JavaConverters.bufferAsJavaListConverter(languages.asScala.map(lang => if (Platform.config.hasPath("languageCode." + lang.toLowerCase)) Platform.config.getString("languageCode." 
+ lang.toLowerCase) else "")).asJava + } else { + languages + } + } + def isRetired(node: Node): Boolean = StringUtils.equalsIgnoreCase(node.getMetadata.get("status").asInstanceOf[String], "Retired") } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index e3e4274ea..b62a6afee 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -20,6 +20,8 @@ import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} object DataVertex { + private val SYSTEM_UPDATE_ALLOWED_CONTENT_STATUS = List("Live", "Unlisted") + @throws[Exception] def create(request: Request, dataModifier: (Vertex) => Vertex = defaultVertexDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { DefinitionNode.validates(request).map(vertex => { @@ -33,6 +35,28 @@ object DataVertex { }).flatMap(f => f) } + @throws[Exception] + def read(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + DefinitionNode.getVertex(request).map(vertex => { + val schema = vertex.getObjectType.toLowerCase.replace("image", "") + val objectType: String = request.getContext.get("objectType").asInstanceOf[String] + request.getContext.put("schemaName", schema) + val fields: List[String] = Optional.ofNullable(request.get("fields").asInstanceOf[util.List[String]]).orElse(new util.ArrayList[String]()).toList + val version: String = if (null != vertex && null != vertex.getMetadata) { + val schemaVersion: String = vertex.getMetadata.getOrDefault("schemaVersion", "0.0").asInstanceOf[String] + val scVer = if (StringUtils.isNotBlank(schemaVersion) && schemaVersion.toDouble != 0.0) schemaVersion else 
request.getContext.get("version").asInstanceOf[String] + scVer + } else request.getContext.get("version").asInstanceOf[String] + val extPropNameList = DefinitionNode.getExternalProps(request.getContext.get("graph_id").asInstanceOf[String], version, schema) + if (CollectionUtils.isNotEmpty(extPropNameList) && null != fields && fields.exists(field => extPropNameList.contains(field))) + populateExternalProperties(fields, vertex, request, extPropNameList) + else + Future(vertex) + }).flatMap(f => f) recoverWith { + case e: CompletionException => throw e.getCause + } + } + private def createEdges(graphId: String, vertex: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { val edges: util.List[Edges] = vertex.getAddedEdges if (CollectionUtils.isNotEmpty(edges)) { @@ -63,4 +87,122 @@ object DataVertex { vertex } + private def defaultDataModifier(vertex: Vertex) = { + vertex + } + +// def systemUpdate(request: Request, vertexList: util.List[Vertex], hierarchyKey: String, hierarchyFunc: Option[Request => Future[Response]] = None)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { +// val data: util.Map[String, AnyRef] = request.getRequest +// +// // validate nodes +// validateVertex(vertexList, request) +// +// // get definition for the object and filter relations +// val definition = getDefinition(request) +// val metadata = filterRelations(definition, data) +// +// // get status +// val status = getStatus(request, vertexList) +// // Generate request for new metadata +// val newRequest = new Request(request) +// newRequest.putAll(metadata) +// newRequest.getContext.put("versioning", "disabled") +// // Enrich Hierarchy and Update the nodes +// vertexList.map(vertex => { +// enrichHierarchyAndUpdate(newRequest, vertex, status, hierarchyKey, hierarchyFunc) +// }).head +// +// } +// +// @throws[Exception] +// private def enrichHierarchyAndUpdate(request: Request, vertex: Vertex, 
status: String, hierarchyKey: String, hierarchyFunc: Option[Request => Future[Response]] = None)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { +// val metadata: util.Map[String, AnyRef] = request.getRequest +// val identifier = vertex.getIdentifier +// // Image node cannot be made Live or Unlisted using system call +// if (identifier.endsWith(".img") && +// SYSTEM_UPDATE_ALLOWED_CONTENT_STATUS.contains(status)) metadata.remove("status") +// if (metadata.isEmpty) throw new ClientException(ErrorCodes.ERR_BAD_REQUEST.name(), s"Invalid Request. Cannot update status of Image Node to $status.") +// +// // Update previous status and status update Timestamp +// if (metadata.containsKey("status")) { +// metadata.put("prevStatus", vertex.getMetadata.get("status")) +// metadata.put("lastStatusChangedOn", DateUtils.formatCurrentDate) +// } +// // Generate new request object for Each request +// val newRequest = new Request(request) +// newRequest.putAll(metadata) +// newRequest.getContext.put("identifier", identifier) +// // Enrich Hierarchy and Update with the new request +// enrichHierarchy(newRequest, metadata, status, hierarchyKey: String, hierarchyFunc) +// .flatMap(req => update(req)) recoverWith { case e: CompletionException => throw e.getCause } +// } + + private def enrichHierarchy(request: Request, metadata: util.Map[String, AnyRef], status: String, hierarchyKey: String, hierarchyFunc: Option[Request => Future[Response]] = None)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Request] = { + val identifier = request.getContext.get("identifier").asInstanceOf[String] + // Check if hierarchy could be enriched + if (!identifier.endsWith(".img") && SYSTEM_UPDATE_ALLOWED_CONTENT_STATUS.contains(status)) { + hierarchyFunc match { + case Some(hierarchyFunc) => { + // Get current Hierarchy + val hierarchyRequest = new Request(request) + hierarchyRequest.put("rootId", identifier) + hierarchyFunc(hierarchyRequest).map(response 
=> { + // Add metadata to the hierarchy + if (response.get(hierarchyKey) != null) { + val hierarchy = response.get(hierarchyKey).asInstanceOf[util.Map[String, AnyRef]] + val hierarchyMetadata = new util.HashMap[String, AnyRef]() + hierarchyMetadata.putAll(hierarchy) + hierarchyMetadata.putAll(metadata) + // add hierarchy to the request object + request.put("hierarchy", hierarchyMetadata) + request + } else request + }) + } + case _ => Future(request) + } + } else Future(request) + } + + def validateVertex(vertexs: java.util.List[Vertex], request: Request): Unit = { + if (vertexs.isEmpty) + throw new ClientException(ResponseCode.RESOURCE_NOT_FOUND.name(), s"Error! Node(s) doesn't Exists with identifier : ${request.getContext.get("identifier")}.") + + val objectType = request.getContext.get("objectType").asInstanceOf[String] + vertexs.foreach(vertex => { + if (vertex.getMetadata == null && !objectType.equalsIgnoreCase(vertex.getObjectType) && vertex.getMetadata.get("status").asInstanceOf[String].equalsIgnoreCase("failed")) + throw new ClientException(ErrorCodes.ERR_BAD_REQUEST.name(), s"Cannot update content with FAILED status for id : ${vertex.getIdentifier}.") + }) + } + + private def getStatus(request: Request, vertexList: util.List[Vertex]): String = { + val vertex = vertexList.filter(node => !node.getIdentifier.endsWith(".img")).headOption.getOrElse(vertexList.head) + request.getOrDefault("status", vertex.getMetadata.get("status")).asInstanceOf[String] + } + + private def getDefinition(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): DefinitionDTO = { + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] + val version = request.getContext.get("version").asInstanceOf[String] + DefinitionFactory.getDefinition(request.graphId, schemaName, version) + } + + private def filterRelations(definition: DefinitionDTO, data: util.Map[String, AnyRef]): util.Map[String, AnyRef] = { + val relations = 
definition.getRelationsMap().keySet() + data.filter(item => { + !relations.contains(item._1) + }) + } + + private def populateExternalProperties(fields: List[String], vertex: Vertex, request: Request, externalProps: List[String])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Vertex] = { + if (StringUtils.equalsIgnoreCase(request.get("mode").asInstanceOf[String], "edit")) + request.put("identifier", vertex.getIdentifier) + val externalPropsResponse = oec.graphService.readExternalProps(request, externalProps.filter(prop => fields.contains(prop))) + externalPropsResponse.map(response => { + vertex.getMetadata.putAll(response.getResult) + Future { + vertex + } + }).flatMap(f => f) + } + } diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala index f3aa34258..df16f2ed3 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala @@ -44,8 +44,8 @@ class ObjectCategoryActor @Inject()(implicit oec: OntologyEngineContext) extends private def read(request: Request): Future[Response] = { val fields: util.List[String] = JavaConverters.seqAsJavaListConverter(request.get(Constants.FIELDS).asInstanceOf[String].split(",").filter(field => StringUtils.isNotBlank(field) && !StringUtils.equalsIgnoreCase(field, "null"))).asJava request.getRequest.put(Constants.FIELDS, fields) - DataNode.read(request).map(node => { - val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, fields, request.getContext.get(Constants.SCHEMA_NAME).asInstanceOf[String], request.getContext.get(Constants.VERSION).asInstanceOf[String]) + DataVertex.read(request).map(node => { + val metadata: util.Map[String, AnyRef] = NodeUtil.serializeVertex(node, fields, 
request.getContext.get(Constants.SCHEMA_NAME).asInstanceOf[String], request.getContext.get(Constants.VERSION).asInstanceOf[String]) ResponseHandler.OK.put(Constants.OBJECT_CATEGORY, metadata) }) } From 815a4632ae40b37f551cdaefc5fdad788519dcf3 Mon Sep 17 00:00:00 2001 From: aimansharief Date: Fri, 24 May 2024 12:19:21 +0530 Subject: [PATCH 10/20] Issue #KN-1041 fix: Updated the code to create and read vertex --- .../graph/common/enums/GraphDACParams.java | 2 +- .../org/sunbird/graph/JanusGraphService.scala | 4 + .../janus/dac/util/GremlinVertexUtil.scala | 41 +++-- .../service/operation/SearchOperations.scala | 9 +- .../service/operation/VertexOperations.scala | 107 +++++++++---- .../sunbird/graph/schema/DefinitionDTO.scala | 16 +- .../sunbird/graph/schema/DefinitionNode.scala | 143 +++++++++++++++++- .../org/sunbird/graph/vertex/DataVertex.scala | 5 + 8 files changed, 262 insertions(+), 65 deletions(-) diff --git a/ontology-engine/graph-common/src/main/java/org/sunbird/graph/common/enums/GraphDACParams.java b/ontology-engine/graph-common/src/main/java/org/sunbird/graph/common/enums/GraphDACParams.java index 230c04005..4b853c3cb 100644 --- a/ontology-engine/graph-common/src/main/java/org/sunbird/graph/common/enums/GraphDACParams.java +++ b/ontology-engine/graph-common/src/main/java/org/sunbird/graph/common/enums/GraphDACParams.java @@ -8,5 +8,5 @@ public enum GraphDACParams { MERGE, nodes, RETURN, keys, rootNode, nodeId, WHERE, startNodeId, endNodeId, relationType, startNodeIds, endNodeIds, collectionId, collection, indexProperty, taskId, input, getTags, searchCriteria, paramMap, cypherQuery, paramValueMap, queryStatementMap, SYS_INTERNAL_LAST_UPDATED_ON, - CONSUMER_ID, consumerId, CHANNEL_ID, channel, APP_ID, appId, Nodes_Count, Relations_Count; + CONSUMER_ID, consumerId, CHANNEL_ID, channel, APP_ID, appId, Nodes_Count, Relations_Count, vertex; } diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala 
b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index 4fd537433..74cb29817 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -38,4 +38,8 @@ class JanusGraphService { SearchOperations.getNodeByUniqueId(graphId, vertexId, getTags, request).map(vertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(vertex) else vertex) } + def deleteNode(graphId: String, vertexId: String, request: Request): Future[java.lang.Boolean] = { + VertexOperations.deleteVertex(graphId, vertexId, request) + } + } diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala index edf3d92f8..a4532d140 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala @@ -1,17 +1,17 @@ package org.sunbird.janus.dac.util import org.apache.commons.lang3.StringUtils -import org.sunbird.graph.dac.model.{Vertex, Edges} +import org.sunbird.graph.dac.model.{Edges, Vertex} import org.sunbird.common.exception.ServerException import org.sunbird.graph.common.enums.SystemProperties import org.sunbird.graph.dac.enums.GraphDACErrorCodes -import java.util + +import java.{lang, util} class GremlinVertexUtil { def getNode(graphId: String, gremlinVertex: org.apache.tinkerpop.gremlin.structure.Vertex, edgeMap: util.Map[Object, AnyRef], startNodeMap: util.Map[Object, AnyRef], endNodeMap: util.Map[Object, AnyRef]): Vertex = { - println("gremlinVertex ", gremlinVertex) if (null == gremlinVertex) throw new ServerException(GraphDACErrorCodes.ERR_GRAPH_NULL_DB_NODE.name(), "Failed to create node object. 
Node from database is null.") @@ -22,34 +22,31 @@ class GremlinVertexUtil { val metadata = new util.HashMap[String, Object]() gremlinVertex.keys().forEach { key => - println("key ", key) - val value = gremlinVertex.property(key).value() - println("value ", value) if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_UNIQUE_ID.name())) - vertex.setIdentifier(value.asInstanceOf[String]) + vertex.setIdentifier(gremlinVertex.values(key).next().asInstanceOf[String]) else if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_SYS_NODE_TYPE.name())) - vertex.setVertexType(value.asInstanceOf[String]) + vertex.setVertexType(gremlinVertex.values(key).next().asInstanceOf[String]) else if (StringUtils.equalsIgnoreCase(key, SystemProperties.IL_FUNC_OBJECT_TYPE.name())) - vertex.setObjectType(value.asInstanceOf[String]) + vertex.setObjectType(gremlinVertex.values(key).next().asInstanceOf[String]) else { + val value = gremlinVertex.values(key) if (null != value) { - if (value.isInstanceOf[util.List[_]]) { - val list = value.asInstanceOf[util.List[_]] - if (null != list && list.size() > 0) { + value match { + case list: util.List[_] => + if (null != list && list.size() > 0) { - val obj = list.get(0) - obj match { - case _: String => metadata.put(key, list.toArray(new Array[String](list.size()))) - case _: Number => metadata.put(key, list.toArray(new Array[Number](list.size()))) - case _: java.lang.Boolean => metadata.put(key, list.toArray(new Array[java.lang.Boolean](list.size()))) - case _ => metadata.put(key, list.toArray(new Array[AnyRef](list.size()))) - } + val obj = list.get(0) + obj match { + case _: String => metadata.put(key, list.toArray(new Array[String](list.size()))) + case _: Number => metadata.put(key, list.toArray(new Array[Number](list.size()))) + case _: lang.Boolean => metadata.put(key, list.toArray(new Array[lang.Boolean](list.size()))) + case _ => metadata.put(key, list.toArray(new Array[AnyRef](list.size()))) + } - } + } + case _ => metadata.put(key, 
value) } } - else - metadata.put(key, value) } } vertex.setMetadata(metadata) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala index 61c64c938..dc770b77a 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala @@ -25,7 +25,6 @@ class SearchOperations { def getNodeByUniqueId(graphId: String, vertexId: String, getTags: Boolean, request: Request): Future[Vertex] = { Future { - println("IN getNodeByUniqueId operation") TelemetryManager.log("Graph Id: " + graphId + "\nVertex Id: " + vertexId + "\nGet Tags:" + getTags) if (StringUtils.isBlank(graphId)) @@ -40,7 +39,7 @@ class SearchOperations { try { graphConnection.initialiseGraphClient() val g: GraphTraversalSource = graphConnection.getGraphTraversalSource - println("IN Try operation") + val parameterMap = new util.HashMap[String, AnyRef] parameterMap.put(GraphDACParams.graphId.name, graphId) parameterMap.put(GraphDACParams.nodeId.name, vertexId) @@ -53,7 +52,6 @@ class SearchOperations { throw new ResourceNotFoundException(DACErrorCodeConstants.NOT_FOUND.name, DACErrorMessageConstants.NODE_NOT_FOUND + " | [Invalid Node Id.]: " + vertexId, vertexId) - println("retrievedVertices ", retrievedVertices) val vertexMap = new util.HashMap[Object, AnyRef] val relationMap = new util.HashMap[Object, AnyRef] @@ -62,16 +60,13 @@ class SearchOperations { retrievedVertices.forEach { result => if (null != result) - println("result === ", result) getRecordValues(result, vertexMap, relationMap, startNodeMap, endNodeMap) } - println("vertexMap === ", vertexMap) if (!vertexMap.isEmpty) { val entry = vertexMap.entrySet().iterator().next() newVertex = gremlinVertexUtil.getNode(graphId, 
entry.getValue.asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex], relationMap, startNodeMap, endNodeMap) } - println("newVertex === ", newVertex) newVertex } @@ -103,7 +98,7 @@ class SearchOperations { // .select("ee", "r", "__endNode"), inE().as("r").outV().as("__startNode") // .select("ee", "r", "__startNode")).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] - g.V().hasLabel("domain").has("IL_UNIQUE_ID", "obj-cat:explanation-content1").as("ee") + g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee") .project("ee", "r", "__startNode", "__endNode") .by(identity()) .by(bothE().elementMap().fold()) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index 2db74ee41..7c92713f9 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -1,10 +1,8 @@ package org.sunbird.janus.service.operation import org.apache.commons.lang3.{BooleanUtils, StringUtils} -import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.{GraphTraversal, GraphTraversalSource} -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.valueMap -import org.janusgraph.core.JanusGraph +import org.sunbird.common.dto.Request import org.sunbird.common.exception.{ClientException, ServerException} import org.sunbird.common.{DateUtils, JsonUtils} import org.sunbird.graph.common.Identifier @@ -34,41 +32,74 @@ class VertexOperations { val parameterMap = new util.HashMap[String, AnyRef] parameterMap.put(GraphDACParams.graphId.name, graphId) - parameterMap.put("vertex", setPrimitiveData(vertex)) - prepareMap(parameterMap) - try{ - 
graphConnection.initialiseGraphClient() - val g: GraphTraversalSource = graphConnection.getGraphTraversalSource - - val newVertex = g.addV(vertex.getGraphId) - val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] - - finalMap.foreach { case (key, value) => newVertex.property(key, value) } - val retrieveVertex = newVertex.elementMap().next() - - vertex.setGraphId(graphId) - vertex.setIdentifier(retrieveVertex.get("IL_UNIQUE_ID")) - vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get("versionKey")) - vertex - } catch { - case e: Throwable => - e.getCause match { - case cause: org.apache.tinkerpop.gremlin.driver.exception.ResponseException => - throw new ClientException( - DACErrorCodeConstants.CONSTRAINT_VALIDATION_FAILED.name(), - DACErrorMessageConstants.CONSTRAINT_VALIDATION_FAILED + vertex.getIdentifier - ) - case cause => - throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) - } + parameterMap.put(GraphDACParams.vertex.name, setPrimitiveData(vertex)) + + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + val addedVertex = createVertexTraversal(parameterMap, g) + val vertexElementMap = addedVertex.elementMap().next() + + vertex.setGraphId(graphId) + vertex.setIdentifier(vertexElementMap.get(SystemProperties.IL_UNIQUE_ID.name)) + vertex.getMetadata.put(GraphDACParams.versionKey.name, vertexElementMap.get(GraphDACParams.versionKey.name)) + vertex + } + catch { + case e: Throwable => + e.getCause match { + case cause: org.apache.tinkerpop.gremlin.driver.exception.ResponseException => + throw new ClientException( + DACErrorCodeConstants.CONSTRAINT_VALIDATION_FAILED.name(), + DACErrorMessageConstants.CONSTRAINT_VALIDATION_FAILED + vertex.getIdentifier + ) + case cause => + throw new 
ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) + } + } + } + } + + def deleteVertex(graphId: String, vertexId: String, request: Request): Future[java.lang.Boolean] = { + Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Remove Property Values Operation Failed.]") + + if (StringUtils.isBlank(vertexId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | [Remove Property Values Operation Failed.]") + + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + val parameterMap = new util.HashMap[String, AnyRef] + parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put(GraphDACParams.nodeId.name, vertexId) + parameterMap.put(GraphDACParams.request.name, request) + + deleteQuery(parameterMap, g) + + true } + catch { + case e: Exception => throw e + } + } + } - def prepareMap(parameterMap: util.Map[String, AnyRef]) = { + def deleteQuery(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource): Unit = { + + } + + private def createVertexTraversal(parameterMap: util.Map[String, AnyRef], graphTraversalSource: GraphTraversalSource): GraphTraversal[org.apache.tinkerpop.gremlin.structure.Vertex, org.apache.tinkerpop.gremlin.structure.Vertex] = { if (null != parameterMap) { - val graphId = parameterMap.getOrDefault("graphId","").asInstanceOf[String] - val vertex = parameterMap.getOrDefault("vertex", null).asInstanceOf[Vertex] + val graphId = parameterMap.getOrDefault(GraphDACParams.graphId.name,"").asInstanceOf[String] + val vertex = parameterMap.getOrDefault(GraphDACParams.vertex.name, null).asInstanceOf[Vertex] if (StringUtils.isBlank(graphId)) throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, 
@@ -92,6 +123,16 @@ class VertexOperations { combinedMap.putAll(vpMap) parameterMap.put(GraphDACParams.paramValueMap.name, combinedMap) + + val newVertexTraversal = graphTraversalSource.addV(vertex.getGraphId) + val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] + + finalMap.foreach { case (key, value) => newVertexTraversal.property(key, value) } + + newVertexTraversal + } + else { + throw new ClientException(DACErrorCodeConstants.INVALID_PARAMETER.name, DACErrorMessageConstants.INVALID_PARAM_MAP ) } } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala index 70413b352..16c125015 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionDTO.scala @@ -8,7 +8,7 @@ import org.sunbird.common.exception.{ClientException, ResponseCode} import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.Identifier import org.sunbird.graph.dac.enums.SystemNodeTypes -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.schema.validator._ import scala.collection.JavaConverters._ @@ -32,6 +32,20 @@ class DefinitionDTO(graphId: String, schemaName: String, version: String = "1.0" node } + def getVertex(identifier: String, input: java.util.Map[String, AnyRef], vertexType: String): Vertex = { + val result = schemaValidator.getStructuredData(input) + val objectType = schemaValidator.getConfig.getString("objectType") + val vertex = new Vertex(identifier, objectType, vertexType) + vertex.setGraphId(graphId) + vertex.setVertexType(SystemNodeTypes.DATA_NODE.name) + vertex.setObjectType(objectType) + if (MapUtils.isNotEmpty(input)) 
vertex.setMetadata(result.getMetadata) else vertex.setMetadata(new util.HashMap[String, AnyRef]()) + if (StringUtils.isBlank(vertex.getIdentifier)) vertex.setIdentifier(Identifier.getIdentifier(graphId, Identifier.getUniqueIdFromTimestamp)) + setEdges(vertex, result.getRelations) + if (MapUtils.isNotEmpty(result.getExternalData)) vertex.setExternalData(result.getExternalData) else vertex.setExternalData(new util.HashMap[String, AnyRef]()) + vertex + } + def getExternalProps(): List[String] = { if (schemaValidator.getConfig.hasPath("external.properties")) { val propsSet = Set.empty ++ schemaValidator.getConfig.getObject("external.properties").keySet().asScala diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala index d2e0c862f..42d4886af 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/schema/DefinitionNode.scala @@ -10,7 +10,7 @@ import org.sunbird.cache.impl.RedisCache import org.sunbird.common.JsonUtils import org.sunbird.common.dto.Request import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.{Node, Relation, Vertex} +import org.sunbird.graph.dac.model.{Edges, Node, Relation, Vertex} import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ @@ -142,6 +142,51 @@ object DefinitionNode { }).flatMap(f => f) } + @throws[Exception] + def validates(identifier: String, request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] + val version: String = request.getContext.get("version").asInstanceOf[String] + val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String].replaceAll("image", "") + val 
reqVersioning: String = request.getContext.getOrDefault("versioning", "").asInstanceOf[String] + val versioning = if (StringUtils.isBlank(reqVersioning)) None else Option(reqVersioning) + val req: util.HashMap[String, AnyRef] = new util.HashMap[String, AnyRef](request.getRequest) + val skipValidation: Boolean = { + if (request.getContext.containsKey("skipValidation")) request.getContext.get("skipValidation").asInstanceOf[Boolean] else false + } + val definition = DefinitionFactory.getDefinition(graphId, schemaName, version) + val removeProps = request.getContext.getOrDefault("removeProps", new util.ArrayList[String]()).asInstanceOf[util.List[String]] + + definition.getVertex(identifier, "update", null, versioning, None).map(dbVertex => { + val schema = dbVertex.getObjectType.toLowerCase.replace("image", "") + val primaryCategory: String = if (null != dbVertex.getMetadata) dbVertex.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String] else "" + val objectCategoryDefinition: ObjectCategoryDefinition = getObjectCategoryDefinition(primaryCategory, schema, request.getContext.getOrDefault("channel", "all").asInstanceOf[String]) + val categoryDefinition = DefinitionFactory.getDefinition(graphId, schema, version, objectCategoryDefinition) + categoryDefinition.validateRequest(request) + resetVertexJsonProperties(dbVertex, graphId, version, schema, objectCategoryDefinition) + val inputVertex: Vertex = categoryDefinition.getVertex(dbVertex.getIdentifier, request.getRequest, dbVertex.getVertexType) + val dbRels = getDBEdges(graphId, schema, version, req, dbVertex, objectCategoryDefinition) + setEdges(dbVertex, inputVertex, dbRels) + if (dbVertex.getIdentifier.endsWith(".img") && StringUtils.equalsAnyIgnoreCase("Yes", dbVertex.getMetadata.getOrDefault("isImageNodeCreated", "").asInstanceOf[String])) { + inputVertex.getMetadata.put("versionKey", dbVertex.getMetadata.getOrDefault("versionKey", "")) + dbVertex.getMetadata.remove("isImageNodeCreated") + } + 
dbVertex.getMetadata.putAll(inputVertex.getMetadata) + if (MapUtils.isNotEmpty(inputVertex.getExternalData)) { + if (MapUtils.isNotEmpty(dbVertex.getExternalData)) + dbVertex.getExternalData.putAll(inputVertex.getExternalData) + else + dbVertex.setExternalData(inputVertex.getExternalData) + } + if (!removeProps.isEmpty) removeProps.toList.foreach(prop => dbVertex.getMetadata.remove(prop)) + val validatedVertex = if (!skipValidation) categoryDefinition.validateVertex(dbVertex, "update") else Future(dbVertex) + validatedVertex.map(node => { + if (!removeProps.isEmpty) removeProps.toList.foreach(prop => dbVertex.getMetadata.put(prop, null)) + node + }) + + }).flatMap(f => f) + } + def postProcessor(request: Request, node: Node)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Node = { val graphId: String = request.getContext.get("graph_id").asInstanceOf[String] val version: String = request.getContext.get("version").asInstanceOf[String] @@ -221,6 +266,35 @@ object DefinitionNode { dbNode.setDeletedRelations(delRels) } + private def setEdges(dbVertex: Vertex, inputVertex: Vertex, dbRels: util.Map[String, util.List[Edges]]): Unit = { + var addRels: util.List[Edges] = new util.ArrayList[Edges]() + var delRels: util.List[Edges] = new util.ArrayList[Edges]() + val inRel: util.List[Edges] = dbVertex.getInEdges + val outRel: util.List[Edges] = dbVertex.getOutEdges + val inRelReq: util.List[Edges] = if (CollectionUtils.isNotEmpty(inputVertex.getInEdges)) new util.ArrayList[Edges](inputVertex.getInEdges) else new util.ArrayList[Edges]() + val outRelReq: util.List[Edges] = if (CollectionUtils.isNotEmpty(inputVertex.getOutEdges)) new util.ArrayList[Edges](inputVertex.getOutEdges) else new util.ArrayList[Edges]() + if (CollectionUtils.isNotEmpty(inRelReq)) { + if (CollectionUtils.isNotEmpty(dbRels.get("in"))) { + inRelReq.addAll(dbRels.get("in")) + inputVertex.setInEdges(inRelReq) + } + getNewEdgesList(inRel, inRelReq, addRels, delRels) + } + if 
(CollectionUtils.isNotEmpty(outRelReq)) { + if (CollectionUtils.isNotEmpty(dbRels.get("out"))) { + outRelReq.addAll(dbRels.get("out")) + inputVertex.setOutEdges(outRelReq) + } + getNewEdgesList(outRel, outRelReq, addRels, delRels) + } + if (CollectionUtils.isNotEmpty(addRels)) { + dbVertex.setAddedEdges(addRels) + updateEdgeMetadata(dbVertex) + } + if (CollectionUtils.isNotEmpty(delRels)) + dbVertex.setDeletedEdges(delRels) + } + private def getNewRelationsList(dbRelations: util.List[Relation], newRelations: util.List[Relation], addRels: util.List[Relation], delRels: util.List[Relation]): Unit = { val relList = new util.ArrayList[String] for (rel <- newRelations) { @@ -236,6 +310,21 @@ object DefinitionNode { } } + private def getNewEdgesList(dbEdges: util.List[Edges], newEdges: util.List[Edges], addEdges: util.List[Edges], delEdges: util.List[Edges]): Unit = { + val edgeList = new util.ArrayList[String] + for (edge <- newEdges) { + addEdges.add(edge) + val relKey = edge.getStartVertexId + edge.getEdgeType + edge.getEndVertexId + if (!edgeList.contains(relKey)) edgeList.add(relKey) + } + if (null != dbEdges && !dbEdges.isEmpty) { + for (rel <- dbEdges) { + val relKey = rel.getStartVertexId + rel.getEdgeType + rel.getEndVertexId + if (!edgeList.contains(relKey)) delEdges.add(rel) + } + } + } + def updateRelationMetadata(node: Node): Unit = { var relOcr = new util.HashMap[String, Integer]() val rels = node.getAddedRelations @@ -283,6 +372,21 @@ object DefinitionNode { node } + def resetVertexJsonProperties(vertex: Vertex, graphId: String, version: String, schemaName: String, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): Vertex = { + val jsonPropList = fetchJsonProps(graphId, version, schemaName, ocd) + if (!jsonPropList.isEmpty) { + vertex.getMetadata.entrySet().map(entry => { + if (jsonPropList.contains(entry.getKey)) { + entry.getValue match { + case value: String => 
entry.setValue(JsonUtils.deserialize(value.asInstanceOf[String], classOf[Object])) + case _ => entry + } + } + }) + } + vertex + } + def getDBRelations(graphId: String, schemaName: String, version: String, request: util.Map[String, AnyRef], dbNode: Node, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): util.Map[String, util.List[Relation]] = { val inRelations = new util.ArrayList[Relation]() val outRelations = new util.ArrayList[Relation]() @@ -319,6 +423,43 @@ object DefinitionNode { } } + def getDBEdges(graphId: String, schemaName: String, version: String, request: util.Map[String, AnyRef], dbVertex: Vertex, ocd: ObjectCategoryDefinition = ObjectCategoryDefinition())(implicit ec: ExecutionContext, oec: OntologyEngineContext): util.Map[String, util.List[Edges]] = { + val inEdges = new util.ArrayList[Edges]() + val outEdges = new util.ArrayList[Edges]() + val relDefMap = getRelationDefinitionMap(graphId, version, schemaName, ocd); + if (null != dbVertex) { + if (CollectionUtils.isNotEmpty(dbVertex.getInEdges)) { + for (inRel <- dbVertex.getInEdges) { + val key = inRel.getEdgeType + "_in_" + inRel.getStartVertexObjectType + if (relDefMap.containsKey(key)) { + val value = relDefMap.get(key).get + if (!request.containsKey(value)) { + inEdges.add(inRel) + } + } + } + } + if (CollectionUtils.isNotEmpty(dbVertex.getOutEdges)) { + for (outRel <- dbVertex.getOutEdges) { + val key = outRel.getEdgeType + "_out_" + outRel.getEndVertexObjectType + if (relDefMap.containsKey(key)) { + val value = relDefMap.get(key).get + if (!request.containsKey(value)) { + outEdges.add(outRel) + } + } + } + } + } + new util.HashMap[String, util.List[Edges]]() { + { + put("in", inEdges) + put("out", outEdges) + } + } + } + + def validateContentNodes(nodes: List[Node], graphId: String, schemaName: String, version: String)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[List[Node]] = { val futures = 
nodes.map(node => { val ocd = ObjectCategoryDefinition(node.getMetadata.getOrDefault("primaryCategory", "").asInstanceOf[String], node.getObjectType, node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String]) diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index b62a6afee..579706e11 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -57,6 +57,11 @@ object DataVertex { } } + def deleteNode(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[java.lang.Boolean] = { + val identifier: String = request.getRequest.getOrDefault("identifier", "").asInstanceOf[String] + oec.janusGraphService.deleteNode(request.graphId, identifier, request) + } + private def createEdges(graphId: String, vertex: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { val edges: util.List[Edges] = vertex.getAddedEdges if (CollectionUtils.isNotEmpty(edges)) { From b030a541f2c579007ced44497603a3f9d09fee2a Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Fri, 24 May 2024 12:24:49 +0530 Subject: [PATCH 11/20] Update relation changes --- .../org/sunbird/graph/JanusGraphService.scala | 6 ++- .../service/operation/EdgeOperations.scala | 38 +++++++++++++++++-- .../org/sunbird/graph/vertex/DataVertex.scala | 14 +++++++ 3 files changed, 54 insertions(+), 4 deletions(-) diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index 74cb29817..9549279eb 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ 
b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -30,10 +30,14 @@ class JanusGraphService { } - def createEdges(graphId: String, edgeMap: java.util.List[java.util.Map[String, AnyRef]]) = { + def createEdges(graphId: String, edgeMap: java.util.List[java.util.Map[String, AnyRef]]): Future[Response] = { EdgeOperations.createEdges(graphId, edgeMap) } + def removeEdges(graphId: String, edgeMap: java.util.List[java.util.Map[String, AnyRef]]): Future[Response] = { + EdgeOperations.removeEdges(graphId, edgeMap) + } + def getNodeByUniqueId(graphId: String, vertexId: String, getTags: Boolean, request: Request): Future[Vertex] = { SearchOperations.getNodeByUniqueId(graphId, vertexId, getTags, request).map(vertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(vertex) else vertex) } diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala index d7369bd62..10e5282e0 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala @@ -7,7 +7,8 @@ import org.apache.tinkerpop.gremlin.structure.{Edge, Vertex} import org.janusgraph.core.JanusGraph import org.sunbird.common.dto.{Response, ResponseHandler} import org.sunbird.common.exception.ClientException -import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} +import org.sunbird.graph.common.enums.SystemProperties +import org.sunbird.graph.service.common.{CypherQueryConfigurationConstants, DACErrorCodeConstants, DACErrorMessageConstants} import org.sunbird.janus.service.util.JanusConnectionUtil import java.util @@ -36,6 +37,24 @@ class EdgeOperations { } } + def removeEdges(graphId: String, edgeData: util.List[util.Map[String, AnyRef]]): 
Future[Response] = { + Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") + + if (CollectionUtils.isEmpty(edgeData)) + throw new ClientException(DACErrorCodeConstants.INVALID_RELATION.name, + DACErrorMessageConstants.INVALID_NODE + " | [Create Relation Operation Failed.]") + + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + deleteBulkRelations(g, graphId, edgeData) + ResponseHandler.OK() + } + } + def createBulkRelations(g: GraphTraversalSource, graphId: String, edgeData: util.List[util.Map[String, AnyRef]]): Unit = { for (row <- edgeData.asScala) { val startNodeId = row.get("startNodeId").toString @@ -43,8 +62,8 @@ class EdgeOperations { val relation = row.get("relation").toString val relMetadata = row.get("relMetadata").asInstanceOf[Map[String, AnyRef]] - val startNode: Vertex = g.V().hasLabel(graphId).has("IL_UNIQUE_ID", startNodeId).next() - val endNode: Vertex = g.V().hasLabel(graphId).has("IL_UNIQUE_ID", endNodeId).next() + val startNode: Vertex = g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name, startNodeId).next() + val endNode: Vertex = g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name, endNodeId).next() val edge: Edge = startNode.addEdge(relation, endNode) for (key <- relMetadata.keySet) { @@ -52,4 +71,17 @@ class EdgeOperations { } } } + + private def deleteBulkRelations(g: GraphTraversalSource, graphId: String, edgeData: util.List[util.Map[String, AnyRef]]): Unit = { + for (row <- edgeData.asScala) { + val startNodeId = row.get("startNodeId").toString + val endNodeId = row.get("endNodeId").toString + val relation = row.get("relation").toString + + g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name, startNodeId) + 
.outE().as(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT).inV().has(SystemProperties.IL_UNIQUE_ID.name, endNodeId) + .select(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT).drop().iterate() + + } + } } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index 579706e11..fe522cdfd 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -71,6 +71,20 @@ object DataVertex { } } + def updateEdges(graphId: String, vertex: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { + if (CollectionUtils.isEmpty(vertex.getAddedEdges) && CollectionUtils.isEmpty(vertex.getDeletedEdges)) { + Future(new Response) + } else { + if (CollectionUtils.isNotEmpty(vertex.getDeletedEdges)) { + oec.janusGraphService.removeEdges(graphId, getEdgesMap(vertex.getDeletedEdges)) + } + if (CollectionUtils.isNotEmpty(vertex.getAddedEdges)) { + oec.janusGraphService.createEdges(graphId, getEdgesMap(vertex.getAddedEdges)) + } + Future(new Response) + } + } + private def getEdgesMap(edges: util.List[Edges]): java.util.List[util.Map[String, AnyRef]] = { val list = new util.ArrayList[util.Map[String, AnyRef]] for (edge <- edges) { From 37a742b2dab59cca58df8269567225f4f9e6e9b1 Mon Sep 17 00:00:00 2001 From: shourya-solanki Date: Fri, 24 May 2024 13:57:55 +0530 Subject: [PATCH 12/20] Issue #KN-1041 fix: added upsert vertex changes --- .../org/sunbird/graph/JanusGraphService.scala | 16 +- .../service/operation/VertexOperations.scala | 144 +++++++++++++++++- .../org/sunbird/graph/nodes/DataNode.scala | 12 +- .../org/sunbird/graph/vertex/DataVertex.scala | 44 +++++- 4 files changed, 202 insertions(+), 14 deletions(-) 
diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index 9549279eb..6a2873be8 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -1,11 +1,8 @@ package org.sunbird.graph import org.sunbird.common.Platform -import org.sunbird.common.dto.{Property, Request, Response, ResponseHandler} -import org.sunbird.common.exception.ResponseCode -import org.sunbird.graph.dac.model.{Node, SearchCriteria, SubGraph, Vertex} -import org.sunbird.graph.external.ExternalPropsManager -import org.sunbird.graph.service.operation.{GraphAsyncOperations, Neo4JBoltSearchOperations, NodeAsyncOperations, SearchAsyncOperations} +import org.sunbird.common.dto.{Request, Response} +import org.sunbird.graph.dac.model.Vertex import org.sunbird.graph.util.CSPMetaUtil import org.sunbird.janus.service.operation.{EdgeOperations, SearchOperations, VertexOperations} @@ -45,5 +42,14 @@ class JanusGraphService { def deleteNode(graphId: String, vertexId: String, request: Request): Future[java.lang.Boolean] = { VertexOperations.deleteVertex(graphId, vertexId, request) } + def upsertVertex(graphId: String, vertex: Vertex, request: Request): Future[Vertex] = { + if (isrRelativePathEnabled) { + val metadata = CSPMetaUtil.updateRelativePath(vertex.getMetadata) + vertex.setMetadata(metadata) + } + // Assuming VertexOperations provides access to JanusGraph vertex upsert + VertexOperations.upsertVertex(graphId, vertex, request) + .map(resVertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resVertex) else resVertex) + } } diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala 
b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index 7c92713f9..7a81c715f 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -2,7 +2,6 @@ package org.sunbird.janus.service.operation import org.apache.commons.lang3.{BooleanUtils, StringUtils} import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.{GraphTraversal, GraphTraversalSource} -import org.sunbird.common.dto.Request import org.sunbird.common.exception.{ClientException, ServerException} import org.sunbird.common.{DateUtils, JsonUtils} import org.sunbird.graph.common.Identifier @@ -11,9 +10,11 @@ import org.sunbird.graph.dac.model.Vertex import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} import org.sunbird.janus.service.util.JanusConnectionUtil import org.sunbird.telemetry.logger.TelemetryManager +import org.sunbird.common.dto.Request import java.util -import scala.collection.convert.ImplicitConversions.`map AsScala` +import scala.collection.convert.ImplicitConversions.{`map AsJavaMap`, `map AsScala`} +import scala.collection.immutable.HashMap import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future @@ -178,6 +179,144 @@ class VertexOperations { versionPropertyMap } + def upsertVertex(graphId: String, vertex: Vertex, request: Request): Future[Vertex] = { + Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Create Node Operation Failed.]") + + if (null == vertex) + throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, + DACErrorMessageConstants.INVALID_NODE + " | [Create Node Operation Failed.]") + + val parameterMap = new util.HashMap[String, AnyRef] + 
parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put("vertex", setPrimitiveData(vertex)) + parameterMap.put(GraphDACParams.request.name, request) + + prepareUpsertMap(parameterMap) + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + // Find existing vertex by identifier (replace with your logic if needed) + val existingVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.toString, vertex.getIdentifier) + + + val existingVertexProps = existingVertex.elementMap().next().asInstanceOf[util.HashMap[String, AnyRef]] + parameterMap.foreach { case (key, value) => + if (!key.equals(GraphDACParams.graphId.name) && !key.equals(GraphDACParams.request.name)) { + existingVertexProps.put(key, value) + } + } +// existingVertex.properties(existingVertexProps) + + val retrieveVertex = existingVertex.elementMap().next() + vertex.setIdentifier(retrieveVertex.get("IL_UNIQUE_ID")) + vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get("versionKey")) + vertex + } + + catch { + case e: Throwable => + e.getCause match { + case cause: org.apache.tinkerpop.gremlin.driver.exception.ResponseException => + throw new ClientException( + DACErrorCodeConstants.CONSTRAINT_VALIDATION_FAILED.name(), + DACErrorMessageConstants.CONSTRAINT_VALIDATION_FAILED + vertex.getIdentifier + ) + case cause => + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) + } + } + } + } + + def prepareUpsertMap(parameterMap: util.Map[String, AnyRef]) = { + if (null != parameterMap) { + val vertex = parameterMap.getOrDefault("vertex", null).asInstanceOf[Vertex] + if (null == vertex) + throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, + DACErrorMessageConstants.INVALID_NODE + " | [Upsert Node Query Generation Failed.]") + + if (StringUtils.isBlank(vertex.getIdentifier)) + 
vertex.setIdentifier(Identifier.getIdentifier(vertex.getGraphId, Identifier.getUniqueIdFromTimestamp)) + + val graphId = parameterMap.getOrDefault("graphId", "").asInstanceOf[String] + val date: String = DateUtils.formatCurrentDate + + val ocsMap: util.Map[String, AnyRef] = getOnCreateSetMap( vertex, date) + val omsMap: util.Map[String, AnyRef] = getOnMatchSetMap( vertex,date, true) + + val combinedMap: util.Map[String, AnyRef] = new util.HashMap[String, AnyRef] + combinedMap.putAll(ocsMap) + combinedMap.putAll(omsMap) + + parameterMap.put(GraphDACParams.paramValueMap.name, combinedMap) + } + } + + def getOnCreateSetMap(node: Vertex, date: String): Map[String, Object] = { + val paramMap = new HashMap[String, Object]() + + if (node != null && StringUtils.isNotBlank(date)) { + if (StringUtils.isBlank(node.getIdentifier)) { + node.setIdentifier(Identifier.getIdentifier(node.getGraphId, Identifier.getUniqueIdFromTimestamp)) + } + + paramMap.put(SystemProperties.IL_UNIQUE_ID.name, node.getId) + + paramMap.put(SystemProperties.IL_SYS_NODE_TYPE.name, node.getVertexType) + + if (StringUtils.isNotBlank(node.getObjectType)) { + paramMap.put(SystemProperties.IL_FUNC_OBJECT_TYPE.name, node.getObjectType) + } + paramMap.put(AuditProperties.createdOn.name, date) + + if (node.getMetadata.containsKey(GraphDACParams.SYS_INTERNAL_LAST_UPDATED_ON.name) == false) { + paramMap.put(AuditProperties.lastUpdatedOn.name, date) + } + val versionKey = DateUtils.parse(date).getTime.toString + paramMap.put(GraphDACParams.versionKey.name, versionKey) + } + + paramMap + } + + def getOnMatchSetMap(node: Vertex, date: String, merge: Boolean): Map[String, Object] = { + val paramMap = new HashMap[String, Object]() + + if (node != null && StringUtils.isNotBlank(date)) { + // Set operation based on merge flag + val setClause = if (merge) { + s"${GraphDACParams.ON.name} ${GraphDACParams.MATCH.name} ${GraphDACParams.SET.name}" + } else { + GraphDACParams.SET.name + } + + if (node.getMetadata != null) { + 
node.getMetadata.foreach { + case (key, value) => if (key != GraphDACParams.versionKey.name) paramMap.put(key, value) + } + } + + + // Set lastUpdatedOn property if not already set + if (!node.getMetadata.containsKey(GraphDACParams.SYS_INTERNAL_LAST_UPDATED_ON.name)) { + paramMap.put(AuditProperties.lastUpdatedOn.name, date) + } + + // Set versionKey property if missing in metadata + if (node.getMetadata != null && + StringUtils.isBlank(node.getMetadata.get(GraphDACParams.versionKey.name()).toString)) { + val versionKey = DateUtils.parse(date).getTime.toString + paramMap.put(GraphDACParams.versionKey.name, versionKey) + } + } + + paramMap + } + def setPrimitiveData(vertex: Vertex): Vertex = { val metadata: util.Map[String, AnyRef] = vertex.getMetadata metadata.forEach((key, value) => { @@ -195,4 +334,5 @@ class VertexOperations { } + } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala index 3dc800a12..ca3b62d83 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/nodes/DataNode.scala @@ -1,8 +1,5 @@ package org.sunbird.graph.nodes -import java.util -import java.util.Optional -import java.util.concurrent.CompletionException import org.apache.commons.collections4.{CollectionUtils, MapUtils} import org.apache.commons.lang3.StringUtils import org.sunbird.common.DateUtils @@ -10,12 +7,15 @@ import org.sunbird.common.dto.{Request, Response} import org.sunbird.common.exception.{ClientException, ErrorCodes, ResponseCode} import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.enums.SystemProperties -import org.sunbird.graph.dac.model.{Edges, Vertex, Filter, MetadataCriterion, Node, Relation, SearchConditions, SearchCriteria} +import org.sunbird.graph.dac.model._ import 
org.sunbird.graph.schema.{DefinitionDTO, DefinitionFactory, DefinitionNode} import org.sunbird.parseq.Task -import scala.collection.convert.ImplicitConversions._ +import java.util +import java.util.Optional +import java.util.concurrent.CompletionException import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.{ExecutionContext, Future} @@ -122,7 +122,7 @@ object DataNode { } } - private def updateExternalProperties(identifier: String, externalProps: util.Map[String, AnyRef], context: util.Map[String, AnyRef], objectType: String, request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { + def updateExternalProperties(identifier: String, externalProps: util.Map[String, AnyRef], context: util.Map[String, AnyRef], objectType: String, request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { if (MapUtils.isNotEmpty(externalProps)) { val req = new Request(request) req.put("identifier", identifier) diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index fe522cdfd..4df28c868 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -11,7 +11,7 @@ import org.sunbird.common.exception.{ClientException, ErrorCodes, ResponseCode} import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.enums.SystemProperties import org.sunbird.graph.dac.model.{Edges, Filter, MetadataCriterion, SearchConditions, SearchCriteria, Vertex} -import org.sunbird.graph.nodes.DataNode.saveExternalProperties +import org.sunbird.graph.nodes.DataNode.{saveExternalProperties, updateExternalProperties} import org.sunbird.graph.schema.{DefinitionDTO, 
DefinitionFactory, DefinitionNode} import org.sunbird.parseq.Task @@ -62,6 +62,48 @@ object DataVertex { oec.janusGraphService.deleteNode(request.graphId, identifier, request) } + def update(request: Request, dataModifier: (Vertex) => Vertex = defaultVertexDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { + val identifier: String = request.getContext.get("identifier").asInstanceOf[String] + DefinitionNode.validates(identifier, request).map(vertex => { + request.getContext().put("schemaName", vertex.getObjectType.toLowerCase.replace("image", "")) + val response = oec.janusGraphService.upsertVertex(request.graphId, dataModifier(vertex), request) + response.map(vertex => DefinitionNode.postProcessor(request, vertex)) + .map(result => { + val futureList = Task.parallel[Response]( + updateExternalProperties(vertex.getIdentifier, vertex.getExternalData, request.getContext, request.getObjectType, request), + updateEdges(request.graphId, vertex, request.getContext) + ) + futureList.map(list => result) + }) + .flatMap(f => f) + .recoverWith { case e: CompletionException => throw e.getCause } + }).flatMap(f => f) + .recoverWith { case e: CompletionException => throw e.getCause } + } + +/* private def updateEdges(vertex: Vertex, graphId: String, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { + val request = new Request // Assuming Request is required for JanusGraph + request.setContext(context) + + if (vertex.getAddedEdges.isEmpty && vertex.getDeletedEdges.isEmpty) { + Future(new Response) // No changes, return empty response + } else { + val futures = Seq( + // Delete edges if any + if (vertex.getDeletedEdges.nonEmpty) { + oec.janusGraphService.removeEdges(graphId, getEdgesMap(vertex.getDeletedEdges)) + } else Future.successful(Unit), + // Add edges if any + if (vertex.getAddedEdges.nonEmpty) { + oec.janusGraphService.createEdges(graphId, 
getEdgesMap(vertex.getAddedEdges)) + } else Future.successful(Unit) + ) + Future.sequence(futures) // Combine deletion and addition calls into one future + .map(_ => new Response) // Wrap the combined future with a new Response + } + + }*/ + private def createEdges(graphId: String, vertex: Vertex, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { val edges: util.List[Edges] = vertex.getAddedEdges if (CollectionUtils.isNotEmpty(edges)) { From 0e5b46594c7e7956d07c9941f60cc35902c4b1c3 Mon Sep 17 00:00:00 2001 From: aimansharief Date: Fri, 24 May 2024 16:23:28 +0530 Subject: [PATCH 13/20] Issue #KN-1041 fix: Fixed the code to get values in a proper format --- .../scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala index a4532d140..cd0e668ec 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/dac/util/GremlinVertexUtil.scala @@ -44,7 +44,7 @@ class GremlinVertexUtil { } } - case _ => metadata.put(key, value) + case _ => metadata.put(key, value.next()) } } } From a4ac065424dfa39169822fb14b0f7569ef604771 Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Fri, 24 May 2024 17:24:31 +0530 Subject: [PATCH 14/20] Update Vertex fixes --- .../service/operation/VertexOperations.scala | 58 +++++++------------ .../org/sunbird/graph/vertex/DataVertex.scala | 2 +- .../sunbird/actors/ObjectCategoryActor.scala | 2 +- 3 files changed, 23 insertions(+), 39 deletions(-) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala 
b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index 7a81c715f..4f1a5b243 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -191,7 +191,7 @@ class VertexOperations { val parameterMap = new util.HashMap[String, AnyRef] parameterMap.put(GraphDACParams.graphId.name, graphId) - parameterMap.put("vertex", setPrimitiveData(vertex)) + parameterMap.put(GraphDACParams.vertex.name, setPrimitiveData(vertex)) parameterMap.put(GraphDACParams.request.name, request) prepareUpsertMap(parameterMap) @@ -200,25 +200,21 @@ class VertexOperations { val g: GraphTraversalSource = graphConnection.getGraphTraversalSource // Find existing vertex by identifier (replace with your logic if needed) - val existingVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.toString, vertex.getIdentifier) - - - val existingVertexProps = existingVertex.elementMap().next().asInstanceOf[util.HashMap[String, AnyRef]] - parameterMap.foreach { case (key, value) => - if (!key.equals(GraphDACParams.graphId.name) && !key.equals(GraphDACParams.request.name)) { - existingVertexProps.put(key, value) - } + val existingVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.name, vertex.getIdentifier) + val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] + finalMap.foreach { case (key, value) => + if (!key.equals(GraphDACParams.graphId.name) && !key.equals(GraphDACParams.request.name)) { + existingVertex.property(key, value) } -// existingVertex.properties(existingVertexProps) - - val retrieveVertex = existingVertex.elementMap().next() - vertex.setIdentifier(retrieveVertex.get("IL_UNIQUE_ID")) - vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get("versionKey")) - vertex - } + } - catch { + 
val retrieveVertex = existingVertex.elementMap().next() + vertex.setIdentifier(retrieveVertex.get(SystemProperties.IL_UNIQUE_ID.name)) + vertex.getMetadata.put(GraphDACParams.versionKey.name, retrieveVertex.get(GraphDACParams.versionKey.name)) + vertex + } catch { case e: Throwable => + e.printStackTrace() e.getCause match { case cause: org.apache.tinkerpop.gremlin.driver.exception.ResponseException => throw new ClientException( @@ -238,33 +234,28 @@ class VertexOperations { if (null == vertex) throw new ClientException(DACErrorCodeConstants.INVALID_NODE.name, DACErrorMessageConstants.INVALID_NODE + " | [Upsert Node Query Generation Failed.]") - if (StringUtils.isBlank(vertex.getIdentifier)) vertex.setIdentifier(Identifier.getIdentifier(vertex.getGraphId, Identifier.getUniqueIdFromTimestamp)) - - val graphId = parameterMap.getOrDefault("graphId", "").asInstanceOf[String] val date: String = DateUtils.formatCurrentDate val ocsMap: util.Map[String, AnyRef] = getOnCreateSetMap( vertex, date) - val omsMap: util.Map[String, AnyRef] = getOnMatchSetMap( vertex,date, true) - + val omsMap: util.Map[String, AnyRef] = getOnMatchSetMap( vertex,date, merge = true) val combinedMap: util.Map[String, AnyRef] = new util.HashMap[String, AnyRef] combinedMap.putAll(ocsMap) combinedMap.putAll(omsMap) - parameterMap.put(GraphDACParams.paramValueMap.name, combinedMap) } } - def getOnCreateSetMap(node: Vertex, date: String): Map[String, Object] = { - val paramMap = new HashMap[String, Object]() + private def getOnCreateSetMap(node: Vertex, date: String): util.Map[String, Object] = { + val paramMap = new util.HashMap[String, Object]() if (node != null && StringUtils.isNotBlank(date)) { if (StringUtils.isBlank(node.getIdentifier)) { node.setIdentifier(Identifier.getIdentifier(node.getGraphId, Identifier.getUniqueIdFromTimestamp)) } - paramMap.put(SystemProperties.IL_UNIQUE_ID.name, node.getId) + paramMap.put(SystemProperties.IL_UNIQUE_ID.name, node.getIdentifier) 
paramMap.put(SystemProperties.IL_SYS_NODE_TYPE.name, node.getVertexType) @@ -273,7 +264,7 @@ class VertexOperations { } paramMap.put(AuditProperties.createdOn.name, date) - if (node.getMetadata.containsKey(GraphDACParams.SYS_INTERNAL_LAST_UPDATED_ON.name) == false) { + if (!node.getMetadata.containsKey(GraphDACParams.SYS_INTERNAL_LAST_UPDATED_ON.name)) { paramMap.put(AuditProperties.lastUpdatedOn.name, date) } val versionKey = DateUtils.parse(date).getTime.toString @@ -283,16 +274,10 @@ class VertexOperations { paramMap } - def getOnMatchSetMap(node: Vertex, date: String, merge: Boolean): Map[String, Object] = { - val paramMap = new HashMap[String, Object]() + private def getOnMatchSetMap(node: Vertex, date: String, merge: Boolean): util.Map[String, Object] = { + val paramMap = new util.HashMap[String, Object]() if (node != null && StringUtils.isNotBlank(date)) { - // Set operation based on merge flag - val setClause = if (merge) { - s"${GraphDACParams.ON.name} ${GraphDACParams.MATCH.name} ${GraphDACParams.SET.name}" - } else { - GraphDACParams.SET.name - } if (node.getMetadata != null) { node.getMetadata.foreach { @@ -300,7 +285,6 @@ class VertexOperations { } } - // Set lastUpdatedOn property if not already set if (!node.getMetadata.containsKey(GraphDACParams.SYS_INTERNAL_LAST_UPDATED_ON.name)) { paramMap.put(AuditProperties.lastUpdatedOn.name, date) @@ -317,7 +301,7 @@ class VertexOperations { paramMap } - def setPrimitiveData(vertex: Vertex): Vertex = { + private def setPrimitiveData(vertex: Vertex): Vertex = { val metadata: util.Map[String, AnyRef] = vertex.getMetadata metadata.forEach((key, value) => { try { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index 4df28c868..10aa933dc 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ 
b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -65,7 +65,7 @@ object DataVertex { def update(request: Request, dataModifier: (Vertex) => Vertex = defaultVertexDataModifier)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Vertex] = { val identifier: String = request.getContext.get("identifier").asInstanceOf[String] DefinitionNode.validates(identifier, request).map(vertex => { - request.getContext().put("schemaName", vertex.getObjectType.toLowerCase.replace("image", "")) + request.getContext.put("schemaName", vertex.getObjectType.toLowerCase.replace("image", "")) val response = oec.janusGraphService.upsertVertex(request.graphId, dataModifier(vertex), request) response.map(vertex => DefinitionNode.postProcessor(request, vertex)) .map(result => { diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala index df16f2ed3..544880f75 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/ObjectCategoryActor.scala @@ -53,7 +53,7 @@ class ObjectCategoryActor @Inject()(implicit oec: OntologyEngineContext) extends @throws[Exception] private def update(request: Request): Future[Response] = { RequestUtil.restrictProperties(request) - DataNode.update(request).map(node => { + DataVertex.update(request).map(node => { ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier) }) } From dd12903a253c27e00d2595c3c63373655d16e825 Mon Sep 17 00:00:00 2001 From: aimansharief Date: Fri, 24 May 2024 17:49:45 +0530 Subject: [PATCH 15/20] Issue #KN-1041 fix: Added code to delete vertex --- .../service/operation/SearchOperations.scala | 22 +--------- .../service/operation/VertexOperations.scala | 40 ++++++++++++++----- .../org/sunbird/graph/vertex/DataVertex.scala | 9 ++--- 
3 files changed, 36 insertions(+), 35 deletions(-) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala index dc770b77a..ca24553e4 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala @@ -52,7 +52,6 @@ class SearchOperations { throw new ResourceNotFoundException(DACErrorCodeConstants.NOT_FOUND.name, DACErrorMessageConstants.NODE_NOT_FOUND + " | [Invalid Node Id.]: " + vertexId, vertexId) - val vertexMap = new util.HashMap[Object, AnyRef] val relationMap = new util.HashMap[Object, AnyRef] val startNodeMap = new util.HashMap[Object, AnyRef] @@ -69,12 +68,10 @@ class SearchOperations { } newVertex } - catch { case ex: MiddlewareException => throw ex case e: Throwable => e.printStackTrace() - println("IN catch operation", e) throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name(), DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) } @@ -94,10 +91,6 @@ class SearchOperations { throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, DACErrorMessageConstants.INVALID_IDENTIFIER + " | ['Get Node By Unique Id' Query Generation Failed.]") -// g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee").union(identity(), outE().as("r").inV().as("__endNode") -// .select("ee", "r", "__endNode"), inE().as("r").outV().as("__startNode") -// .select("ee", "r", "__startNode")).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] - g.V().hasLabel(graphId).has("IL_UNIQUE_ID", vertexId).as("ee") .project("ee", "r", "__startNode", "__endNode") .by(identity()) @@ -106,12 +99,8 @@ class SearchOperations { .by(outE().inV().elementMap().fold()) .toList() -// g.V().hasLabel(graphId).has("IL_UNIQUE_ID", 
vertexId).as("ee").union(project("id", "label").by(id()).by(label()), outE().as("r").inV().as("__endNode") -// .select("ee", "r", "__endNode") .by(project("id", "label").by(id()).by(label())) .by(project("id", "label", "inVLabel", "outVLabel", "inV", "outV") .by(id()).by(label()).by(inV().label()).by(outV().label()).by(inV().id()).by(outV().id())) .by(project("id", "label").by(id()).by(label())), inE().as("r").outV().as("__startNode") -// .select("ee", "r", "__startNode") .by(project("id", "label").by(id()).by(label())) .by(project("id", "label", "inVLabel", "outVLabel", "inV", "outV") .by(id()).by(label()).by(inV().label()).by(outV().label()).by(inV().id()).by(outV().id())) .by(project("id", "label").by(id()).by(label())) ).toList.asInstanceOf[util.List[util.Map[String, AnyRef]]] - } - else new util.ArrayList[util.Map[String, AnyRef]] + else throw new ClientException(DACErrorCodeConstants.INVALID_PARAMETER.name, DACErrorMessageConstants.INVALID_PARAM_MAP ) } catch { case e :Exception => @@ -122,18 +111,13 @@ class SearchOperations { private def getRecordValues(result: util.Map[String, AnyRef], nodeMap :util.Map[Object, AnyRef], relationMap :util.Map[Object, AnyRef], startNodeMap :util.Map[Object, AnyRef], endNodeMap :util.Map[Object, AnyRef] ): Unit = { if (null != nodeMap) { val vertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_NODE_OBJECT) - println("vertexValue ", vertexValue) - if(null != vertexValue && vertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { + if (null != vertexValue && vertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val gremlinVertex : org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] - println("gremlinVertex ", gremlinVertex) nodeMap.put(gremlinVertex.id(), gremlinVertex) - } else { - println("In ELSE ", vertexValue) } } if (null != 
relationMap) { val edgeValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT) - println("edgeValue ", edgeValue) if (null != edgeValue && edgeValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Edge]) { val edge: org.apache.tinkerpop.gremlin.structure.Edge = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT).asInstanceOf[Edge] nodeMap.put(edge.id(), edge) @@ -141,7 +125,6 @@ class SearchOperations { } if (null != startNodeMap) { val startVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT) - println("startVertexValue ", startVertexValue) if (null != startVertexValue && startVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val startVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] nodeMap.put(startVertex.id(), startVertex) @@ -149,7 +132,6 @@ class SearchOperations { } if (null != endNodeMap) { val endVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT) - println("endVertexValue ", endVertexValue) if (null != endVertexValue && endVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val endVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] nodeMap.put(endVertex.id(), endVertex) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index 4f1a5b243..e09bfbcd5 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ 
b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -2,7 +2,8 @@ package org.sunbird.janus.service.operation import org.apache.commons.lang3.{BooleanUtils, StringUtils} import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.{GraphTraversal, GraphTraversalSource} -import org.sunbird.common.exception.{ClientException, ServerException} +import org.sunbird.common.dto.Request +import org.sunbird.common.exception.{ClientException, ResourceNotFoundException, ServerException} import org.sunbird.common.{DateUtils, JsonUtils} import org.sunbird.graph.common.Identifier import org.sunbird.graph.common.enums.{AuditProperties, GraphDACParams, SystemProperties} @@ -49,6 +50,7 @@ class VertexOperations { } catch { case e: Throwable => + e.printStackTrace() e.getCause match { case cause: org.apache.tinkerpop.gremlin.driver.exception.ResponseException => throw new ClientException( @@ -81,20 +83,40 @@ class VertexOperations { parameterMap.put(GraphDACParams.nodeId.name, vertexId) parameterMap.put(GraphDACParams.request.name, request) - deleteQuery(parameterMap, g) - - true + executeVertexDeletion(parameterMap, g) } catch { - case e: Exception => throw e + case e: Throwable => + e.printStackTrace() + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, + DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) } - } - } - def deleteQuery(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource): Unit = { - + private def executeVertexDeletion(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource): Boolean = { + if (null != parameterMap) { + val graphId = parameterMap.get(GraphDACParams.graphId.name).asInstanceOf[String] + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Remove Property Values Query Generation Failed.]") + + val nodeId = 
parameterMap.get(GraphDACParams.nodeId.name).asInstanceOf[String] + if (StringUtils.isBlank(nodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | [Remove Property Values Query Generation Failed.]") + + val traversal = g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name(), nodeId) + if (traversal.hasNext) { + traversal.drop().iterate() + true + } else { + throw new ResourceNotFoundException(DACErrorCodeConstants.NOT_FOUND.name, + DACErrorMessageConstants.NODE_NOT_FOUND + " | [Invalid Node Id.]: " + nodeId, nodeId) + } + } else { + throw new ClientException(DACErrorCodeConstants.INVALID_PARAMETER.name, DACErrorMessageConstants.INVALID_PARAM_MAP) + } } private def createVertexTraversal(parameterMap: util.Map[String, AnyRef], graphTraversalSource: GraphTraversalSource): GraphTraversal[org.apache.tinkerpop.gremlin.structure.Vertex, org.apache.tinkerpop.gremlin.structure.Vertex] = { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index 10aa933dc..193d65dc0 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -57,7 +57,7 @@ object DataVertex { } } - def deleteNode(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[java.lang.Boolean] = { + def deleteVertex(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[java.lang.Boolean] = { val identifier: String = request.getRequest.getOrDefault("identifier", "").asInstanceOf[String] oec.janusGraphService.deleteNode(request.graphId, identifier, request) } @@ -74,11 +74,8 @@ object DataVertex { updateEdges(request.graphId, vertex, request.getContext) ) futureList.map(list => result) 
- }) - .flatMap(f => f) - .recoverWith { case e: CompletionException => throw e.getCause } - }).flatMap(f => f) - .recoverWith { case e: CompletionException => throw e.getCause } + }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } + }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } } /* private def updateEdges(vertex: Vertex, graphId: String, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { From 6fa3853214e6076dc78c29cfd68b38309fb4ac8f Mon Sep 17 00:00:00 2001 From: aimansharief Date: Mon, 27 May 2024 15:09:24 +0530 Subject: [PATCH 16/20] Issue #KN-1041 fix: Added getNodeProperty and checkCyclicLoop --- .../org/sunbird/graph/JanusGraphService.scala | 11 +- .../service/operation/SearchOperations.scala | 162 +++++++++++++++++- .../service/operation/VertexOperations.scala | 7 +- .../org/sunbird/actors/CategoryActor.scala | 7 +- 4 files changed, 172 insertions(+), 15 deletions(-) diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index 6a2873be8..4da63d219 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -1,7 +1,7 @@ package org.sunbird.graph import org.sunbird.common.Platform -import org.sunbird.common.dto.{Request, Response} +import org.sunbird.common.dto.{Property, Request, Response} import org.sunbird.graph.dac.model.Vertex import org.sunbird.graph.util.CSPMetaUtil import org.sunbird.janus.service.operation.{EdgeOperations, SearchOperations, VertexOperations} @@ -42,14 +42,21 @@ class JanusGraphService { def deleteNode(graphId: String, vertexId: String, request: Request): Future[java.lang.Boolean] = { VertexOperations.deleteVertex(graphId, 
vertexId, request) } + def upsertVertex(graphId: String, vertex: Vertex, request: Request): Future[Vertex] = { if (isrRelativePathEnabled) { val metadata = CSPMetaUtil.updateRelativePath(vertex.getMetadata) vertex.setMetadata(metadata) } - // Assuming VertexOperations provides access to JanusGraph vertex upsert VertexOperations.upsertVertex(graphId, vertex, request) .map(resVertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resVertex) else resVertex) } + def getNodeProperty(graphId: String, identifier: String, property: String): Future[Property] = { + SearchOperations.getNodeProperty(graphId, identifier, property).map(property => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(property) else property) + } + + def checkCyclicLoop(graphId: String, endNodeId: String, startNodeId: String, relationType: String) = { + SearchOperations.checkCyclicLoop(graphId, endNodeId, relationType, startNodeId) + } } diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala index ca24553e4..386e0f4c5 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/SearchOperations.scala @@ -4,20 +4,20 @@ import org.apache.commons.collections4.CollectionUtils import org.apache.commons.lang3.StringUtils import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource import org.sunbird.janus.dac.util.GremlinVertexUtil -import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__._ +import org.apache.tinkerpop.gremlin.groovy.jsr223.dsl.credential.__._ import org.sunbird.graph.dac.model.Vertex import org.apache.tinkerpop.gremlin.structure.Edge -import org.sunbird.common.dto.Request +import org.sunbird.common.dto.{Property, Request} import 
org.sunbird.common.exception.{ClientException, MiddlewareException, ResourceNotFoundException, ServerException} -import org.sunbird.graph.common.enums.GraphDACParams +import org.sunbird.graph.common.enums.{GraphDACParams, SystemProperties} import org.sunbird.graph.service.common.{CypherQueryConfigurationConstants, DACErrorCodeConstants, DACErrorMessageConstants} import org.sunbird.janus.service.util.JanusConnectionUtil import org.sunbird.telemetry.logger.TelemetryManager - +import java.lang.Boolean import java.util import scala.concurrent.{ExecutionContext, Future} import ExecutionContext.Implicits.global -import scala.collection.JavaConverters.asScalaBufferConverter + class SearchOperations { val graphConnection = new JanusConnectionUtil @@ -78,6 +78,158 @@ class SearchOperations { } } + def getNodeProperty(graphId: String, vertexId: String, key: String): Future[Property] = { + Future { + TelemetryManager.log("Graph Id: " + graphId + "\nNode Id: " + vertexId + "\nProperty (Key): " + key) + + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Get Node Property' Operation Failed.]") + + if (StringUtils.isBlank(vertexId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | ['Get Node Property' Operation Failed.]") + + if (StringUtils.isBlank(key)) + throw new ClientException(DACErrorCodeConstants.INVALID_PROPERTY.name, + DACErrorMessageConstants.INVALID_PROPERTY_KEY + " | ['Get Node Property' Operation Failed.]") + + val property = new Property() + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + val parameterMap = new util.HashMap[String, AnyRef] + parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put(GraphDACParams.nodeId.name, vertexId) + parameterMap.put(GraphDACParams.key.name, key) + + val 
nodeProperty = executeGetNodeProperty(parameterMap, g) + val elementMap = nodeProperty.elementMap().next() + if (null != elementMap && null != elementMap.get(key)){ + property.setPropertyName(key) + property.setPropertyValue(elementMap.get(key)) + } + property + } + catch { + case e: Throwable => + e.printStackTrace() + e.getCause match { + case _: NoSuchElementException | _: ResourceNotFoundException => + throw new ResourceNotFoundException(DACErrorCodeConstants.NOT_FOUND.name, DACErrorMessageConstants.NODE_NOT_FOUND + " | [Invalid Node Id.]: " + vertexId, vertexId) + case _ => + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) + } + } + } + + } + + def checkCyclicLoop(graphId: String, startNodeId: String, relationType: String, endNodeId: String): util.Map[String, AnyRef] = { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Check Cyclic Loop' Operation Failed.]") + + if (StringUtils.isBlank(startNodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_START_NODE_ID + " | ['Check Cyclic Loop' Operation Failed.]") + + if (StringUtils.isBlank(relationType)) + throw new ClientException(DACErrorCodeConstants.INVALID_RELATION.name, + DACErrorMessageConstants.INVALID_RELATION_TYPE + " | ['Check Cyclic Loop' Operation Failed.]") + + if (StringUtils.isBlank(endNodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_END_NODE_ID + " | ['Check Cyclic Loop' Operation Failed.]") + + val cyclicLoopMap = new util.HashMap[String, AnyRef] + + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + val parameterMap = new util.HashMap[String, AnyRef] + 
parameterMap.put(GraphDACParams.graphId.name, graphId) + parameterMap.put(GraphDACParams.startNodeId.name, startNodeId) + parameterMap.put(GraphDACParams.relationType.name, relationType) + parameterMap.put(GraphDACParams.endNodeId.name, endNodeId) + + val result = generateCheckCyclicLoopTraversal(parameterMap, g) + if (null != result && result.hasNext) { + cyclicLoopMap.put(GraphDACParams.loop.name, new Boolean(true)) + cyclicLoopMap.put(GraphDACParams.message.name, startNodeId + " and " + endNodeId + " are connected by relation: " + relationType) + } + else + cyclicLoopMap.put(GraphDACParams.loop.name, new Boolean(false)) + + } + TelemetryManager.log("Returning Cyclic Loop Map: ", cyclicLoopMap) + cyclicLoopMap + } + + + def generateCheckCyclicLoopTraversal(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource) = { + + if (null != parameterMap) { + val graphId = parameterMap.get(GraphDACParams.graphId.name).asInstanceOf[String] + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Check Cyclic Loop' Query Generation Failed.]") + + val startNodeId = parameterMap.get(GraphDACParams.startNodeId.name).asInstanceOf[String] + if (StringUtils.isBlank(startNodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_START_NODE_ID + " | ['Check Cyclic Loop' Query Generation Failed.]") + + val relationType = parameterMap.get(GraphDACParams.relationType.name).asInstanceOf[String] + if (StringUtils.isBlank(relationType)) + throw new ClientException(DACErrorCodeConstants.INVALID_RELATION.name, + DACErrorMessageConstants.INVALID_RELATION_TYPE + " | ['Check Cyclic Loop' Query Generation Failed.]") + + val endNodeId = parameterMap.get(GraphDACParams.endNodeId.name).asInstanceOf[String] + if (StringUtils.isBlank(endNodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + 
DACErrorMessageConstants.INVALID_END_NODE_ID + " | ['Check Cyclic Loop' Query Generation Failed.]") + + val cyclicTraversal = g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name(), startNodeId) + .repeat(outE(relationType).inV().simplePath()).until(has(SystemProperties.IL_UNIQUE_ID.name(), endNodeId)) + .hasLabel(graphId) + cyclicTraversal + } + else throw new ClientException(DACErrorCodeConstants.INVALID_PARAMETER.name, DACErrorMessageConstants.INVALID_PARAM_MAP) + } + + + def executeGetNodeProperty(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource) = { + try { + if (null != parameterMap) { + val graphId = parameterMap.get(GraphDACParams.graphId.name).asInstanceOf[String] + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | ['Get Node Property' Query Generation Failed.]") + + val nodeId = parameterMap.get(GraphDACParams.nodeId.name).asInstanceOf[String] + if (StringUtils.isBlank(nodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | ['Get Node Property' Query Generation Failed.]") + + val key = parameterMap.get(GraphDACParams.key.name).asInstanceOf[String] + if (StringUtils.isBlank(key)) + throw new ClientException(DACErrorCodeConstants.INVALID_PROPERTY.name, + DACErrorMessageConstants.INVALID_PROPERTY_KEY + " | ['Get Node Property' Query Generation Failed.]") + + g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name, nodeId).values(key) + + } + else throw new ClientException(DACErrorCodeConstants.INVALID_PARAMETER.name, DACErrorMessageConstants.INVALID_PARAM_MAP) + } + catch { + case e: Exception => + throw new ServerException(DACErrorCodeConstants.SERVER_ERROR.name, "Error! Something went wrong while creating node object. 
", e.getCause); + } + } + private def getVertexByUniqueId(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource): util.List[util.Map[String, AnyRef]] = { try { if (null != parameterMap) { diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index e09bfbcd5..3afbd8b0d 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -11,11 +11,8 @@ import org.sunbird.graph.dac.model.Vertex import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} import org.sunbird.janus.service.util.JanusConnectionUtil import org.sunbird.telemetry.logger.TelemetryManager -import org.sunbird.common.dto.Request - import java.util import scala.collection.convert.ImplicitConversions.{`map AsJavaMap`, `map AsScala`} -import scala.collection.immutable.HashMap import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future @@ -119,7 +116,7 @@ class VertexOperations { } } - private def createVertexTraversal(parameterMap: util.Map[String, AnyRef], graphTraversalSource: GraphTraversalSource): GraphTraversal[org.apache.tinkerpop.gremlin.structure.Vertex, org.apache.tinkerpop.gremlin.structure.Vertex] = { + private def createVertexTraversal(parameterMap: util.Map[String, AnyRef], g: GraphTraversalSource): GraphTraversal[org.apache.tinkerpop.gremlin.structure.Vertex, org.apache.tinkerpop.gremlin.structure.Vertex] = { if (null != parameterMap) { val graphId = parameterMap.getOrDefault(GraphDACParams.graphId.name,"").asInstanceOf[String] val vertex = parameterMap.getOrDefault(GraphDACParams.vertex.name, null).asInstanceOf[Vertex] @@ -147,7 +144,7 @@ class VertexOperations { 
parameterMap.put(GraphDACParams.paramValueMap.name, combinedMap) - val newVertexTraversal = graphTraversalSource.addV(vertex.getGraphId) + val newVertexTraversal = g.addV(vertex.getGraphId) val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] finalMap.foreach { case (key, value) => newVertexTraversal.property(key, value) } diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala index 49d9a5e84..34ead38c5 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryActor.scala @@ -13,6 +13,7 @@ import org.sunbird.graph.nodes.DataNode import org.sunbird.utils.{Constants, RequestUtil} import org.sunbird.mangers.FrameworkManager import org.sunbird.cache.impl.RedisCache +import org.sunbird.graph.vertex.DataVertex import scala.concurrent.{ExecutionContext, Future} @@ -37,14 +38,14 @@ class CategoryActor @Inject()(implicit oec: OntologyEngineContext) extends BaseA request.getRequest.put(Constants.IDENTIFIER, code) RedisCache.delete("masterCategories") FrameworkManager.validateTranslationMap(request) - DataNode.create(request).map(node => { + DataVertex.create(request).map(node => { ResponseHandler.OK.put(Constants.IDENTIFIER, node.getIdentifier).put(Constants.NODE_ID, node.getIdentifier) }) } private def read(request: Request): Future[Response] = { - DataNode.read(request).map(node => { - val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, null, request.getContext.get("schemaName").asInstanceOf[String], request.getContext.get("version").asInstanceOf[String]) + DataVertex.read(request).map(node => { + val metadata: util.Map[String, AnyRef] = NodeUtil.serializeVertex(node, null, 
request.getContext.get("schemaName").asInstanceOf[String], request.getContext.get("version").asInstanceOf[String]) ResponseHandler.OK.put("category", metadata) }) } From b44ef9d4b883090bea4db15f10f24f3a56f2f246 Mon Sep 17 00:00:00 2001 From: shourya-solanki Date: Mon, 27 May 2024 18:25:27 +0530 Subject: [PATCH 17/20] Issue #KN-1041 fix: 1. added updateVertexes changes, 2. added updateRootVertex changes --- .../org/sunbird/graph/JanusGraphService.scala | 7 + .../service/operation/VertexOperations.scala | 151 +++++++++++++++++- .../org/sunbird/graph/vertex/DataVertex.scala | 17 +- 3 files changed, 165 insertions(+), 10 deletions(-) diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index 6a2873be8..0573612d0 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -52,4 +52,11 @@ class JanusGraphService { .map(resVertex => if (isrRelativePathEnabled) CSPMetaUtil.updateAbsolutePath(resVertex) else resVertex) } + def upsertRootNode(graphId: String, request: Request): Future[Vertex] = { + VertexOperations.upsertRootVertex(graphId, request) + } + def updateVertexes(graphId: String, identifiers: java.util.List[String], metadata: java.util.Map[String, AnyRef]): Future[java.util.Map[String, Vertex]] = { + val updatedMetadata = if (isrRelativePathEnabled) CSPMetaUtil.updateRelativePath(metadata) else metadata + VertexOperations.updateVertexes(graphId, identifiers, updatedMetadata) + } } diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index e09bfbcd5..fa38efc6c 100644 --- 
a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -1,5 +1,6 @@ package org.sunbird.janus.service.operation +import org.apache.commons.collections4.MapUtils import org.apache.commons.lang3.{BooleanUtils, StringUtils} import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.{GraphTraversal, GraphTraversalSource} import org.sunbird.common.dto.Request @@ -7,15 +8,16 @@ import org.sunbird.common.exception.{ClientException, ResourceNotFoundException, import org.sunbird.common.{DateUtils, JsonUtils} import org.sunbird.graph.common.Identifier import org.sunbird.graph.common.enums.{AuditProperties, GraphDACParams, SystemProperties} +import org.sunbird.graph.dac.enums.SystemNodeTypes import org.sunbird.graph.dac.model.Vertex import org.sunbird.graph.service.common.{DACErrorCodeConstants, DACErrorMessageConstants} import org.sunbird.janus.service.util.JanusConnectionUtil import org.sunbird.telemetry.logger.TelemetryManager -import org.sunbird.common.dto.Request import java.util -import scala.collection.convert.ImplicitConversions.{`map AsJavaMap`, `map AsScala`} +import scala.collection.convert.ImplicitConversions.{`collection AsScalaIterable`, `map AsJavaMap`, `map AsScala`} import scala.collection.immutable.HashMap +import scala.collection.mutable import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future @@ -221,7 +223,6 @@ class VertexOperations { graphConnection.initialiseGraphClient() val g: GraphTraversalSource = graphConnection.getGraphTraversalSource - // Find existing vertex by identifier (replace with your logic if needed) val existingVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.name, vertex.getIdentifier) val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] 
finalMap.foreach { case (key, value) => @@ -323,6 +324,150 @@ class VertexOperations { paramMap } + def upsertRootVertex(graphId: String, request: AnyRef): Future[Vertex] = { + Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name(), + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Upsert Root Node Operation Failed.]") + + val g = graphConnection.getGraphTraversalSource + + // Generate Root Node Id (assuming similar logic as the provided code) + val rootNodeUniqueId = Identifier.getIdentifier(graphId, SystemNodeTypes.ROOT_NODE.name()) + + val vertex = new Vertex // Assuming you have a Vertex constructor + vertex.setIdentifier(rootNodeUniqueId) + vertex.getMetadata().put(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId) + vertex.getMetadata().put(SystemProperties.IL_SYS_NODE_TYPE.name, SystemNodeTypes.ROOT_NODE.name) + vertex.getMetadata().put(AuditProperties.createdOn.name, DateUtils.formatCurrentDate()) + vertex.getMetadata().put(GraphDACParams.Nodes_Count.name, 0: Integer) + vertex.getMetadata().put(GraphDACParams.Relations_Count.name, 0: Integer) + + val parameterMap = Map( + GraphDACParams.graphId.name -> graphId, + GraphDACParams.rootNode.name -> vertex, + GraphDACParams.request.name -> request + ) + + try { + val existingRootNode = Option(g.V().has(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId)) + + existingRootNode match { + case Some(v) => + v.property(SystemProperties.IL_UNIQUE_ID.name, vertex.getId) + .property(AuditProperties.createdOn.name, DateUtils.formatCurrentDate()) + .next() + } + + val retrievedVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId).elementMap().next() + vertex.setId(retrievedVertex.get(SystemProperties.IL_UNIQUE_ID.name)) + + vertex + } + catch { + case e: Throwable => + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, + DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage, e) + } + } + } + + def 
updateVertexes(graphId: String, identifiers: java.util.List[String], data: java.util.Map[String, AnyRef]): Future[util.Map[String, Vertex]] = { + Future { + if (StringUtils.isBlank(graphId)) + throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, + DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Invalid or 'null' Graph Id.]") + + + if (identifiers.isEmpty) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, + DACErrorMessageConstants.INVALID_IDENTIFIER + " | [Please Provide Node Identifier.]") + + if (MapUtils.isEmpty(data)) + throw new ClientException(DACErrorCodeConstants.INVALID_METADATA.name, + DACErrorMessageConstants.INVALID_METADATA + " | [Please Provide Valid Node Metadata]") + + val parameterMap = generateUpdateVerticesQuery(graphId, identifiers, setPrimitiveData(data)) + + try { + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + + val updatedVertices = identifiers.foldLeft(List.empty[Vertex]) { + (acc: List[Vertex], identifier: String) => + val existingVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.toString, identifier) + val finalMap = parameterMap.getOrDefault(GraphDACParams.paramValueMap.name, new util.HashMap[String, AnyRef]).asInstanceOf[util.Map[String, AnyRef]] + + finalMap.foreach { case (key, value) => + if (!key.equals(GraphDACParams.graphId.name) && !key.equals(GraphDACParams.request.name)) { + existingVertex.property(key, value) + } + } + + val updatedVertex = existingVertex.toList().head.asInstanceOf[Vertex] + + acc :+ updatedVertex + } + + + val resultMap = updatedVertices.map(vertex => { + val identifier = vertex.getMetadata.get(SystemProperties.IL_UNIQUE_ID.name).asInstanceOf[String] + val newVertex = new Vertex + newVertex.setIdentifier(identifier) + newVertex + }) + + resultMap.asInstanceOf[Map[String, Vertex]] + } + catch { + case e: Throwable => + e.printStackTrace() + e match { + case cause: 
org.apache.tinkerpop.gremlin.driver.exception.ResponseException => + throw new ClientException( + DACErrorCodeConstants.CONSTRAINT_VALIDATION_FAILED.name(), + DACErrorMessageConstants.CONSTRAINT_VALIDATION_FAILED + " | Updating multiple nodes failed." + ) + case cause => + val errorMessage = DACErrorMessageConstants.CONNECTION_PROBLEM + " | " + e.getMessage + throw new ServerException(DACErrorCodeConstants.CONNECTION_PROBLEM.name, errorMessage, e) + } + } + } + } + + private def setPrimitiveData(metadata: java.util.Map[String, AnyRef]): mutable.Map[String, Object] = { + metadata.flatMap { case (key, value) => + val processedValue = value match { + case map: Map[Any, Any] => + try { + JsonUtils.serialize(map) + } catch { + case e: Exception => + TelemetryManager.error("Exception Occurred While Processing Primitive Data Types | Exception is : " + e.getMessage(), e) + value + } + case list: List[_] if list.nonEmpty && list.head.isInstanceOf[Map[Any, Any]] => + try { + JsonUtils.serialize(list) + } catch { + case e: Exception => + TelemetryManager.error("Exception Occurred While Processing Primitive Data Types | Exception is : " + e.getMessage(), e) + value + } + case _ => value + } + Some((key, processedValue)) + } + } + + def generateUpdateVerticesQuery(graphId: String, identifiers: java.util.List[String], data: mutable.Map[String, AnyRef]): Map[String, Object] = { + val parameterMap = new HashMap[String, Object] + parameterMap.put("identifiers", identifiers); + parameterMap.putAll(data); + parameterMap; + } + private def setPrimitiveData(vertex: Vertex): Vertex = { val metadata: util.Map[String, AnyRef] = vertex.getMetadata metadata.forEach((key, value) => { diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala index 193d65dc0..d7c99efa1 100644 --- 
a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/vertex/DataVertex.scala @@ -1,22 +1,20 @@ package org.sunbird.graph.vertex -import java.util -import java.util.Optional -import java.util.concurrent.CompletionException import org.apache.commons.collections4.{CollectionUtils, MapUtils} import org.apache.commons.lang3.StringUtils -import org.sunbird.common.DateUtils import org.sunbird.common.dto.{Request, Response} import org.sunbird.common.exception.{ClientException, ErrorCodes, ResponseCode} import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.common.enums.SystemProperties -import org.sunbird.graph.dac.model.{Edges, Filter, MetadataCriterion, SearchConditions, SearchCriteria, Vertex} +import org.sunbird.graph.dac.model.{Edges, Vertex} import org.sunbird.graph.nodes.DataNode.{saveExternalProperties, updateExternalProperties} import org.sunbird.graph.schema.{DefinitionDTO, DefinitionFactory, DefinitionNode} import org.sunbird.parseq.Task -import scala.collection.convert.ImplicitConversions._ +import java.util +import java.util.Optional +import java.util.concurrent.CompletionException import scala.collection.JavaConverters._ +import scala.collection.convert.ImplicitConversions._ import scala.concurrent.{ExecutionContext, Future} object DataVertex { @@ -78,6 +76,11 @@ object DataVertex { }).flatMap(f => f) recoverWith { case e: CompletionException => throw e.getCause } } + def bulkUpdate(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[util.Map[String, Vertex]] = { + val identifiers: util.List[String] = request.get("identifiers").asInstanceOf[util.List[String]] + val metadata: util.Map[String, AnyRef] = request.get("metadata").asInstanceOf[util.Map[String, AnyRef]] + oec.janusGraphService.updateVertexes(request.graphId, identifiers, metadata) + } /* private def updateEdges(vertex: Vertex, graphId: 
String, context: util.Map[String, AnyRef])(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Response] = { val request = new Request // Assuming Request is required for JanusGraph request.setContext(context) From aed89693de4d5806832b58f5388771e147bea46a Mon Sep 17 00:00:00 2001 From: shourya-solanki Date: Mon, 27 May 2024 18:46:26 +0530 Subject: [PATCH 18/20] Issue #KN-1041 fix: updated updateVertexes code --- .../service/operation/VertexOperations.scala | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala index 274e2c28e..f02d44662 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/VertexOperations.scala @@ -332,10 +332,9 @@ class VertexOperations { val g = graphConnection.getGraphTraversalSource - // Generate Root Node Id (assuming similar logic as the provided code) val rootNodeUniqueId = Identifier.getIdentifier(graphId, SystemNodeTypes.ROOT_NODE.name()) - val vertex = new Vertex // Assuming you have a Vertex constructor + val vertex = new Vertex vertex.setIdentifier(rootNodeUniqueId) vertex.getMetadata().put(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId) vertex.getMetadata().put(SystemProperties.IL_SYS_NODE_TYPE.name, SystemNodeTypes.ROOT_NODE.name) @@ -350,19 +349,20 @@ class VertexOperations { ) try { - val existingRootNode = Option(g.V().has(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId)) + val existingRootNode = g.V().has(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId).next() - existingRootNode match { - case Some(v) => - v.property(SystemProperties.IL_UNIQUE_ID.name, vertex.getId) - .property(AuditProperties.createdOn.name, 
DateUtils.formatCurrentDate()) - .next() - } + val updatedVertex = existingRootNode.property(AuditProperties.createdOn.name, DateUtils.formatCurrentDate()) + + val identifier = updatedVertex.property(SystemProperties.IL_UNIQUE_ID.name).value().toString + val versionKey = Option(updatedVertex.property(GraphDACParams.versionKey.name)).map(_.value().toString).getOrElse("") - val retrievedVertex = g.V().has(SystemProperties.IL_UNIQUE_ID.name, rootNodeUniqueId).elementMap().next() - vertex.setId(retrievedVertex.get(SystemProperties.IL_UNIQUE_ID.name)) + vertex.setIdentifier(identifier) + vertex.setGraphId(graphId) + if (StringUtils.isNotBlank(versionKey)) + vertex.getMetadata.put(GraphDACParams.versionKey.name, versionKey) vertex + } catch { case e: Throwable => From 9689f735aa6ec9753fb03ae690ff78022762eef1 Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Mon, 27 May 2024 19:12:31 +0530 Subject: [PATCH 19/20] Subgraph changes --- .../org/sunbird/graph/JanusGraphService.scala | 6 +- .../graph/dac/model/VertexSubGraph.java | 28 ++++++ .../service/operation/EdgeOperations.scala | 89 ++++++++++++++++++- .../org/sunbird/graph/path/DataSubGraph.scala | 9 +- .../sunbird/mangers/FrameworkManager.scala | 79 +++++++++++++++- 5 files changed, 204 insertions(+), 7 deletions(-) create mode 100644 ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/VertexSubGraph.java diff --git a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala index 2a20fa099..9f09b1f9f 100644 --- a/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala +++ b/ontology-engine/graph-core_2.12/src/main/scala/org/sunbird/graph/JanusGraphService.scala @@ -2,7 +2,7 @@ package org.sunbird.graph import org.sunbird.common.Platform import org.sunbird.common.dto.{Property, Request, Response} -import org.sunbird.graph.dac.model.Vertex +import 
org.sunbird.graph.dac.model.{Vertex, VertexSubGraph} import org.sunbird.graph.util.CSPMetaUtil import org.sunbird.janus.service.operation.{EdgeOperations, SearchOperations, VertexOperations} @@ -68,4 +68,8 @@ class JanusGraphService { def checkCyclicLoop(graphId: String, endNodeId: String, startNodeId: String, relationType: String) = { SearchOperations.checkCyclicLoop(graphId, endNodeId, relationType, startNodeId) } + + def getSubGraph(graphId: String, nodeId: String, depth: Int): Future[VertexSubGraph] = { + EdgeOperations.getSubGraph(graphId, nodeId, depth) + } } \ No newline at end of file diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/VertexSubGraph.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/VertexSubGraph.java new file mode 100644 index 000000000..5df0ed7cd --- /dev/null +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/VertexSubGraph.java @@ -0,0 +1,28 @@ +package org.sunbird.graph.dac.model; + +import java.util.List; +import java.util.Map; + +public class VertexSubGraph { + private Map vertexs; + private List edges; + + public VertexSubGraph(Map vertexs, List edges) { + this.vertexs = vertexs; + this.edges = edges; + } + + public Map getVertexs() { return vertexs; } + + public void setVertexs(Map vertexs) { + this.vertexs = vertexs; + } + + public List getEdges() { + return edges; + } + + public void setEdges(List edges) { + this.edges = edges; + } +} diff --git a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala index 10e5282e0..5966b93c4 100644 --- a/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala +++ b/ontology-engine/graph-dac/src/main/scala/org/sunbird/janus/service/operation/EdgeOperations.scala @@ -2,23 +2,34 @@ package org.sunbird.janus.service.operation 
import org.apache.commons.collections4.CollectionUtils import org.apache.commons.lang3.StringUtils +import org.apache.tinkerpop.gremlin.groovy.jsr223.dsl.credential.__ +import org.apache.tinkerpop.gremlin.groovy.jsr223.dsl.credential.__.{bothE, outE} import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource -import org.apache.tinkerpop.gremlin.structure.{Edge, Vertex} +import org.apache.tinkerpop.gremlin.structure.{Edge, Graph, Vertex} import org.janusgraph.core.JanusGraph import org.sunbird.common.dto.{Response, ResponseHandler} import org.sunbird.common.exception.ClientException import org.sunbird.graph.common.enums.SystemProperties +import org.sunbird.graph.dac.model.{Edges, Node, VertexSubGraph} import org.sunbird.graph.service.common.{CypherQueryConfigurationConstants, DACErrorCodeConstants, DACErrorMessageConstants} +import org.sunbird.janus.dac.util.GremlinVertexUtil import org.sunbird.janus.service.util.JanusConnectionUtil +import org.sunbird.telemetry.logger.TelemetryManager import java.util -import scala.collection.JavaConverters.asScalaBufferConverter +import java.util.{HashMap, List, Map, Set} +import java.util.stream.Collectors +import scala.collection.JavaConverters.{asJavaIterableConverter, asScalaBufferConverter, asScalaIteratorConverter, mapAsScalaMapConverter} +import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable` +import scala.collection.immutable.HashSet import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future +import scala.collection.JavaConverters._ class EdgeOperations { val graphConnection = new JanusConnectionUtil + val gremlinVertexUtil = new GremlinVertexUtil def createEdges(graphId: String, edgeData: util.List[util.Map[String, AnyRef]]): Future[Response] = { Future{ if (StringUtils.isBlank(graphId)) @@ -84,4 +95,78 @@ class EdgeOperations { } } + + def getSubGraph(graphId: String, nodeId: String, depth: Integer) = { + if (StringUtils.isBlank(graphId)) + 
throw new ClientException(DACErrorCodeConstants.INVALID_GRAPH.name, DACErrorMessageConstants.INVALID_GRAPH_ID + " | [Get SubGraph Operation Failed.]") + if (StringUtils.isBlank(nodeId)) + throw new ClientException(DACErrorCodeConstants.INVALID_IDENTIFIER.name, DACErrorMessageConstants.INVALID_IDENTIFIER + " | [Please Provide Node Identifier.]") + var effectiveDepth:Integer = if (depth == null) 5 else depth + + graphConnection.initialiseGraphClient() + val g: GraphTraversalSource = graphConnection.getGraphTraversalSource + TelemetryManager.log("Driver Initialised. | [Graph Id: " + graphId + "]") + + val relationMap = new util.HashMap[Object, AnyRef]() + var nodes = new util.HashSet[org.sunbird.graph.dac.model.Vertex] + var relations = new util.HashSet[org.sunbird.graph.dac.model.Edges] + val startNodeMap = new util.HashMap[Object, AnyRef] + val endNodeMap = new util.HashMap[Object, AnyRef] + + var results = g.V().hasLabel(graphId).has(SystemProperties.IL_UNIQUE_ID.name, nodeId).as("n") + .emit().repeat(outE().inV().simplePath()).times(5).as("m") + .outE().as("r2").inV().as("l") + .select("n", "m", "r2", "l") + .unfold() + .project("relationName", "relationMetadata", "startNode", "endNode") + .by(__.select("r2").label()) + .by(__.select("r2").elementMap()) + .by(__.select("m")) + .by(__.select("l")) + .toList() + + + for (result <- results.asScala) { + val startNode = result.get("startNode").asInstanceOf[Vertex] + val endNode = result.get("endNode").asInstanceOf[Vertex] + val relationName = result.get("relationName").toString + val relationMetadata = result.get("relationMetadata").asInstanceOf[util.Map[String, Object]] + + nodes.add(gremlinVertexUtil.getNode(graphId, startNode, relationMap, startNodeMap, endNodeMap)) + nodes.add(gremlinVertexUtil.getNode(graphId, endNode, relationMap, startNodeMap, endNodeMap)) + + // Relation Metadata + val relData = new Edges( + startNode.property(SystemProperties.IL_UNIQUE_ID.name).value().toString, + relationName, + 
endNode.property(SystemProperties.IL_UNIQUE_ID.name).value().toString + ) + relData.setMetadata(relationMetadata) + relData.setStartVertexObjectType(startNode.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name).value().toString) + relData.setStartVertexName(startNode.property("name").value().toString) +// relData.setStartVertexType(startNode.property(SystemProperties.IL_SYS_NODE_TYPE.name).value().toString) + relData.setEndVertexObjectType(endNode.property(SystemProperties.IL_FUNC_OBJECT_TYPE.name).value().toString) + relData.setEndVertexName(endNode.property("name").value().toString) +// relData.setEndVertexType(endNode.property(SystemProperties.IL_SYS_NODE_TYPE.name).value().toString) + relData.setEndVertexType("DATA_NODE") + relData.setStartVertexType("DATA_NODE") + relations.add(relData) + } + + // Group nodes by their identifier and get the first node for each identifier + val uniqNodes = nodes.groupBy(_.getIdentifier).mapValues(_.head).values.toSet + + // Create a map with the node identifier as the key and the node itself as the value + val nodeMap= uniqNodes.map(node => node.getIdentifier -> node).toMap + val relationsList= relations.toList + + // Convert Scala collections to Java collections + val javaNodeMap: java.util.Map[String, org.sunbird.graph.dac.model.Vertex] = nodeMap.asJava + val javaRelationsList: java.util.List[org.sunbird.graph.dac.model.Edges] = relationsList.asJava + + // Create a VertexSubGraph instance + Future { + new VertexSubGraph(javaNodeMap, javaRelationsList) + } + } } diff --git a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala index e7605e4e4..960fde586 100644 --- a/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala +++ b/ontology-engine/graph-engine_2.12/src/main/scala/org/sunbird/graph/path/DataSubGraph.scala @@ -5,7 +5,7 @@ import 
org.apache.commons.lang3.StringUtils import org.sunbird.common.dto.Request import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.common.enums.SystemProperties -import org.sunbird.graph.dac.model.{Node, Relation, SubGraph} +import org.sunbird.graph.dac.model.{Node, Relation, SubGraph, VertexSubGraph} import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.schema.{DefinitionFactory, DefinitionNode, ObjectCategoryDefinition} import org.sunbird.graph.utils.NodeUtil @@ -28,6 +28,13 @@ object DataSubGraph { subGraph } + def readVertex(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[VertexSubGraph] = { + val identifier: String = request.get("identifier").asInstanceOf[String] + val depth: Int = request.getOrDefault("depth", 5).asInstanceOf[Int] + val subGraph: Future[VertexSubGraph] = oec.janusGraphService.getSubGraph(request.graphId, identifier, depth) + subGraph + } + def readSubGraph(request: Request)(implicit oec: OntologyEngineContext, ec: ExecutionContext): Future[Unit] = { val identifier: String = request.get("identifier").asInstanceOf[String] val schemaName: String = request.getContext.get("schemaName").asInstanceOf[String] diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala index dd6773a85..5819a8733 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala @@ -6,10 +6,9 @@ import org.sunbird.cache.impl.RedisCache import org.sunbird.common.{JsonUtils, Platform} import org.sunbird.common.dto.{Request, Response, ResponseHandler} import org.sunbird.common.exception.{ClientException, ServerException} -import org.sunbird.graph.OntologyEngineContext -import org.sunbird.graph.dac.model.{Relation, SubGraph} +import 
org.sunbird.graph.{JanusGraphService, OntologyEngineContext} +import org.sunbird.graph.dac.model.{Edges, Relation, SubGraph, VertexSubGraph} import org.sunbird.graph.nodes.DataNode - import org.sunbird.graph.schema.{DefinitionNode, ObjectCategoryDefinition} import org.sunbird.graph.utils.NodeUtil import org.sunbird.graph.utils.NodeUtil.{convertJsonProperties, handleKeyNames} @@ -113,6 +112,49 @@ object FrameworkManager { } } + def getCompleteMetadata(id: String, subGraph: VertexSubGraph, includeRelations: Boolean)(implicit oec: OntologyEngineContext, ec: ExecutionContext): util.Map[String, AnyRef] = { + val nodes = subGraph.getVertexs + val relations = subGraph.getEdges + val node = nodes.get(id) + val metadata = node.getMetadata + val objectType = node.getObjectType.toLowerCase().replace("image", "") + val channel = node.getMetadata.getOrDefault("channel", "all").asInstanceOf[String] + val definition: ObjectCategoryDefinition = DefinitionNode.getObjectCategoryDefinition("", objectType, channel) + val jsonProps = DefinitionNode.fetchJsonProps(node.getGraphId, schemaVersion, objectType, definition) + val updatedMetadata: util.Map[String, AnyRef] = metadata.entrySet().asScala.filter(entry => null != entry.getValue) + .map((entry: util.Map.Entry[String, AnyRef]) => handleKeyNames(entry, null) -> convertJsonProperties(entry, jsonProps)).toMap ++ + Map("objectType" -> node.getObjectType, "identifier" -> node.getIdentifier, "languageCode" -> NodeUtil.getLanguageCodes(node)) + + val fields = DefinitionNode.getMetadataFields(node.getGraphId, schemaVersion, objectType, definition) + val filteredData: util.Map[String, AnyRef] = if (fields.nonEmpty) updatedMetadata.filterKeys(key => fields.contains(key)) else updatedMetadata + + val relationDef = DefinitionNode.getRelationDefinitionMap(node.getGraphId, schemaVersion, objectType, definition) + val outRelations = relations.filter((rel: Edges) => { + StringUtils.equals(rel.getStartVertexId.toString(), node.getIdentifier) + 
}).sortBy((rel: Edges) => rel.getMetadata.get("IL_SEQUENCE_INDEX").asInstanceOf[Long])(Ordering.Long).toList + + if (includeRelations) { + val relMetadata = getEdgesAsMetadata(relationDef, outRelations, "out") + val childHierarchy = relMetadata.map(x => (x._1, x._2.map(a => { + val identifier = a.getOrElse("identifier", "") + val childNode = nodes.get(identifier) + val index = a.getOrElse("index", 1).asInstanceOf[Number] + val metaData = (childNode.getMetadata ++ Map("index" -> index)).asJava + childNode.setMetadata(metaData) + if ("associations".equalsIgnoreCase(x._1)) { + getCompleteMetadata(childNode.getIdentifier, subGraph, false) + } else { + getCompleteMetadata(childNode.getIdentifier, subGraph, true) + } + }).toList.asJava)) + val data = (filteredData ++ childHierarchy).asJava + println("final data ", data) + data + } else { + filteredData + } + } + def getRelationAsMetadata(definitionMap: Map[String, AnyRef], relationMap: util.List[Relation], direction: String) = { relationMap.asScala.map(rel => { @@ -136,6 +178,37 @@ object FrameworkManager { })).distinct.asJava )) } + def getEdgesAsMetadata(definitionMap: Map[String, AnyRef], relationMap: util.List[Edges], direction: String) = { + relationMap.asScala.map(rel => { + println("rel ", rel) + val endObjectType = rel.getEndVertexObjectType.replace("Image", "") + println("endObjectType ", endObjectType) + val relKey: String = rel.getEdgeType + "_" + direction + "_" + endObjectType + println("relKey ", relKey) + println("definitionMap ", definitionMap) + if (definitionMap.containsKey(relKey)) { + println("IN IF getEdgesAsMetadata") + val relData = Map[String, Object]("identifier" -> rel.getEndVertexId.replace(".img", ""), + "name" -> rel.getEndVertexName, + "objectType" -> endObjectType, + "relation" -> rel.getEdgeType, + "KEY" -> definitionMap.getOrDefault(relKey, "").asInstanceOf[String] + ) ++ rel.getMetadata.asScala + val indexMap = if (rel.getEdgeType.equals("hasSequenceMember")) Map("index" -> 
rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).asInstanceOf[Number]) else Map() + println("indexMap ", indexMap) + relData ++ indexMap + } else { + println("IN ELSE getEdgesAsMetadata") + Map[String, Object]() + } + }).filter(x => x.nonEmpty) + .groupBy(x => x.getOrDefault("KEY", "").asInstanceOf[String]) + .map(x => (x._1, (x._2.toList.map(x => { + x.-("KEY") + x.-("IL_SEQUENCE_INDEX") + })).distinct.asJava)) + } + def getFrameworkHierarchy(request: Request)(implicit ec: ExecutionContext, oec: OntologyEngineContext): Future[Map[String, AnyRef]] = { val req = new Request(request) req.put("identifier", request.get("identifier")) From d84de81c8bc785cdc8bdd6564f583b6053dd5bb0 Mon Sep 17 00:00:00 2001 From: Kartheek Palla Date: Thu, 30 May 2024 11:09:24 +0530 Subject: [PATCH 20/20] Issue #KN-1041 fix: Vertex read and subgraph queries issue fixes --- .../org/sunbird/graph/dac/model/Edges.java | 6 +- .../service/operation/EdgeOperations.scala | 25 ++++---- .../service/operation/SearchOperations.scala | 61 ++++++++++++++----- .../actors/CategoryInstanceActor.scala | 10 ++- .../scala/org/sunbird/actors/TermActor.scala | 10 ++- .../sunbird/mangers/FrameworkManager.scala | 12 +--- 6 files changed, 83 insertions(+), 41 deletions(-) diff --git a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java index b232a5c1d..e2aabd6ce 100644 --- a/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java +++ b/ontology-engine/graph-dac-api/src/main/java/org/sunbird/graph/dac/model/Edges.java @@ -5,7 +5,6 @@ import org.apache.commons.lang3.StringUtils; import org.apache.tinkerpop.gremlin.structure.Edge; import org.apache.tinkerpop.gremlin.structure.Vertex; -import org.neo4j.graphdb.Node; import org.sunbird.common.exception.ServerException; import org.sunbird.graph.common.enums.SystemProperties; import 
org.sunbird.graph.dac.enums.GraphDACErrorCodes; @@ -73,8 +72,9 @@ public Edges(String graphId, Edge edge, Map startNodeMap, Map + val ee = tr.get("ee").asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] + val r = tr.get("r") + val startNode = tr.get("__startNode") match { + case null => ee + case node => node + } + val endNode = tr.get("__endNode") match { + case null => ee + case node => node + } + val resMap = new util.HashMap[String, AnyRef] + resMap.put("ee", ee) + resMap.put("r", r) + resMap.put("__startNode", startNode) + resMap.put("__endNode", endNode) + finalList.add(resMap) + }.asJava + + finalList } else throw new ClientException(DACErrorCodeConstants.INVALID_PARAMETER.name, DACErrorMessageConstants.INVALID_PARAM_MAP ) } catch { case e :Exception => + e.printStackTrace() throw new ServerException(DACErrorCodeConstants.SERVER_ERROR.name, "Error! Something went wrong while creating node object. ", e.getCause); } } @@ -272,21 +303,23 @@ class SearchOperations { val edgeValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT) if (null != edgeValue && edgeValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Edge]) { val edge: org.apache.tinkerpop.gremlin.structure.Edge = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_RELATION_OBJECT).asInstanceOf[Edge] - nodeMap.put(edge.id(), edge) + relationMap.put(edge.id(), edge) } } + if (null != startNodeMap) { val startVertexValue = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT) + if (null != startVertexValue && startVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val startVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_START_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] - nodeMap.put(startVertex.id(), startVertex) + startNodeMap.put(startVertex.id(), startVertex) } } if (null != endNodeMap) { val endVertexValue = 
result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT) if (null != endVertexValue && endVertexValue.isInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex]) { val endVertex: org.apache.tinkerpop.gremlin.structure.Vertex = result.get(CypherQueryConfigurationConstants.DEFAULT_CYPHER_END_NODE_OBJECT).asInstanceOf[org.apache.tinkerpop.gremlin.structure.Vertex] - nodeMap.put(endVertex.id(), endVertex) + endNodeMap.put(endVertex.id(), endVertex) } } } diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala index 6833c37f7..3d1bc4194 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/CategoryInstanceActor.scala @@ -8,7 +8,7 @@ import org.sunbird.common.dto.{Request, Response, ResponseHandler} import org.sunbird.common.exception.ClientException import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.dac.enums.RelationTypes -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.NodeUtil import org.sunbird.utils.{Constants, RequestUtil} @@ -73,6 +73,14 @@ class CategoryInstanceActor @Inject()(implicit oec: OntologyEngineContext) exten if (indexList.nonEmpty) indexList.max + 1 else 1 } + private def getCategoryIndex(node: Vertex): Integer = { + val indexList = (node.getOutEdges.asScala ++ node.getInEdges.asScala).filter(r => (StringUtils.equals(r.getEdgeType, RelationTypes.SEQUENCE_MEMBERSHIP.relationName()) && StringUtils.equals(r.getStartVertexId, node.getIdentifier))) + .map(relation => { + relation.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).toString.toInt.intValue() + }) + if (indexList.nonEmpty) indexList.max + 1 else 1 + } + private def 
read(request: Request): Future[Response] = { validateCategoryInstanceObject(request).map(node => { val metadata: util.Map[String, AnyRef] = NodeUtil.serialize(node, null, request.getContext.get("schemaName").asInstanceOf[String], request.getContext.get("version").asInstanceOf[String]) diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala index 04a8b0c1b..20fe23b30 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/actors/TermActor.scala @@ -8,7 +8,7 @@ import org.sunbird.common.dto.{Request, Response, ResponseHandler} import org.sunbird.common.exception.{ClientException, ResponseCode } import org.sunbird.graph.OntologyEngineContext import org.sunbird.graph.dac.enums.RelationTypes -import org.sunbird.graph.dac.model.Node +import org.sunbird.graph.dac.model.{Node, Vertex} import org.sunbird.graph.nodes.DataNode import org.sunbird.graph.utils.NodeUtil import org.sunbird.utils.{Constants, RequestUtil} @@ -106,6 +106,14 @@ class TermActor @Inject()(implicit oec: OntologyEngineContext) extends BaseActor if (indexList.nonEmpty) indexList.max + 1 else 1 } + private def getIndex(node: Vertex): Integer = { + val indexList = (node.getOutEdges.asScala ++ node.getInEdges.asScala).filter(r => (StringUtils.equals(r.getEdgeType, RelationTypes.SEQUENCE_MEMBERSHIP.relationName()) && StringUtils.equals(r.getStartVertexId, node.getIdentifier))) + .map(relation => { + relation.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).toString.toInt.intValue() + }) + if (indexList.nonEmpty) indexList.max + 1 else 1 + } + private def read(request: Request): Future[Response] = { validateCategoryInstance(request) validateTerm(request).map(node => { diff --git a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala 
b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala index 5819a8733..b9e83432f 100644 --- a/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala +++ b/taxonomy-api/taxonomy-actors/src/main/scala/org/sunbird/mangers/FrameworkManager.scala @@ -131,7 +131,7 @@ object FrameworkManager { val relationDef = DefinitionNode.getRelationDefinitionMap(node.getGraphId, schemaVersion, objectType, definition) val outRelations = relations.filter((rel: Edges) => { StringUtils.equals(rel.getStartVertexId.toString(), node.getIdentifier) - }).sortBy((rel: Edges) => rel.getMetadata.get("IL_SEQUENCE_INDEX").asInstanceOf[Long])(Ordering.Long).toList + }).sortBy((rel: Edges) => rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", "1").toString.toInt)(Ordering.Int).toList if (includeRelations) { val relMetadata = getEdgesAsMetadata(relationDef, outRelations, "out") @@ -148,7 +148,6 @@ object FrameworkManager { } }).toList.asJava)) val data = (filteredData ++ childHierarchy).asJava - println("final data ", data) data } else { filteredData @@ -180,25 +179,18 @@ object FrameworkManager { def getEdgesAsMetadata(definitionMap: Map[String, AnyRef], relationMap: util.List[Edges], direction: String) = { relationMap.asScala.map(rel => { - println("rel ", rel) val endObjectType = rel.getEndVertexObjectType.replace("Image", "") - println("endObjectType ", endObjectType) val relKey: String = rel.getEdgeType + "_" + direction + "_" + endObjectType - println("relKey ", relKey) - println("definitionMap ", definitionMap) if (definitionMap.containsKey(relKey)) { - println("IN IF getEdgesAsMetadata") val relData = Map[String, Object]("identifier" -> rel.getEndVertexId.replace(".img", ""), "name" -> rel.getEndVertexName, "objectType" -> endObjectType, "relation" -> rel.getEdgeType, "KEY" -> definitionMap.getOrDefault(relKey, "").asInstanceOf[String] ) ++ rel.getMetadata.asScala - val indexMap = if 
(rel.getEdgeType.equals("hasSequenceMember")) Map("index" -> rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).asInstanceOf[Number]) else Map() - println("indexMap ", indexMap) + val indexMap = if (rel.getEdgeType.equals("hasSequenceMember")) Map("index" -> rel.getMetadata.getOrDefault("IL_SEQUENCE_INDEX", 1.asInstanceOf[Number]).toString.toInt) else Map() relData ++ indexMap } else { - println("IN ELSE getEdgesAsMetadata") Map[String, Object]() } }).filter(x => x.nonEmpty)