Commit 03d665df authored by Mohamed Khalil Labidi
Browse files

Revert "Upgrade to Gradle 6.8.3 and various minor fixes (#724)"

This reverts commit 1ee38af7.
parent 1ee38af7
......@@ -9,7 +9,7 @@ Execute a normal build with gradle:
Upon success open `index.html` to check your changes.
----
firefox build/asciidoc/index.html
firefox build/asciidoc/html5/index.html
----
Do not forget to follow the naming and capitalization guidelines at the end of this README.
......
import com.github.jk1.license.render.InventoryHtmlReportRenderer
import com.github.jk1.license.render.*
import org.aim42.htmlsanitycheck.check.*
buildscript {
repositories {
jcenter()
maven {
url "https://plugins.gradle.org/m2/"
}
}
dependencies {
classpath 'de.undercouch:gradle-download-task:3.1.2'
classpath 'org.asciidoctor:asciidoctor-gradle-plugin:1.5.9.2'
classpath 'xalan:xalan:2.7.2'
classpath 'gradle.plugin.org.aim42:htmlSanityCheck:1.1.3'
classpath 'com.github.jk1:gradle-license-report:1.7'
}
}
plugins {
id "de.undercouch.download" version "4.1.1"
id 'com.github.jk1.dependency-license-report' version '1.16'
id 'org.asciidoctor.jvm.convert' version '3.3.2'
id "org.aim42.htmlSanityCheck" version "1.1.3"
id 'java'
id 'maven-publish'
}
repositories {
jcenter()
}
apply plugin: 'de.undercouch.download'
apply plugin: 'java'
apply plugin: 'maven'
apply plugin: 'org.asciidoctor.convert'
apply plugin: 'org.aim42.htmlSanityCheck'
apply plugin: 'com.github.jk1.dependency-license-report'
asciidoctor {
sourceDir file("$projectDir/src/docs")
......@@ -33,6 +39,7 @@ asciidoctor {
include 'admin/AdminTutorials.adoc'
include 'admin/Catalog.adoc'
include 'index.adoc'
}
resources {
from("$projectDir/src/docs/") {
......@@ -42,15 +49,14 @@ asciidoctor {
include 'highlight/**'
}
}
baseDirFollowsSourceFile()
}
asciidoctorj {
version = '2.4.3'
version = '1.5.3'
}
htmlSanityCheck {
sourceDir = new File( "$buildDir/asciidoc" )
sourceDir = new File( "$buildDir/asciidoc/html5" )
sourceDocuments = fileTree(sourceDir) {
include "**/*.html"
......@@ -58,12 +64,13 @@ htmlSanityCheck {
}
// where to put results of sanityChecks...
checkingResultsDir = new File("$buildDir/report/htmlchecks")
checkingResultsDir = new File( "$buildDir/report/htmlchecks" )
checkerClasses = [BrokenCrossReferencesChecker, BrokenHttpLinksChecker, DuplicateIdChecker, ImageMapChecker, MissingAltInImageTagsChecker, MissingImageFilesChecker]
// fail build on errors?
failOnErrors = false
}
licenseReport {
......@@ -71,6 +78,7 @@ licenseReport {
renderers = [new InventoryHtmlReportRenderer()]
}
project.ext.branch = new File("$projectDir/src/docs/version-conf.js").text.split("'")[1]
if (project.ext.branch.endsWith("-SNAPSHOT")) {
project.ext.branch = 'master'
......@@ -79,29 +87,31 @@ if (project.ext.branch.endsWith("-SNAPSHOT")) {
def getDate() {
def currentDate = new Date()
def formattedDateString = currentDate.format('yyyy-MM-dd')
if (project.ext.branch == 'master') {
if(project.ext.branch == 'master'){
formattedDateString = currentDate.format('yyyy-MM-dd HH:mm:ss')
}
return formattedDateString
}
task copyDocs(type: Copy) {
def dateString = getDate()
from("$projectDir/src/docs/") {
from ("$projectDir/src/docs/") {
include 'version-conf.js'
filter { line ->
line.replace('date: \'\'', 'date: \'' + dateString + '\'')
line.replace('date: \'\'', 'date: \''+dateString+'\'')
}
}
into "$buildDir/asciidoc"
into "$buildDir/asciidoc/html5"
}
task removeProperties(type: Delete) {
delete fileTree(dir: "$projectDir/src/docs/admin/references/properties/")
}
import de.undercouch.gradle.tasks.download.Download
task downloadRmProperties(type: Download) {
src "https://raw.githubusercontent.com/ow2-proactive/scheduling/${project.branch}/config/rm/settings.ini"
dest file("$projectDir/src/docs/admin/references/properties/rm.properties")
......@@ -126,11 +136,11 @@ task xsdDoc(type: JavaExec) {
println file("$projectDir/src/xsd/schedulerjob.xsd").absoluteFile
inputs.files file("$projectDir/src/xsd/schedulerjob.xsd"), file("$projectDir/src/xsd/xs3p.xsl")
outputs.files file("$buildDir/asciidoc/user/schedulerjob.html")
outputs.files file("$buildDir/html5/user/schedulerjob.html")
classpath buildscript.configurations.classpath
main 'org.apache.xalan.xslt.Process'
args '-IN', 'src/xsd/schedulerjob.xsd', '-XSL', 'src/xsd/xs3p.xsl', '-OUT', "$buildDir/asciidoc/user/schedulerjob.html"
args '-IN', 'src/xsd/schedulerjob.xsd', '-XSL', 'src/xsd/xs3p.xsl', '-OUT', "$buildDir/asciidoc/html5/user/schedulerjob.html"
}
copyDocs.dependsOn removeProperties
......
......@@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-6.8.3-bin.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-3.3-bin.zip
......@@ -1005,63 +1005,46 @@ Execute the workflow by setting the different workflow's variables as described
[cols="2,5,2"]
|===
|*Variable name* | *Description* | *Type*
3+^|*Workflow variables*
| `MODEL_SERVICE_INSTANCE_NAME`
| Service instance name.
| String (default="maas_dl-${PA_JOB_ID}").
| `MODEL_SERVICE_PROXIFIED`
| Allows access to the endpoint through an Http(s) Proxy.
| Boolean (default=False).
| `MODEL_SERVICE_ENTRYPOINT`
| This entry script starts the service and defines the different functions to deploy the model, scores the prediction requests based on the deployed model, and returns the results. This script is specific to your model. This file should be stored in the Catalog under the `model_as_service_resources` bucket. More information about this file can be found in the <<_customize_the_service>> section.
| String (default="dl_service").
| `MODEL_SERVICE_YAML_FILE`
| A YAML file that describes the OpenAPI Specification ver. 2 (known as Swagger Spec) of the service. This file should be stored in the catalog under the `model_as_service_resources` bucket. More information about the structure of this file can be found in the section <<_customize_the_service>>.
| String (default="dl_service-api").
| `MODEL_SERVICE_USER_NAME`
| A valid user name having the needed privileges to execute this action.
| String (default="user").
| `MODEL_SERVICE_NODE_NAME`
| The name of the node where the service will be deployed. If empty, the service will be deployed on an available node selected randomly.
|
3+^|*Task variables*
| `SERVICE_ID`
| The name of the service. Please keep the default value for this variable.
| String (default="MaaS_DL")
| `INSTANCE_NAME`
| The name of the service that will be deployed.
| String (default="$MODEL_SERVICE_INSTANCE_NAME")
| `ENGINE`
| Container engine.
| String (default="$CONTAINER_PLATFORM")
| `PROXIFIED`
| It takes by default the value of `MODEL_SERVICE_PROXYFIED` workflow variable.
| String (default="$MODEL_SERVICE_PROXYFIED")
| `PYTHON_ENTRYPOINT`
| It takes by default the value of `MODEL_SERVICE_ENTRYPOINT` workflow variable.
| String (default="$MODEL_SERVICE_ENTRYPOINT")
| `YAML_FILE`
| It takes by default the value of `MODEL_SERVICE_YAML_FILE` workflow variable.
| String (default="$MODEL_SERVICE_YAML_FILE")
| `USER_NAME`
| It takes by default the value of `MODEL_SERVICE_USER_NAME` workflow variable.
| String (default="$MODEL_SERVICE_USER_NAME")
| `NODE_NAME`
| It takes by default the value of `MODEL_SERVICE_NODE_NAME` workflow variable.
| String (default="$MODEL_SERVICE_NODE_NAME")
......@@ -2355,7 +2338,7 @@ NOTE: Instead of training a model from scratch, a pre-trained sentiment analysis
*Train_Image_Classification:* trains a model to classify images from ants and bees.
*Train_Image_Segmentation:* trains a segmentation model using SegNet network on https://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^].
*Train_Image_Segmentation:* trains a segmentation model using SegNet network on http://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^].
*Train_Image_Object_Detection:* trains objects using YOLOv3 model on COCO dataset proposed by Microsoft Research.
......@@ -2371,7 +2354,7 @@ This section presents custom AI workflows using tasks available on the `deep-lea
*Fake_Celebrity_Faces_Generation:* generates a wild diversity of fake faces using a GAN model that was trained based on thousands of real celebrity photos. The pre-trained GAN model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/Epoch+018.pt[link^].
*Image_Segmentation:* predicts a segmentation model using SegNet network on https://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^]. The pre-trained image segmentation model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/model_segnet.zip[link^].
*Image_Segmentation:* predicts a segmentation model using SegNet network on http://www.robots.ox.ac.uk/~vgg/data/pets/[Oxford-IIIT Pet Dataset^]. The pre-trained image segmentation model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/model_segnet.zip[link^].
*Image_Object_Detection:* detects objects using a pre-trained YOLOv3 model on COCO dataset proposed by Microsoft Research. The pre-trained model is available on this https://s3.eu-west-2.amazonaws.com/activeeon-public/models/yolo3_coco.zip[link^].
......@@ -3719,7 +3702,7 @@ NOTE: You can use RGB images in JPG format (Images folder) and the groundtruth a
NOTE: You can find an example of the organization of the folders at: https://s3.eu-west-2.amazonaws.com/activeeon-public/datasets/oxford.zip
. _Object Detection Dataset:_ Two folders are required: the first folder should contain the RGB images in JPG format and another folder should contain its corresponding annotations in XML format using http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html[PASCAL VOC^] format or TXT format using COCO format (https://cocodataset.org/#home). The RGB images and annotations should be organized as follows:
. _Object Detection Dataset:_ Two folders are required: the first folder should contain the RGB images in JPG format and another folder should contain its corresponding annotations in XML format using http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html[PASCAL VOC^] format or TXT format using COCO format (http://cocodataset.org/#home). The RGB images and annotations should be organized as follows:
image::object_detection.png[150,150]
......@@ -3937,7 +3920,7 @@ NOTE: Torchtext were used to preprocess and load the text input. More informatio
| Boolean (default=True)
|===
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[AlexNet^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[AlexNet^].
===== DenseNet-161
......@@ -3945,7 +3928,7 @@ NOTE: PyTorch is used to build the model architecture based on https://pytorch.o
*Usage:* It should be connected to <<Train_Image_Classification_Model>>.
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[DenseNet-161^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[DenseNet-161^].
.DenseNet-161_Task variables
[cols="2,5,2"]
......@@ -3965,7 +3948,7 @@ NOTE: PyTorch is used to build the model architecture based on https://pytorch.o
*Usage:* It should be connected to <<Train_Image_Classification_Model>>.
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[ResNet-18^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[ResNet-18^].
.ResNet-161_Task variables
[cols="2,5,2"]
......@@ -3985,7 +3968,7 @@ NOTE: PyTorch is used to build the model architecture based on https://pytorch.o
*Usage:* It should be connected to <<Train_Image_Classification_Model>>.
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/vision/stable/models.html[VGG-16^].
NOTE: PyTorch is used to build the model architecture based on https://pytorch.org/docs/stable/torchvision/models.html[VGG-16^].
.VGG-16_Task variables
[cols="2,5,2"]
......@@ -4171,7 +4154,7 @@ NOTE: PyTorch is used to build the model architecture based on https://github.co
| Boolean (default=True)
|===
NOTE: The default parameters of the YOLO network were set for the COCO dataset (https://cocodataset.org/#home). If you'd like to use another dataset, you probably need to change the default parameters.
NOTE: The default parameters of the YOLO network were set for the COCO dataset (http://cocodataset.org/#home). If you'd like to use another dataset, you probably need to change the default parameters.
==== Text Classification
......
......@@ -833,7 +833,7 @@ The service is started using the following variables.
| Boolean
| `false`
| `PYTHON_ENTRYPOINT`
| This entry script starts the service and defines the different functions to deploy the model, scores the prediction requests based on the deployed model, and returns the results. This script is specific to your model. This file should be stored in the Catalog under the `model_as_service_resources` bucket. More information about this file can be found in the <<../PML/PMLUserGuide.html#_customize_the_service>> section.
| This entry script starts the service and defines the different functions to deploy the model, scores the prediction requests based on the deployed model, and returns the results. This script is specific to your model. This file should be stored in the Catalog under the `model_as_service_resources` bucket. More information about this file can be found in the <<_customize_the_service>> section.
| Yes
| String
| `dl_service`
......@@ -853,7 +853,7 @@ The service is started using the following variables.
| Boolean
| `true`
| `YAML_FILE`
| A YAML file that describes the OpenAPI Specification ver. 2 (known as Swagger Spec) of the service. This file should be stored in the catalog under the `model_as_service_resources` bucket. More information about the structure of this file can be found in the section <<../PML/PMLUserGuide.html#_customize_the_service>>.
| A YAML file that describes the OpenAPI Specification ver. 2 (known as Swagger Spec) of the service. This file should be stored in the catalog under the `model_as_service_resources` bucket. More information about the structure of this file can be found in the section <<_customize_the_service>>.
| Yes
| String
| `dl_service-api`
......
......@@ -105,7 +105,7 @@ The second way to start a ProActive Scheduler is to install it as a system servi
==== How to install ProActive on Windows
Under Windows, it is possible to use the https://nssm.cc/[nssm^] service manager tool to manage a running script as a service. You can configure nssm to absolve all responsibility for restarting it and let Windows take care of recovery actions.
Under Windows, it is possible to use the https://nssm.cc/[nssm^] service manager tool to manage a running script as a service. You can configure nssm to absolve all responsibility for restarting it and let Windows take care of recovery actions.
In our case, you need to provide to nssm the Path to this script `$PROACTIVE_HOME/bin/proactive-server.bat` to start ProActive as a service.
==== How to install ProActive on Linux
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment