diff --git a/README b/README new file mode 100644 index 0000000000000000000000000000000000000000..055cd550cee653fb4bf57b512cb27cd78091846d --- /dev/null +++ b/README @@ -0,0 +1,25 @@ +Contrail +======== + +Contrail is a complete Cloud platform which integrates full Infrastructure-as-a-Service and Platform-as-a-Service facilities. It allows Cloud providers to seamlessly integrate resources from other Clouds with their own infrastructure, and breaks the current customer lock-in situation by allowing live application migration from one cloud to another. + +Documentation (and detailed installation instructions) can be found online at the +[Contrail Docs site](http://TBD). + +Installation +------------ + +TBD + +License +------- + +See LICENSE files in the directory common/licenses. + +Support +------- + +Please log tickets and issues at our [issue tracking system](http://jira.ow2.org/browse/CONTRAIL) + +The official Contrail [IRC](http://webchat.freenode.net/?channels=contrail&uio=Mj10cnVlJjQ9dHJ1ZSY5PXRydWUmMTE9ODIdf) channel is #contrail on the [Freenode](http://www.freenode.net) network. 
+ diff --git a/tags/contrail-0.1/pom.xml b/common/tags/contrail-0.1/pom.xml similarity index 100% rename from tags/contrail-0.1/pom.xml rename to common/tags/contrail-0.1/pom.xml diff --git a/tags/contrail-common-0.1.0/contegrator/.project b/common/tags/contrail-common-0.1.0/contegrator/.project similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/.project rename to common/tags/contrail-common-0.1.0/contegrator/.project diff --git a/tags/contrail-common-0.1.0/contegrator/.pydevproject b/common/tags/contrail-common-0.1.0/contegrator/.pydevproject similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/.pydevproject rename to common/tags/contrail-common-0.1.0/contegrator/.pydevproject diff --git a/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/install.xml b/common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/install.xml similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/install.xml rename to common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/install.xml diff --git a/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh b/common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh rename to common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh diff --git a/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/install.xml b/common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/install.xml similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/install.xml rename to common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/install.xml diff --git 
a/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh b/common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh rename to common/tags/contrail-common-0.1.0/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh diff --git a/tags/contrail-common-0.1.0/contegrator/example/one-test/test/stress.py b/common/tags/contrail-common-0.1.0/contegrator/example/one-test/test/stress.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/one-test/test/stress.py rename to common/tags/contrail-common-0.1.0/contegrator/example/one-test/test/stress.py diff --git a/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz b/common/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz rename to common/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz diff --git a/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/install.xml b/common/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/install.xml similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/install.xml rename to common/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/install.xml diff --git a/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/scripts/script.sh b/common/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/scripts/script.sh similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/scripts/script.sh rename to 
common/tags/contrail-common-0.1.0/contegrator/example/test-suite/nodes/node-1/scripts/script.sh diff --git a/tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-1.py b/common/tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-1.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-1.py rename to common/tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-1.py diff --git a/tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-2.py b/common/tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-2.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-2.py rename to common/tags/contrail-common-0.1.0/contegrator/example/test-suite/test/testcase-2.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/contegrator.py b/common/tags/contrail-common-0.1.0/contegrator/src/contegrator.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/contegrator.py rename to common/tags/contrail-common-0.1.0/contegrator/src/contegrator.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/image/__init__.py b/common/tags/contrail-common-0.1.0/contegrator/src/image/__init__.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/image/__init__.py rename to common/tags/contrail-common-0.1.0/contegrator/src/image/__init__.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/image/imagedef.py b/common/tags/contrail-common-0.1.0/contegrator/src/image/imagedef.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/image/imagedef.py rename to common/tags/contrail-common-0.1.0/contegrator/src/image/imagedef.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/image/imager.py b/common/tags/contrail-common-0.1.0/contegrator/src/image/imager.py similarity index 100% rename from 
tags/contrail-common-0.1.0/contegrator/src/image/imager.py rename to common/tags/contrail-common-0.1.0/contegrator/src/image/imager.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/image/installer.py b/common/tags/contrail-common-0.1.0/contegrator/src/image/installer.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/image/installer.py rename to common/tags/contrail-common-0.1.0/contegrator/src/image/installer.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/nodes/__init__.py b/common/tags/contrail-common-0.1.0/contegrator/src/nodes/__init__.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/nodes/__init__.py rename to common/tags/contrail-common-0.1.0/contegrator/src/nodes/__init__.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/nodes/nodeman.py b/common/tags/contrail-common-0.1.0/contegrator/src/nodes/nodeman.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/nodes/nodeman.py rename to common/tags/contrail-common-0.1.0/contegrator/src/nodes/nodeman.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/perceus/__init__.py b/common/tags/contrail-common-0.1.0/contegrator/src/perceus/__init__.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/perceus/__init__.py rename to common/tags/contrail-common-0.1.0/contegrator/src/perceus/__init__.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/perceus/perceus.py b/common/tags/contrail-common-0.1.0/contegrator/src/perceus/perceus.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/perceus/perceus.py rename to common/tags/contrail-common-0.1.0/contegrator/src/perceus/perceus.py diff --git a/tags/contrail-common-0.1.0/contegrator/src/testbedconfig.py b/common/tags/contrail-common-0.1.0/contegrator/src/testbedconfig.py similarity index 100% rename from tags/contrail-common-0.1.0/contegrator/src/testbedconfig.py rename to 
common/tags/contrail-common-0.1.0/contegrator/src/testbedconfig.py diff --git a/tags/contrail-common-0.1.0/infrastructure/svnhooks/Rakefile b/common/tags/contrail-common-0.1.0/infrastructure/svnhooks/Rakefile similarity index 100% rename from tags/contrail-common-0.1.0/infrastructure/svnhooks/Rakefile rename to common/tags/contrail-common-0.1.0/infrastructure/svnhooks/Rakefile diff --git a/tags/contrail-common-0.1.0/infrastructure/svnhooks/src/pre-commit.sh b/common/tags/contrail-common-0.1.0/infrastructure/svnhooks/src/pre-commit.sh similarity index 100% rename from tags/contrail-common-0.1.0/infrastructure/svnhooks/src/pre-commit.sh rename to common/tags/contrail-common-0.1.0/infrastructure/svnhooks/src/pre-commit.sh diff --git a/tags/contrail-common-0.1.0/infrastructure/svnhooks/src/test_pre-commit-hook.rb b/common/tags/contrail-common-0.1.0/infrastructure/svnhooks/src/test_pre-commit-hook.rb similarity index 100% rename from tags/contrail-common-0.1.0/infrastructure/svnhooks/src/test_pre-commit-hook.rb rename to common/tags/contrail-common-0.1.0/infrastructure/svnhooks/src/test_pre-commit-hook.rb diff --git a/tags/contrail-common-0.1.0/maven/parent/pom.xml b/common/tags/contrail-common-0.1.0/maven/parent/pom.xml similarity index 100% rename from tags/contrail-common-0.1.0/maven/parent/pom.xml rename to common/tags/contrail-common-0.1.0/maven/parent/pom.xml diff --git a/tags/contrail-common-0.1.0/maven/settings/settings.xml b/common/tags/contrail-common-0.1.0/maven/settings/settings.xml similarity index 100% rename from tags/contrail-common-0.1.0/maven/settings/settings.xml rename to common/tags/contrail-common-0.1.0/maven/settings/settings.xml diff --git a/tags/contrail-common-0.1.0/registration/register-contrail-developer.sh b/common/tags/contrail-common-0.1.0/registration/register-contrail-developer.sh similarity index 100% rename from tags/contrail-common-0.1.0/registration/register-contrail-developer.sh rename to 
common/tags/contrail-common-0.1.0/registration/register-contrail-developer.sh diff --git a/trunk/Makefile b/common/trunk/Makefile similarity index 100% rename from trunk/Makefile rename to common/trunk/Makefile diff --git a/trunk/README b/common/trunk/README similarity index 100% rename from trunk/README rename to common/trunk/README diff --git a/trunk/admin/keys/Rakefile b/common/trunk/admin/keys/Rakefile similarity index 100% rename from trunk/admin/keys/Rakefile rename to common/trunk/admin/keys/Rakefile diff --git a/trunk/admin/keys/contrail.pub b/common/trunk/admin/keys/contrail.pub similarity index 100% rename from trunk/admin/keys/contrail.pub rename to common/trunk/admin/keys/contrail.pub diff --git a/trunk/admin/keys/contrail.sec.gpg b/common/trunk/admin/keys/contrail.sec.gpg similarity index 100% rename from trunk/admin/keys/contrail.sec.gpg rename to common/trunk/admin/keys/contrail.sec.gpg diff --git a/trunk/admin/ow2-admin-guide/Makefile b/common/trunk/admin/ow2-admin-guide/Makefile similarity index 100% rename from trunk/admin/ow2-admin-guide/Makefile rename to common/trunk/admin/ow2-admin-guide/Makefile diff --git a/trunk/admin/ow2-admin-guide/img/contrail.png b/common/trunk/admin/ow2-admin-guide/img/contrail.png similarity index 100% rename from trunk/admin/ow2-admin-guide/img/contrail.png rename to common/trunk/admin/ow2-admin-guide/img/contrail.png diff --git a/trunk/admin/ow2-admin-guide/img/push_website.png b/common/trunk/admin/ow2-admin-guide/img/push_website.png similarity index 100% rename from trunk/admin/ow2-admin-guide/img/push_website.png rename to common/trunk/admin/ow2-admin-guide/img/push_website.png diff --git a/trunk/admin/ow2-admin-guide/tex/ow2-admin-guide.tex b/common/trunk/admin/ow2-admin-guide/tex/ow2-admin-guide.tex similarity index 100% rename from trunk/admin/ow2-admin-guide/tex/ow2-admin-guide.tex rename to common/trunk/admin/ow2-admin-guide/tex/ow2-admin-guide.tex diff --git a/trunk/admin/ow2-admin-guide/tex/procedures.tex 
b/common/trunk/admin/ow2-admin-guide/tex/procedures.tex similarity index 100% rename from trunk/admin/ow2-admin-guide/tex/procedures.tex rename to common/trunk/admin/ow2-admin-guide/tex/procedures.tex diff --git a/trunk/admin/ow2-admin-guide/tex/titlepage.tex b/common/trunk/admin/ow2-admin-guide/tex/titlepage.tex similarity index 100% rename from trunk/admin/ow2-admin-guide/tex/titlepage.tex rename to common/trunk/admin/ow2-admin-guide/tex/titlepage.tex diff --git a/trunk/admin/registration/register-contrail-developer.sh b/common/trunk/admin/registration/register-contrail-developer.sh similarity index 100% rename from trunk/admin/registration/register-contrail-developer.sh rename to common/trunk/admin/registration/register-contrail-developer.sh diff --git a/trunk/admin/svnhooks/Makefile b/common/trunk/admin/svnhooks/Makefile similarity index 100% rename from trunk/admin/svnhooks/Makefile rename to common/trunk/admin/svnhooks/Makefile diff --git a/trunk/admin/svnhooks/src/pre-commit.sh b/common/trunk/admin/svnhooks/src/pre-commit.sh similarity index 100% rename from trunk/admin/svnhooks/src/pre-commit.sh rename to common/trunk/admin/svnhooks/src/pre-commit.sh diff --git a/trunk/dev/maven/parent/pom.xml b/common/trunk/dev/maven/parent/pom.xml similarity index 100% rename from trunk/dev/maven/parent/pom.xml rename to common/trunk/dev/maven/parent/pom.xml diff --git a/trunk/dev/maven/settings/settings.xml b/common/trunk/dev/maven/settings/settings.xml similarity index 100% rename from trunk/dev/maven/settings/settings.xml rename to common/trunk/dev/maven/settings/settings.xml diff --git a/trunk/src/ovf-parser/OVFParser/data/contrail_petstore.xml b/common/trunk/src/ovf-parser/OVFParser/data/contrail_petstore.xml similarity index 100% rename from trunk/src/ovf-parser/OVFParser/data/contrail_petstore.xml rename to common/trunk/src/ovf-parser/OVFParser/data/contrail_petstore.xml diff --git a/trunk/src/ovf-parser/OVFParser/data/small-vm.ovf 
b/common/trunk/src/ovf-parser/OVFParser/data/small-vm.ovf similarity index 100% rename from trunk/src/ovf-parser/OVFParser/data/small-vm.ovf rename to common/trunk/src/ovf-parser/OVFParser/data/small-vm.ovf diff --git a/trunk/src/ovf-parser/OVFParser/lib/mascopt-1.3.2/mascoptLib-1.3.2.jar b/common/trunk/src/ovf-parser/OVFParser/lib/mascopt-1.3.2/mascoptLib-1.3.2.jar similarity index 100% rename from trunk/src/ovf-parser/OVFParser/lib/mascopt-1.3.2/mascoptLib-1.3.2.jar rename to common/trunk/src/ovf-parser/OVFParser/lib/mascopt-1.3.2/mascoptLib-1.3.2.jar diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/Test/TestOVFParser.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/Test/TestOVFParser.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/Test/TestOVFParser.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/Test/TestOVFParser.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplianceDescriptor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplianceDescriptor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplianceDescriptor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplianceDescriptor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplicationDescriptor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplicationDescriptor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplicationDescriptor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ApplicationDescriptor.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/ApplicationConstraints.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/ApplicationConstraints.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/ApplicationConstraints.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/ApplicationConstraints.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/UserConstraints.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/UserConstraints.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/UserConstraints.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/constraints/UserConstraints.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplianceDescriptorImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplianceDescriptorImpl.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplianceDescriptorImpl.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplianceDescriptorImpl.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationConstraintsImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationConstraintsImpl.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationConstraintsImpl.java 
rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationConstraintsImpl.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationDescriptorImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationDescriptorImpl.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationDescriptorImpl.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/ApplicationDescriptorImpl.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/UserConstraintsImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/UserConstraintsImpl.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/UserConstraintsImpl.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/implementation/UserConstraintsImpl.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/Disk.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/Disk.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/Disk.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/Disk.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/File.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/File.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/File.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/File.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFItemProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFItemProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFItemProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFItemProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFParser.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFParser.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFParser.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFParser.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFResourceAllocationSettingDataTranslator.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFResourceAllocationSettingDataTranslator.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFResourceAllocationSettingDataTranslator.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFResourceAllocationSettingDataTranslator.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFSections.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFSections.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFSections.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFSections.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualDisk.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualDisk.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualDisk.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualDisk.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualHardware.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualHardware.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualHardware.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualHardware.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualNetwork.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualNetwork.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualNetwork.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualNetwork.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualSystem.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualSystem.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualSystem.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/OVFVirtualSystem.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/AnnotationSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/AnnotationSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/AnnotationSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/AnnotationSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ContentType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ContentType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ContentType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ContentType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DeploymentOptionSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DeploymentOptionSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DeploymentOptionSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DeploymentOptionSectionType.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DiskSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DiskSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DiskSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/DiskSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EnvelopeType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EnvelopeType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EnvelopeType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EnvelopeType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EulaSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EulaSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EulaSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/EulaSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/FileType.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/FileType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/FileType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/FileType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/InstallSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/InstallSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/InstallSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/InstallSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/MsgType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/MsgType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/MsgType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/MsgType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/NetworkSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/NetworkSectionType.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/NetworkSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/NetworkSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ObjectFactory.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ObjectFactory.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ObjectFactory.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ObjectFactory.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/OperatingSystemSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/OperatingSystemSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/OperatingSystemSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/OperatingSystemSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ProductSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ProductSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ProductSectionType.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ProductSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/PropertyConfigurationValueType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/PropertyConfigurationValueType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/PropertyConfigurationValueType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/PropertyConfigurationValueType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/RASDType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/RASDType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/RASDType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/RASDType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ReferencesType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ReferencesType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ReferencesType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ReferencesType.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ResourceAllocationSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ResourceAllocationSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ResourceAllocationSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/ResourceAllocationSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/SectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/SectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/SectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/SectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StartupSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StartupSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StartupSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StartupSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StringsType.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StringsType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StringsType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/StringsType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VSSDType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VSSDType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VSSDType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VSSDType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualDiskDescType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualDiskDescType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualDiskDescType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualDiskDescType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualHardwareSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualHardwareSectionType.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualHardwareSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualHardwareSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemCollectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemCollectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemCollectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemCollectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/VirtualSystemType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/package-info.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/package-info.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/package-info.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/envelope/_1/package-info.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EntityType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EntityType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EntityType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EntityType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EnvironmentType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EnvironmentType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EnvironmentType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/EnvironmentType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/ObjectFactory.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/ObjectFactory.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/ObjectFactory.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/ObjectFactory.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PlatformSectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PlatformSectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PlatformSectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PlatformSectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PropertySectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PropertySectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PropertySectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/PropertySectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/SectionType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/SectionType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/SectionType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/SectionType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/package-info.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/package-info.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/package-info.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/ovf/environment/_1/package-info.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/CIMResourceAllocationSettingDataType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/CIMResourceAllocationSettingDataType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/CIMResourceAllocationSettingDataType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/CIMResourceAllocationSettingDataType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/Caption.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/Caption.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/Caption.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/Caption.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ConsumerVisibility.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ConsumerVisibility.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ConsumerVisibility.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ConsumerVisibility.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/MappingBehavior.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/MappingBehavior.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/MappingBehavior.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/MappingBehavior.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ObjectFactory.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ObjectFactory.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ObjectFactory.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ObjectFactory.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ResourceType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ResourceType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ResourceType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/ResourceType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/package-info.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/package-info.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/package-info.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_resourceallocationsettingdata/package-info.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticRecoveryAction.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticRecoveryAction.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticRecoveryAction.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticRecoveryAction.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticShutdownAction.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticShutdownAction.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticShutdownAction.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticShutdownAction.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticStartupAction.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticStartupAction.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticStartupAction.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/AutomaticStartupAction.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/CIMVirtualSystemSettingDataType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/CIMVirtualSystemSettingDataType.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/CIMVirtualSystemSettingDataType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/CIMVirtualSystemSettingDataType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/Caption.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/Caption.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/Caption.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/Caption.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/ObjectFactory.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/ObjectFactory.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/ObjectFactory.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/ObjectFactory.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/package-info.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/package-info.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/package-info.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/cim_schema/_2/cim_virtualsystemsettingdata/package-info.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimAnySimpleType.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimAnySimpleType.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimAnySimpleType.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimAnySimpleType.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBase64Binary.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBase64Binary.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBase64Binary.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBase64Binary.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBoolean.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBoolean.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBoolean.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimBoolean.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimByte.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimByte.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimByte.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimByte.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimChar16.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimChar16.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimChar16.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimChar16.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDateTime.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDateTime.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDateTime.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDateTime.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDouble.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDouble.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDouble.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimDouble.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimFloat.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimFloat.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimFloat.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimFloat.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimHexBinary.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimHexBinary.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimHexBinary.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimHexBinary.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimInt.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimInt.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimInt.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimInt.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimLong.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimLong.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimLong.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimLong.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimReference.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimReference.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimReference.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimReference.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimShort.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimShort.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimShort.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimShort.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimString.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimString.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimString.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimString.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedByte.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedByte.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedByte.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedByte.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedInt.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedInt.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedInt.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedInt.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedLong.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedLong.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedLong.java rename to 
common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedLong.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedShort.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedShort.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedShort.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/CimUnsignedShort.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/ObjectFactory.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/ObjectFactory.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/ObjectFactory.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/ObjectFactory.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierBoolean.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierBoolean.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierBoolean.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierBoolean.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSArray.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSArray.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSArray.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSArray.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSInt64.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSInt64.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSInt64.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierSInt64.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierString.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierString.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierString.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierString.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierUInt32.java 
b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierUInt32.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierUInt32.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/QualifierUInt32.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/package-info.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/package-info.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/package-info.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/dmtf/schemas/wbem/wscim/_1/common/package-info.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCdDriveProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCdDriveProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCdDriveProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCdDriveProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCpuProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCpuProcessor.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCpuProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFCpuProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFDiskDriveProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFDiskDriveProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFDiskDriveProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFDiskDriveProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFEthernetAdapterProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFEthernetAdapterProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFEthernetAdapterProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFEthernetAdapterProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFFloppyDriveProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFFloppyDriveProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFFloppyDriveProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFFloppyDriveProcessor.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFIdeControllerProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFIdeControllerProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFIdeControllerProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFIdeControllerProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFMemoryProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFMemoryProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFMemoryProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFMemoryProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFParallelSCSIProcessor.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFParallelSCSIProcessor.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFParallelSCSIProcessor.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/item_processor/OVFParallelSCSIProcessor.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCdDrive.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCdDrive.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCdDrive.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCdDrive.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCpu.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCpu.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCpu.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwCpu.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwDiskDrive.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwDiskDrive.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwDiskDrive.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwDiskDrive.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwEthernetAdapter.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwEthernetAdapter.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwEthernetAdapter.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwEthernetAdapter.java diff --git 
a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwFloppyDrive.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwFloppyDrive.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwFloppyDrive.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwFloppyDrive.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwIdeController.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwIdeController.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwIdeController.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwIdeController.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwMemory.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwMemory.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwMemory.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwMemory.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwParallelSCSI.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwParallelSCSI.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwParallelSCSI.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/ovf/virtualhardware/OVFVirtualHwParallelSCSI.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/NegotiatedSLA.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/NegotiatedSLA.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/NegotiatedSLA.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/NegotiatedSLA.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLAProposal.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLAProposal.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLAProposal.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLAProposal.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLATemplate.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLATemplate.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLATemplate.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/SLATemplate.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/InternalSLAProposalImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/InternalSLAProposalImpl.java similarity index 100% rename from 
trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/InternalSLAProposalImpl.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/InternalSLAProposalImpl.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/NegotiatedSLAImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/NegotiatedSLAImpl.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/NegotiatedSLAImpl.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/NegotiatedSLAImpl.java diff --git a/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/SLATemplateImpl.java b/common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/SLATemplateImpl.java similarity index 100% rename from trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/SLATemplateImpl.java rename to common/trunk/src/ovf-parser/OVFParser/src/org/ow2/contrail/federation/application/sla/implementation/SLATemplateImpl.java diff --git a/trunk/test/contegrator/.project b/common/trunk/test/contegrator/.project similarity index 100% rename from trunk/test/contegrator/.project rename to common/trunk/test/contegrator/.project diff --git a/trunk/test/contegrator/.pydevproject b/common/trunk/test/contegrator/.pydevproject similarity index 100% rename from trunk/test/contegrator/.pydevproject rename to common/trunk/test/contegrator/.pydevproject diff --git a/trunk/test/contegrator/example/one-test/nodes/head/install.xml b/common/trunk/test/contegrator/example/one-test/nodes/head/install.xml similarity index 100% rename from 
trunk/test/contegrator/example/one-test/nodes/head/install.xml rename to common/trunk/test/contegrator/example/one-test/nodes/head/install.xml diff --git a/trunk/test/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh b/common/trunk/test/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh similarity index 100% rename from trunk/test/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh rename to common/trunk/test/contegrator/example/one-test/nodes/head/scripts/fix-scripts.sh diff --git a/trunk/test/contegrator/example/one-test/nodes/worker/install.xml b/common/trunk/test/contegrator/example/one-test/nodes/worker/install.xml similarity index 100% rename from trunk/test/contegrator/example/one-test/nodes/worker/install.xml rename to common/trunk/test/contegrator/example/one-test/nodes/worker/install.xml diff --git a/trunk/test/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh b/common/trunk/test/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh similarity index 100% rename from trunk/test/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh rename to common/trunk/test/contegrator/example/one-test/nodes/worker/scripts/fix-nfs.sh diff --git a/trunk/test/contegrator/example/one-test/test/stress.py b/common/trunk/test/contegrator/example/one-test/test/stress.py similarity index 100% rename from trunk/test/contegrator/example/one-test/test/stress.py rename to common/trunk/test/contegrator/example/one-test/test/stress.py diff --git a/trunk/test/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz b/common/trunk/test/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz similarity index 100% rename from trunk/test/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz rename to common/trunk/test/contegrator/example/test-suite/nodes/node-1/config/config.tar.gz diff --git a/trunk/test/contegrator/example/test-suite/nodes/node-1/install.xml 
b/common/trunk/test/contegrator/example/test-suite/nodes/node-1/install.xml similarity index 100% rename from trunk/test/contegrator/example/test-suite/nodes/node-1/install.xml rename to common/trunk/test/contegrator/example/test-suite/nodes/node-1/install.xml diff --git a/trunk/test/contegrator/example/test-suite/nodes/node-1/scripts/script.sh b/common/trunk/test/contegrator/example/test-suite/nodes/node-1/scripts/script.sh similarity index 100% rename from trunk/test/contegrator/example/test-suite/nodes/node-1/scripts/script.sh rename to common/trunk/test/contegrator/example/test-suite/nodes/node-1/scripts/script.sh diff --git a/trunk/test/contegrator/example/test-suite/test/testcase-1.py b/common/trunk/test/contegrator/example/test-suite/test/testcase-1.py similarity index 100% rename from trunk/test/contegrator/example/test-suite/test/testcase-1.py rename to common/trunk/test/contegrator/example/test-suite/test/testcase-1.py diff --git a/trunk/test/contegrator/example/test-suite/test/testcase-2.py b/common/trunk/test/contegrator/example/test-suite/test/testcase-2.py similarity index 100% rename from trunk/test/contegrator/example/test-suite/test/testcase-2.py rename to common/trunk/test/contegrator/example/test-suite/test/testcase-2.py diff --git a/trunk/test/contegrator/src/contegrator.py b/common/trunk/test/contegrator/src/contegrator.py similarity index 100% rename from trunk/test/contegrator/src/contegrator.py rename to common/trunk/test/contegrator/src/contegrator.py diff --git a/trunk/test/contegrator/src/image/__init__.py b/common/trunk/test/contegrator/src/image/__init__.py similarity index 100% rename from trunk/test/contegrator/src/image/__init__.py rename to common/trunk/test/contegrator/src/image/__init__.py diff --git a/trunk/test/contegrator/src/image/imagedef.py b/common/trunk/test/contegrator/src/image/imagedef.py similarity index 100% rename from trunk/test/contegrator/src/image/imagedef.py rename to 
common/trunk/test/contegrator/src/image/imagedef.py diff --git a/trunk/test/contegrator/src/image/imager.py b/common/trunk/test/contegrator/src/image/imager.py similarity index 100% rename from trunk/test/contegrator/src/image/imager.py rename to common/trunk/test/contegrator/src/image/imager.py diff --git a/trunk/test/contegrator/src/image/installer.py b/common/trunk/test/contegrator/src/image/installer.py similarity index 100% rename from trunk/test/contegrator/src/image/installer.py rename to common/trunk/test/contegrator/src/image/installer.py diff --git a/trunk/test/contegrator/src/nodes/__init__.py b/common/trunk/test/contegrator/src/nodes/__init__.py similarity index 100% rename from trunk/test/contegrator/src/nodes/__init__.py rename to common/trunk/test/contegrator/src/nodes/__init__.py diff --git a/trunk/test/contegrator/src/nodes/nodeman.py b/common/trunk/test/contegrator/src/nodes/nodeman.py similarity index 100% rename from trunk/test/contegrator/src/nodes/nodeman.py rename to common/trunk/test/contegrator/src/nodes/nodeman.py diff --git a/trunk/test/contegrator/src/perceus/__init__.py b/common/trunk/test/contegrator/src/perceus/__init__.py similarity index 100% rename from trunk/test/contegrator/src/perceus/__init__.py rename to common/trunk/test/contegrator/src/perceus/__init__.py diff --git a/trunk/test/contegrator/src/perceus/perceus.py b/common/trunk/test/contegrator/src/perceus/perceus.py similarity index 100% rename from trunk/test/contegrator/src/perceus/perceus.py rename to common/trunk/test/contegrator/src/perceus/perceus.py diff --git a/trunk/test/contegrator/src/testbedconfig.py b/common/trunk/test/contegrator/src/testbedconfig.py similarity index 100% rename from trunk/test/contegrator/src/testbedconfig.py rename to common/trunk/test/contegrator/src/testbedconfig.py diff --git a/conpaas/branches/Y1DEMO-conpaassql/.project b/conpaas/branches/Y1DEMO-conpaassql/.project new file mode 100644 index 
0000000000000000000000000000000000000000..05743ee15c63937deab608c525b073dd74bb2afc --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/.project @@ -0,0 +1,17 @@ + + + ConPaaSSql + + + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/conpaas/branches/Y1DEMO-conpaassql/.pydevproject b/conpaas/branches/Y1DEMO-conpaassql/.pydevproject new file mode 100644 index 0000000000000000000000000000000000000000..18c13d8d4d7d558f6449339595267bbb19bfb5a0 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/.pydevproject @@ -0,0 +1,11 @@ + + + + +Default +python 2.7 + +/ConPaaSSql/src +/ConPaaSSql/contrib + + diff --git a/conpaas/branches/Y1DEMO-conpaassql/LICENSE.txt b/conpaas/branches/Y1DEMO-conpaassql/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..1333ed77b7e1ed056329cae96075dc558158ee69 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/LICENSE.txt @@ -0,0 +1 @@ +TODO diff --git a/conpaas/branches/Y1DEMO-conpaassql/Makefile b/conpaas/branches/Y1DEMO-conpaassql/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c49d45626e0f56dc38d2d3814ac22389a85d7e46 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/Makefile @@ -0,0 +1,41 @@ +PWD = `pwd` +PYTHONPATH = `pwd`/src +DESTDIR ?= `/usr/local` +BASENAME = conpaassql +BINNAME = $(PWD)/$(BASENAME)-bin.tar.gz +BINDIR = $(PWD)/bindist +# This is for deploying (testing). 
+PACKAGE_NAME = conpaassql.tar +DEST_DIR_PCKG = $(DESTDIR)/$(PACKAGE_NAME) +PYTHON_VER = 2.6 + +all: + +check: all + #PYTHONPATH=./src:./contrib python -m unittest conpaas.mysql.test.unit.agent.TestServerAgent + #PYTHONPATH=./src:./contrib python -m unittest conpaas.mysql.test.unit.manager.TestServerManager + PYTHONPATH=./src:./contrib python src/conpaas/mysql/test/unit/agent.py + PYTHONPATH=./src:./contrib python src/conpaas/mysql/test/unit/manager.py + +install: check + #PYTHONPATH=$(DESTDIR)/usr/lib/python${PYTHON_VER}/site-packages/contrib:$(DESTDIR)/usr/lib/python${PYTHON_VER}/site-packages python $(PWD)/setup.py install --prefix=$(DESTDIR)/usr + echo $(DESTDIR) + mkdir -p $(DESTDIR)/usr/lib/python2.6/site-packages/ + PYTHONPATH=$(DESTDIR)/usr/lib/python${PYTHON_VER}/site-packages/contrib:$(DESTDIR)/usr/lib/python${PYTHON_VER}/site-packages python $(PWD)/setup.py install --prefix=$(DESTDIR)/usr + +binary: check + PYTHONPATH=$(BINDIR)/usr/lib/python${PYTHON_VER}/site-packages/contrib:$(BINDIR)/usr/lib/python${PYTHON_VER}/site-packages $(PWD)/setup.py install --prefix=$(BINDIR)/usr + tar czvf $(BINNAME) -C $(BINDIR) . + +deploy: check + mkdir -p $(DESTDIR) + tar --exclude=".svn" -cvf $(DEST_DIR_PCKG) . + ## Untar there + tar xvf $(DEST_DIR_PCKG) -C $(DESTDIR) + # Remove the package + #rm $(DEST_DIR_PCKG) + # Deployment complete! 
+clean: + rm -rf $(PWD)/build $(BINDIR) $(BINNAME) + find src -iname "*.pyc" -exec rm {} \; + find contrib -iname "*.pyc" -exec rm {} \; diff --git a/conpaas/branches/Y1DEMO-conpaassql/README.txt b/conpaas/branches/Y1DEMO-conpaassql/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..a00c387c32f743887a322e1a58935aa55fb05068 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/README.txt @@ -0,0 +1,59 @@ +ConPaasSql - Conpaas SQL Server +------------------------------- + +Installation and Dependencies +----------------------------- + +Dependencies: + * unzip + * >=python2.6 + * python-mysqldb + * python-pycurl + * OCA bindings + * setuptools + +apt-get install -y unzip +apt-get install -y python +apt-get install -y python-mysqldb +apt-get install -y python-pycurl + +wget https://github.com/lukaszo/python-oca/zipball/0.2.3 +wget http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz#md5=7df2a529a074f613b509fb44feefe74e + +Setuptool installation + +tar xvfz setuptools-0.6c11.tar.gz +cd setuptools-0.6c11 +python setup.py build +python setup.py install + + +OCA installation + +unzip 0.2.3 +cd lukaszo-python-oca-61992c1 +python setup.py build +python setup.py install + +Configuration +------------- +./src/conpaas/mysql/server/agent/configuration.cnf + +Running agent-server +-------------------- +How to run the agent-server: +sudo PYTHONPATH= python server.py + +Running unit tests +------------------ + +make check + +or directly invoking by + +PYTHONPATH=./src:./contrib python src/conpaas/mysql/test/unit/agent.py +PYTHONPATH=./src:./contrib python src/conpaas/mysql/test/unit/manager.py + +Generating documentation +======================== +../doc$ sphinx-build -a -b html source/ build/ diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..68c57195a76742a6cb7246dda34b8dac884759d1 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/__init__.py @@ -0,0 +1,58 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +libcloud provides a unified interface to the cloud computing resources. + +@var __version__: Current version of libcloud +""" + +__all__ = ["__version__", "enable_debug"] + +__version__ = "0.5.0" + +def enable_debug(fo): + """ + Enable library wide debugging to a file-like object. + + @param fo: Where to append debugging information + @type fo: File like object, only write operations are used. + """ + from libcloud.base import (ConnectionKey, + LoggingHTTPConnection, + LoggingHTTPSConnection) + LoggingHTTPSConnection.log = fo + LoggingHTTPConnection.log = fo + ConnectionKey.conn_classes = (LoggingHTTPConnection, LoggingHTTPSConnection) + +def _init_once(): + """ + Utility function that is ran once on Library import. + + This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists + is where we will log debug information about the provider transports. + + If LIBCLOUD_DEBUG is not a path, C{/tmp/libcloud_debug.log} is used by + default. 
+ """ + import os + d = os.getenv("LIBCLOUD_DEBUG") + if d: + if d.isdigit(): + d = "/tmp/libcloud_debug.log" + fo = open(d, "a") + enable_debug(fo) + +_init_once() diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/base.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ef340cb884ab42e0b0411b9ceae937c5d54edd49 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/base.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.common.base import RawResponse, Response, LoggingConnection +from libcloud.common.base import LoggingHTTPSConnection, LoggingHTTPConnection +from libcloud.common.base import ConnectionKey, ConnectionUserAndKey +from libcloud.compute.base import Node, NodeSize, NodeImage +from libcloud.compute.base import NodeLocation, NodeAuthSSHKey, NodeAuthPassword +from libcloud.compute.base import NodeDriver, is_private_subnet + +__all__ = ['RawResponse', + 'Response', + 'LoggingConnection', + 'LoggingHTTPSConnection', + 'LoggingHTTPConnection', + 'ConnectionKey', + 'ConnectionUserAndKey', + 'Node', + 'NodeSize', + 'NodeImage', + 'NodeLocation', + 'NodeAuthSSHKey', + 'NodeAuthPassword', + 'NodeDriver', + 'is_private_subnet'] + +from libcloud.utils import deprecated_warning + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/aws.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/aws.py new file mode 100644 index 0000000000000000000000000000000000000000..20b72696934c7af9bee62cace43e6db421c6b3f3 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/aws.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from xml.etree import ElementTree as ET + +from libcloud.common.base import Response +from libcloud.common.types import MalformedResponseError + +class AWSBaseResponse(Response): + def parse_body(self): + if not self.body: + return None + + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError("Failed to parse XML", body=self.body) + return body diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/base.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/base.py new file mode 100644 index 0000000000000000000000000000000000000000..896c87e90b6ca4eb471ddf8ca48bd66da9d03ff4 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/base.py @@ -0,0 +1,451 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import httplib
import urllib
import StringIO
import ssl

from pipes import quote as pquote

import libcloud

from libcloud.httplib_ssl import LibcloudHTTPSConnection
from httplib import HTTPConnection as LibcloudHTTPConnection


class Response(object):
    """
    Base class wrapping a raw httplib response.

    Provider drivers subclass this and override parse_body /
    parse_error / success to adapt their wire format.
    """
    NODE_STATE_MAP = {}

    object = None
    body = None
    status = httplib.OK
    headers = {}
    error = None
    connection = None

    def __init__(self, response):
        # Drain the underlying httplib response eagerly and cache the
        # interesting pieces on the instance.
        self.body = response.read()
        self.status = response.status
        self.headers = dict(response.getheaders())
        self.error = response.reason

        if not self.success():
            raise Exception(self.parse_error())

        self.object = self.parse_body()

    def parse_body(self):
        """
        Parse the response body.

        Override in a provider's subclass.

        @return: Parsed body.
        """
        return self.body

    def parse_error(self):
        """
        Parse error messages out of the body.

        Override in a provider's subclass.

        @return: Parsed error.
        """
        return self.body

    def success(self):
        """
        Whether the request was successful; by default this means an
        HTTP 200 (OK) or 201 (CREATED) status. The meaning can be
        arbitrary in subclasses (authenticated? node created?).

        @return: C{True} or C{False}
        """
        return self.status in (httplib.OK, httplib.CREATED)


class RawResponse(Response):
    """
    Lazy response wrapper used for "raw" (streaming) requests: the
    underlying httplib response is only fetched on first attribute
    access, via the ``response`` property.
    """

    def __init__(self, response=None):
        # NOTE: deliberately does not call Response.__init__ — nothing
        # is read from the wire until a property is touched.
        self._status = None
        self._response = None
        self._headers = {}
        self._error = None
        self._reason = None

    @property
    def response(self):
        if not self._response:
            raw = self.connection.connection.getresponse()
            # body is the live response object itself so callers can
            # stream from it; it is not read() here.
            self._response, self.body = raw, raw
            if not self.success():
                self.parse_error()
        return self._response

    @property
    def status(self):
        if not self._status:
            self._status = self.response.status
        return self._status

    @property
    def headers(self):
        if not self._headers:
            self._headers = dict(self.response.getheaders())
        return self._headers

    @property
    def reason(self):
        if not self._reason:
            self._reason = self.response.reason
        return self._reason
class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
    """
    Utility class for logging HTTPS connections.
    """

    def getresponse(self):
        r = LibcloudHTTPSConnection.getresponse(self)
        if self.log is not None:
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        # BUG FIX: ``headers`` defaults to None but was mutated
        # unconditionally, raising AttributeError for callers that
        # omitted it.
        if headers is None:
            headers = {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = "# -------- begin %d request ----------\n" % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPSConnection.request(self, method, url, body,
                                               headers)

class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
    """
    Utility class for logging plain HTTP connections.
    """

    def getresponse(self):
        r = LibcloudHTTPConnection.getresponse(self)
        if self.log is not None:
            r, rv = self._log_response(r)
            self.log.write(rv + "\n")
            self.log.flush()
        return r

    def request(self, method, url, body=None, headers=None):
        # Same defensive default as the HTTPS variant: never mutate None.
        if headers is None:
            headers = {}
        headers.update({'X-LC-Request-ID': str(id(self))})
        if self.log is not None:
            pre = "# -------- begin %d request ----------\n" % id(self)
            self.log.write(pre +
                           self._log_curl(method, url, body, headers) + "\n")
            self.log.flush()
        return LibcloudHTTPConnection.request(self, method, url,
                                              body, headers)
+ + @type host: C{str} + @param host: Optional host to override our default + + @type port: C{int} + @param port: Optional port to override our default + + @returns: A connection + """ + host = host or self.host + + # port might be included in service url, so pick it if it's present + if ":" in host: + host, port = host.split(":") + else: + port = port or self.port[self.secure] + + kwargs = {'host': host, 'port': int(port)} + + connection = self.conn_classes[self.secure](**kwargs) + # You can uncoment this line, if you setup a reverse proxy server + # which proxies to your endpoint, and lets you easily capture + # connections in cleartext when you setup the proxy to do SSL + # for you + #connection = self.conn_classes[False]("127.0.0.1", 8080) + + self.connection = connection + + def _user_agent(self): + return 'libcloud/%s (%s)%s' % ( + libcloud.__version__, + self.driver.name, + "".join([" (%s)" % x for x in self.ua])) + + def user_agent_append(self, token): + """ + Append a token to a user agent string. + + Users of the library should call this to uniquely identify thier requests + to a provider. + + @type token: C{str} + @param token: Token to add to the user agent. + """ + self.ua.append(token) + + def request(self, + action, + params=None, + data='', + headers=None, + method='GET', + raw=False, + host=None): + """ + Request a given `action`. + + Basically a wrapper around the connection + object's `request` that does some helpful pre-processing. + + @type action: C{str} + @param action: A path + + @type params: C{dict} + @param params: Optional mapping of additional parameters to send. If + None, leave as an empty C{dict}. + + @type data: C{unicode} + @param data: A body of data to send with the request. + + @type headers: C{dict} + @param headers: Extra headers to add to the request + None, leave as an empty C{dict}. + + @type method: C{str} + @param method: An HTTP method such as "GET" or "POST". 
+ + @type raw: C{bool} + @param raw: True to perform a "raw" request aka only send the headers + and use the rawResponseCls class. This is used with + storage API when uploading a file. + + @type host: C{str} + @param host: To which host to send the request. If not specified, + self.host is used. + + @return: An instance of type I{responseCls} + """ + if params is None: + params = {} + if headers is None: + headers = {} + + self.action = action + self.method = method + # Extend default parameters + params = self.add_default_params(params) + # Extend default headers + headers = self.add_default_headers(headers) + # We always send a user-agent header + headers.update({'User-Agent': self._user_agent()}) + host = host or self.host + headers.update({'Host': host}) + # Encode data if necessary + if data != '' and data != None: + data = self.encode_data(data) + + if data is not None: + headers.update({'Content-Length': str(len(data))}) + + params, headers = self.pre_connect_hook(params, headers) + + if params: + url = '?'.join((action, urllib.urlencode(params))) + else: + url = action + + # Removed terrible hack...this a less-bad hack that doesn't execute a + # request twice, but it's still a hack. + self.connect(host=host) + try: + # @TODO: Should we just pass File object as body to request method + # instead of dealing with splitting and sending the file ourselves? + if raw: + self.connection.putrequest(method, url) + + for key, value in headers.iteritems(): + self.connection.putheader(key, value) + + self.connection.endheaders() + else: + self.connection.request(method=method, url=url, body=data, + headers=headers) + except ssl.SSLError, e: + raise ssl.SSLError(str(e)) + + if raw: + response = self.rawResponseCls() + else: + response = self.responseCls(self.connection.getresponse()) + + response.connection = self + return response + + def add_default_params(self, params): + """ + Adds default parameters (such as API key, version, etc.) 
+ to the passed `params` + + Should return a dictionary. + """ + return params + + def add_default_headers(self, headers): + """ + Adds default headers (such as Authorization, X-Foo-Bar) + to the passed `headers` + + Should return a dictionary. + """ + return headers + + def pre_connect_hook(self, params, headers): + """ + A hook which is called before connecting to the remote server. + This hook can perform a final manipulation on the params, headers and + url parameters. + + @type params: C{dict} + @param params: Request parameters. + + @type headers: C{dict} + @param headers: Request headers. + """ + return params, headers + + def encode_data(self, data): + """ + Encode body data. + + Override in a provider's subclass. + """ + return data + +class ConnectionUserAndKey(ConnectionKey): + """ + Base connection which accepts a user_id and key + """ + + user_id = None + + def __init__(self, user_id, key, secure=True, host=None, port=None): + super(ConnectionUserAndKey, self).__init__(key, secure, host, port) + self.user_id = user_id diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/gogrid.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/gogrid.py new file mode 100644 index 0000000000000000000000000000000000000000..3b775495a9ccf2ba52b660cd0bf0e30eddf7d60f --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/gogrid.py @@ -0,0 +1,178 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import hashlib +import time + +try: + import json +except ImportError: + import simplejson as json + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.common.types import MalformedResponseError +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.compute.base import NodeLocation + +HOST = 'api.gogrid.com' +PORTS_BY_SECURITY = { True: 443, False: 80 } +API_VERSION = '1.8' + +__all__ = ["GoGridResponse", + "GoGridConnection", + "GoGridIpAddress", + "BaseGoGridDriver", +] + +class GoGridResponse(Response): + + def __init__(self, *args, **kwargs): + self.driver = BaseGoGridDriver + super(GoGridResponse, self).__init__(*args, **kwargs) + + def success(self): + if self.status == 403: + raise InvalidCredsError('Invalid credentials', self.driver) + if self.status == 401: + raise InvalidCredsError('API Key has insufficient rights', self.driver) + if not self.body: + return None + try: + return json.loads(self.body)['status'] == 'success' + except ValueError: + raise MalformedResponseError('Malformed reply', + body=self.body, driver=self.driver) + + def parse_body(self): + if not self.body: + return None + return json.loads(self.body) + + def parse_error(self): + try: + return json.loads(self.body)["list"][0]['message'] + except (ValueError, KeyError): + return None + +class GoGridConnection(ConnectionUserAndKey): + """ + Connection class for the GoGrid driver + """ + + host = HOST + responseCls = GoGridResponse + + def add_default_params(self, params): + params["api_key"] = self.user_id + params["v"] = 
API_VERSION + params["format"] = 'json' + params["sig"] = self.get_signature(self.user_id, self.key) + + return params + + def get_signature(self, key, secret): + """ create sig from md5 of key + secret + time """ + m = hashlib.md5(key+secret+str(int(time.time()))) + return m.hexdigest() + +class GoGridIpAddress(object): + """ + IP Address + """ + + def __init__(self, id, ip, public, state, subnet): + self.id = id + self.ip = ip + self.public = public + self.state = state + self.subnet = subnet + +class BaseGoGridDriver(object): + """GoGrid has common object model for services they + provide, like locations and IP, so keep handling of + these things in a single place.""" + + name = "GoGrid" + + def _get_ip(self, element): + return element.get('ip').get('ip') + + def _to_ip(self, element): + ip = GoGridIpAddress(id=element['id'], + ip=element['ip'], + public=element['public'], + subnet=element['subnet'], + state=element["state"]["name"]) + ip.location = self._to_location(element['datacenter']) + return ip + + def _to_ips(self, object): + return [ self._to_ip(el) + for el in object['list'] ] + + def _to_location(self, element): + location = NodeLocation(id=element['id'], + name=element['name'], + country="US", + driver=self.connection.driver) + return location + + def _to_locations(self, object): + return [self._to_location(el) + for el in object['list']] + + + def ex_list_ips(self, **kwargs): + """Return list of IP addresses assigned to + the account. + + @keyword public: set to True to list only + public IPs or False to list only + private IPs. 
Set to None or not specify + at all not to filter by type + @type public: C{bool} + @keyword assigned: set to True to list only addresses + assigned to servers, False to list unassigned + addresses and set to None or don't set at all + not no filter by state + @type assigned: C{bool} + @keyword location: filter IP addresses by location + @type location: L{NodeLocation} + @return: C{list} of L{GoGridIpAddress}es + """ + + params = {} + + if "public" in kwargs and kwargs["public"] is not None: + params["ip.type"] = {True: "Public", + False: "Private"}[kwargs["public"]] + if "assigned" in kwargs and kwargs["assigned"] is not None: + params["ip.state"] = {True: "Assigned", + False: "Unassigned"}[kwargs["assigned"]] + if "location" in kwargs and kwargs['location'] is not None: + params['datacenter'] = kwargs['location'].id + + ips = self._to_ips( + self.connection.request('/api/grid/ip/list', + params=params).object) + return ips + + def _get_first_ip(self, location=None): + ips = self.ex_list_ips(public=True, assigned=False, location=location) + try: + return ips[0].ip + except IndexError: + raise LibcloudError('No public unassigned IPs left', + self.driver) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/rackspace.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/rackspace.py new file mode 100644 index 0000000000000000000000000000000000000000..953a96147bf9c377cd9a3fceee26125002a8965d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/rackspace.py @@ -0,0 +1,120 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Common utilities for Rackspace Cloud Servers and Cloud Files +""" +import httplib +from urllib2 import urlparse +from libcloud.common.base import ConnectionUserAndKey +from libcloud.compute.types import InvalidCredsError + +AUTH_HOST_US='auth.api.rackspacecloud.com' +AUTH_HOST_UK='lon.auth.api.rackspacecloud.com' +AUTH_API_VERSION = 'v1.0' + +__all__ = [ + "RackspaceBaseConnection", + "AUTH_HOST_US", + "AUTH_HOST_UK" + ] + +class RackspaceBaseConnection(ConnectionUserAndKey): + def __init__(self, user_id, key, secure): + self.cdn_management_url = None + self.storage_url = None + self.auth_token = None + self.__host = None + super(RackspaceBaseConnection, self).__init__( + user_id, key, secure=secure) + + def add_default_headers(self, headers): + headers['X-Auth-Token'] = self.auth_token + headers['Accept'] = self.accept_format + return headers + + @property + def request_path(self): + return self._get_request_path(url_key=self._url_key) + + @property + def host(self): + # Default to server_host + return self._get_host(url_key=self._url_key) + + def _get_request_path(self, url_key): + value_key = '__request_path_%s' % (url_key) + value = getattr(self, value_key, None) + + if not value: + self._populate_hosts_and_request_paths() + value = getattr(self, value_key, None) + + return value + + def _get_host(self, url_key): + value_key = '__%s' % (url_key) + value = getattr(self, value_key, None) + + if not value: + self._populate_hosts_and_request_paths() + value = getattr(self, value_key, None) + + return value + + def 
_populate_hosts_and_request_paths(self): + """ + Rackspace uses a separate host for API calls which is only provided + after an initial authentication request. If we haven't made that + request yet, do it here. Otherwise, just return the management host. + """ + if not self.auth_token: + # Initial connection used for authentication + conn = self.conn_classes[self.secure]( + self.auth_host, self.port[self.secure]) + conn.request( + method='GET', + url='/%s' % (AUTH_API_VERSION), + headers={ + 'X-Auth-User': self.user_id, + 'X-Auth-Key': self.key + } + ) + + resp = conn.getresponse() + + if resp.status != httplib.NO_CONTENT: + raise InvalidCredsError() + + headers = dict(resp.getheaders()) + + try: + self.server_url = headers['x-server-management-url'] + self.storage_url = headers['x-storage-url'] + self.cdn_management_url = headers['x-cdn-management-url'] + self.lb_url = self.server_url.replace("servers", "ord.loadbalancers") + self.auth_token = headers['x-auth-token'] + except KeyError: + raise InvalidCredsError() + + for key in ['server_url', 'storage_url', 'cdn_management_url', + 'lb_url']: + scheme, server, request_path, param, query, fragment = ( + urlparse.urlparse(getattr(self, key))) + # Set host to where we want to make further requests to + setattr(self, '__%s' % (key), server) + setattr(self, '__request_path_%s' % (key), request_path) + + conn.close() diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/types.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/types.py new file mode 100644 index 0000000000000000000000000000000000000000..f2f1b5d238e40c14a392446359dad1f28f433d32 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/common/types.py @@ -0,0 +1,65 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__all__ = [
    "LibcloudError",
    "MalformedResponseError",
    "InvalidCredsError",
    "InvalidCredsException"
    ]

class LibcloudError(Exception):
    """The base class for other libcloud exceptions"""

    def __init__(self, value, driver=None):
        self.value = value
        self.driver = driver

    def __str__(self):
        # BUG FIX: the format text was stripped by markup mangling and
        # __str__ returned the empty string; reconstructed from the
        # upstream libcloud implementation.
        return ("<LibcloudError in "
                + repr(self.driver)
                + " "
                + repr(self.value) + ">")

class MalformedResponseError(LibcloudError):
    """Exception for the cases when a provider returns a malformed
    response, e.g. you request JSON and provider returns
    '<h3>something</h3>' due to some error on their side."""

    def __init__(self, value, body=None, driver=None):
        self.value = value
        self.driver = driver
        self.body = body

    def __str__(self):
        # BUG FIX: prefix text was stripped; reconstructed from upstream.
        return ("<MalformedResponseException in "
                + repr(self.driver)
                + " "
                + repr(self.value)
                + ">: "
                + repr(self.body))

class InvalidCredsError(LibcloudError):
    """Exception used when invalid credentials are used on a provider."""

    def __init__(self, value='Invalid credentials with the provider',
                 driver=None):
        self.value = value
        self.driver = driver

    def __str__(self):
        return repr(self.value)

# Deprecated alias of L{InvalidCredsError}
InvalidCredsException = InvalidCredsError
class Node(object):
    """
    Provide a common interface for handling nodes of all types.

    The Node object provides the interface in libcloud through which
    we can manipulate nodes in different cloud providers in the same
    way.  Node objects don't actually do much directly themselves,
    instead the node driver handles the connection to the node.

    You don't normally create a node object yourself; instead you use
    a driver and then have that create the node for you.

    >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
    >>> driver = DummyNodeDriver(0)
    >>> node = driver.create_node()
    >>> node.public_ip[0]
    '127.0.0.3'
    >>> node.name
    'dummy-3'

    You can also get nodes from the driver's list_node function.

    >>> node = driver.list_nodes()[0]
    >>> node.name
    'dummy-1'

    The node keeps a reference to its own driver, which means that we
    can work on nodes from different providers without having to know
    which is which.

    >>> driver = DummyNodeDriver(72)
    >>> node2 = driver.create_node()
    >>> node.driver.creds
    0
    >>> node2.driver.creds
    72

    Although Node objects can be subclassed, this isn't normally done.
    Instead, any driver-specific information is stored in the "extra"
    property of the node.

    >>> node.extra
    {'foo': 'bar'}
    """

    def __init__(self, id, name, state, public_ip, private_ip,
                 driver, extra=None):
        # A falsy id (None, 0, '') is normalised to None rather than 'None'.
        self.id = str(id) if id else None
        self.name = name
        self.state = state
        self.public_ip = public_ip
        self.private_ip = private_ip
        self.driver = driver
        self.uuid = self.get_uuid()
        if not extra:
            self.extra = {}
        else:
            self.extra = extra

    def get_uuid(self):
        """Unique hash for this node

        @return: C{string}

        The hash is a function of an SHA1 hash of the node's ID and
        its driver which means that it should be unique between all
        nodes.  In some subclasses (e.g. GoGrid) there is no ID
        available so the public IP address is used.  This means that,
        unlike a properly done system UUID, the same UUID may mean a
        different system install at a different time

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> node.get_uuid()
        'd3748461511d8b9b0e0bfa0d4d3383a619a2bb9f'

        Note, for example, that this example will always produce the
        same UUID!
        """
        return hashlib.sha1("%s:%d" % (self.id, self.driver.type)).hexdigest()

    def reboot(self):
        """Reboot this node

        @return: C{bool}

        This calls the node's driver and reboots the node

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> node = driver.create_node()
        >>> from libcloud.compute.types import NodeState
        >>> node.state == NodeState.RUNNING
        True
        >>> node.reboot()
        True
        >>> node.state == NodeState.REBOOTING
        True
        """
        return self.driver.reboot_node(self)

    def destroy(self):
        """Destroy this node

        @return: C{bool}

        This calls the node's driver and destroys the node

        >>> from libcloud.compute.drivers.dummy import DummyNodeDriver
        >>> driver = DummyNodeDriver(0)
        >>> from libcloud.compute.types import NodeState
        >>> node = driver.create_node()
        >>> node.state == NodeState.RUNNING
        True
        >>> node.destroy()
        True
        >>> node.state == NodeState.RUNNING
        False
        """
        return self.driver.destroy_node(self)

    def __repr__(self):
        # BUG FIX: the format string had been stripped to '' by markup
        # mangling, so '' % (5 args) raised TypeError.  Reconstructed
        # from the upstream libcloud implementation.
        return (('<Node: uuid=%s, name=%s, state=%s, public_ip=%s, '
                 'provider=%s ...>')
                % (self.uuid, self.name, self.state, self.public_ip,
                   self.driver.name))
+ + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> size = driver.list_sizes()[0] + >>> size.ram + 128 + >>> size.bandwidth + 500 + >>> size.price + 4 + """ + + def __init__(self, id, name, ram, disk, bandwidth, price, driver): + self.id = str(id) + self.name = name + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name)) + + +class NodeImage(object): + """ + An operating system image. + + NodeImage objects are typically returned by the driver for the + cloud provider in response to the list_images function + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> image = driver.list_images()[0] + >>> image.name + 'Ubuntu 9.10' + + Apart from name and id, there is no further standard information; + other parameters are stored in a driver specific "extra" variable + + When creating a node, a node image should be given as an argument + to the create_node function to decide which OS image to use. + + >>> node = driver.create_node(image=image) + + """ + + def __init__(self, id, name, driver, extra=None): + self.id = str(id) + self.name = name + self.driver = driver + if not extra: + self.extra = {} + else: + self.extra = extra + def __repr__(self): + return (('') + % (self.id, self.name, self.driver.name)) + +class NodeLocation(object): + """ + A physical location where nodes can be. 
+ + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> location = driver.list_locations()[0] + >>> location.country + 'US' + """ + + def __init__(self, id, name, country, driver): + self.id = str(id) + self.name = name + self.country = country + self.driver = driver + def __repr__(self): + return (('') + % (self.id, self.name, self.country, self.driver.name)) + +class NodeAuthSSHKey(object): + """ + An SSH key to be installed for authentication to a node. + + This is the actual contents of the users ssh public key which will + normally be installed as root's public key on the node. + + >>> pubkey = '...' # read from file + >>> from libcloud.compute.base import NodeAuthSSHKey + >>> k = NodeAuthSSHKey(pubkey) + >>> k + + + """ + def __init__(self, pubkey): + self.pubkey = pubkey + def __repr__(self): + return '' + +class NodeAuthPassword(object): + """ + A password to be used for authentication to a node. + """ + def __init__(self, password): + self.password = password + def __repr__(self): + return '' + +class NodeDriver(object): + """ + A base NodeDriver class to derive from + + This class is always subclassed by a specific driver. For + examples of base behavior of most functions (except deploy node) + see the dummy driver. + + """ + + connectionCls = ConnectionKey + name = None + type = None + port = None + features = {"create_node": []} + """ + List of available features for a driver. + - L{create_node} + - ssh_key: Supports L{NodeAuthSSHKey} as an authentication method + for nodes. + - password: Supports L{NodeAuthPassword} as an authentication + method for nodes. + - generates_password: Returns a password attribute on the Node + object returned from creation. 
+ """ + + NODE_STATE_MAP = {} + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + """ + @keyword key: API key or username to used + @type key: str + + @keyword secret: Secret password to be used + @type secret: str + + @keyword secure: Weither to use HTTPS or HTTP. Note: Some providers + only support HTTPS, and it is on by default. + @type secure: bool + + @keyword host: Override hostname used for connections. + @type host: str + + @keyword port: Override port used for connections. + @type port: int + """ + self.key = key + self.secret = secret + self.secure = secure + args = [self.key] + + if self.secret != None: + args.append(self.secret) + + args.append(secure) + + if host != None: + args.append(host) + + if port != None: + args.append(port) + + self.connection = self.connectionCls(*args) + + self.connection.driver = self + self.connection.connect() + + def create_node(self, **kwargs): + """Create a new node instance. + + @keyword name: String with a name for this new node (required) + @type name: str + + @keyword size: The size of resources allocated to this node. + (required) + @type size: L{NodeSize} + + @keyword image: OS Image to boot on node. (required) + @type image: L{NodeImage} + + @keyword location: Which data center to create a node in. If empty, + undefined behavoir will be selected. (optional) + @type location: L{NodeLocation} + + @keyword auth: Initial authentication information for the node + (optional) + @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + + @return: The newly created L{Node}. + """ + raise NotImplementedError, \ + 'create_node not implemented for this driver' + + def destroy_node(self, node): + """Destroy a node. + + Depending upon the provider, this may destroy all data associated with + the node, including backups. 
+ + @return: C{bool} True if the destroy was successful, otherwise False + """ + raise NotImplementedError, \ + 'destroy_node not implemented for this driver' + + def reboot_node(self, node): + """ + Reboot a node. + @return: C{bool} True if the reboot was successful, otherwise False + """ + raise NotImplementedError, \ + 'reboot_node not implemented for this driver' + + def list_nodes(self): + """ + List all nodes + @return: C{list} of L{Node} objects + """ + raise NotImplementedError, \ + 'list_nodes not implemented for this driver' + + def list_images(self, location=None): + """ + List images on a provider + @return: C{list} of L{NodeImage} objects + """ + raise NotImplementedError, \ + 'list_images not implemented for this driver' + + def list_sizes(self, location=None): + """ + List sizes on a provider + @return: C{list} of L{NodeSize} objects + """ + raise NotImplementedError, \ + 'list_sizes not implemented for this driver' + + def list_locations(self): + """ + List data centers for a provider + @return: C{list} of L{NodeLocation} objects + """ + raise NotImplementedError, \ + 'list_locations not implemented for this driver' + + def deploy_node(self, **kwargs): + """ + Create a new node, and start deployment. + + Depends on a Provider Driver supporting either using a specific password + or returning a generated password. + + This function may raise a L{DeploymentException}, if a create_node + call was successful, but there is a later error (like SSH failing or + timing out). This exception includes a Node object which you may want + to destroy if incomplete deployments are not desirable. + + @keyword deploy: Deployment to run once machine is online and availble to SSH. 
+ @type deploy: L{Deployment} + + @keyword ssh_username: Optional name of the account which is used when connecting to + SSH server (default is root) + @type ssh_username: C{str} + + @keyword ssh_port: Optional SSH server port (default is 22) + @type ssh_port: C{int} + + @keyword ssh_timeout: Optional SSH connection timeout in seconds + (default is None) + @type ssh_timeout: C{float} + + See L{NodeDriver.create_node} for more keyword args. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> from libcloud.deployment import ScriptDeployment, MultiStepDeployment + >>> from libcloud.compute.base import NodeAuthSSHKey + >>> driver = DummyNodeDriver(0) + >>> key = NodeAuthSSHKey('...') # read from file + >>> script = ScriptDeployment("yum -y install emacs strace tcpdump") + >>> msd = MultiStepDeployment([key, script]) + >>> def d(): + ... try: + ... node = driver.deploy_node(deploy=msd) + ... except NotImplementedError: + ... print "not implemented for dummy driver" + >>> d() + not implemented for dummy driver + + Deploy node is typically not overridden in subclasses. The + existing implementation should be able to handle most such. + """ + # TODO: support ssh keys + # FIX: this method is too long and complicated + WAIT_PERIOD=3 + password = None + + if 'generates_password' not in self.features["create_node"]: + if 'password' not in self.features["create_node"]: + raise NotImplementedError, \ + 'deploy_node not implemented for this driver' + + if not kwargs.has_key('auth'): + kwargs['auth'] = NodeAuthPassword(os.urandom(16).encode('hex')) + + password = kwargs['auth'].password + node = self.create_node(**kwargs) + try: + if 'generates_password' in self.features["create_node"]: + password = node.extra.get('password') + start = time.time() + end = start + (60 * 15)# FIX: this should be soft-coded + while time.time() < end: + # need to wait until we get a public IP address. 
+ # TODO: there must be a better way of doing this + time.sleep(WAIT_PERIOD) + nodes = self.list_nodes() + nodes = filter(lambda n: n.uuid == node.uuid, nodes) + if len(nodes) == 0: + raise DeploymentError( + node, + ("Booted node[%s] " % node + + "is missing from list_nodes.")) + if len(nodes) > 1: + raise DeploymentError( + node, + ("Booted single node[%s], " % node + + "but multiple nodes have same UUID")) + + node = nodes[0] + + if (node.public_ip is not None + and node.public_ip != "" + and node.state == NodeState.RUNNING): + break + + ssh_username = kwargs.get('ssh_username', 'root') + ssh_port = kwargs.get('ssh_port', 22) + ssh_timeout = kwargs.get('ssh_timeout', 20) + + client = SSHClient(hostname=node.public_ip[0], + port=ssh_port, username=ssh_username, + password=password, + timeout=ssh_timeout) + + while time.time() < end: + try: + client.connect() + except (IOError, socket.gaierror, socket.error), e: + # Retry if a connection is refused or timeout + # occured + client.close() + time.sleep(WAIT_PERIOD) + continue + + max_tries, tries = 3, 0 + while tries < max_tries: + try: + n = kwargs["deploy"].run(node, client) + client.close() + raise + except Exception, e: + tries += 1 + if tries >= max_tries: + raise DeploymentError(node, + 'Failed after %d tries' % (max_tries)) + + except DeploymentError: + raise + except Exception, e: + raise DeploymentError(node, e) + return n + + def _get_size_price(self, size_id): + return get_size_price(driver_type='compute', + driver_name=self.api_name, + size_id=size_id) + + +def is_private_subnet(ip): + """ + Utility function to check if an IP address is inside a private subnet. + + @type ip: C{str} + @keyword ip: IP address to check + + @return: C{bool} if the specified IP address is private. 
+ """ + priv_subnets = [ {'subnet': '10.0.0.0', 'mask': '255.0.0.0'}, + {'subnet': '172.16.0.0', 'mask': '255.240.0.0'}, + {'subnet': '192.168.0.0', 'mask': '255.255.0.0'} ] + + ip = struct.unpack('I',socket.inet_aton(ip))[0] + + for network in priv_subnets: + subnet = struct.unpack('I',socket.inet_aton(network['subnet']))[0] + mask = struct.unpack('I',socket.inet_aton(network['mask']))[0] + + if (ip & mask) == (subnet & mask): + return True + + return False + + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/deployment.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/deployment.py new file mode 100644 index 0000000000000000000000000000000000000000..e52b37c501187a4c72d4d45478f57b2b06be2e52 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/deployment.py @@ -0,0 +1,130 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Provides generic deployment steps for machines post boot. +""" +import os + +class Deployment(object): + """ + Base class for deployment tasks. + """ + + def run(self, node, client): + """ + Runs this deployment task on C{node} using the C{client} provided. 
+ + @type node: L{Node} + @keyword node: Node to operate one + + @type client: L{BaseSSHClient} + @keyword client: Connected SSH client to use. + + @return: L{Node} + """ + raise NotImplementedError, \ + 'run not implemented for this deployment' + + +class SSHKeyDeployment(Deployment): + """ + Installs a public SSH Key onto a host. + """ + + def __init__(self, key): + """ + @type key: C{str} + @keyword key: Contents of the public key write + """ + self.key = key + + def run(self, node, client): + """ + Installs SSH key into C{.ssh/authorized_keys} + + See also L{Deployment.run} + """ + client.put(".ssh/authorized_keys", contents=self.key) + return node + +class ScriptDeployment(Deployment): + """ + Runs an arbitrary Shell Script task. + """ + + def __init__(self, script, name=None, delete=False): + """ + @type script: C{str} + @keyword script: Contents of the script to run + + @type name: C{str} + @keyword name: Name of the script to upload it as, if not specified, a random name will be choosen. + + @type delete: C{bool} + @keyword delete: Whether to delete the script on completion. + """ + self.script = script + self.stdout = None + self.stderr = None + self.exit_status = None + self.delete = delete + self.name = name + if self.name is None: + self.name = "/root/deployment_%s.sh" % (os.urandom(4).encode('hex')) + + def run(self, node, client): + """ + Uploads the shell script and then executes it. + + See also L{Deployment.run} + """ + client.put(path=self.name, chmod=755, contents=self.script) + self.stdout, self.stderr, self.exit_status = client.run(self.name) + if self.delete: + client.delete(self.name) + return node + +class MultiStepDeployment(Deployment): + """ + Runs a chain of Deployment steps. + """ + def __init__(self, add = None): + """ + @type add: C{list} + @keyword add: Deployment steps to add. + """ + self.steps = [] + self.add(add) + + def add(self, add): + """Add a deployment to this chain. 
+ + @type add: Single L{Deployment} or a C{list} of L{Deployment} + @keyword add: Adds this deployment to the others already in this object. + """ + if add is not None: + add = add if isinstance(add, (list, tuple)) else [add] + self.steps.extend(add) + + def run(self, node, client): + """ + Run each deployment that has been added. + + See also L{Deployment.run} + """ + for s in self.steps: + node = s.run(node, client) + return node diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b1821adf1de4b0930df86fc901f98706796b78ff --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/__init__.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Drivers for working with different providers +""" + +__all__ = [ + 'brightbox', + 'bluebox', + 'dummy', + 'ec2', + 'ecp', + 'elastichosts', + 'cloudsigma', + 'gogrid', + 'ibm_sbc', + 'linode', + 'opennebula', + 'rackspace', + 'rimuhosting', + 'slicehost', + 'softlayer', + 'vcloud', + 'voxel', + 'vpsnet', +] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/bluebox.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/bluebox.py new file mode 100644 index 0000000000000000000000000000000000000000..3ff4f6e05deb89624da30457a7b26af48099ef37 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/bluebox.py @@ -0,0 +1,234 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +libcloud driver for the Blue Box Blocks API + +This driver implements all libcloud functionality for the Blue Box Blocks API. 
+ +Blue Box home page http://bluebox.net +Blue Box API documentation https://boxpanel.bluebox.net/public/the_vault/index.php/Blocks_API +""" + +import copy +import urllib +import base64 + +try: + import json +except: + import simplejson as json + +from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState, InvalidCredsError +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation +from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey + +# Current end point for Blue Box API. +BLUEBOX_API_HOST = "boxpanel.bluebox.net" + +# The API doesn't currently expose all of the required values for libcloud, +# so we simply list what's available right now, along with all of the various +# attributes that are needed by libcloud. +BLUEBOX_INSTANCE_TYPES = { + '1gb': { + 'id': '94fd37a7-2606-47f7-84d5-9000deda52ae', + 'name': 'Block 1GB Virtual Server', + 'ram': 1024, + 'disk': 20, + 'cpu': 0.5 + }, + '2gb': { + 'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092', + 'name': 'Block 2GB Virtual Server', + 'ram': 2048, + 'disk': 25, + 'cpu': 1 + }, + '4gb': { + 'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58', + 'name': 'Block 4GB Virtual Server', + 'ram': 4096, + 'disk': 50, + 'cpu': 2 + }, + '8gb': { + 'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251', + 'name': 'Block 8GB Virtual Server', + 'ram': 8192, + 'disk': 100, + 'cpu': 4 + } +} + +RAM_PER_CPU = 2048 + +NODE_STATE_MAP = { 'queued': NodeState.PENDING, + 'building': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'error': NodeState.TERMINATED, + 'unknown': NodeState.UNKNOWN } + +class BlueboxResponse(Response): + def parse_body(self): + try: + js = json.loads(self.body) + return js + except ValueError: + return self.body + + def parse_error(self): + if int(self.status) == 401: + if not self.body: + raise InvalidCredsError(str(self.status) + ': ' + self.error) + 
else: + raise InvalidCredsError(self.body) + return self.body + +class BlueboxNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, self.disk, self.price, self.driver.name)) + +class BlueboxConnection(ConnectionUserAndKey): + """ + Connection class for the Bluebox driver + """ + + host = BLUEBOX_API_HOST + secure = True + responseCls = BlueboxResponse + + def add_default_headers(self, headers): + user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key)) + headers['Authorization'] = 'Basic %s' % (user_b64) + return headers + +class BlueboxNodeDriver(NodeDriver): + """ + Bluebox Blocks node driver + """ + + connectionCls = BlueboxConnection + type = Provider.BLUEBOX + api_name = 'bluebox' + name = 'Bluebox Blocks' + + def list_nodes(self): + result = self.connection.request('/api/blocks.json') + return [self._to_node(i) for i in result.object] + + def list_sizes(self, location=None): + sizes = [] + for key, values in BLUEBOX_INSTANCE_TYPES.iteritems(): + attributes = copy.deepcopy(values) + attributes.update({ 'price': self._get_size_price(size_id=key) }) + sizes.append(BlueboxNodeSize(driver=self.connection.driver, + **attributes)) + + return sizes + + def list_images(self, location=None): + result = self.connection.request('/api/block_templates.json') + images = [] + for image in result.object: + images.extend([self._to_image(image)]) + + return images + + def create_node(self, **kwargs): + headers = { 'Content-Type': 'application/x-www-form-urlencoded' } + size = kwargs["size"] + + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + try: + auth = kwargs['auth'] + except Exception: + raise Exception("SSH public key or password required.") + + data = { + 'hostname': name, + 'product': size.id, + 
'template': image.id + } + + ssh = None + password = None + + if isinstance(auth, NodeAuthSSHKey): + ssh = auth.pubkey + data.update(ssh_public_key=ssh) + elif isinstance(auth, NodeAuthPassword): + password = auth.password + data.update(password=password) + + if "ex_username" in kwargs: + data.update(username=kwargs["ex_username"]) + + if not ssh and not password: + raise Exception("SSH public key or password required.") + + params = urllib.urlencode(data) + result = self.connection.request('/api/blocks.json', headers=headers, data=params, method='POST') + node = self._to_node(result.object) + return node + + def destroy_node(self, node): + """ + Destroy node by passing in the node object + """ + url = '/api/blocks/%s.json' % (node.id) + result = self.connection.request(url, method='DELETE') + + return result.status == 200 + + def list_locations(self): + return [NodeLocation(0, "Blue Box Seattle US", 'US', self)] + + def reboot_node(self, node): + url = '/api/blocks/%s/reboot.json' % (node.id) + result = self.connection.request(url, method="PUT") + return result.status == 200 + + def _to_node(self, vm): + state = NODE_STATE_MAP[vm.get('status', NodeState.UNKNOWN)] + n = Node(id=vm['id'], + name=vm['hostname'], + state=state, + public_ip=[ ip['address'] for ip in vm['ips'] ], + private_ip=[], + extra={'storage':vm['storage'], 'cpu':vm['cpu']}, + driver=self.connection.driver) + return n + + def _to_image(self, image): + image = NodeImage(id=image['id'], + name=image['description'], + driver=self.connection.driver) + return image diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/brightbox.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/brightbox.py new file mode 100644 index 0000000000000000000000000000000000000000..1312bb6569504b907d4955ab6441416d92f1b682 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/brightbox.py @@ -0,0 +1,221 @@ +# Licensed to the Apache Software 
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Brightbox Driver
"""
import httplib
import base64

from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation

try:
    import json
except ImportError:
    import simplejson as json

API_VERSION = '1.0'


class BrightboxResponse(Response):
    def success(self):
        # Any 2xx or 3xx answer counts as success.
        return self.status >= 200 and self.status < 400

    def parse_body(self):
        # Only decode JSON bodies; anything else is returned verbatim.
        if self.headers['content-type'].split('; ')[0] == 'application/json' and len(self.body) > 0:
            return json.loads(self.body)
        else:
            return self.body

    def parse_error(self):
        return json.loads(self.body)['error']


class BrightboxConnection(ConnectionUserAndKey):
    """
    Connection class for the Brightbox driver
    """

    host = 'api.gb1.brightbox.com'
    responseCls = BrightboxResponse

    def _fetch_oauth_token(self):
        # Exchange client id / secret for an OAuth access token by talking
        # to the raw httplib connection directly (request() returns None;
        # the answer is fetched with getresponse() below).
        body = json.dumps({'client_id': self.user_id, 'grant_type': 'none'})

        authorization = 'Basic ' + base64.encodestring('%s:%s' % (self.user_id, self.key)).rstrip()

        self.connect()

        response = self.connection.request(method='POST', url='/token', body=body, headers={
            'Host': self.host,
            'User-Agent': self._user_agent(),
            'Authorization': authorization,
            'Content-Type': 'application/json',
            'Content-Length': str(len(body))
        })

        response = self.connection.getresponse()

        if response.status == 200:
            return json.loads(response.read())['access_token']
        else:
            message = '%s (%s)' % (json.loads(response.read())['error'], response.status)

            raise InvalidCredsError, message

    def add_default_headers(self, headers):
        # The token is fetched lazily on the first request and cached on the
        # connection; subsequent requests reuse self.token.
        try:
            headers['Authorization'] = 'OAuth ' + self.token
        except AttributeError:
            self.token = self._fetch_oauth_token()

            headers['Authorization'] = 'OAuth ' + self.token

        return headers

    def encode_data(self, data):
        return json.dumps(data)


class BrightboxNodeDriver(NodeDriver):
    """
    Brightbox node driver
    """

    connectionCls = BrightboxConnection

    type = Provider.BRIGHTBOX
    name = 'Brightbox'

    NODE_STATE_MAP = { 'creating': NodeState.PENDING,
                       'active': NodeState.RUNNING,
                       'inactive': NodeState.UNKNOWN,
                       'deleting': NodeState.UNKNOWN,
                       'deleted': NodeState.TERMINATED,
                       'failed': NodeState.UNKNOWN }

    def _to_node(self, data):
        # Convert an API server document into a libcloud Node.
        return Node(
            id = data['id'],
            name = data['name'],
            state = self.NODE_STATE_MAP[data['status']],
            public_ip = map(lambda cloud_ip: cloud_ip['public_ip'], data['cloud_ips']),
            private_ip = map(lambda interface: interface['ipv4_address'], data['interfaces']),
            driver = self.connection.driver,
            extra = {
                'status': data['status'],
                'interfaces': data['interfaces']
            }
        )

    def _to_image(self, data):
        return NodeImage(
            id = data['id'],
            name = data['name'],
            driver = self,
            extra = {
                'description': data['description'],
                'arch': data['arch']
            }
        )

    def _to_size(self, data):
        # Brightbox does not report bandwidth or price; use placeholders.
        return NodeSize(
            id = data['id'],
            name = data['name'],
            ram = data['ram'],
            disk = data['disk_size'],
            bandwidth = 0,
            price = '',
            driver = self
        )

    def _to_location(self, data):
        return NodeLocation(
data['id'], + name = data['handle'], + country = 'GB', + driver = self + ) + + def _post(self, path, data={}): + headers = {'Content-Type': 'application/json'} + + return self.connection.request(path, data=data, headers=headers, method='POST') + + def create_node(self, **kwargs): + data = { + 'name': kwargs['name'], + 'server_type': kwargs['size'].id, + 'image': kwargs['image'].id, + 'user_data': '' + } + + if kwargs.has_key('location'): + data['zone'] = kwargs['location'].id + else: + data['zone'] = '' + + data = self._post('/%s/servers' % API_VERSION, data).object + + return self._to_node(data) + + def destroy_node(self, node): + response = self.connection.request('/%s/servers/%s' % (API_VERSION, node.id), method='DELETE') + + return response.status == httplib.ACCEPTED + + def list_nodes(self): + data = self.connection.request('/%s/servers' % API_VERSION).object + + return map(self._to_node, data) + + def list_images(self): + data = self.connection.request('/%s/images' % API_VERSION).object + + return map(self._to_image, data) + + def list_sizes(self): + data = self.connection.request('/%s/server_types' % API_VERSION).object + + return map(self._to_size, data) + + def list_locations(self): + data = self.connection.request('/%s/zones' % API_VERSION).object + + return map(self._to_location, data) + + def ex_list_cloud_ips(self): + return self.connection.request('/%s/cloud_ips' % API_VERSION).object + + def ex_create_cloud_ip(self): + return self._post('/%s/cloud_ips' % API_VERSION).object + + def ex_map_cloud_ip(self, cloud_ip_id, interface_id): + response = self._post('/%s/cloud_ips/%s/map' % (API_VERSION, cloud_ip_id), {'interface': interface_id}) + + return response.status == httplib.ACCEPTED + + def ex_unmap_cloud_ip(self, cloud_ip_id): + response = self._post('/%s/cloud_ips/%s/unmap' % (API_VERSION, cloud_ip_id)) + + return response.status == httplib.ACCEPTED + + def ex_destroy_cloud_ip(self, cloud_ip_id): + response = 
        response = self.connection.request('/%s/cloud_ips/%s' % (API_VERSION, cloud_ip_id), method='DELETE')

        return response.status == httplib.OK
diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/cloudsigma.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/cloudsigma.py new file mode 100644 index 0000000000000000000000000000000000000000..ee50ebbac0118c5cfa40f2d7715dac29d8371478 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/cloudsigma.py @@ -0,0 +1,553 @@
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CloudSigma Driver
"""
import re
import time
import base64

from libcloud.utils import str2dicts, str2list, dict2str
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeDriver, NodeSize, Node
from libcloud.compute.base import NodeImage

# API end-points
API_ENDPOINTS = {
    'zrh': {
        'name': 'Zurich',
        'country': 'Switzerland',
        'host': 'api.cloudsigma.com'
    },
}

# Default API end-point for the base connection class.
DEFAULT_ENDPOINT = 'zrh'

# CloudSigma doesn't specify special instance types.
# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work,
# 500 MB to 32000 MB for ram and 1 GB to 1024 GB for hard drive size.
# Plans in this file are based on examples listed on
# http://www.cloudsigma.com/en/pricing/price-schedules
INSTANCE_TYPES = {
    'micro-regular': {
        'id': 'micro-regular',
        'name': 'Micro/Regular instance',
        'cpu': 1100,
        'memory': 640,
        'disk': 50,
        'bandwidth': None,
    },
    'micro-high-cpu': {
        'id': 'micro-high-cpu',
        'name': 'Micro/High CPU instance',
        'cpu': 2200,
        'memory': 640,
        'disk': 80,
        'bandwidth': None,
    },
    'standard-small': {
        'id': 'standard-small',
        'name': 'Standard/Small instance',
        'cpu': 1100,
        'memory': 1741,
        'disk': 50,
        'bandwidth': None,
    },
    'standard-large': {
        'id': 'standard-large',
        'name': 'Standard/Large instance',
        'cpu': 4400,
        'memory': 7680,
        'disk': 250,
        'bandwidth': None,
    },
    'standard-extra-large': {
        'id': 'standard-extra-large',
        'name': 'Standard/Extra Large instance',
        'cpu': 8800,
        'memory': 15360,
        'disk': 500,
        'bandwidth': None,
    },
    'high-memory-extra-large': {
        'id': 'high-memory-extra-large',
        'name': 'High Memory/Extra Large instance',
        'cpu': 7150,
        'memory': 17510,
        'disk': 250,
        'bandwidth': None,
    },
    'high-memory-double-extra-large': {
        'id': 'high-memory-double-extra-large',
        'name': 'High Memory/Double Extra Large instance',
        'cpu': 14300,
        'memory': 32768,
        'disk': 500,
        'bandwidth': None,
    },
    'high-cpu-medium': {
        'id': 'high-cpu-medium',
        'name': 'High CPU/Medium instance',
        'cpu': 5500,
        'memory': 1741,
        'disk': 150,
        'bandwidth': None,
    },
    'high-cpu-extra-large': {
        'id': 'high-cpu-extra-large',
        'name': 'High CPU/Extra Large instance',
        'cpu': 20000,
        'memory': 7168,
        'disk': 500,
        'bandwidth': None,
    }
}

NODE_STATE_MAP = {
    'active': NodeState.RUNNING,
    'stopped': NodeState.TERMINATED,
    'dead': NodeState.TERMINATED,
NodeState.TERMINATED, + 'dumped': NodeState.TERMINATED, +} + +# Default timeout (in seconds) for the drive imaging process +IMAGING_TIMEOUT = 20 * 60 + +class CloudSigmaException(Exception): + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + +class CloudSigmaInsufficientFundsException(Exception): + def __repr__(self): + return "" % (self.args[0]) + +class CloudSigmaResponse(Response): + def success(self): + if self.status == 401: + raise InvalidCredsError() + + return self.status >= 200 and self.status <= 299 + + def parse_body(self): + if not self.body: + return self.body + + return str2dicts(self.body) + + def parse_error(self): + return 'Error: %s' % (self.body.replace('errors:', '').strip()) + +class CloudSigmaNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, self.disk, self.bandwidth, + self.price, self.driver.name)) + +class CloudSigmaBaseConnection(ConnectionUserAndKey): + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + responseCls = CloudSigmaResponse + + def add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json' + + headers['Authorization'] = 'Basic %s' % (base64.b64encode('%s:%s' % (self.user_id, self.key))) + + return headers + +class CloudSigmaBaseNodeDriver(NodeDriver): + type = Provider.CLOUDSIGMA + name = 'CloudSigma' + connectionCls = CloudSigmaBaseConnection + + def reboot_node(self, node): + """ + Reboot a node. + + Because Cloudsigma API does not provide native reboot call, it's emulated using stop and start. 
+ """ + node = self._get_node(node.id) + state = node.state + + if state == NodeState.RUNNING: + stopped = self.ex_stop_node(node) + else: + stopped = True + + if not stopped: + raise CloudSigmaException('Could not stop node with id %s' % (node.id)) + + success = self.ex_start_node(node) + + return success + + def destroy_node(self, node): + """ + Destroy a node (all the drives associated with it are NOT destroyed). + + If a node is still running, it's stopped before it's destroyed. + """ + node = self._get_node(node.id) + state = node.state + + # Node cannot be destroyed while running so it must be stopped first + if state == NodeState.RUNNING: + stopped = self.ex_stop_node(node) + else: + stopped = True + + if not stopped: + raise CloudSigmaException('Could not stop node with id %s' % (node.id)) + + response = self.connection.request(action = '/servers/%s/destroy' % (node.id), + method = 'POST') + return response.status == 204 + + def list_images(self, location=None): + """ + Return a list of available standard images (this call might take up to 15 seconds to return). + """ + response = self.connection.request(action = '/drives/standard/info').object + + images = [] + for value in response: + if value.get('type'): + if value['type'] == 'disk': + image = NodeImage(id = value['drive'], name = value['name'], driver = self.connection.driver, + extra = {'size': value['size']}) + images.append(image) + + return images + + def list_sizes(self, location = None): + """ + Return a list of available node sizes. + """ + sizes = [] + for key, value in INSTANCE_TYPES.iteritems(): + size = CloudSigmaNodeSize(id = value['id'], name = value['name'], + cpu = value['cpu'], ram = value['memory'], + disk = value['disk'], bandwidth = value['bandwidth'], + price = self._get_size_price(size_id=key), + driver = self.connection.driver) + sizes.append(size) + + return sizes + + def list_nodes(self): + """ + Return a list of nodes. 
+ """ + response = self.connection.request(action = '/servers/info').object + + nodes = [] + for data in response: + node = self._to_node(data) + if node: + nodes.append(node) + return nodes + + def create_node(self, **kwargs): + """ + Creates a CloudSigma instance + + See L{NodeDriver.create_node} for more keyword args. + + @keyword name: String with a name for this new node (required) + @type name: C{string} + + @keyword smp: Number of virtual processors or None to calculate based on the cpu speed + @type smp: C{int} + + @keyword nic_model: e1000, rtl8139 or virtio (is not specified, e1000 is used) + @type nic_model: C{string} + + @keyword vnc_password: If not set, VNC access is disabled. + @type vnc_password: C{bool} + """ + size = kwargs['size'] + image = kwargs['image'] + smp = kwargs.get('smp', 'auto') + nic_model = kwargs.get('nic_model', 'e1000') + vnc_password = kwargs.get('vnc_password', None) + + if nic_model not in ['e1000', 'rtl8139', 'virtio']: + raise CloudSigmaException('Invalid NIC model specified') + + drive_data = {} + drive_data.update({'name': kwargs['name'], 'size': '%sG' % (kwargs['size'].disk)}) + + response = self.connection.request(action = '/drives/%s/clone' % image.id, data = dict2str(drive_data), + method = 'POST').object + + if not response: + raise CloudSigmaException('Drive creation failed') + + drive_uuid = response[0]['drive'] + + response = self.connection.request(action = '/drives/%s/info' % (drive_uuid)).object + imaging_start = time.time() + while response[0].has_key('imaging'): + response = self.connection.request(action = '/drives/%s/info' % (drive_uuid)).object + elapsed_time = time.time() - imaging_start + if response[0].has_key('imaging') and elapsed_time >= IMAGING_TIMEOUT: + raise CloudSigmaException('Drive imaging timed out') + time.sleep(1) + + node_data = {} + node_data.update({'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram, 'ide:0:0': drive_uuid, + 'boot': 'ide:0:0', 'smp': smp}) + 
node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) + + if vnc_password: + node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) + + response = self.connection.request(action = '/servers/create', data = dict2str(node_data), + method = 'POST').object + + if not isinstance(response, list): + response = [ response ] + + node = self._to_node(response[0]) + if node is None: + # Insufficient funds, destroy created drive + self.ex_drive_destroy(drive_uuid) + raise CloudSigmaInsufficientFundsException('Insufficient funds, node creation failed') + + # Start the node after it has been created + started = self.ex_start_node(node) + + if started: + node.state = NodeState.RUNNING + + return node + + def ex_destroy_node_and_drives(self, node): + """ + Destroy a node and all the drives associated with it. + """ + node = self._get_node_info(node) + + drive_uuids = [] + for key, value in node.iteritems(): + if (key.startswith('ide:') or key.startswith('scsi') or key.startswith('block')) and \ + not (key.endswith(':bytes') or key.endswith(':requests') or key.endswith('media')): + drive_uuids.append(value) + + node_destroyed = self.destroy_node(self._to_node(node)) + + if not node_destroyed: + return False + + for drive_uuid in drive_uuids: + self.ex_drive_destroy(drive_uuid) + + return True + + def ex_static_ip_list(self): + """ + Return a list of available static IP addresses. + """ + response = self.connection.request(action = '/resources/ip/list', method = 'GET') + + if response.status != 200: + raise CloudSigmaException('Could not retrieve IP list') + + ips = str2list(response.body) + return ips + + def ex_drives_list(self): + """ + Return a list of all the available drives. + """ + response = self.connection.request(action = '/drives/info', method = 'GET') + + result = str2dicts(response.body) + return result + + def ex_static_ip_create(self): + """ + Create a new static IP address. 
+ """ + response = self.connection.request(action = '/resources/ip/create', method = 'GET') + + result = str2dicts(response.body) + return result + + def ex_static_ip_destroy(self, ip_address): + """ + Destroy a static IP address. + """ + response = self.connection.request(action = '/resources/ip/%s/destroy' % (ip_address), method = 'GET') + + return response.status == 204 + + def ex_drive_destroy(self, drive_uuid): + """ + Destroy a drive with a specified uuid. + If the drive is currently mounted an exception is thrown. + """ + response = self.connection.request(action = '/drives/%s/destroy' % (drive_uuid), method = 'POST') + + return response.status == 204 + + + def ex_set_node_configuration(self, node, **kwargs): + """ + Update a node configuration. + Changing most of the parameters requires node to be stopped. + """ + valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', '^boot$', '^nic:0:model$', '^nic:0:dhcp', + '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', '^vnc:ip$', '^vnc:password$', '^vnc:tls', + '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') + + invalid_keys = [] + for key in kwargs.keys(): + matches = False + for regex in valid_keys: + if re.match(regex, key): + matches = True + break + if not matches: + invalid_keys.append(key) + + if invalid_keys: + raise CloudSigmaException('Invalid configuration key specified: %s' % (',' .join(invalid_keys))) + + response = self.connection.request(action = '/servers/%s/set' % (node.id), data = dict2str(kwargs), + method = 'POST') + + return (response.status == 200 and response.body != '') + + def ex_start_node(self, node): + """ + Start a node. + """ + response = self.connection.request(action = '/servers/%s/start' % (node.id), + method = 'POST') + + return response.status == 200 + + def ex_stop_node(self, node): + """ + Stop (shutdown) a node. 
+ """ + response = self.connection.request(action = '/servers/%s/stop' % (node.id), + method = 'POST') + return response.status == 204 + + def ex_shutdown_node(self, node): + """ + Stop (shutdown) a node. + """ + return self.ex_stop_node(node) + + def ex_destroy_drive(self, drive_uuid): + """ + Destroy a drive. + """ + response = self.connection.request(action = '/drives/%s/destroy' % (drive_uuid), + method = 'POST') + return response.status == 204 + + def _to_node(self, data): + if data: + try: + state = NODE_STATE_MAP[data['status']] + except KeyError: + state = NodeState.UNKNOWN + + if 'server' not in data: + # Response does not contain server UUID if the server + # creation failed because of insufficient funds. + return None + + public_ip = [] + if data.has_key('nic:0:dhcp'): + if isinstance(data['nic:0:dhcp'], list): + public_ip = data['nic:0:dhcp'] + else: + public_ip = [data['nic:0:dhcp']] + + extra = {} + extra_keys = [ ('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'), ('status', 'str') ] + for key, value_type in extra_keys: + if data.has_key(key): + value = data[key] + + if value_type == 'int': + value = int(value) + elif value_type == 'auto': + try: + value = int(value) + except ValueError: + pass + + extra.update({key: value}) + + if data.has_key('vnc:ip') and data.has_key('vnc:password'): + extra.update({'vnc_ip': data['vnc:ip'], 'vnc_password': data['vnc:password']}) + + node = Node(id = data['server'], name = data['name'], state = state, + public_ip = public_ip, private_ip = None, driver = self.connection.driver, + extra = extra) + + return node + return None + + def _get_node(self, node_id): + nodes = self.list_nodes() + node = [node for node in nodes if node.id == node.id] + + if not node: + raise CloudSigmaException('Node with id %s does not exist' % (node_id)) + + return node[0] + + def _get_node_info(self, node): + response = self.connection.request(action = '/servers/%s/info' % (node.id)) + + result = str2dicts(response.body) + return result[0] + 
+class CloudSigmaZrhConnection(CloudSigmaBaseConnection): + """ + Connection class for the CloudSigma driver for the Zurich end-point + """ + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + +class CloudSigmaZrhNodeDriver(CloudSigmaBaseNodeDriver): + """ + CloudSigma node driver for the Zurich end-point + """ + connectionCls = CloudSigmaZrhConnection + api_name = 'cloudsigma_zrh' diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/dreamhost.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/dreamhost.py new file mode 100644 index 0000000000000000000000000000000000000000..fb857b640a9bc1e7546cc7ce1cc77ceb738979a2 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/dreamhost.py @@ -0,0 +1,249 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +DreamHost Driver +""" + +try: + import json +except: + import simplejson as json + +import copy + +from libcloud.common.base import ConnectionKey, Response +from libcloud.common.types import InvalidCredsError +from libcloud.compute.base import Node, NodeDriver, NodeSize +from libcloud.compute.base import NodeImage +from libcloud.compute.types import Provider, NodeState + +# DreamHost Private Servers can be resized on the fly, but Libcloud doesn't +# currently support extensions to its interface, so we'll put some basic sizes +# in for node creation. + +DH_PS_SIZES = { + 'minimum': { + 'id' : 'minimum', + 'name' : 'Minimum DH PS size', + 'ram' : 300, + 'disk' : None, + 'bandwidth' : None + }, + 'maximum': { + 'id' : 'maximum', + 'name' : 'Maximum DH PS size', + 'ram' : 4000, + 'disk' : None, + 'bandwidth' : None + }, + 'default': { + 'id' : 'default', + 'name' : 'Default DH PS size', + 'ram' : 2300, + 'disk' : None, + 'bandwidth' : None + }, + 'low': { + 'id' : 'low', + 'name' : 'DH PS with 1GB RAM', + 'ram' : 1000, + 'disk' : None, + 'bandwidth' : None + }, + 'high': { + 'id' : 'high', + 'name' : 'DH PS with 3GB RAM', + 'ram' : 3000, + 'disk' : None, + 'bandwidth' : None + }, +} + + +class DreamhostAPIException(Exception): + + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + + +class DreamhostResponse(Response): + """ + Response class for DreamHost PS + """ + + def parse_body(self): + resp = json.loads(self.body) + if resp['result'] != 'success': + raise Exception(self._api_parse_error(resp)) + return resp['data'] + + def parse_error(self): + raise Exception + + def _api_parse_error(self, response): + if 'data' in response: + if response['data'] == 'invalid_api_key': + raise InvalidCredsError( + "Oops! 
You've entered an invalid API key") + else: + raise DreamhostAPIException(response['data']) + else: + raise DreamhostAPIException("Unknown problem: %s" % (self.body)) + +class DreamhostConnection(ConnectionKey): + """ + Connection class to connect to DreamHost's API servers + """ + + host = 'api.dreamhost.com' + responseCls = DreamhostResponse + format = 'json' + + def add_default_params(self, params): + """ + Add key and format parameters to the request. Eventually should add + unique_id to prevent re-execution of a single request. + """ + params['key'] = self.key + params['format'] = self.format + #params['unique_id'] = generate_unique_id() + return params + + +class DreamhostNodeDriver(NodeDriver): + """ + Node Driver for DreamHost PS + """ + type = Provider.DREAMHOST + api_name = 'dreamhost' + name = "Dreamhost" + connectionCls = DreamhostConnection + + _sizes = DH_PS_SIZES + + def create_node(self, **kwargs): + """Create a new Dreamhost node + + See L{NodeDriver.create_node} for more keyword args. 
+ + @keyword ex_movedata: Copy all your existing users to this new PS + @type ex_movedata: C{str} + """ + size = kwargs['size'].ram + params = { + 'cmd' : 'dreamhost_ps-add_ps', + 'movedata' : kwargs.get('movedata', 'no'), + 'type' : kwargs['image'].name, + 'size' : size + } + data = self.connection.request('/', params).object + return Node( + id = data['added_web'], + name = data['added_web'], + state = NodeState.PENDING, + public_ip = [], + private_ip = [], + driver = self.connection.driver, + extra = { + 'type' : kwargs['image'].name + } + ) + + def destroy_node(self, node): + params = { + 'cmd' : 'dreamhost_ps-remove_ps', + 'ps' : node.id + } + try: + return self.connection.request('/', params).success() + except DreamhostAPIException: + return False + + def reboot_node(self, node): + params = { + 'cmd' : 'dreamhost_ps-reboot', + 'ps' : node.id + } + try: + return self.connection.request('/', params).success() + except DreamhostAPIException: + return False + + def list_nodes(self, **kwargs): + data = self.connection.request( + '/', {'cmd': 'dreamhost_ps-list_ps'}).object + return [self._to_node(n) for n in data] + + def list_images(self, **kwargs): + data = self.connection.request( + '/', {'cmd': 'dreamhost_ps-list_images'}).object + images = [] + for img in data: + images.append(NodeImage( + id = img['image'], + name = img['image'], + driver = self.connection.driver + )) + return images + + def list_sizes(self, **kwargs): + sizes = [] + for key, values in self._sizes.iteritems(): + attributes = copy.deepcopy(values) + attributes.update({ 'price': self._get_size_price(size_id=key) }) + sizes.append(NodeSize(driver=self.connection.driver, **attributes)) + + return sizes + + def list_locations(self, **kwargs): + raise NotImplementedError( + 'You cannot select a location for ' + 'DreamHost Private Servers at this time.') + + ############################################ + # Private Methods (helpers and extensions) # + ############################################ + 
def _resize_node(self, node, size): + if (size < 300 or size > 4000): + return False + + params = { + 'cmd' : 'dreamhost_ps-set_size', + 'ps' : node.id, + 'size' : size + } + try: + return self.connection.request('/', params).success() + except DreamhostAPIException: + return False + + def _to_node(self, data): + """ + Convert the data from a DreamhostResponse object into a Node + """ + return Node( + id = data['ps'], + name = data['ps'], + state = NodeState.UNKNOWN, + public_ip = [data['ip']], + private_ip = [], + driver = self.connection.driver, + extra = { + 'current_size' : data['memory_mb'], + 'account_id' : data['account_id'], + 'type' : data['type']}) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/dummy.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..57485c4e1621436cbf45f7928deae28ff4f5c512 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/dummy.py @@ -0,0 +1,297 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Dummy Driver + +@note: This driver is out of date +""" +import uuid +import socket +import struct + +from libcloud.base import ConnectionKey, NodeDriver, NodeSize, NodeLocation +from libcloud.compute.base import NodeImage, Node +from libcloud.compute.types import Provider,NodeState + +class DummyConnection(ConnectionKey): + """ + Dummy connection class + """ + + def connect(self, host=None, port=None): + pass + +class DummyNodeDriver(NodeDriver): + """ + Dummy node driver + + This is a fake driver which appears to always create or destroy + nodes successfully. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node=driver.create_node() + >>> node.public_ip[0] + '127.0.0.3' + >>> node.name + 'dummy-3' + + If the credentials you give convert to an integer then the next + node to be created will be one higher. + + Each time you create a node you will get a different IP address. + + >>> driver = DummyNodeDriver(22) + >>> node=driver.create_node() + >>> node.name + 'dummy-23' + + """ + + name = "Dummy Node Provider" + type = Provider.DUMMY + + def __init__(self, creds): + self.creds = creds + try: + num = int(creds) + except ValueError: + num = None + if num: + self.nl = [] + startip = _ip_to_int('127.0.0.1') + for i in xrange(num): + ip = _int_to_ip(startip + i) + self.nl.append( + Node(id=i, + name='dummy-%d' % (i), + state=NodeState.RUNNING, + public_ip=[ip], + private_ip=[], + driver=self, + extra={'foo': 'bar'}) + ) + else: + self.nl = [ + Node(id=1, + name='dummy-1', + state=NodeState.RUNNING, + public_ip=['127.0.0.1'], + private_ip=[], + driver=self, + extra={'foo': 'bar'}), + Node(id=2, + name='dummy-2', + state=NodeState.RUNNING, + public_ip=['127.0.0.1'], + private_ip=[], + driver=self, + extra={'foo': 'bar'}), + ] + self.connection = DummyConnection(self.creds) + + def get_uuid(self, unique_field=None): + return str(uuid.uuid4()) + + def list_nodes(self): + """ + List the nodes known to a 
particular driver; + There are two default nodes created at the beginning + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node_list=driver.list_nodes() + >>> sorted([node.name for node in node_list ]) + ['dummy-1', 'dummy-2'] + + each item in the list returned is a node object from which you + can carry out any node actions you wish + + >>> node_list[0].reboot() + True + + As more nodes are added, list_nodes will return them + + >>> node=driver.create_node() + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2', 'dummy-3'] + """ + return self.nl + + def reboot_node(self, node): + """ + Sets the node state to rebooting; in this dummy driver always + returns True as if the reboot had been successful. + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> node=driver.create_node() + >>> from libcloud.compute.types import NodeState + >>> node.state == NodeState.RUNNING + True + >>> node.state == NodeState.REBOOTING + False + >>> driver.reboot_node(node) + True + >>> node.state == NodeState.REBOOTING + True + + Please note, dummy nodes never recover from the reboot. 
+ """ + + node.state = NodeState.REBOOTING + return True + + def destroy_node(self, node): + """ + Sets the node state to terminated and removes it from the node list + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> from libcloud.compute.types import NodeState + >>> node = [node for node in driver.list_nodes() if node.name == 'dummy-1'][0] + >>> node.state == NodeState.RUNNING + True + >>> driver.destroy_node(node) + True + >>> node.state == NodeState.RUNNING + False + >>> [node for node in driver.list_nodes() if node.name == 'dummy-1'] + [] + """ + + node.state = NodeState.TERMINATED + self.nl.remove(node) + return True + + def list_images(self, location=None): + """ + Returns a list of images as a cloud provider might have + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([image.name for image in driver.list_images()]) + ['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10'] + """ + return [ + NodeImage(id=1, name="Ubuntu 9.10", driver=self), + NodeImage(id=2, name="Ubuntu 9.04", driver=self), + NodeImage(id=3, name="Slackware 4", driver=self), + ] + + def list_sizes(self, location=None): + """ + Returns a list of node sizes as a cloud provider might have + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([size.ram for size in driver.list_sizes()]) + [128, 512, 4096, 8192] + """ + + return [ + NodeSize(id=1, + name="Small", + ram=128, + disk=4, + bandwidth=500, + price=4, + driver=self), + NodeSize(id=2, + name="Medium", + ram=512, + disk=16, + bandwidth=1500, + price=8, + driver=self), + NodeSize(id=3, + name="Big", + ram=4096, + disk=32, + bandwidth=2500, + price=32, + driver=self), + NodeSize(id=4, + name="XXL Big", + ram=4096*2, + disk=32*4, + bandwidth=2500*3, + price=32*2, + driver=self), + ] + + def list_locations(self): + """ + Returns a list of locations of nodes + + >>> from 
libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([loc.name + " in " + loc.country for loc in driver.list_locations()]) + ['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"] + """ + return [ + NodeLocation(id=1, + name="Paul's Room", + country='US', + driver=self), + NodeLocation(id=2, + name="London Loft", + country='GB', + driver=self), + NodeLocation(id=3, + name="Island Datacenter", + country='FJ', + driver=self), + ] + + def create_node(self, **kwargs): + """ + Creates a dummy node; the node id is equal to the number of + nodes in the node list + + >>> from libcloud.compute.drivers.dummy import DummyNodeDriver + >>> driver = DummyNodeDriver(0) + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2'] + >>> nodeA = driver.create_node() + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2', 'dummy-3'] + >>> driver.create_node().name + 'dummy-4' + >>> driver.destroy_node(nodeA) + True + >>> sorted([node.name for node in driver.list_nodes()]) + ['dummy-1', 'dummy-2', 'dummy-4'] + """ + l = len(self.nl) + 1 + n = Node(id=l, + name='dummy-%d' % l, + state=NodeState.RUNNING, + public_ip=['127.0.0.%d' % l], + private_ip=[], + driver=self, + extra={'foo': 'bar'}) + self.nl.append(n) + return n + +def _ip_to_int(ip): + return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0]) + +def _int_to_ip(ip): + return socket.inet_ntoa(struct.pack('I', socket.ntohl(ip))) + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ec2.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ec2.py new file mode 100644 index 0000000000000000000000000000000000000000..7805cd0d6538e69cbb5330a1468f52b8628cd75e --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ec2.py @@ -0,0 +1,1015 @@ +# Licensed to the Apache Software 
Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Amazon EC2 driver +""" +import base64 +import hmac +import os +import time +import urllib +import copy + +from hashlib import sha256 +from xml.etree import ElementTree as ET + +from libcloud.utils import fixxpath, findtext, findattr, findall +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.aws import AWSBaseResponse +from libcloud.common.types import InvalidCredsError, MalformedResponseError, LibcloudError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize +from libcloud.compute.base import NodeImage + +EC2_US_EAST_HOST = 'ec2.us-east-1.amazonaws.com' +EC2_US_WEST_HOST = 'ec2.us-west-1.amazonaws.com' +EC2_EU_WEST_HOST = 'ec2.eu-west-1.amazonaws.com' +EC2_AP_SOUTHEAST_HOST = 'ec2.ap-southeast-1.amazonaws.com' +EC2_AP_NORTHEAST_HOST = 'ec2.ap-northeast-1.amazonaws.com' + +API_VERSION = '2010-08-31' + +NAMESPACE = "http://ec2.amazonaws.com/doc/%s/" % (API_VERSION) + +""" +Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them. 
+From http://aws.amazon.com/ec2/instance-types/ +""" +EC2_INSTANCE_TYPES = { + 't1.micro': { + 'id': 't1.micro', + 'name': 'Micro Instance', + 'ram': 613, + 'disk': 15, + 'bandwidth': None + }, + 'm1.small': { + 'id': 'm1.small', + 'name': 'Small Instance', + 'ram': 1740, + 'disk': 160, + 'bandwidth': None + }, + 'm1.large': { + 'id': 'm1.large', + 'name': 'Large Instance', + 'ram': 7680, + 'disk': 850, + 'bandwidth': None + }, + 'm1.xlarge': { + 'id': 'm1.xlarge', + 'name': 'Extra Large Instance', + 'ram': 15360, + 'disk': 1690, + 'bandwidth': None + }, + 'c1.medium': { + 'id': 'c1.medium', + 'name': 'High-CPU Medium Instance', + 'ram': 1740, + 'disk': 350, + 'bandwidth': None + }, + 'c1.xlarge': { + 'id': 'c1.xlarge', + 'name': 'High-CPU Extra Large Instance', + 'ram': 7680, + 'disk': 1690, + 'bandwidth': None + }, + 'm2.xlarge': { + 'id': 'm2.xlarge', + 'name': 'High-Memory Extra Large Instance', + 'ram': 17510, + 'disk': 420, + 'bandwidth': None + }, + 'm2.2xlarge': { + 'id': 'm2.2xlarge', + 'name': 'High-Memory Double Extra Large Instance', + 'ram': 35021, + 'disk': 850, + 'bandwidth': None + }, + 'm2.4xlarge': { + 'id': 'm2.4xlarge', + 'name': 'High-Memory Quadruple Extra Large Instance', + 'ram': 70042, + 'disk': 1690, + 'bandwidth': None + }, + 'cg1.4xlarge': { + 'id': 'cg1.4xlarge', + 'name': 'Cluster GPU Quadruple Extra Large Instance', + 'ram': 22528, + 'disk': 1690, + 'bandwidth': None + }, + 'cc1.4xlarge': { + 'id': 'cc1.4xlarge', + 'name': 'Cluster Compute Quadruple Extra Large Instance', + 'ram': 23552, + 'disk': 1690, + 'bandwidth': None + }, +} + +CLUSTER_INSTANCES_IDS = [ 'cg1.4xlarge', 'cc1.4xlarge' ] + +EC2_US_EAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_US_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_EU_WEST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_AP_SOUTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) +EC2_AP_NORTHEAST_INSTANCE_TYPES = dict(EC2_INSTANCE_TYPES) + +class EC2NodeLocation(NodeLocation): + def __init__(self, id, 
name, country, driver, availability_zone): + super(EC2NodeLocation, self).__init__(id, name, country, driver) + self.availability_zone = availability_zone + + def __repr__(self): + return (('') + % (self.id, self.name, self.country, + self.availability_zone.name, self.driver.name)) + +class EC2Response(AWSBaseResponse): + """ + EC2 specific response parsing and error handling. + """ + def parse_error(self): + err_list = [] + # Okay, so for Eucalyptus, you can get a 403, with no body, + # if you are using the wrong user/password. + msg = "Failure: 403 Forbidden" + if self.status == 403 and self.body[:len(msg)] == msg: + raise InvalidCredsError(msg) + + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError("Failed to parse XML", body=self.body, driver=EC2NodeDriver) + + for err in body.findall('Errors/Error'): + code, message = err.getchildren() + err_list.append("%s: %s" % (code.text, message.text)) + if code.text == "InvalidClientTokenId": + raise InvalidCredsError(err_list[-1]) + if code.text == "SignatureDoesNotMatch": + raise InvalidCredsError(err_list[-1]) + if code.text == "AuthFailure": + raise InvalidCredsError(err_list[-1]) + if code.text == "OptInRequired": + raise InvalidCredsError(err_list[-1]) + if code.text == "IdempotentParameterMismatch": + raise IdempotentParamError(err_list[-1]) + return "\n".join(err_list) + +class EC2Connection(ConnectionUserAndKey): + """ + Repersents a single connection to the EC2 Endpoint + """ + + host = EC2_US_EAST_HOST + responseCls = EC2Response + + def add_default_params(self, params): + params['SignatureVersion'] = '2' + params['SignatureMethod'] = 'HmacSHA256' + params['AWSAccessKeyId'] = self.user_id + params['Version'] = API_VERSION + params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', + time.gmtime()) + params['Signature'] = self._get_aws_auth_param(params, self.key, self.action) + return params + + def _get_aws_auth_param(self, params, secret_key, path='/'): + """ + Creates the signature 
required for AWS, per + http://bit.ly/aR7GaQ [docs.amazonwebservices.com]: + + StringToSign = HTTPVerb + "\n" + + ValueOfHostHeaderInLowercase + "\n" + + HTTPRequestURI + "\n" + + CanonicalizedQueryString + """ + keys = params.keys() + keys.sort() + pairs = [] + for key in keys: + pairs.append(urllib.quote(key, safe='') + '=' + + urllib.quote(params[key], safe='-_~')) + + qs = '&'.join(pairs) + string_to_sign = '\n'.join(('GET', self.host, path, qs)) + + b64_hmac = base64.b64encode( + hmac.new(secret_key, string_to_sign, digestmod=sha256).digest() + ) + return b64_hmac + +class ExEC2AvailabilityZone(object): + """ + Extension class which stores information about an EC2 availability zone. + + Note: This class is EC2 specific. + """ + def __init__(self, name, zone_state, region_name): + self.name = name + self.zone_state = zone_state + self.region_name = region_name + + def __repr__(self): + return (('') + % (self.name, self.zone_state, self.region_name)) + +class EC2NodeDriver(NodeDriver): + """ + Amazon EC2 node driver + """ + + connectionCls = EC2Connection + type = Provider.EC2 + api_name = 'ec2_us_east' + name = 'Amazon EC2 (us-east-1)' + friendly_name = 'Amazon US N. Virginia' + country = 'US' + region_name = 'us-east-1' + path = '/' + + _instance_types = EC2_US_EAST_INSTANCE_TYPES + + NODE_STATE_MAP = { + 'pending': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'shutting-down': NodeState.TERMINATED, + 'terminated': NodeState.TERMINATED + } + + def _pathlist(self, key, arr): + """ + Converts a key and an array of values into AWS query param format. 
+ """ + params = {} + i = 0 + for value in arr: + i += 1 + params["%s.%s" % (key, i)] = value + return params + + def _get_boolean(self, element): + tag = "{%s}%s" % (NAMESPACE, 'return') + return element.findtext(tag) == 'true' + + def _get_terminate_boolean(self, element): + status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name')) + return any([ term_status == status + for term_status + in ('shutting-down', 'terminated') ]) + + def _to_nodes(self, object, xpath, groups=None): + return [ self._to_node(el, groups=groups) + for el in object.findall(fixxpath(xpath=xpath, namespace=NAMESPACE)) ] + + def _to_node(self, element, groups=None): + try: + state = self.NODE_STATE_MAP[ + findattr(element=element, xpath="instanceState/name", + namespace=NAMESPACE) + ] + except KeyError: + state = NodeState.UNKNOWN + + n = Node( + id=findtext(element=element, xpath='instanceId', + namespace=NAMESPACE), + name=findtext(element=element, xpath='instanceId', + namespace=NAMESPACE), + state=state, + public_ip=[findtext(element=element, xpath='ipAddress', + namespace=NAMESPACE)], + private_ip=[findtext(element=element, xpath='privateIpAddress', + namespace=NAMESPACE)], + driver=self.connection.driver, + extra={ + 'dns_name': findattr(element=element, xpath="dnsName", + namespace=NAMESPACE), + 'instanceId': findattr(element=element, xpath="instanceId", + namespace=NAMESPACE), + 'imageId': findattr(element=element, xpath="imageId", + namespace=NAMESPACE), + 'private_dns': findattr(element=element, xpath="privateDnsName", + namespace=NAMESPACE), + 'status': findattr(element=element, xpath="instanceState/name", + namespace=NAMESPACE), + 'keyname': findattr(element=element, xpath="keyName", + namespace=NAMESPACE), + 'launchindex': findattr(element=element, xpath="amiLaunchIndex", + namespace=NAMESPACE), + 'productcode': + [p.text for p in findall(element=element, + xpath="productCodesSet/item/productCode", + namespace=NAMESPACE + )], + 'instancetype': findattr(element=element, 
xpath="instanceType", + namespace=NAMESPACE), + 'launchdatetime': findattr(element=element, xpath="launchTime", + namespace=NAMESPACE), + 'availability': findattr(element, xpath="placement/availabilityZone", + namespace=NAMESPACE), + 'kernelid': findattr(element=element, xpath="kernelId", + namespace=NAMESPACE), + 'ramdiskid': findattr(element=element, xpath="ramdiskId", + namespace=NAMESPACE), + 'clienttoken' : findattr(element=element, xpath="clientToken", + namespace=NAMESPACE), + 'groups': groups + } + ) + return n + + def _to_images(self, object): + return [ self._to_image(el) + for el in object.findall( + fixxpath(xpath='imagesSet/item', namespace=NAMESPACE) + ) ] + + def _to_image(self, element): + n = NodeImage(id=findtext(element=element, xpath='imageId', + namespace=NAMESPACE), + name=findtext(element=element, xpath='imageLocation', + namespace=NAMESPACE), + driver=self.connection.driver) + return n + + def list_nodes(self): + params = {'Action': 'DescribeInstances' } + elem=self.connection.request(self.path, params=params).object + nodes=[] + for rs in findall(element=elem, xpath='reservationSet/item', + namespace=NAMESPACE): + groups=[g.findtext('') + for g in findall(element=rs, xpath='groupSet/item/groupId', + namespace=NAMESPACE)] + nodes += self._to_nodes(rs, 'instancesSet/item', groups) + + nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes) + for node in nodes: + node.public_ip.extend(nodes_elastic_ips_mappings[node.id]) + return nodes + + def list_sizes(self, location=None): + # Cluster instances are currently only available in the US - N. 
Virginia Region + include_cluser_instances = self.region_name == 'us-east-1' + sizes = self._get_sizes(include_cluser_instances = + include_cluser_instances) + + return sizes + + def _get_sizes(self, include_cluser_instances=False): + sizes = [] + for key, values in self._instance_types.iteritems(): + if not include_cluser_instances and \ + key in CLUSTER_INSTANCES_IDS: + continue + attributes = copy.deepcopy(values) + attributes.update({'price': self._get_size_price(size_id=key)}) + sizes.append(NodeSize(driver=self, **attributes)) + return sizes + + def list_images(self, location=None): + params = {'Action': 'DescribeImages'} + images = self._to_images( + self.connection.request(self.path, params=params).object + ) + return images + + def list_locations(self): + locations = [] + for index, availability_zone in enumerate(self.ex_list_availability_zones()): + locations.append(EC2NodeLocation(index, + self.friendly_name, + self.country, + self, + availability_zone)) + return locations + + def ex_create_keypair(self, name): + """Creates a new keypair + + @note: This is a non-standard extension API, and + only works for EC2. + + @type name: C{str} + @param name: The name of the keypair to Create. This must be + unique, otherwise an InvalidKeyPair.Duplicate + exception is raised. + """ + params = { + 'Action': 'CreateKeyPair', + 'KeyName': name, + } + response = self.connection.request(self.path, params=params).object + key_material = findtext(element=response, xpath='keyMaterial', + namespace=NAMESPACE) + key_fingerprint = findtext(element=response, xpath='keyFingerprint', + namespace=NAMESPACE) + return { + 'keyMaterial': key_material, + 'keyFingerprint': key_fingerprint, + } + + def ex_import_keypair(self, name, keyfile): + """imports a new public key + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the public key to import. 
This must be unique, + otherwise an InvalidKeyPair.Duplicate exception is raised. + + @type keyfile: C{str} + @param keyfile: The filename with path of the public key to import. + + """ + + base64key = base64.b64encode(open(os.path.expanduser(keyfile)).read()) + + params = {'Action': 'ImportKeyPair', + 'KeyName': name, + 'PublicKeyMaterial': base64key + } + + response = self.connection.request(self.path, params=params).object + key_name = findtext(element=response, xpath='keyName', namespace=NAMESPACE) + key_fingerprint = findtext(element=response, xpath='keyFingerprint', + namespace=NAMESPACE) + return { + 'keyName': key_name, + 'keyFingerprint': key_fingerprint, + } + + def ex_describe_keypairs(self, name): + """Describes a keypiar by name + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the keypair to describe. + + """ + + params = {'Action': 'DescribeKeyPairs', + 'KeyName.1': name + } + + response = self.connection.request(self.path, params=params).object + key_name = findattr(element=response, xpath='keySet/item/keyName', + namespace=NAMESPACE) + return { + 'keyName': key_name + } + + def ex_create_security_group(self, name, description): + """Creates a new Security Group + + @note: This is a non-standard extension API, and only works for EC2. + + @type name: C{str} + @param name: The name of the security group to Create. This must be unique. + + @type description: C{str} + @param description: Human readable description of a Security Group. + """ + params = {'Action': 'CreateSecurityGroup', + 'GroupName': name, + 'GroupDescription': description} + return self.connection.request(self.path, params=params).object + + def ex_authorize_security_group_permissive(self, name): + """Edit a Security Group to allow all traffic. + + @note: This is a non-standard extension API, and only works for EC2. 
+ + @type name: C{str} + @param name: The name of the security group to edit + """ + + results = [] + params = {'Action': 'AuthorizeSecurityGroupIngress', + 'GroupName': name, + 'IpProtocol': 'tcp', + 'FromPort': '0', + 'ToPort': '65535', + 'CidrIp': '0.0.0.0/0'} + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception, e: + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + params['IpProtocol'] = 'udp' + + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception, e: + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + + params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'}) + + try: + results.append( + self.connection.request(self.path, params=params.copy()).object + ) + except Exception, e: + if e.args[0].find("InvalidPermission.Duplicate") == -1: + raise e + return results + + def ex_list_availability_zones(self, only_available=True): + """ + Return a list of L{ExEC2AvailabilityZone} objects for the + current region. + + Note: This is an extension method and is only available for EC2 + driver. 
+ + @keyword only_available: If true, return only availability zones + with state 'available' + @type only_available: C{string} + """ + params = {'Action': 'DescribeAvailabilityZones'} + + if only_available: + params.update({'Filter.0.Name': 'state'}) + params.update({'Filter.0.Value.0': 'available'}) + + params.update({'Filter.1.Name': 'region-name'}) + params.update({'Filter.1.Value.0': self.region_name}) + + result = self.connection.request(self.path, + params=params.copy()).object + + availability_zones = [] + for element in findall(element=result, xpath='availabilityZoneInfo/item', + namespace=NAMESPACE): + name = findtext(element=element, xpath='zoneName', + namespace=NAMESPACE) + zone_state = findtext(element=element, xpath='zoneState', + namespace=NAMESPACE) + region_name = findtext(element=element, xpath='regionName', + namespace=NAMESPACE) + + availability_zone = ExEC2AvailabilityZone( + name=name, + zone_state=zone_state, + region_name=region_name + ) + availability_zones.append(availability_zone) + + return availability_zones + + def ex_describe_tags(self, node): + """ + Return a dictionary of tags for this instance. + + @type node: C{Node} + @param node: Node instance + + @return dict Node tags + """ + params = { 'Action': 'DescribeTags', + 'Filter.0.Name': 'resource-id', + 'Filter.0.Value.0': node.id, + 'Filter.1.Name': 'resource-type', + 'Filter.1.Value.0': 'instance', + } + + result = self.connection.request(self.path, + params=params.copy()).object + + tags = {} + for element in findall(element=result, xpath='tagSet/item', + namespace=NAMESPACE): + key = findtext(element=element, xpath='key', namespace=NAMESPACE) + value = findtext(element=element, xpath='value', namespace=NAMESPACE) + + tags[key] = value + return tags + + def ex_create_tags(self, node, tags): + """ + Create tags for an instance. 
+ + @type node: C{Node} + @param node: Node instance + @param tags: A dictionary or other mapping of strings to strings, + associating tag names with tag values. + """ + if not tags: + return + + params = { 'Action': 'CreateTags', + 'ResourceId.0': node.id } + for i, key in enumerate(tags): + params['Tag.%d.Key' % i] = key + params['Tag.%d.Value' % i] = tags[key] + + self.connection.request(self.path, + params=params.copy()).object + + def ex_delete_tags(self, node, tags): + """ + Delete tags from an instance. + + @type node: C{Node} + @param node: Node instance + @param tags: A dictionary or other mapping of strings to strings, + specifying the tag names and tag values to be deleted. + """ + if not tags: + return + + params = { 'Action': 'DeleteTags', + 'ResourceId.0': node.id } + for i, key in enumerate(tags): + params['Tag.%d.Key' % i] = key + params['Tag.%d.Value' % i] = tags[key] + + self.connection.request(self.path, + params=params.copy()).object + + def ex_describe_addresses(self, nodes): + """ + Return Elastic IP addresses for all the nodes in the provided list. + + @type nodes: C{list} + @param nodes: List of C{Node} instances + + @return dict Dictionary where a key is a node ID and the value is a + list with the Elastic IP addresses associated with this node. 
+ """ + if not nodes: + return {} + + params = { 'Action': 'DescribeAddresses' } + + if len(nodes) == 1: + params.update({ + 'Filter.0.Name': 'instance-id', + 'Filter.0.Value.0': nodes[0].id + }) + + result = self.connection.request(self.path, + params=params.copy()).object + + node_instance_ids = [ node.id for node in nodes ] + nodes_elastic_ip_mappings = {} + + for node_id in node_instance_ids: + nodes_elastic_ip_mappings.setdefault(node_id, []) + for element in findall(element=result, xpath='addressesSet/item', + namespace=NAMESPACE): + instance_id = findtext(element=element, xpath='instanceId', + namespace=NAMESPACE) + ip_address = findtext(element=element, xpath='publicIp', + namespace=NAMESPACE) + + if instance_id not in node_instance_ids: + continue + + nodes_elastic_ip_mappings[instance_id].append(ip_address) + return nodes_elastic_ip_mappings + + def ex_describe_addresses_for_node(self, node): + """ + Return a list of Elastic IP addresses associated with this node. + + @type node: C{Node} + @param node: Node instance + + @return list Elastic IP addresses attached to this node. + """ + node_elastic_ips = self.ex_describe_addresses([node]) + return node_elastic_ips[node.id] + + def ex_modify_instance_attribute(self, node, attributes): + """ + Modify node attributes. + A list of valid attributes can be found at http://goo.gl/gxcj8 + + @type node: C{Node} + @param node: Node instance + + @type attributes: C{dict} + @param attributes: Dictionary with node attributes + + @return bool True on success, False otherwise. + """ + attributes = attributes or {} + attributes.update({'InstanceId': node.id}) + + params = { 'Action': 'ModifyInstanceAttribute' } + params.update(attributes) + + result = self.connection.request(self.path, + params=params.copy()).object + element = findtext(element=result, xpath='return', + namespace=NAMESPACE) + return element == 'true' + + def ex_change_node_size(self, node, new_size): + """ + Change the node size. 
+ Note: Node must be turned of before changing the size. + + @type node: C{Node} + @param node: Node instance + + @type new_size: C{NodeSize} + @param new_size: NodeSize intance + + @return bool True on success, False otherwise. + """ + if 'instancetype' in node.extra: + current_instance_type = node.extra['instancetype'] + + if current_instance_type == new_size.id: + raise ValueError('New instance size is the same as the current one') + + attributes = { 'InstanceType.Value': new_size.id } + return self.ex_modify_instance_attribute(node, attributes) + + def create_node(self, **kwargs): + """Create a new EC2 node + + See L{NodeDriver.create_node} for more keyword args. + Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com] + + @keyword ex_mincount: Minimum number of instances to launch + @type ex_mincount: C{int} + + @keyword ex_maxcount: Maximum number of instances to launch + @type ex_maxcount: C{int} + + @keyword ex_securitygroup: Name of security group + @type ex_securitygroup: C{str} + + @keyword ex_keyname: The name of the key pair + @type ex_keyname: C{str} + + @keyword ex_userdata: User data + @type ex_userdata: C{str} + + @keyword ex_clienttoken: Unique identifier to ensure idempotency + @type ex_clienttoken: C{str} + """ + image = kwargs["image"] + size = kwargs["size"] + params = { + 'Action': 'RunInstances', + 'ImageId': image.id, + 'MinCount': kwargs.get('ex_mincount','1'), + 'MaxCount': kwargs.get('ex_maxcount','1'), + 'InstanceType': size.id + } + + if 'ex_securitygroup' in kwargs: + if not isinstance(kwargs['ex_securitygroup'], list): + kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']] + for sig in range(len(kwargs['ex_securitygroup'])): + params['SecurityGroup.%d' % (sig+1,)] = kwargs['ex_securitygroup'][sig] + + if 'location' in kwargs: + availability_zone = getattr(kwargs['location'], 'availability_zone', + None) + if availability_zone: + if availability_zone.region_name != self.region_name: + raise AttributeError('Invalid 
availability zone: %s' + % (availability_zone.name)) + params['Placement.AvailabilityZone'] = availability_zone.name + + if 'ex_keyname' in kwargs: + params['KeyName'] = kwargs['ex_keyname'] + + if 'ex_userdata' in kwargs: + params['UserData'] = base64.b64encode(kwargs['ex_userdata']) + + if 'ex_clienttoken' in kwargs: + params['ClientToken'] = kwargs['ex_clienttoken'] + + object = self.connection.request(self.path, params=params).object + nodes = self._to_nodes(object, 'instancesSet/item') + + for node in nodes: + self.ex_create_tags(node=node, tags={'Name': kwargs['name']}) + + if len(nodes) == 1: + return nodes[0] + else: + return nodes + + def reboot_node(self, node): + """ + Reboot the node by passing in the node object + """ + params = {'Action': 'RebootInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_boolean(res) + + def destroy_node(self, node): + """ + Destroy node by passing in the node object + """ + params = {'Action': 'TerminateInstances'} + params.update(self._pathlist('InstanceId', [node.id])) + res = self.connection.request(self.path, params=params).object + return self._get_terminate_boolean(res) + +class IdempotentParamError(LibcloudError): + """ + Request used the same client token as a previous, but non-identical request. 
+ """ + def __str__(self): + return repr(self.value) + +class EC2EUConnection(EC2Connection): + """ + Connection class for EC2 in the Western Europe Region + """ + host = EC2_EU_WEST_HOST + +class EC2EUNodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Western Europe Region + """ + + api_name = 'ec2_eu_west' + name = 'Amazon EC2 (eu-west-1)' + friendly_name = 'Amazon Europe Ireland' + country = 'IE' + region_name = 'eu-west-1' + connectionCls = EC2EUConnection + _instance_types = EC2_EU_WEST_INSTANCE_TYPES + +class EC2USWestConnection(EC2Connection): + """ + Connection class for EC2 in the Western US Region + """ + + host = EC2_US_WEST_HOST + +class EC2USWestNodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Western US Region + """ + + api_name = 'ec2_us_west' + name = 'Amazon EC2 (us-west-1)' + friendly_name = 'Amazon US N. California' + country = 'US' + region_name = 'us-west-1' + connectionCls = EC2USWestConnection + _instance_types = EC2_US_WEST_INSTANCE_TYPES + +class EC2APSEConnection(EC2Connection): + """ + Connection class for EC2 in the Southeast Asia Pacific Region + """ + + host = EC2_AP_SOUTHEAST_HOST + +class EC2APNEConnection(EC2Connection): + """ + Connection class for EC2 in the Northeast Asia Pacific Region + """ + + host = EC2_AP_NORTHEAST_HOST + +class EC2APSENodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Southeast Asia Pacific Region + """ + + api_name = 'ec2_ap_southeast' + name = 'Amazon EC2 (ap-southeast-1)' + friendly_name = 'Amazon Asia-Pacific Singapore' + country = 'SG' + region_name = 'ap-southeast-1' + connectionCls = EC2APSEConnection + _instance_types = EC2_AP_SOUTHEAST_INSTANCE_TYPES + +class EC2APNENodeDriver(EC2NodeDriver): + """ + Driver class for EC2 in the Northeast Asia Pacific Region + """ + + api_name = 'ec2_ap_northeast' + name = 'Amazon EC2 (ap-northeast-1)' + friendly_name = 'Amazon Asia-Pacific Tokyo' + country = 'JP' + region_name = 'ap-northeast-1' + connectionCls = 
EC2APNEConnection + _instance_types = EC2_AP_NORTHEAST_INSTANCE_TYPES + +class EucConnection(EC2Connection): + """ + Connection class for Eucalyptus + """ + + host = None + +class EucNodeDriver(EC2NodeDriver): + """ + Driver class for Eucalyptus + """ + + name = 'Eucalyptus' + connectionCls = EucConnection + _instance_types = EC2_US_WEST_INSTANCE_TYPES + + def __init__(self, key, secret=None, secure=True, host=None, path=None, port=None): + super(EucNodeDriver, self).__init__(key, secret, secure, host, port) + if path is None: + path = "/services/Eucalyptus" + self.path = path + + def list_locations(self): + raise NotImplementedError, \ + 'list_locations not implemented for this driver' + +# Nimbus clouds have 3 EC2-style instance types but their particular RAM +# allocations are configured by the admin +NIMBUS_INSTANCE_TYPES = { + 'm1.small': { + 'id' : 'm1.small', + 'name': 'Small Instance', + 'ram': None, + 'disk': None, + 'bandwidth': None, + }, + 'm1.large': { + 'id' : 'm1.large', + 'name': 'Large Instance', + 'ram': None, + 'disk': None, + 'bandwidth': None, + }, + 'm1.xlarge': { + 'id' : 'm1.xlarge', + 'name': 'Extra Large Instance', + 'ram': None, + 'disk': None, + 'bandwidth': None, + }, +} + +class NimbusConnection(EC2Connection): + """ + Connection class for Nimbus + """ + + host = None + +class NimbusNodeDriver(EC2NodeDriver): + """ + Driver class for Nimbus + """ + + type = Provider.NIMBUS + name = 'Nimbus' + api_name = 'nimbus' + region_name = 'nimbus' + friendly_name = 'Nimbus Private Cloud' + connectionCls = NimbusConnection + _instance_types = NIMBUS_INSTANCE_TYPES + + def ex_describe_addresses(self, nodes): + """Nimbus doesn't support elastic IPs, so this is a passthrough + """ + nodes_elastic_ip_mappings = {} + for node in nodes: + # empty list per node + nodes_elastic_ip_mappings[node.id] = [] + return nodes_elastic_ip_mappings diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ecp.py 
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Enomaly ECP driver
"""
import time
import base64
import httplib
import socket
import os

# JSON is included in the standard library starting with Python 2.6.  For 2.5
# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson
try:
    import json
except:
    import simplejson as json

from libcloud.common.base import Response, ConnectionUserAndKey
from libcloud.compute.base import NodeDriver, NodeSize, NodeLocation
from libcloud.compute.base import NodeImage, Node
from libcloud.compute.types import Provider, NodeState, InvalidCredsError
from libcloud.compute.base import is_private_subnet

#Defaults
# NOTE(review): API_HOST is empty and API_PORT is a (plain, ssl) tuple --
# presumably the connection machinery picks one based on `secure`; confirm
# against libcloud.common.base.ConnectionUserAndKey.
API_HOST = ''
API_PORT = (80,443)

class ECPResponse(Response):

    def success(self):
        # An HTTP 200/201 is only a success if the JSON payload decodes
        # and reports errno == 0; any other case records a message in
        # self.error for parse_error() to return.
        if self.status == httplib.OK or self.status == httplib.CREATED:
            try:
                j_body = json.loads(self.body)
            except ValueError:
                self.error = "JSON response cannot be decoded."
                return False
            if j_body['errno'] == 0:
                return True
            else:
                self.error = "ECP error: %s" % j_body['message']
                return False
        elif self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        else:
            self.error = "HTTP Error Code: %s" % self.status
            return False

    def parse_error(self):
        # The message was already computed by success().
        return self.error

    #Interpret the json responses - no error checking required
    def parse_body(self):
        return json.loads(self.body)

    def getheaders(self):
        return self.headers

class ECPConnection(ConnectionUserAndKey):
    """
    Connection class for the Enomaly ECP driver
    """

    responseCls = ECPResponse
    host = API_HOST
    port = API_PORT

    def add_default_headers(self, headers):
        #Authentication
        # HTTP Basic auth built from the driver's user_id/key pair; the
        # trailing newline that encodestring() appends is stripped.
        username = self.user_id
        password = self.key
        base64string =  base64.encodestring(
                '%s:%s' % (username, password))[:-1]
        authheader =  "Basic %s" % base64string
        headers['Authorization']= authheader

        return headers

    def _encode_multipart_formdata(self, fields):
        """
        Based on Wade Leftwich's function:
        http://code.activestate.com/recipes/146306/
        """
        #use a random boundary that does not appear in the fields
        boundary = ''
        while boundary in ''.join(fields):
            boundary = os.urandom(16).encode('hex')
        L = []
        for i in fields:
            L.append('--' + boundary)
            L.append('Content-Disposition: form-data; name="%s"' % i)
            L.append('')
            L.append(fields[i])
        L.append('--' + boundary + '--')
        L.append('')
        body = '\r\n'.join(L)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        header = {'Content-Type':content_type}
        return header, body


class ECPNodeDriver(NodeDriver):
    """
    Enomaly ECP node driver
    """

    name = "Enomaly Elastic Computing Platform"
    type = Provider.ECP
    connectionCls = ECPConnection

    def list_nodes(self):
        """
        Returns a list of all running Nodes
        """

        #Make the call
        res = self.connection.request('/rest/hosting/vm/list').parse_body()

        #Put together a list of node objects
        # (_to_node returns None for VMs that are not running)
        nodes=[]
        for vm in res['vms']:
            node = self._to_node(vm)
            if not node == None:
                nodes.append(node)

        #And return it
        return nodes


    def _to_node(self, vm):
        """
        Turns a (json) dictionary into a Node object.
        This returns only running VMs.
        """

        #Check state
        if not vm['state'] == "running":
            return None

        #IPs
        iplist = [interface['ip'] for interface in vm['interfaces']  if interface['ip'] != '127.0.0.1']

        public_ips = []
        private_ips = []
        for ip in iplist:
            # Skip anything that is not a valid dotted-quad address.
            try:
                socket.inet_aton(ip)
            except socket.error:
                # not a valid ip
                continue
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)

        #Create the node object
        n = Node(
          id=vm['uuid'],
          name=vm['name'],
          state=NodeState.RUNNING,
          public_ip=public_ips,
          private_ip=private_ips,
          driver=self,
        )

        return n

    def reboot_node(self, node):
        """
        Shuts down a VM and then starts it again.
        """

        #Turn the VM off
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'stop'})
        self.connection.request(
                   '/rest/hosting/vm/%s' % node.id,
                   method='POST',
                   headers=d[0],
                   data=d[1]
        ).parse_body()

        node.state = NodeState.REBOOTING
        #Wait for it to turn off and then continue (to turn it on again)
        # NOTE(review): this polls every 5s with no upper bound -- an
        # unresponsive VM would block forever.
        while node.state == NodeState.REBOOTING:
            #Check if it's off.
            response = self.connection.request(
                     '/rest/hosting/vm/%s' % node.id
                     ).parse_body()
            if response['vm']['state'] == 'off':
                node.state = NodeState.TERMINATED
            else:
                time.sleep(5)


        #Turn the VM back on.
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'start'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        node.state = NodeState.RUNNING
        return True

    def destroy_node(self, node):
        """
        Shuts down and deletes a VM.
        """

        #Shut down first
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'stop'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method = 'POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        #Ensure there was no applicationl level error
        node.state = NodeState.PENDING
        #Wait for the VM to turn off before continuing
        # NOTE(review): same unbounded polling loop as reboot_node.
        while node.state == NodeState.PENDING:
            #Check if it's off.
            response = self.connection.request(
                     '/rest/hosting/vm/%s' % node.id
                     ).parse_body()
            if response['vm']['state'] == 'off':
                node.state = NodeState.TERMINATED
            else:
                time.sleep(5)

        #Delete the VM
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'delete'})
        self.connection.request(
            '/rest/hosting/vm/%s' % (node.id),
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()

        return True

    def list_images(self, location=None):
        """
        Returns a list of all package templates aka appiances aka images
        """

        #Make the call
        response = self.connection.request(
            '/rest/hosting/ptemplate/list').parse_body()

        #Turn the response into an array of NodeImage objects
        images = []
        for ptemplate in response['packages']:
            images.append(NodeImage(
                id = ptemplate['uuid'],
                name= '%s: %s' % (ptemplate['name'], ptemplate['description']),
                driver = self,
                ))

        return images


    def list_sizes(self, location=None):
        """
        Returns a list of all hardware templates
        """

        #Make the call
        response = self.connection.request(
            '/rest/hosting/htemplate/list').parse_body()

        #Turn the response into an array of NodeSize objects
        sizes = []
        for htemplate in response['templates']:
            sizes.append(NodeSize(
                id = htemplate['uuid'],
                name = htemplate['name'],
                ram = htemplate['memory'],
                disk = 0, #Disk is independent of hardware template
                bandwidth = 0, #There is no way to keep track of bandwidth
                price = 0, #The billing system is external
                driver = self,
                ))

        return sizes

    def list_locations(self):
        """
        This feature does not exist in ECP. Returns hard coded dummy location.
        """
        return [
          NodeLocation(id=1,
                       name="Cloud",
                       country='',
                       driver=self),
        ]

    def create_node(self, **kwargs):
        """
        Creates a virtual machine.

        Parameters: name (string), image (NodeImage), size (NodeSize)
        """

        #Find out what network to put the VM on.
        res = self.connection.request('/rest/hosting/network/list').parse_body()

        #Use the first / default network because there is no way to specific
        #which one
        network = res['networks'][0]['uuid']

        #Prepare to make the VM
        data = {
            'name' : str(kwargs['name']),
            'package' : str(kwargs['image'].id),
            'hardware' : str(kwargs['size'].id),
            'network_uuid' : str(network),
            'disk' : ''
        }

        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata(data)
        response = self.connection.request(
            '/rest/hosting/vm/',
            method='PUT',
            headers = d[0],
            data=d[1]
        ).parse_body()

        #Create a node object and return it.
        n = Node(
            id=response['machine_id'],
            name=data['name'],
            state=NodeState.PENDING,
            public_ip=[],
            private_ip=[],
            driver=self,
        )

        return n
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +ElasticHosts Driver +""" +import re +import time +import base64 +import httplib + +try: + import json +except: + import simplejson as json + +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.types import InvalidCredsError, MalformedResponseError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node +from libcloud.compute.base import NodeImage +from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment, MultiStepDeployment + +# API end-points +API_ENDPOINTS = { + 'uk-1': { + 'name': 'London Peer 1', + 'country': 'United Kingdom', + 'host': 'api.lon-p.elastichosts.com' + }, + 'uk-2': { + 'name': 'London BlueSquare', + 'country': 'United Kingdom', + 'host': 'api.lon-b.elastichosts.com' + }, + 'us-1': { + 'name': 'San Antonio Peer 1', + 'country': 'United States', + 'host': 'api.sat-p.elastichosts.com' + }, +} + +# Default API end-point for the base connection clase. +DEFAULT_ENDPOINT = 'us-1' + +# ElasticHosts doesn't specify special instance types, so I just specified +# some plans based on the pricing page +# (http://www.elastichosts.com/cloud-hosting/pricing) +# and other provides. +# +# Basically for CPU any value between 500Mhz and 20000Mhz should work, +# 256MB to 8192MB for ram and 1GB to 2TB for disk. 
+INSTANCE_TYPES = { + 'small': { + 'id': 'small', + 'name': 'Small instance', + 'cpu': 2000, + 'memory': 1700, + 'disk': 160, + 'bandwidth': None, + }, + 'medium': { + 'id': 'medium', + 'name': 'Medium instance', + 'cpu': 3000, + 'memory': 4096, + 'disk': 500, + 'bandwidth': None, + }, + 'large': { + 'id': 'large', + 'name': 'Large instance', + 'cpu': 4000, + 'memory': 7680, + 'disk': 850, + 'bandwidth': None, + }, + 'extra-large': { + 'id': 'extra-large', + 'name': 'Extra Large instance', + 'cpu': 8000, + 'memory': 8192, + 'disk': 1690, + 'bandwidth': None, + }, + 'high-cpu-medium': { + 'id': 'high-cpu-medium', + 'name': 'High-CPU Medium instance', + 'cpu': 5000, + 'memory': 1700, + 'disk': 350, + 'bandwidth': None, + }, + 'high-cpu-extra-large': { + 'id': 'high-cpu-extra-large', + 'name': 'High-CPU Extra Large instance', + 'cpu': 20000, + 'memory': 7168, + 'disk': 1690, + 'bandwidth': None, + }, +} + +# Retrieved from http://www.elastichosts.com/cloud-hosting/api +STANDARD_DRIVES = { + '38df0986-4d85-4b76-b502-3878ffc80161': { + 'uuid': '38df0986-4d85-4b76-b502-3878ffc80161', + 'description': 'CentOS Linux 5.5', + 'size_gunzipped': '3GB', + 'supports_deployment': True, + }, + '980cf63c-f21e-4382-997b-6541d5809629': { + 'uuid': '980cf63c-f21e-4382-997b-6541d5809629', + 'description': 'Debian Linux 5.0', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + 'aee5589a-88c3-43ef-bb0a-9cab6e64192d': { + 'uuid': 'aee5589a-88c3-43ef-bb0a-9cab6e64192d', + 'description': 'Ubuntu Linux 10.04', + 'size_gunzipped': '1GB', + 'supports_deployment': True, + }, + 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0': { + 'uuid': 'b9d0eb72-d273-43f1-98e3-0d4b87d372c0', + 'description': 'Windows Web Server 2008', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '30824e97-05a4-410c-946e-2ba5a92b07cb': { + 'uuid': '30824e97-05a4-410c-946e-2ba5a92b07cb', + 'description': 'Windows Web Server 2008 R2', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + 
'9ecf810e-6ad1-40ef-b360-d606f0444671': { + 'uuid': '9ecf810e-6ad1-40ef-b360-d606f0444671', + 'description': 'Windows Web Server 2008 R2 + SQL Server', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '10a88d1c-6575-46e3-8d2c-7744065ea530': { + 'uuid': '10a88d1c-6575-46e3-8d2c-7744065ea530', + 'description': 'Windows Server 2008 Standard R2', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, + '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47': { + 'uuid': '2567f25c-8fb8-45c7-95fc-bfe3c3d84c47', + 'description': 'Windows Server 2008 Standard R2 + SQL Server', + 'size_gunzipped': '13GB', + 'supports_deployment': False, + }, +} + +NODE_STATE_MAP = { + 'active': NodeState.RUNNING, + 'dead': NodeState.TERMINATED, + 'dumped': NodeState.TERMINATED, +} + +# Default timeout (in seconds) for the drive imaging process +IMAGING_TIMEOUT = 10 * 60 + +class ElasticHostsException(Exception): + """ + Exception class for ElasticHosts driver + """ + + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + +class ElasticHostsResponse(Response): + def success(self): + if self.status == 401: + raise InvalidCredsError() + + return self.status >= 200 and self.status <= 299 + + def parse_body(self): + if not self.body: + return self.body + + try: + data = json.loads(self.body) + except: + raise MalformedResponseError("Failed to parse JSON", + body=self.body, + driver=ElasticHostsBaseNodeDriver) + + return data + + def parse_error(self): + error_header = self.headers.get('x-elastic-error', '') + return 'X-Elastic-Error: %s (%s)' % (error_header, self.body.strip()) + +class ElasticHostsNodeSize(NodeSize): + def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver): + self.id = id + self.name = name + self.cpu = cpu + self.ram = ram + self.disk = disk + self.bandwidth = bandwidth + self.price = price + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.cpu, self.ram, + self.disk, 
self.bandwidth, self.price, self.driver.name)) + +class ElasticHostsBaseConnection(ConnectionUserAndKey): + """ + Base connection class for the ElasticHosts driver + """ + + host = API_ENDPOINTS[DEFAULT_ENDPOINT]['host'] + responseCls = ElasticHostsResponse + + def add_default_headers(self, headers): + headers['Accept'] = 'application/json' + headers['Content-Type'] = 'application/json' + headers['Authorization'] = ('Basic %s' + % (base64.b64encode('%s:%s' + % (self.user_id, + self.key)))) + return headers + +class ElasticHostsBaseNodeDriver(NodeDriver): + """ + Base ElasticHosts node driver + """ + + type = Provider.ELASTICHOSTS + api_name = 'elastichosts' + name = 'ElasticHosts' + connectionCls = ElasticHostsBaseConnection + features = {"create_node": ["generates_password"]} + + def reboot_node(self, node): + # Reboots the node + response = self.connection.request( + action='/servers/%s/reset' % (node.id), + method='POST' + ) + return response.status == 204 + + def destroy_node(self, node): + # Kills the server immediately + response = self.connection.request( + action='/servers/%s/destroy' % (node.id), + method='POST' + ) + return response.status == 204 + + def list_images(self, location=None): + # Returns a list of available pre-installed system drive images + images = [] + for key, value in STANDARD_DRIVES.iteritems(): + image = NodeImage( + id=value['uuid'], + name=value['description'], + driver=self.connection.driver, + extra={ + 'size_gunzipped': value['size_gunzipped'] + } + ) + images.append(image) + + return images + + def list_sizes(self, location=None): + sizes = [] + for key, value in INSTANCE_TYPES.iteritems(): + size = ElasticHostsNodeSize( + id=value['id'], + name=value['name'], cpu=value['cpu'], ram=value['memory'], + disk=value['disk'], bandwidth=value['bandwidth'], + price=self._get_size_price(size_id=value['id']), + driver=self.connection.driver + ) + sizes.append(size) + + return sizes + + def list_nodes(self): + # Returns a list of active 
(running) nodes + response = self.connection.request(action='/servers/info').object + + nodes = [] + for data in response: + node = self._to_node(data) + nodes.append(node) + + return nodes + + def create_node(self, **kwargs): + """Creates a ElasticHosts instance + + See L{NodeDriver.create_node} for more keyword args. + + @keyword name: String with a name for this new node (required) + @type name: C{string} + + @keyword smp: Number of virtual processors or None to calculate + based on the cpu speed + @type smp: C{int} + + @keyword nic_model: e1000, rtl8139 or virtio + (if not specified, e1000 is used) + @type nic_model: C{string} + + @keyword vnc_password: If set, the same password is also used for + SSH access with user toor, + otherwise VNC access is disabled and + no SSH login is possible. + @type vnc_password: C{string} + """ + size = kwargs['size'] + image = kwargs['image'] + smp = kwargs.get('smp', 'auto') + nic_model = kwargs.get('nic_model', 'e1000') + vnc_password = ssh_password = kwargs.get('vnc_password', None) + + if nic_model not in ('e1000', 'rtl8139', 'virtio'): + raise ElasticHostsException('Invalid NIC model specified') + + # check that drive size is not smaller then pre installed image size + + # First we create a drive with the specified size + drive_data = {} + drive_data.update({'name': kwargs['name'], + 'size': '%sG' % (kwargs['size'].disk)}) + + response = self.connection.request(action='/drives/create', + data=json.dumps(drive_data), + method='POST').object + + if not response: + raise ElasticHostsException('Drive creation failed') + + drive_uuid = response['drive'] + + # Then we image the selected pre-installed system drive onto it + response = self.connection.request( + action='/drives/%s/image/%s/gunzip' % (drive_uuid, image.id), + method='POST' + ) + + if response.status != 204: + raise ElasticHostsException('Drive imaging failed') + + # We wait until the drive is imaged and then boot up the node + # (in most cases, the imaging process 
shouldn't take longer + # than a few minutes) + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid) + ).object + imaging_start = time.time() + while response.has_key('imaging'): + response = self.connection.request( + action='/drives/%s/info' % (drive_uuid) + ).object + elapsed_time = time.time() - imaging_start + if (response.has_key('imaging') + and elapsed_time >= IMAGING_TIMEOUT): + raise ElasticHostsException('Drive imaging timed out') + time.sleep(1) + + node_data = {} + node_data.update({'name': kwargs['name'], + 'cpu': size.cpu, + 'mem': size.ram, + 'ide:0:0': drive_uuid, + 'boot': 'ide:0:0', + 'smp': smp}) + node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'}) + + if vnc_password: + node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password}) + + response = self.connection.request( + action='/servers/create', data=json.dumps(node_data), + method='POST' + ).object + + if isinstance(response, list): + nodes = [self._to_node(node, ssh_password) for node in response] + else: + nodes = self._to_node(response, ssh_password) + + return nodes + + # Extension methods + def ex_set_node_configuration(self, node, **kwargs): + # Changes the configuration of the running server + valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$', + '^boot$', '^nic:0:model$', '^nic:0:dhcp', + '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$', + '^vnc:ip$', '^vnc:password$', '^vnc:tls', + '^ide:[0-1]:[0-1](:media)?$', + '^scsi:0:[0-7](:media)?$', '^block:[0-7](:media)?$') + + invalid_keys = [] + for key in kwargs.keys(): + matches = False + for regex in valid_keys: + if re.match(regex, key): + matches = True + break + if not matches: + invalid_keys.append(key) + + if invalid_keys: + raise ElasticHostsException( + 'Invalid configuration key specified: %s' + % (',' .join(invalid_keys)) + ) + + response = self.connection.request( + action='/servers/%s/set' % (node.id), data=json.dumps(kwargs), + method='POST' + ) + + return 
(response.status == httplib.OK and response.body != '') + + def deploy_node(self, **kwargs): + """ + Create a new node, and start deployment. + + @keyword enable_root: If true, root password will be set to + vnc_password (this will enable SSH access) + and default 'toor' account will be deleted. + @type enable_root: C{bool} + + For detailed description and keywords args, see + L{NodeDriver.deploy_node}. + """ + image = kwargs['image'] + vnc_password = kwargs.get('vnc_password', None) + enable_root = kwargs.get('enable_root', False) + + if not vnc_password: + raise ValueError('You need to provide vnc_password argument ' + 'if you want to use deployment') + + if (image in STANDARD_DRIVES + and STANDARD_DRIVES[image]['supports_deployment']): + raise ValueError('Image %s does not support deployment' + % (image.id)) + + if enable_root: + script = ("unset HISTFILE;" + "echo root:%s | chpasswd;" + "sed -i '/^toor.*$/d' /etc/passwd /etc/shadow;" + "history -c") % vnc_password + root_enable_script = ScriptDeployment(script=script, + delete=True) + deploy = kwargs.get('deploy', None) + if deploy: + if (isinstance(deploy, ScriptDeployment) + or isinstance(deploy, SSHKeyDeployment)): + deployment = MultiStepDeployment([deploy, + root_enable_script]) + elif isinstance(deploy, MultiStepDeployment): + deployment = deploy + deployment.add(root_enable_script) + else: + deployment = root_enable_script + + kwargs['deploy'] = deployment + + if not kwargs.get('ssh_username', None): + kwargs['ssh_username'] = 'toor' + + return super(ElasticHostsBaseNodeDriver, self).deploy_node(**kwargs) + + def ex_shutdown_node(self, node): + # Sends the ACPI power-down event + response = self.connection.request( + action='/servers/%s/shutdown' % (node.id), + method='POST' + ) + return response.status == 204 + + def ex_destroy_drive(self, drive_uuid): + # Deletes a drive + response = self.connection.request( + action='/drives/%s/destroy' % (drive_uuid), + method='POST' + ) + return response.status == 
204 + + # Helper methods + def _to_node(self, data, ssh_password=None): + try: + state = NODE_STATE_MAP[data['status']] + except KeyError: + state = NodeState.UNKNOWN + + if isinstance(data['nic:0:dhcp'], list): + public_ip = data['nic:0:dhcp'] + else: + public_ip = [data['nic:0:dhcp']] + + extra = {'cpu': data['cpu'], + 'smp': data['smp'], + 'mem': data['mem'], + 'started': data['started']} + + if data.has_key('vnc:ip') and data.has_key('vnc:password'): + extra.update({'vnc_ip': data['vnc:ip'], + 'vnc_password': data['vnc:password']}) + + if ssh_password: + extra.update({'password': ssh_password}) + + node = Node(id=data['server'], name=data['name'], state=state, + public_ip=public_ip, private_ip=None, + driver=self.connection.driver, + extra=extra) + + return node + +class ElasticHostsUK1Connection(ElasticHostsBaseConnection): + """ + Connection class for the ElasticHosts driver for + the London Peer 1 end-point + """ + + host = API_ENDPOINTS['uk-1']['host'] + +class ElasticHostsUK1NodeDriver(ElasticHostsBaseNodeDriver): + """ + ElasticHosts node driver for the London Peer 1 end-point + """ + connectionCls = ElasticHostsUK1Connection + +class ElasticHostsUK2Connection(ElasticHostsBaseConnection): + """ + Connection class for the ElasticHosts driver for + the London Bluesquare end-point + """ + host = API_ENDPOINTS['uk-2']['host'] + +class ElasticHostsUK2NodeDriver(ElasticHostsBaseNodeDriver): + """ + ElasticHosts node driver for the London Bluesquare end-point + """ + connectionCls = ElasticHostsUK2Connection + +class ElasticHostsUS1Connection(ElasticHostsBaseConnection): + """ + Connection class for the ElasticHosts driver for + the San Antonio Peer 1 end-point + """ + host = API_ENDPOINTS['us-1']['host'] + +class ElasticHostsUS1NodeDriver(ElasticHostsBaseNodeDriver): + """ + ElasticHosts node driver for the San Antonio Peer 1 end-point + """ + connectionCls = ElasticHostsUS1Connection diff --git 
a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/gandi.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/gandi.py new file mode 100644 index 0000000000000000000000000000000000000000..d4dd3751f51c53bfdca3dc676d2a57370eed0e8d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/gandi.py @@ -0,0 +1,367 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Gandi driver +""" + +import time +import xmlrpclib + +import libcloud +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, NodeImage + +# Global constants +API_VERSION = '2.0' +API_PREFIX = "https://rpc.gandi.net/xmlrpc/%s/" % API_VERSION + +DEFAULT_TIMEOUT = 600 # operation pooling max seconds +DEFAULT_INTERVAL = 20 # seconds between 2 operation.info + +NODE_STATE_MAP = { + 'running': NodeState.RUNNING, + 'halted': NodeState.TERMINATED, + 'paused': NodeState.TERMINATED, + 'locked' : NodeState.TERMINATED, + 'being_created' : NodeState.PENDING, + 'invalid' : NodeState.UNKNOWN, + 'legally_locked' : NodeState.PENDING, + 'deleted' : NodeState.TERMINATED +} + +NODE_PRICE_HOURLY_USD = 0.02 + +class GandiException(Exception): + """ + Exception class for Gandi driver + """ + def __str__(self): + return "(%u) %s" % (self.args[0], self.args[1]) + def __repr__(self): + return "" % (self.args[0], self.args[1]) + +class GandiSafeTransport(xmlrpclib.SafeTransport): + pass + +class GandiTransport(xmlrpclib.Transport): + pass + +class GandiProxy(xmlrpclib.ServerProxy): + transportCls = (GandiTransport, GandiSafeTransport) + + def __init__(self,user_agent, verbose=0): + cls = self.transportCls[0] + if API_PREFIX.startswith("https://"): + cls = self.transportCls[1] + t = cls(use_datetime=0) + t.user_agent = user_agent + xmlrpclib.ServerProxy.__init__( + self, + uri="%s" % (API_PREFIX), + transport=t, + verbose=verbose, + allow_none=True + ) + +class GandiConnection(object): + """ + Connection class for the Gandi driver + """ + + proxyCls = GandiProxy + driver = 'gandi' + + def __init__(self, user, password=None): + self.ua = [] + + # Connect only with an api_key generated on website + self.api_key = user + + try: + self._proxy = self.proxyCls(self._user_agent()) + except xmlrpclib.Fault, e: + raise GandiException(1000, e) + + def _user_agent(self): + return 'libcloud/%s (%s)%s' % ( + 
libcloud.__version__, + self.driver, + "".join([" (%s)" % x for x in self.ua])) + + def user_agent_append(self, s): + self.ua.append(s) + + def request(self,method,*args): + """ Request xmlrpc method with given args""" + try: + return getattr(self._proxy, method)(self.api_key,*args) + except xmlrpclib.Fault, e: + raise GandiException(1001, e) + + +class GandiNodeDriver(NodeDriver): + """ + Gandi node driver + + """ + connectionCls = GandiConnection + name = 'Gandi' + api_name = 'gandi' + friendly_name = 'Gandi.net' + country = 'FR' + type = Provider.GANDI + # TODO : which features to enable ? + features = { } + + def __init__(self, key, secret=None, secure=False): + self.key = key + self.secret = secret + self.connection = self.connectionCls(key, secret) + self.connection.driver = self + + # Specific methods for gandi + def _wait_operation(self, id, timeout=DEFAULT_TIMEOUT, check_interval=DEFAULT_INTERVAL): + """ Wait for an operation to succeed""" + + for i in range(0, timeout, check_interval): + try: + op = self.connection.request('operation.info', int(id)) + + if op['step'] == 'DONE': + return True + if op['step'] in ['ERROR','CANCEL']: + return False + except (KeyError, IndexError): + pass + except Exception, e: + raise GandiException(1002, e) + + time.sleep(check_interval) + return False + + def _node_info(self,id): + try: + obj = self.connection.request('vm.info',int(id)) + return obj + except Exception,e: + raise GandiException(1003, e) + return None + + # Generic methods for driver + def _to_node(self, vm): + return Node( + id=vm['id'], + name=vm['hostname'], + state=NODE_STATE_MAP.get( + vm['state'], + NodeState.UNKNOWN + ), + public_ip=vm.get('ip'), + private_ip='', + driver=self, + extra={ + 'ai_active' : vm.get('ai_active'), + 'datacenter_id' : vm.get('datacenter_id'), + 'description' : vm.get('description') + } + ) + + def _to_nodes(self, vms): + return [self._to_node(v) for v in vms] + + def list_nodes(self): + vms = self.connection.request('vm.list') 
+ ips = self.connection.request('ip.list') + for vm in vms: + for ip in ips: + if vm['ifaces_id'][0] == ip['iface_id']: + vm['ip'] = ip.get('ip') + + nodes = self._to_nodes(vms) + return nodes + + def reboot_node(self, node): + op = self.connection.request('vm.reboot',int(node.id)) + op_res = self._wait_operation(op['id']) + vm = self.connection.request('vm.info',int(node.id)) + if vm['state'] == 'running': + return True + return False + + def destroy_node(self, node): + vm = self._node_info(node.id) + if vm['state'] == 'running': + # Send vm_stop and wait for accomplish + op_stop = self.connection.request('vm.stop',int(node.id)) + if not self._wait_operation(op_stop['id']): + raise GandiException(1010, 'vm.stop failed') + # Delete + op = self.connection.request('vm.delete',int(node.id)) + if self._wait_operation(op['id']): + return True + return False + + def deploy_node(self, **kwargs): + raise NotImplementedError, \ + 'deploy_node not implemented for gandi driver' + + def create_node(self, **kwargs): + """Create a new Gandi node + + @keyword name: String with a name for this new node (required) + @type name: str + + @keyword image: OS Image to boot on node. (required) + @type image: L{NodeImage} + + @keyword location: Which data center to create a node in. If empty, + undefined behavoir will be selected. (optional) + @type location: L{NodeLocation} + + @keyword size: The size of resources allocated to this node. 
+ (required) + @type size: L{NodeSize} + + @keyword login: user name to create for login on this machine (required) + @type login: String + + @keyword password: password for user that'll be created (required) + @type password: String + + @keywork inet_family: version of ip to use, default 4 (optional) + @type inet_family: int + """ + + if kwargs.get('login') is None or kwargs.get('password') is None: + raise GandiException(1020, 'login and password must be defined for node creation') + + location = kwargs.get('location') + if location and isinstance(location,NodeLocation): + dc_id = int(location.id) + else: + raise GandiException(1021, 'location must be a subclass of NodeLocation') + + size = kwargs.get('size') + if not size and not isinstance(size,NodeSize): + raise GandiException(1022, 'size must be a subclass of NodeSize') + + src_disk_id = int(kwargs['image'].id) + + disk_spec = { + 'datacenter_id': dc_id, + 'name': 'disk_%s' % kwargs['name'] + } + + vm_spec = { + 'datacenter_id': dc_id, + 'hostname': kwargs['name'], + 'login': kwargs['login'], + 'password': kwargs['password'], # TODO : use NodeAuthPassword + 'memory': int(size.ram), + 'cores': int(size.id), + 'bandwidth' : int(size.bandwidth), + 'ip_version': kwargs.get('inet_family',4), + } + + # Call create_from helper api. 
Return 3 operations : disk_create, + # iface_create,vm_create + (op_disk,op_iface,op_vm) = self.connection.request( + 'vm.create_from', + vm_spec,disk_spec,src_disk_id + ) + + # We wait for vm_create to finish + if self._wait_operation(op_vm['id']): + # after successful operation, get ip information thru first interface + node = self._node_info(op_vm['vm_id']) + ifaces = node.get('ifaces') + if len(ifaces) > 0: + ips = ifaces[0].get('ips') + if len(ips) > 0: + node['ip'] = ips[0]['ip'] + return self._to_node(node) + + return None + + def _to_image(self, img): + return NodeImage( + id=img['disk_id'], + name=img['label'], + driver=self.connection.driver + ) + + def list_images(self, location=None): + try: + if location: + filtering = { 'datacenter_id' : int(location.id) } + else: + filtering = {} + images = self.connection.request('image.list', filtering ) + return [self._to_image(i) for i in images] + except Exception, e: + raise GandiException(1011, e) + + def _to_size(self, id, size): + return NodeSize( + id=id, + name='%s cores' % id, + ram=size['memory'], + disk=size['disk'], + bandwidth=size['bandwidth'], + price=(self._get_size_price(size_id='1') * id), + driver=self.connection.driver, + ) + + def list_sizes(self, location=None): + account = self.connection.request('account.info') + # Look for available shares, and return a list of share_definition + available_res = account['resources']['available'] + + if available_res['shares'] == 0: + return None + else: + share_def = account['share_definition'] + available_cores = available_res['cores'] + # 0.75 core given when creating a server + max_core = int(available_cores + 0.75) + shares = [] + if available_res['servers'] < 1: + # No server quota, no way + return shares + for i in range(1,max_core + 1): + share = {id:i} + share_is_available = True + for k in ['memory', 'disk', 'bandwidth']: + if share_def[k] * i > available_res[k]: + # We run out for at least one resource inside + share_is_available = False + else: 
+ share[k] = share_def[k] * i + if share_is_available: + nb_core = i + shares.append(self._to_size(nb_core,share)) + return shares + + def _to_loc(self, loc): + return NodeLocation( + id=loc['id'], + name=loc['name'], + country=loc['country'], + driver=self + ) + + def list_locations(self): + res = self.connection.request("datacenter.list") + return [self._to_loc(l) for l in res] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/gogrid.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/gogrid.py new file mode 100644 index 0000000000000000000000000000000000000000..850c0d75d925fd27a672824d3c2d03fcbc62ba6d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/gogrid.py @@ -0,0 +1,395 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +GoGrid driver +""" +import time +import hashlib +import copy + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.common.gogrid import GoGridConnection, BaseGoGridDriver +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +STATE = { + "Starting": NodeState.PENDING, + "On": NodeState.RUNNING, + "On/Saving": NodeState.RUNNING, + "Off": NodeState.PENDING, + "Restarting": NodeState.REBOOTING, + "Saving": NodeState.PENDING, + "Restoring": NodeState.PENDING, +} + +GOGRID_INSTANCE_TYPES = { + '512MB': {'id': '512MB', + 'name': '512MB', + 'ram': 512, + 'disk': 30, + 'bandwidth': None}, + '1GB': {'id': '1GB', + 'name': '1GB', + 'ram': 1024, + 'disk': 60, + 'bandwidth': None}, + '2GB': {'id': '2GB', + 'name': '2GB', + 'ram': 2048, + 'disk': 120, + 'bandwidth': None}, + '4GB': {'id': '4GB', + 'name': '4GB', + 'ram': 4096, + 'disk': 240, + 'bandwidth': None}, + '8GB': {'id': '8GB', + 'name': '8GB', + 'ram': 8192, + 'disk': 480, + 'bandwidth': None}, + '16GB': {'id': '16GB', + 'name': '16GB', + 'ram': 16384, + 'disk': 960, + 'bandwidth': None}, +} + + +class GoGridNode(Node): + # Generating uuid based on public ip to get around missing id on + # create_node in gogrid api + # + # Used public ip since it is not mutable and specified at create time, + # so uuid of node should not change after add is completed + def get_uuid(self): + return hashlib.sha1( + "%s:%d" % (self.public_ip,self.driver.type) + ).hexdigest() + +class GoGridNodeDriver(BaseGoGridDriver, NodeDriver): + """ + GoGrid node driver + """ + + connectionCls = GoGridConnection + type = Provider.GOGRID + api_name = 'gogrid' + name = 'GoGrid' + features = {"create_node": ["generates_password"]} + + _instance_types = GOGRID_INSTANCE_TYPES + + def _get_state(self, element): + try: + return STATE[element['state']['name']] + 
except: + pass + return NodeState.UNKNOWN + + def _get_ip(self, element): + return element.get('ip').get('ip') + + def _get_id(self, element): + return element.get('id') + + def _to_node(self, element, password=None): + state = self._get_state(element) + ip = self._get_ip(element) + id = self._get_id(element) + n = GoGridNode(id=id, + name=element['name'], + state=state, + public_ip=[ip], + private_ip=[], + extra={'ram': element.get('ram').get('name')}, + driver=self.connection.driver) + if password: + n.extra['password'] = password + + return n + + def _to_image(self, element): + n = NodeImage(id=element['id'], + name=element['friendlyName'], + driver=self.connection.driver) + return n + + def _to_images(self, object): + return [ self._to_image(el) + for el in object['list'] ] + + def _to_location(self, element): + location = NodeLocation(id=element['id'], + name=element['name'], + country="US", + driver=self.connection.driver) + return location + + def _to_locations(self, object): + return [self._to_location(el) + for el in object['list']] + + def list_images(self, location=None): + params = {} + if location is not None: + params["datacenter"] = location.id + images = self._to_images( + self.connection.request('/api/grid/image/list', params).object) + return images + + def list_nodes(self): + passwords_map = {} + + res = self._server_list() + try: + for password in self._password_list()['list']: + try: + passwords_map[password['server']['id']] = password['password'] + except KeyError: + pass + except InvalidCredsError: + # some gogrid API keys don't have permission to access the password list. 
+ pass + + return [ self._to_node(el, passwords_map.get(el.get('id'))) + for el + in res['list'] ] + + def reboot_node(self, node): + id = node.id + power = 'restart' + res = self._server_power(id, power) + if not res.success(): + raise Exception(res.parse_error()) + return True + + def destroy_node(self, node): + id = node.id + res = self._server_delete(id) + if not res.success(): + raise Exception(res.parse_error()) + return True + + def _server_list(self): + return self.connection.request('/api/grid/server/list').object + + def _password_list(self): + return self.connection.request('/api/support/password/list').object + + def _server_power(self, id, power): + # power in ['start', 'stop', 'restart'] + params = {'id': id, 'power': power} + return self.connection.request("/api/grid/server/power", params, + method='POST') + + def _server_delete(self, id): + params = {'id': id} + return self.connection.request("/api/grid/server/delete", params, + method='POST') + + def _get_first_ip(self, location=None): + ips = self.ex_list_ips(public=True, assigned=False, location=location) + try: + return ips[0].ip + except IndexError: + raise LibcloudError('No public unassigned IPs left', + GoGridNodeDriver) + + def list_sizes(self, location=None): + sizes = [] + for key, values in self._instance_types.iteritems(): + attributes = copy.deepcopy(values) + attributes.update({ 'price': self._get_size_price(size_id=key) }) + sizes.append(NodeSize(driver=self.connection.driver, **attributes)) + + return sizes + + def list_locations(self): + locations = self._to_locations( + self.connection.request('/api/common/lookup/list', + params={'lookup': 'ip.datacenter'}).object) + return locations + + def ex_create_node_nowait(self, **kwargs): + """Don't block until GoGrid allocates id for a node + but return right away with id == None. 
+
+        The existence of this method is explained by the fact
+        that GoGrid assigns id to a node only few minutes after
+        creation."""
+        name = kwargs['name']
+        image = kwargs['image']
+        size = kwargs['size']
+        try:
+            ip = kwargs['ex_ip']
+        except KeyError:
+            ip = self._get_first_ip(kwargs.get('location'))
+
+        params = {'name': name,
+                  'image': image.id,
+                  'description': kwargs.get('ex_description', ''),
+                  'server.ram': size.id,
+                  'ip': ip}
+
+        object = self.connection.request('/api/grid/server/add',
+                                         params=params, method='POST').object
+        node = self._to_node(object['list'][0])
+
+        return node
+
+    def create_node(self, **kwargs):
+        """Create a new GoGrid node
+
+        See L{NodeDriver.create_node} for more keyword args.
+
+        @keyword ex_description: Description of a Node
+        @type ex_description: C{string}
+        @keyword ex_ip: Public IP address to use for a Node. If not
+                 specified, first available IP address will be picked
+        @type ex_ip: C{string}
+        """
+        node = self.ex_create_node_nowait(**kwargs)
+
+        timeout = 60 * 20
+        waittime = 0
+        interval = 2 * 60
+
+        while node.id is None and waittime < timeout:
+            nodes = self.list_nodes()
+
+            for i in nodes:
+                if i.public_ip[0] == node.public_ip[0] and i.id is not None:
+                    return i
+
+            waittime += interval
+            time.sleep(interval)
+
+        if node.id is None:
+            raise Exception("Wasn't able to wait for id allocation for the node %s" % str(node))
+
+        return node
+
+    def ex_save_image(self, node, name):
+        """Create an image for node.
+ + Please refer to GoGrid documentation to get info + how prepare a node for image creation: + + http://wiki.gogrid.com/wiki/index.php/MyGSI + + @keyword node: node to use as a base for image + @type node: L{Node} + @keyword name: name for new image + @type name: C{string} + """ + params = {'server': node.id, + 'friendlyName': name} + object = self.connection.request('/api/grid/image/save', params=params, + method='POST').object + + return self._to_images(object)[0] + + def ex_edit_node(self, **kwargs): + """Change attributes of a node. + + @keyword node: node to be edited + @type node: L{Node} + @keyword size: new size of a node + @type size: L{NodeSize} + @keyword ex_description: new description of a node + @type ex_description: C{string} + """ + node = kwargs['node'] + size = kwargs['size'] + + params = {'id': node.id, + 'server.ram': size.id} + + if 'ex_description' in kwargs: + params['description'] = kwargs['ex_description'] + + object = self.connection.request('/api/grid/server/edit', + params=params).object + + return self._to_node(object['list'][0]) + + def ex_edit_image(self, **kwargs): + """Edit metadata of a server image. + + @keyword image: image to be edited + @type image: L{NodeImage} + @keyword public: should be the image public? + @type public: C{bool} + @keyword ex_description: description of the image (optional) + @type ex_description: C{string} + @keyword name: name of the image + @type name C{string} + + """ + + image = kwargs['image'] + public = kwargs['public'] + + params = {'id': image.id, + 'isPublic': str(public).lower()} + + if 'ex_description' in kwargs: + params['description'] = kwargs['ex_description'] + + if 'name' in kwargs: + params['friendlyName'] = kwargs['name'] + + object = self.connection.request('/api/grid/image/edit', + params=params).object + + return self._to_image(object['list'][0]) + + def ex_list_ips(self, **kwargs): + """Return list of IP addresses assigned to + the account. 
+ + @keyword public: set to True to list only + public IPs or False to list only + private IPs. Set to None or not specify + at all not to filter by type + @type public: C{bool} + @keyword assigned: set to True to list only addresses + assigned to servers, False to list unassigned + addresses and set to None or don't set at all + not no filter by state + @type assigned: C{bool} + @keyword location: filter IP addresses by location + @type location: L{NodeLocation} + @return: C{list} of L{GoGridIpAddress}es + """ + + params = {} + + if "public" in kwargs and kwargs["public"] is not None: + params["ip.type"] = {True: "Public", + False: "Private"}[kwargs["public"]] + if "assigned" in kwargs and kwargs["assigned"] is not None: + params["ip.state"] = {True: "Assigned", + False: "Unassigned"}[kwargs["assigned"]] + if "location" in kwargs and kwargs['location'] is not None: + params['datacenter'] = kwargs['location'].id + + ips = self._to_ips( + self.connection.request('/api/grid/ip/list', + params=params).object) + return ips diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ibm_sbc.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ibm_sbc.py new file mode 100644 index 0000000000000000000000000000000000000000..99e309d57e12eceaa665a8d0130fa0f98ed76be5 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/ibm_sbc.py @@ -0,0 +1,191 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Driver for the IBM Developer Cloud. +""" +import base64, urllib + +from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import NodeDriver, Node, NodeImage, NodeSize, NodeLocation, NodeAuthSSHKey + +from xml.etree import ElementTree as ET + +HOST = 'www-147.ibm.com' +REST_BASE = '/computecloud/enterprise/api/rest/20100331' + +class IBMResponse(Response): + def success(self): + return int(self.status) == 200 + + def parse_body(self): + if not self.body: + return None + return ET.XML(self.body) + + def parse_error(self): + if int(self.status) == 401: + if not self.body: + raise InvalidCredsError(str(self.status) + ': ' + self.error) + else: + raise InvalidCredsError(self.body) + return self.body + +class IBMConnection(ConnectionUserAndKey): + """ + Connection class for the IBM Developer Cloud driver + """ + + host = HOST + responseCls = IBMResponse + + def add_default_headers(self, headers): + headers['Accept'] = 'text/xml' + headers['Authorization'] = ('Basic %s' % (base64.b64encode('%s:%s' % (self.user_id, self.key)))) + if not 'Content-Type' in headers: + headers['Content-Type'] = 'text/xml' + return headers + + def encode_data(self, data): + return urllib.urlencode(data) + +class IBMNodeDriver(NodeDriver): + """ + IBM Developer Cloud node driver. 
+ """ + connectionCls = IBMConnection + type = Provider.IBM + name = "IBM Developer Cloud" + + NODE_STATE_MAP = { 0: NodeState.PENDING, # New + 1: NodeState.PENDING, # Provisioning + 2: NodeState.TERMINATED, # Failed + 3: NodeState.TERMINATED, # Removed + 4: NodeState.TERMINATED, # Rejected + 5: NodeState.RUNNING, # Active + 6: NodeState.UNKNOWN, # Unknown + 7: NodeState.PENDING, # Deprovisioning + 8: NodeState.REBOOTING, # Restarting + 9: NodeState.PENDING, # Starting + 10: NodeState.PENDING, # Stopping + 11: NodeState.TERMINATED,# Stopped + 12: NodeState.PENDING, # Deprovision Pending + 13: NodeState.PENDING, # Restart Pending + 14: NodeState.PENDING, # Attaching + 15: NodeState.PENDING } # Detaching + + def create_node(self, **kwargs): + """ + Creates a node in the IBM Developer Cloud. + + See L{NodeDriver.create_node} for more keyword args. + + @keyword ex_configurationData: Image-specific configuration parameters. + Configuration parameters are defined in + the parameters.xml file. The URL to + this file is defined in the NodeImage + at extra[parametersURL]. + @type ex_configurationData: C{dict} + """ + + # Compose headers for message body + data = {} + data.update({'name': kwargs['name']}) + data.update({'imageID': kwargs['image'].id}) + data.update({'instanceType': kwargs['size'].id}) + if 'location' in kwargs: + data.update({'location': kwargs['location'].id}) + else: + data.update({'location': '1'}) + if 'auth' in kwargs and isinstance(kwargs['auth'], NodeAuthSSHKey): + data.update({'publicKey': kwargs['auth'].pubkey}) + if 'ex_configurationData' in kwargs: + configurationData = kwargs['ex_configurationData'] + for key in configurationData.keys(): + data.update({key: configurationData.get(key)}) + + # Send request! 
+ resp = self.connection.request(action = REST_BASE + '/instances', + headers = {'Content-Type': 'application/x-www-form-urlencoded'}, + method = 'POST', + data = data).object + return self._to_nodes(resp)[0] + + def destroy_node(self, node): + url = REST_BASE + '/instances/%s' % (node.id) + status = int(self.connection.request(action = url, method='DELETE').status) + return status == 200 + + def reboot_node(self, node): + url = REST_BASE + '/instances/%s' % (node.id) + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + data = {'state': 'restart'} + + resp = self.connection.request(action = url, + method = 'PUT', + headers = headers, + data = data) + return int(resp.status) == 200 + + def list_nodes(self): + return self._to_nodes(self.connection.request(REST_BASE + '/instances').object) + + def list_images(self, location = None): + return self._to_images(self.connection.request(REST_BASE + '/offerings/image').object) + + def list_sizes(self, location = None): + return [ NodeSize('BRZ32.1/2048/60*175', 'Bronze 32 bit', None, None, None, None, self.connection.driver), + NodeSize('BRZ64.2/4096/60*500*350', 'Bronze 64 bit', None, None, None, None, self.connection.driver), + NodeSize('COP32.1/2048/60', 'Copper 32 bit', None, None, None, None, self.connection.driver), + NodeSize('COP64.2/4096/60', 'Copper 64 bit', None, None, None, None, self.connection.driver), + NodeSize('SLV32.2/4096/60*350', 'Silver 32 bit', None, None, None, None, self.connection.driver), + NodeSize('SLV64.4/8192/60*500*500', 'Silver 64 bit', None, None, None, None, self.connection.driver), + NodeSize('GLD32.4/4096/60*350', 'Gold 32 bit', None, None, None, None, self.connection.driver), + NodeSize('GLD64.8/16384/60*500*500', 'Gold 64 bit', None, None, None, None, self.connection.driver), + NodeSize('PLT64.16/16384/60*500*500*500*500', 'Platinum 64 bit', None, None, None, None, self.connection.driver) ] + + def list_locations(self): + return 
self._to_locations(self.connection.request(REST_BASE + '/locations').object)
+
+    def _to_nodes(self, object):
+        return [ self._to_node(instance) for instance in object.findall('Instance') ]
+
+    def _to_node(self, instance):
+        return Node(id = instance.findtext('ID'),
+                    name = instance.findtext('Name'),
+                    state = self.NODE_STATE_MAP[int(instance.findtext('Status'))],
+                    public_ip = [instance.findtext('IP')],
+                    private_ip = [],
+                    driver = self.connection.driver)
+
+    def _to_images(self, object):
+        return [ self._to_image(image) for image in object.findall('Image') ]
+
+    def _to_image(self, image):
+        return NodeImage(id = image.findtext('ID'),
+                         name = image.findtext('Name'),
+                         driver = self.connection.driver,
+                         extra = {'parametersURL': image.findtext('Manifest')})
+
+    def _to_locations(self, object):
+        return [ self._to_location(location) for location in object.findall('Location') ]
+
+    def _to_location(self, location):
+        # NOTE: country currently hardcoded
+        return NodeLocation(id = location.findtext('ID'),
+                            name = location.findtext('Name'),
+                            country = 'US',
+                            driver = self.connection.driver)
diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/linode.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/linode.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a0f866b59118a22bfee6e608929d520976b16ac
--- /dev/null
+++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/linode.py
@@ -0,0 +1,618 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""libcloud driver for the Linode(R) API + +This driver implements all libcloud functionality for the Linode API. Since the +API is a bit more fine-grained, create_node abstracts a significant amount of +work (and may take a while to run). + +Linode home page http://www.linode.com/ +Linode API documentation http://www.linode.com/api/ +Alternate bindings for reference http://github.com/tjfontaine/linode-python + +Linode(R) is a registered trademark of Linode, LLC. + +""" +import itertools +import os + +from copy import copy + +try: + import json +except: + import simplejson as json + +from libcloud.common.base import ConnectionKey, Response +from libcloud.common.types import InvalidCredsError, MalformedResponseError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation +from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey +from libcloud.compute.base import NodeImage + +# Where requests go - in beta situations, this information may change. +LINODE_API = "api.linode.com" +LINODE_ROOT = "/" + +# Map of TOTALRAM to PLANID, allows us to figure out what plan +# a particular node is on (updated with new plan sizes 6/28/10) +LINODE_PLAN_IDS = {512:'1', + 768:'2', + 1024:'3', + 1536:'4', + 2048:'5', + 4096:'6', + 8192:'7', + 12288:'8', + 16384:'9', + 20480:'10'} + + +class LinodeException(Exception): + """Error originating from the Linode API + + This class wraps a Linode API error, a list of which is available in the + API documentation. 
All Linode API errors are a numeric code and a
+    human-readable description.
+    """
+    def __str__(self):
+        return "(%u) %s" % (self.args[0], self.args[1])
+    def __repr__(self):
+        return "<LinodeException code %u '%s'>" % (self.args[0], self.args[1])
+
+
+class LinodeResponse(Response):
+    """Linode API response
+
+    Wraps the HTTP response returned by the Linode API, which should be JSON in
+    this structure:
+
+        {
+            "ERRORARRAY": [ ... ],
+            "DATA": [ ... ],
+            "ACTION": " ... "
+        }
+
+    libcloud does not take advantage of batching, so a response will always
+    reflect the above format. A few weird quirks are caught here as well."""
+    def __init__(self, response):
+        """Instantiate a LinodeResponse from the HTTP response
+
+        @keyword response: The raw response returned by urllib
+        @return: parsed L{LinodeResponse}"""
+        self.body = response.read()
+        self.status = response.status
+        self.headers = dict(response.getheaders())
+        self.error = response.reason
+        self.invalid = LinodeException(0xFF,
+                                       "Invalid JSON received from server")
+
+        # Move parse_body() to here; we can't be sure of failure until we've
+        # parsed the body into JSON.
+        self.objects, self.errors = self.parse_body()
+        if not self.success():
+            # Raise the first error, as there will usually only be one
+            raise self.errors[0]
+
+    def parse_body(self):
+        """Parse the body of the response into JSON objects
+
+        If the response chokes the parser, action and data will be returned as
+        None and errorarray will indicate an invalid JSON exception.
+ + @return: C{list} of objects and C{list} of errors""" + try: + js = json.loads(self.body) + except: + raise MalformedResponseError("Failed to parse JSON", body=self.body, + driver=LinodeNodeDriver) + + try: + if isinstance(js, dict): + # solitary response - promote to list + js = [js] + ret = [] + errs = [] + for obj in js: + if ("DATA" not in obj or "ERRORARRAY" not in obj + or "ACTION" not in obj): + ret.append(None) + errs.append(self.invalid) + continue + ret.append(obj["DATA"]) + errs.extend(self._make_excp(e) for e in obj["ERRORARRAY"]) + return (ret, errs) + except: + return (None, [self.invalid]) + + def success(self): + """Check the response for success + + The way we determine success is by the presence of an error in + ERRORARRAY. If one is there, we assume the whole request failed. + + @return: C{bool} indicating a successful request""" + return len(self.errors) == 0 + + def _make_excp(self, error): + """Convert an API error to a LinodeException instance + + @keyword error: JSON object containing C{ERRORCODE} and C{ERRORMESSAGE} + @type error: dict""" + if "ERRORCODE" not in error or "ERRORMESSAGE" not in error: + return None + if error["ERRORCODE"] == 4: + return InvalidCredsError(error["ERRORMESSAGE"]) + return LinodeException(error["ERRORCODE"], error["ERRORMESSAGE"]) + + +class LinodeConnection(ConnectionKey): + """A connection to the Linode API + + Wraps SSL connections to the Linode API, automagically injecting the + parameters that the API needs for each request.""" + host = LINODE_API + responseCls = LinodeResponse + + def add_default_params(self, params): + """Add parameters that are necessary for every request + + This method adds C{api_key} and C{api_responseFormat} to the request.""" + params["api_key"] = self.key + # Be explicit about this in case the default changes. 
+ params["api_responseFormat"] = "json" + return params + + +class LinodeNodeDriver(NodeDriver): + """libcloud driver for the Linode API + + Rough mapping of which is which: + + list_nodes linode.list + reboot_node linode.reboot + destroy_node linode.delete + create_node linode.create, linode.update, + linode.disk.createfromdistribution, + linode.disk.create, linode.config.create, + linode.ip.addprivate, linode.boot + list_sizes avail.linodeplans + list_images avail.distributions + list_locations avail.datacenters + + For more information on the Linode API, be sure to read the reference: + + http://www.linode.com/api/ + """ + type = Provider.LINODE + name = "Linode" + connectionCls = LinodeConnection + _linode_plan_ids = LINODE_PLAN_IDS + + def __init__(self, key): + """Instantiate the driver with the given API key + + @keyword key: the API key to use + @type key: C{str}""" + self.datacenter = None + NodeDriver.__init__(self, key) + + # Converts Linode's state from DB to a NodeState constant. + LINODE_STATES = { + -2: NodeState.UNKNOWN, # Boot Failed + -1: NodeState.PENDING, # Being Created + 0: NodeState.PENDING, # Brand New + 1: NodeState.RUNNING, # Running + 2: NodeState.TERMINATED, # Powered Off + 3: NodeState.REBOOTING, # Shutting Down + 4: NodeState.UNKNOWN # Reserved + } + + def list_nodes(self): + """List all Linodes that the API key can access + + This call will return all Linodes that the API key in use has access to. + If a node is in this list, rebooting will work; however, creation and + destruction are a separate grant. + + @return: C{list} of L{Node} objects that the API key can access""" + params = { "api_action": "linode.list" } + data = self.connection.request(LINODE_ROOT, params=params).objects[0] + return self._to_nodes(data) + + def reboot_node(self, node): + """Reboot the given Linode + + Will issue a shutdown job followed by a boot job, using the last booted + configuration. In most cases, this will be the only configuration. 
+ + @keyword node: the Linode to reboot + @type node: L{Node}""" + params = { "api_action": "linode.reboot", "LinodeID": node.id } + self.connection.request(LINODE_ROOT, params=params) + return True + + def destroy_node(self, node): + """Destroy the given Linode + + Will remove the Linode from the account and issue a prorated credit. A + grant for removing Linodes from the account is required, otherwise this + method will fail. + + In most cases, all disk images must be removed from a Linode before the + Linode can be removed; however, this call explicitly skips those + safeguards. There is no going back from this method. + + @keyword node: the Linode to destroy + @type node: L{Node}""" + params = { "api_action": "linode.delete", "LinodeID": node.id, + "skipChecks": True } + self.connection.request(LINODE_ROOT, params=params) + return True + + def create_node(self, **kwargs): + """Create a new Linode, deploy a Linux distribution, and boot + + This call abstracts much of the functionality of provisioning a Linode + and getting it booted. A global grant to add Linodes to the account is + required, as this call will result in a billing charge. + + Note that there is a safety valve of 5 Linodes per hour, in order to + prevent a runaway script from ruining your day. + + @keyword name: the name to assign the Linode (mandatory) + @type name: C{str} + + @keyword image: which distribution to deploy on the Linode (mandatory) + @type image: L{NodeImage} + + @keyword size: the plan size to create (mandatory) + @type size: L{NodeSize} + + @keyword auth: an SSH key or root password (mandatory) + @type auth: L{NodeAuthSSHKey} or L{NodeAuthPassword} + + @keyword location: which datacenter to create the Linode in + @type location: L{NodeLocation} + + @keyword ex_swap: size of the swap partition in MB (128) + @type ex_swap: C{int} + + @keyword ex_rsize: size of the root partition in MB (plan size - swap). 
+ @type ex_rsize: C{int} + + @keyword ex_kernel: a kernel ID from avail.kernels (Latest 2.6 Stable). + @type ex_kernel: C{str} + + @keyword ex_payment: one of 1, 12, or 24; subscription length (1) + @type ex_payment: C{int} + + @keyword ex_comment: a small comment for the configuration (libcloud) + @type ex_comment: C{str} + + @keyword ex_private: whether or not to request a private IP (False) + @type ex_private: C{bool} + + @keyword lconfig: what to call the configuration (generated) + @type lconfig: C{str} + + @keyword lroot: what to call the root image (generated) + @type lroot: C{str} + + @keyword lswap: what to call the swap space (generated) + @type lswap: C{str} + + @return: a L{Node} representing the newly-created Linode + """ + name = kwargs["name"] + image = kwargs["image"] + size = kwargs["size"] + auth = kwargs["auth"] + + # Pick a location (resolves LIBCLOUD-41 in JIRA) + if "location" in kwargs: + chosen = kwargs["location"].id + elif self.datacenter: + chosen = self.datacenter + else: + raise LinodeException(0xFB, "Need to select a datacenter first") + + # Step 0: Parameter validation before we purchase + # We're especially careful here so we don't fail after purchase, rather + # than getting halfway through the process and having the API fail. 
+ + # Plan ID + plans = self.list_sizes() + if size.id not in [p.id for p in plans]: + raise LinodeException(0xFB, "Invalid plan ID -- avail.plans") + + # Payment schedule + payment = "1" if "ex_payment" not in kwargs else str(kwargs["ex_payment"]) + if payment not in ["1", "12", "24"]: + raise LinodeException(0xFB, "Invalid subscription (1, 12, 24)") + + ssh = None + root = None + # SSH key and/or root password + if isinstance(auth, NodeAuthSSHKey): + ssh = auth.pubkey + elif isinstance(auth, NodeAuthPassword): + root = auth.password + + if not ssh and not root: + raise LinodeException(0xFB, "Need SSH key or root password") + if not root is None and len(root) < 6: + raise LinodeException(0xFB, "Root password is too short") + + # Swap size + try: swap = 128 if "ex_swap" not in kwargs else int(kwargs["ex_swap"]) + except: raise LinodeException(0xFB, "Need an integer swap size") + + # Root partition size + imagesize = (size.disk - swap) if "ex_rsize" not in kwargs else \ + int(kwargs["ex_rsize"]) + if (imagesize + swap) > size.disk: + raise LinodeException(0xFB, "Total disk images are too big") + + # Distribution ID + distros = self.list_images() + if image.id not in [d.id for d in distros]: + raise LinodeException(0xFB, + "Invalid distro -- avail.distributions") + + # Kernel + if "ex_kernel" in kwargs: + kernel = kwargs["ex_kernel"] + else: + if image.extra['64bit']: + kernel = 111 if image.extra['pvops'] else 107 + else: + kernel = 110 if image.extra['pvops'] else 60 + params = { "api_action": "avail.kernels" } + kernels = self.connection.request(LINODE_ROOT, params=params).objects[0] + if kernel not in [z["KERNELID"] for z in kernels]: + raise LinodeException(0xFB, "Invalid kernel -- avail.kernels") + + # Comments + comments = "Created by Apache libcloud " if \ + "ex_comment" not in kwargs else kwargs["ex_comment"] + + # Labels + label = { + "lconfig": "[%s] Configuration Profile" % name, + "lroot": "[%s] %s Disk Image" % (name, image.name), + "lswap": "[%s] Swap 
Space" % name + } + for what in ["lconfig", "lroot", "lswap"]: + if what in kwargs: + label[what] = kwargs[what] + + # Step 1: linode.create + params = { + "api_action": "linode.create", + "DatacenterID": chosen, + "PlanID": size.id, + "PaymentTerm": payment + } + data = self.connection.request(LINODE_ROOT, params=params).objects[0] + linode = { "id": data["LinodeID"] } + + # Step 1b. linode.update to rename the Linode + params = { + "api_action": "linode.update", + "LinodeID": linode["id"], + "Label": name + } + self.connection.request(LINODE_ROOT, params=params) + + # Step 1c. linode.ip.addprivate if it was requested + if "ex_private" in kwargs and kwargs["ex_private"]: + params = { + "api_action": "linode.ip.addprivate", + "LinodeID": linode["id"] + } + self.connection.request(LINODE_ROOT, params=params) + + # Step 2: linode.disk.createfromdistribution + if not root: + root = os.urandom(8).encode('hex') + params = { + "api_action": "linode.disk.createfromdistribution", + "LinodeID": linode["id"], + "DistributionID": image.id, + "Label": label["lroot"], + "Size": imagesize, + "rootPass": root, + } + if ssh: params["rootSSHKey"] = ssh + data = self.connection.request(LINODE_ROOT, params=params).objects[0] + linode["rootimage"] = data["DiskID"] + + # Step 3: linode.disk.create for swap + params = { + "api_action": "linode.disk.create", + "LinodeID": linode["id"], + "Label": label["lswap"], + "Type": "swap", + "Size": swap + } + data = self.connection.request(LINODE_ROOT, params=params).objects[0] + linode["swapimage"] = data["DiskID"] + + # Step 4: linode.config.create for main profile + disks = "%s,%s,,,,,,," % (linode["rootimage"], linode["swapimage"]) + params = { + "api_action": "linode.config.create", + "LinodeID": linode["id"], + "KernelID": kernel, + "Label": label["lconfig"], + "Comments": comments, + "DiskList": disks + } + data = self.connection.request(LINODE_ROOT, params=params).objects[0] + linode["config"] = data["ConfigID"] + + # Step 5: linode.boot 
+        params = {
+            "api_action": "linode.boot",
+            "LinodeID": linode["id"],
+            "ConfigID": linode["config"]
+        }
+        self.connection.request(LINODE_ROOT, params=params)
+
+        # Make a node out of it and hand it back
+        params = { "api_action": "linode.list", "LinodeID": linode["id"] }
+        data = self.connection.request(LINODE_ROOT, params=params).objects[0]
+        return self._to_nodes(data)[0]
+
+    def list_sizes(self, location=None):
+        """List available Linode plans
+
+        Gets the sizes that can be used for creating a Linode. Since available
+        Linode plans vary per-location, this method can also be passed a
+        location to filter the availability.
+
+        @keyword location: the facility to retrieve plans in
+        @type location: NodeLocation
+
+        @return: a C{list} of L{NodeSize}s"""
+        params = { "api_action": "avail.linodeplans" }
+        data = self.connection.request(LINODE_ROOT, params=params).objects[0]
+        sizes = []
+        for obj in data:
+            n = NodeSize(id=obj["PLANID"], name=obj["LABEL"], ram=obj["RAM"],
+                         disk=(obj["DISK"] * 1024), bandwidth=obj["XFER"],
+                         price=obj["PRICE"], driver=self.connection.driver)
+            sizes.append(n)
+        return sizes
+
+    def list_images(self):
+        """List available Linux distributions
+
+        Retrieve all Linux distributions that can be deployed to a Linode.
+
+        @return: a C{list} of L{NodeImage}s"""
+        params = { "api_action": "avail.distributions" }
+        data = self.connection.request(LINODE_ROOT, params=params).objects[0]
+        distros = []
+        for obj in data:
+            i = NodeImage(id=obj["DISTRIBUTIONID"],
+                          name=obj["LABEL"],
+                          driver=self.connection.driver,
+                          extra={'pvops': obj['REQUIRESPVOPSKERNEL'],
+                                 '64bit': obj['IS64BIT']})
+            distros.append(i)
+        return distros
+
+    def list_locations(self):
+        """List available facilities for deployment
+
+        Retrieve all facilities that a Linode can be deployed in.
+
+        @return: a C{list} of L{NodeLocation}s"""
+        params = { "api_action": "avail.datacenters" }
+        data = self.connection.request(LINODE_ROOT, params=params).objects[0]
+        nl = []
+        for dc in data:
+            country = None
+            if "USA" in dc["LOCATION"]: country = "US"
+            elif "UK" in dc["LOCATION"]: country = "GB"
+            else: country = "??"
+            nl.append(NodeLocation(dc["DATACENTERID"],
+                                   dc["LOCATION"],
+                                   country,
+                                   self))
+        return nl
+
+    def linode_set_datacenter(self, dc):
+        """Set the default datacenter for Linode creation
+
+        Since Linodes must be created in a facility, this function sets the
+        default that L{create_node} will use. If a C{location} keyword is not
+        passed to L{create_node}, this method must have already been used.
+
+        @keyword dc: the datacenter to create Linodes in unless specified
+        @type dc: L{NodeLocation}"""
+        did = dc.id
+        params = { "api_action": "avail.datacenters" }
+        data = self.connection.request(LINODE_ROOT, params=params).objects[0]
+        for datacenter in data:
+            if did == datacenter["DATACENTERID"]:
+                self.datacenter = did
+                return
+
+        dcs = ", ".join([str(d["DATACENTERID"]) for d in data])
+        self.datacenter = None
+        raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs)
+
+    def _to_nodes(self, objs):
+        """Convert returned JSON Linodes into Node instances
+
+        @keyword objs: C{list} of JSON dictionaries representing the Linodes
+        @type objs: C{list}
+        @return: C{list} of L{Node}s"""
+
+        # Get the IP addresses for the Linodes
+        nodes = {}
+        batch = []
+        for o in objs:
+            lid = o["LINODEID"]
+            nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ip=[],
+                private_ip=[], state=self.LINODE_STATES[o["STATUS"]],
+                driver=self.connection.driver)
+            n.extra = copy(o)
+            n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM"))
+            batch.append({"api_action": "linode.ip.list", "LinodeID": lid})
+
+        # Avoid batch limitation
+        ip_answers = []
+        args = [iter(batch)] * 25
+        izip_longest = getattr(itertools, 'izip_longest', _izip_longest)
+        for twenty_five in
izip_longest(*args): + twenty_five = [q for q in twenty_five if q] + params = { "api_action": "batch", + "api_requestArray": json.dumps(twenty_five) } + req = self.connection.request(LINODE_ROOT, params=params) + if not req.success() or len(req.objects) == 0: + return None + ip_answers.extend(req.objects) + + # Add the returned IPs to the nodes and return them + for ip_list in ip_answers: + for ip in ip_list: + lid = ip["LINODEID"] + which = nodes[lid].public_ip if ip["ISPUBLIC"] == 1 else \ + nodes[lid].private_ip + which.append(ip["IPADDRESS"]) + return nodes.values() + + features = {"create_node": ["ssh_key", "password"]} + +def _izip_longest(*args, **kwds): + """Taken from Python docs + + http://docs.python.org/library/itertools.html#itertools.izip + """ + fillvalue = kwds.get('fillvalue') + def sentinel(counter = ([fillvalue]*(len(args)-1)).pop): + yield counter() # yields the fillvalue, or raises IndexError + fillers = itertools.repeat(fillvalue) + iters = [itertools.chain(it, sentinel(), fillers) for it in args] + try: + for tup in itertools.izip(*iters): + yield tup + except IndexError: + pass diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/opennebula.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/opennebula.py new file mode 100644 index 0000000000000000000000000000000000000000..f282ad3dbb33a0932a465159c61b3bef3d3ac959 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/opennebula.py @@ -0,0 +1,219 @@ +# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad +# Complutense de Madrid (dsa-research.org) +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +OpenNebula driver +""" + +from base64 import b64encode +import hashlib +from xml.etree import ElementTree as ET + +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import NodeDriver, Node, NodeLocation +from libcloud.compute.base import NodeImage, NodeSize + +API_HOST = '' +API_PORT = (4567, 443) +API_SECURE = True + + +class OpenNebulaResponse(Response): + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 + + def parse_body(self): + if not self.body: + return None + return ET.XML(self.body) + + def parse_error(self): + if int(self.status) == 401: + raise InvalidCredsError(self.body) + return self.body + + +class OpenNebulaConnection(ConnectionUserAndKey): + """ + Connection class for the OpenNebula driver + """ + + host = API_HOST + port = API_PORT + secure = API_SECURE + responseCls = OpenNebulaResponse + + def add_default_headers(self, headers): + pass_sha1 = hashlib.sha1(self.key).hexdigest() + headers['Authorization'] = ("Basic %s" % b64encode("%s:%s" % (self.user_id, pass_sha1))) + return headers + + +class OpenNebulaNodeDriver(NodeDriver): + """ + OpenNebula node driver + """ + + connectionCls = OpenNebulaConnection + type = Provider.OPENNEBULA + name = 'OpenNebula' + + 
NODE_STATE_MAP = { + 'PENDING': NodeState.PENDING, + 'ACTIVE': NodeState.RUNNING, + 'DONE': NodeState.TERMINATED, + 'STOPPED': NodeState.TERMINATED + } + + def list_sizes(self, location=None): + return [ + NodeSize(id=1, + name="small", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=2, + name="medium", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=3, + name="large", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + + def list_nodes(self): + return self._to_nodes(self.connection.request('/compute').object) + + def list_images(self, location=None): + return self._to_images(self.connection.request('/storage').object) + + def list_locations(self): + return [NodeLocation(0, 'OpenNebula', 'ONE', self)] + + def reboot_node(self, node): + compute_id = str(node.id) + + url = '/compute/%s' % compute_id + resp1 = self.connection.request(url,method='PUT',data=self._xml_action(compute_id,'STOPPED')) + + if resp1.status == 400: + return False + + resp2 = self.connection.request(url,method='PUT',data=self._xml_action(compute_id,'RESUME')) + + if resp2.status == 400: + return False + + return True + + def destroy_node(self, node): + url = '/compute/%s' % (str(node.id)) + resp = self.connection.request(url,method='DELETE') + + return resp.status == 204 + + def create_node(self, **kwargs): + """Create a new OpenNebula node + + See L{NodeDriver.create_node} for more keyword args. 
+ """ + compute = ET.Element('COMPUTE') + + name = ET.SubElement(compute, 'NAME') + name.text = kwargs['name'] + + # """ + # Other extractable (but unused) information + # """ + # instance_type = ET.SubElement(compute, 'INSTANCE_TYPE') + # instance_type.text = kwargs['size'].name + # + # storage = ET.SubElement(compute, 'STORAGE') + # disk = ET.SubElement(storage, 'DISK', {'image': str(kwargs['image'].id), + # 'dev': 'sda1'}) + + xml = ET.tostring(compute) + + node = self.connection.request('/compute',method='POST',data=xml).object + + return self._to_node(node) + + def _to_images(self, object): + images = [] + for element in object.findall("DISK"): + image_id = element.attrib["href"].partition("/storage/")[2] + image = self.connection.request(("/storage/%s" % (image_id))).object + images.append(self._to_image(image)) + + return images + + def _to_image(self, image): + return NodeImage(id=image.findtext("ID"), + name=image.findtext("NAME"), + driver=self.connection.driver) + + def _to_nodes(self, object): + computes = [] + for element in object.findall("COMPUTE"): + compute_id = element.attrib["href"].partition("/compute/")[2] + compute = self.connection.request(("/compute/%s" % (compute_id))).object + computes.append(self._to_node(compute)) + + return computes + + def _to_node(self, compute): + try: + state = self.NODE_STATE_MAP[compute.findtext("STATE")] + except KeyError: + state = NodeState.UNKNOWN + + networks = [] + for element in compute.findall("NIC"): + networks.append(element.attrib["ip"]) + + return Node(id=compute.findtext("ID"), + name=compute.findtext("NAME"), + state=state, + public_ip=networks, + private_ip=[], + driver=self.connection.driver) + + def _xml_action(self, compute_id, action): + compute = ET.Element('COMPUTE') + + compute_id = ET.SubElement(compute, 'ID') + compute_id.text = str(compute_id) + + state = ET.SubElement(compute, 'STATE') + state.text = action + + xml = ET.tostring(compute) + return xml diff --git 
a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/opsource.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/opsource.py new file mode 100644 index 0000000000000000000000000000000000000000..d71f1b4efbc666147c8fe010bb57d86748d0317d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/opsource.py @@ -0,0 +1,501 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Opsource Driver +""" +import base64 +from xml.etree import ElementTree as ET + +from libcloud.utils import fixxpath, findtext, findall +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.types import LibcloudError, InvalidCredsError, MalformedResponseError +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import NodeDriver, Node, NodeAuthPassword +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +# Roadmap / TODO: +# +# 0.1 - Basic functionality: create, delete, start, stop, reboot - servers +# (base OS images only, no customer images suported yet) +# x implement list_nodes() +# x implement create_node() (only support Base OS images, no customer images yet) +# x implement reboot() +# x implement destroy_node() +# x implement list_sizes() +# x implement list_images() (only support Base OS images, no customer images yet) +# x implement list_locations() +# x implement ex_* extension functions for opsource-specific features +# x ex_graceful_shutdown +# x ex_start_node +# x ex_power_off +# x ex_list_networks (needed for create_node()) +# x refactor: switch to using fixxpath() from the vcloud driver for dealing with xml namespace tags +# x refactor: move some functionality from OpsourceConnection.request() method into new .request_with_orgId() method +# x add OpsourceStatus object support to: +# x _to_node() +# x _to_network() +# x implement test cases +# +# 0.2 - Support customer images (snapshots) and server modification functions +# - support customer-created images: +# - list deployed customer images (in list_images() ?) +# - list pending customer images (in list_images() ?) 
+# - delete customer images +# - modify customer images +# - add "pending-servers" in list_nodes() +# - implement various ex_* extension functions for opsource-specific features +# - ex_modify_server() +# - ex_add_storage_to_server() +# - ex_snapshot_server() (create's customer image) +# +# 0.3 - support Network API +# 0.4 - Support VIP/Load-balancing API +# 0.5 - support Files Account API +# 0.6 - support Reports API +# 1.0 - Opsource 0.9 API feature complete, tested + +# setup a few variables to represent all of the opsource cloud namespaces +NAMESPACE_BASE = "http://oec.api.opsource.net/schemas" +ORGANIZATION_NS = NAMESPACE_BASE + "/organization" +SERVER_NS = NAMESPACE_BASE + "/server" +NETWORK_NS = NAMESPACE_BASE + "/network" +DIRECTORY_NS = NAMESPACE_BASE + "/directory" +RESET_NS = NAMESPACE_BASE + "/reset" +VIP_NS = NAMESPACE_BASE + "/vip" +IMAGEIMPORTEXPORT_NS = NAMESPACE_BASE + "/imageimportexport" +DATACENTER_NS = NAMESPACE_BASE + "/datacenter" +SUPPORT_NS = NAMESPACE_BASE + "/support" +GENERAL_NS = NAMESPACE_BASE + "/general" +IPPLAN_NS = NAMESPACE_BASE + "/ipplan" +WHITELABEL_NS = NAMESPACE_BASE + "/whitelabel" + + +class OpsourceResponse(Response): + + def parse_body(self): + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError("Failed to parse XML", body=self.body, driver=OpsourceNodeDriver) + return body + + def parse_error(self): + if self.status == 401: + raise InvalidCredsError(self.body) + + if self.status == 403: + raise InvalidCredsError(self.body) + + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError("Failed to parse XML", body=self.body, driver=OpsourceNodeDriver) + + if self.status == 400: + code = findtext(body, 'resultCode', SERVER_NS) + message = findtext(body, 'resultDetail', SERVER_NS) + raise OpsourceAPIException(code, message, driver=OpsourceNodeDriver) + + return self.body + +class OpsourceAPIException(LibcloudError): + def __init__(self, code, msg, driver): + self.code = code + self.msg = 
msg + self.driver = driver + + def __str__(self): + return "%s: %s" % (self.code, self.msg) + + def __repr__(self): + return "" % (self.code, self.msg) + +class OpsourceConnection(ConnectionUserAndKey): + """ + Connection class for the Opsource driver + """ + + host = 'api.opsourcecloud.net' + api_path = '/oec' + api_version = '0.9' + _orgId = None + responseCls = OpsourceResponse + + def add_default_headers(self, headers): + headers['Authorization'] = ('Basic %s' + % (base64.b64encode('%s:%s' % (self.user_id, self.key)))) + return headers + + def request(self, action, params=None, data='', headers=None, method='GET'): + action = "%s/%s/%s" % (self.api_path, self.api_version, action) + + return super(OpsourceConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers + ) + + def request_with_orgId(self, action, params=None, data='', headers=None, method='GET'): + action = "%s/%s" % (self.get_resource_path(), action) + + return super(OpsourceConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers + ) + + def get_resource_path(self): + """this method returns a resource path which is necessary for referencing + resources that require a full path instead of just an ID, such as + networks, and customer snapshots. + """ + return ("%s/%s/%s" % (self.api_path, self.api_version, self._get_orgId())) + + def _get_orgId(self): + """ + send the /myaccount API request to opsource cloud and parse the 'orgId' from the + XML response object. 
We need the orgId to use most of the other API functions + """ + if self._orgId == None: + body = self.request('myaccount').object + self._orgId = findtext(body, 'orgId', DIRECTORY_NS) + return self._orgId + +class OpsourceStatus(object): + """ + Opsource API pending operation status class + action, requestTime, username, numberOfSteps, updateTime, + step.name, step.number, step.percentComplete, failureReason, + """ + def __init__(self, action=None, requestTime=None, userName=None, + numberOfSteps=None, updateTime=None, step_name=None, + step_number=None, step_percentComplete=None, failureReason=None): + self.action = action + self.requestTime = requestTime + self.userName = userName + self.numberOfSteps = numberOfSteps + self.updateTime = updateTime + self.step_name = step_name + self.step_number = step_number + self.step_percentComplete = step_percentComplete + self.failureReason = failureReason + + def __repr__(self): + return (('') + % (self.id, self.name, self.description, self.location, + self.privateNet, self.multicast)) + + +class OpsourceNodeDriver(NodeDriver): + """ + Opsource node driver + """ + + connectionCls = OpsourceConnection + + type = Provider.OPSOURCE + name = 'Opsource' + + features = {"create_node": ["password"]} + + def list_nodes(self): + nodes = self._to_nodes(self.connection.request_with_orgId('server/deployed').object) + nodes.extend(self._to_nodes(self.connection.request_with_orgId('server/pendingDeploy').object)) + return nodes + + def list_sizes(self, location=None): + return [ NodeSize(id=1, + name="default", + ram=0, + disk=0, + bandwidth=0, + price=0, + driver=self.connection.driver) ] + + def list_images(self, location=None): + """return a list of available images + Currently only returns the default 'base OS images' provided by opsource. + Customer images (snapshots) are not yet supported. 
+ """ + return self._to_base_images(self.connection.request('base/image').object) + + def list_locations(self): + """list locations (datacenters) available for instantiating servers and + networks. + """ + return self._to_locations(self.connection.request_with_orgId('datacenter').object) + + def create_node(self, **kwargs): + """Create a new opsource node + + Standard keyword arguments from L{NodeDriver.create_node}: + @keyword name: String with a name for this new node (required) + @type name: str + + @keyword image: OS Image to boot on node. (required) + @type image: L{NodeImage} + + @keyword auth: Initial authentication information for the node (required) + @type auth: L{NodeAuthPassword} + + Non-standard keyword arguments: + @keyword ex_description: description for this node (required) + @type ex_description: C{str} + + @keyword ex_network: Network to create the node within (required) + @type ex_network: L{OpsourceNetwork} + + @keyword ex_isStarted: Start server after creation? default true (required) + @type ex_isStarted: C{bool} + + @return: The newly created L{Node}. NOTE: Opsource does not provide a way to + determine the ID of the server that was just created, so the returned + L{Node} is not guaranteed to be the same one that was created. This + is only the case when multiple nodes with the same name exist. + """ + name = kwargs['name'] + image = kwargs['image'] + + # XXX: Node sizes can be adjusted after a node is created, but cannot be + # set at create time because size is part of the image definition. 
+ password = None + if kwargs.has_key('auth'): + auth = kwargs.get('auth') + if isinstance(auth, NodeAuthPassword): + password = auth.password + else: + raise ValueError('auth must be of NodeAuthPassword type') + + ex_description = kwargs.get('ex_description', '') + ex_isStarted = kwargs.get('ex_isStarted', True) + + ex_network = kwargs.get('ex_network') + if not isinstance(ex_network, OpsourceNetwork): + raise ValueError('ex_network must be of OpsourceNetwork type') + vlanResourcePath = "%s/%s" % (self.connection.get_resource_path(), ex_network.id) + + imageResourcePath = None + if image.extra.has_key('resourcePath'): + imageResourcePath = image.extra['resourcePath'] + else: + imageResourcePath = "%s/%s" % (self.connection.get_resource_path(), image.id) + + server_elm = ET.Element('Server', {'xmlns': SERVER_NS}) + ET.SubElement(server_elm, "name").text = name + ET.SubElement(server_elm, "description").text = ex_description + ET.SubElement(server_elm, "vlanResourcePath").text = vlanResourcePath + ET.SubElement(server_elm, "imageResourcePath").text = imageResourcePath + ET.SubElement(server_elm, "administratorPassword").text = password + ET.SubElement(server_elm, "isStarted").text = str(ex_isStarted) + + self.connection.request_with_orgId('server', + method='POST', + data=ET.tostring(server_elm) + ).object + # XXX: return the last node in the list that has a matching name. 
this + # is likely but not guaranteed to be the node we just created + # because opsource allows multiple nodes to have the same name + return filter(lambda x: x.name == name, self.list_nodes())[-1] + + def reboot_node(self, node): + """reboots the node""" + body = self.connection.request_with_orgId('server/%s?restart' % node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def destroy_node(self, node): + """Destroys the node""" + body = self.connection.request_with_orgId('server/%s?delete' % node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_start_node(self, node): + """Powers on an existing deployed server""" + body = self.connection.request_with_orgId('server/%s?start' % node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_shutdown_graceful(self, node): + """This function will attempt to "gracefully" stop a server by initiating a + shutdown sequence within the guest operating system. A successful response + on this function means the system has successfully passed the + request into the operating system. + """ + body = self.connection.request_with_orgId('server/%s?shutdown' % node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_power_off(self, node): + """This function will abruptly power-off a server. Unlike ex_shutdown_graceful, + success ensures the node will stop but some OS and application configurations may + be adversely affected by the equivalent of pulling the power plug out of the + machine. + """ + body = self.connection.request_with_orgId('server/%s?poweroff' % node.id).object + result = findtext(body, 'result', GENERAL_NS) + return result == 'SUCCESS' + + def ex_list_networks(self): + """List networks deployed across all data center locations for your + organization. The response includes the location of each network. 
+ + Returns a list of OpsourceNetwork objects + """ + return self._to_networks(self.connection.request_with_orgId('networkWithLocation').object) + + def ex_get_location_by_id(self, id): + location = None + if id is not None: + location = filter(lambda x: x.id == id, self.list_locations())[0] + return location + + def _to_networks(self, object): + node_elements = findall(object, 'network', NETWORK_NS) + return [ self._to_network(el) for el in node_elements ] + + def _to_network(self, element): + multicast = False + if findtext(element, 'multicast', NETWORK_NS) == 'true': + multicast = True + + status = self._to_status(element.find(fixxpath('status', NETWORK_NS))) + + location_id = findtext(element, 'location', NETWORK_NS) + location = self.ex_get_location_by_id(location_id) + + return OpsourceNetwork(id=findtext(element, 'id', NETWORK_NS), + name=findtext(element, 'name', NETWORK_NS), + description=findtext(element, 'description', NETWORK_NS), + location=location, + privateNet=findtext(element, 'privateNet', NETWORK_NS), + multicast=multicast, + status=status) + + def _to_locations(self, object): + node_elements = object.findall(fixxpath('datacenter', DATACENTER_NS)) + return [ self._to_location(el) for el in node_elements ] + + def _to_location(self, element): + l = NodeLocation(id=findtext(element, 'location', DATACENTER_NS), + name=findtext(element, 'displayName', DATACENTER_NS), + country=findtext(element, 'country', DATACENTER_NS), + driver=self) + return l + + def _to_nodes(self, object): + node_elements = object.findall(fixxpath('DeployedServer', SERVER_NS)) + node_elements.extend(object.findall(fixxpath('PendingDeployServer', SERVER_NS))) + return [ self._to_node(el) for el in node_elements ] + + def _to_node(self, element): + if findtext(element, 'isStarted', SERVER_NS) == 'true': + state = NodeState.RUNNING + else: + state = NodeState.TERMINATED + + status = self._to_status(element.find(fixxpath('status', SERVER_NS))) + + extra = { + 'description': 
findtext(element, 'description', SERVER_NS), + 'sourceImageId': findtext(element, 'sourceImageId', SERVER_NS), + 'networkId': findtext(element, 'networkId', SERVER_NS), + 'machineName': findtext(element, 'machineName', SERVER_NS), + 'deployedTime': findtext(element, 'deployedTime', SERVER_NS), + 'cpuCount': findtext(element, 'machineSpecification/cpuCount', SERVER_NS), + 'memoryMb': findtext(element, 'machineSpecification/memoryMb', SERVER_NS), + 'osStorageGb': findtext(element, 'machineSpecification/osStorageGb', SERVER_NS), + 'additionalLocalStorageGb': findtext(element, 'machineSpecification/additionalLocalStorageGb', SERVER_NS), + 'OS_type': findtext(element, 'machineSpecification/operatingSystem/type', SERVER_NS), + 'OS_displayName': findtext(element, 'machineSpecification/operatingSystem/displayName', SERVER_NS), + 'status': status, + } + + n = Node(id=findtext(element, 'id', SERVER_NS), + name=findtext(element, 'name', SERVER_NS), + state=state, + public_ip="unknown", + private_ip=findtext(element, 'privateIpAddress', SERVER_NS), + driver=self.connection.driver, + extra=extra) + return n + + def _to_base_images(self, object): + node_elements = object.findall(fixxpath("ServerImage", SERVER_NS)) + return [ self._to_base_image(el) for el in node_elements ] + + def _to_base_image(self, element): + # Eventually we will probably need multiple _to_image() functions + # that parse differently than . 
+ # DeployedImages are customer snapshot images, and ServerImages are + # 'base' images provided by opsource + location_id = findtext(element, 'location', SERVER_NS) + location = self.ex_get_location_by_id(location_id) + + extra = { + 'description': findtext(element, 'description', SERVER_NS), + 'OS_type': findtext(element, 'operatingSystem/type', SERVER_NS), + 'OS_displayName': findtext(element, 'operatingSystem/displayName', SERVER_NS), + 'cpuCount': findtext(element, 'cpuCount', SERVER_NS), + 'resourcePath': findtext(element, 'resourcePath', SERVER_NS), + 'memory': findtext(element, 'memory', SERVER_NS), + 'osStorage': findtext(element, 'osStorage', SERVER_NS), + 'additionalStorage': findtext(element, 'additionalStorage', SERVER_NS), + 'created': findtext(element, 'created', SERVER_NS), + 'location': location, + } + + i = NodeImage(id=str(findtext(element, 'id', SERVER_NS)), + name=str(findtext(element, 'name', SERVER_NS)), + extra=extra, + driver=self.connection.driver) + return i + + def _to_status(self, element): + if element == None: + return OpsourceStatus() + s = OpsourceStatus(action=findtext(element, 'action', SERVER_NS), + requestTime=findtext(element, 'requestTime', SERVER_NS), + userName=findtext(element, 'userName', SERVER_NS), + numberOfSteps=findtext(element, 'numberOfSteps', SERVER_NS), + step_name=findtext(element, 'step/name', SERVER_NS), + step_number=findtext(element, 'step_number', SERVER_NS), + step_percentComplete=findtext(element, 'step/percentComplete', SERVER_NS), + failureReason=findtext(element, 'failureReason', SERVER_NS)) + return s diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/rackspace.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/rackspace.py new file mode 100644 index 0000000000000000000000000000000000000000..4e41cad4a87ce1c87c4c9caa5801b73667f5f602 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/rackspace.py @@ -0,0 +1,565 @@ +# 
Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Rackspace driver +""" +import os + +import base64 + +from xml.etree import ElementTree as ET +from xml.parsers.expat import ExpatError + +from libcloud.pricing import get_pricing +from libcloud.common.base import Response +from libcloud.common.types import MalformedResponseError +from libcloud.compute.types import NodeState, Provider +from libcloud.compute.base import NodeDriver, Node +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +from libcloud.common.rackspace import ( + AUTH_HOST_US, AUTH_HOST_UK, RackspaceBaseConnection) + +NAMESPACE='http://docs.rackspacecloud.com/servers/api/v1.0' + + +class RackspaceResponse(Response): + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 + + def parse_body(self): + if not self.body: + return None + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError( + "Failed to parse XML", + body=self.body, + driver=RackspaceNodeDriver) + return body + def parse_error(self): + # TODO: fixup, Rackspace only uses response codes really! 
+ try: + body = ET.XML(self.body) + except: + raise MalformedResponseError( + "Failed to parse XML", + body=self.body, driver=RackspaceNodeDriver) + try: + text = "; ".join([ err.text or '' + for err in + body.getiterator() + if err.text]) + except ExpatError: + text = self.body + return '%s %s %s' % (self.status, self.error, text) + + +class RackspaceConnection(RackspaceBaseConnection): + """ + Connection class for the Rackspace driver + """ + + responseCls = RackspaceResponse + auth_host = AUTH_HOST_US + _url_key = "server_url" + + def __init__(self, user_id, key, secure=True): + super(RackspaceConnection, self).__init__(user_id, key, secure) + self.api_version = 'v1.0' + self.accept_format = 'application/xml' + + def request(self, action, params=None, data='', headers=None, method='GET'): + if not headers: + headers = {} + if not params: + params = {} + # Due to first-run authentication request, we may not have a path + if self.server_url: + action = self.server_url + action + if method in ("POST", "PUT"): + headers = {'Content-Type': 'application/xml; charset=UTF-8'} + if method == "GET": + params['cache-busting'] = os.urandom(8).encode('hex') + return super(RackspaceConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers + ) + + +class RackspaceSharedIpGroup(object): + """ + Shared IP group info. + """ + + def __init__(self, id, name, servers=None): + self.id = str(id) + self.name = name + self.servers = servers + + +class RackspaceNodeIpAddresses(object): + """ + List of public and private IP addresses of a Node. + """ + + def __init__(self, public_addresses, private_addresses): + self.public_addresses = public_addresses + self.private_addresses = private_addresses + + +class RackspaceNodeDriver(NodeDriver): + """ + Rackspace node driver. + + Extra node attributes: + - password: root password, available after create. 
+ - hostId: represents the host your cloud server runs on + - imageId: id of image + - flavorId: id of flavor + """ + connectionCls = RackspaceConnection + type = Provider.RACKSPACE + api_name = 'rackspace' + name = 'Rackspace' + + _rackspace_prices = get_pricing(driver_type='compute', + driver_name='rackspace') + + features = {"create_node": ["generates_password"]} + + NODE_STATE_MAP = { 'BUILD': NodeState.PENDING, + 'REBUILD': NodeState.PENDING, + 'ACTIVE': NodeState.RUNNING, + 'SUSPENDED': NodeState.TERMINATED, + 'QUEUE_RESIZE': NodeState.PENDING, + 'PREP_RESIZE': NodeState.PENDING, + 'VERIFY_RESIZE': NodeState.RUNNING, + 'PASSWORD': NodeState.PENDING, + 'RESCUE': NodeState.PENDING, + 'REBUILD': NodeState.PENDING, + 'REBOOT': NodeState.REBOOTING, + 'HARD_REBOOT': NodeState.REBOOTING, + 'SHARE_IP': NodeState.PENDING, + 'SHARE_IP_NO_CONFIG': NodeState.PENDING, + 'DELETE_IP': NodeState.PENDING, + 'UNKNOWN': NodeState.UNKNOWN} + + def list_nodes(self): + return self._to_nodes(self.connection.request('/servers/detail').object) + + def list_sizes(self, location=None): + return self._to_sizes(self.connection.request('/flavors/detail').object) + + def list_images(self, location=None): + return self._to_images(self.connection.request('/images/detail').object) + + def list_locations(self): + """Lists available locations + + Locations cannot be set or retrieved via the API, but currently + there are two locations, DFW and ORD. 
+ """ + return [NodeLocation(0, "Rackspace DFW1/ORD1", 'US', self)] + + def _change_password_or_name(self, node, name=None, password=None): + uri = '/servers/%s' % (node.id) + + if not name: + name = node.name + + body = { 'xmlns': NAMESPACE, + 'name': name} + + if password != None: + body['adminPass'] = password + + server_elm = ET.Element('server', body) + + resp = self.connection.request( + uri, method='PUT', data=ET.tostring(server_elm)) + + if resp.status == 204 and password != None: + node.extra['password'] = password + + return resp.status == 204 + + def ex_set_password(self, node, password): + """ + Sets the Node's root password. + + This will reboot the instance to complete the operation. + + L{node.extra['password']} will be set to the new value if the + operation was successful. + """ + return self._change_password_or_name(node, password=password) + + def ex_set_server_name(self, node, name): + """ + Sets the Node's name. + + This will reboot the instance to complete the operation. + """ + return self._change_password_or_name(node, name=name) + + def create_node(self, **kwargs): + """Create a new rackspace node + + See L{NodeDriver.create_node} for more keyword args. 
+ @keyword ex_metadata: Key/Value metadata to associate with a node + @type ex_metadata: C{dict} + + @keyword ex_files: File Path => File contents to create on the node + @type ex_files: C{dict} + """ + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + server_elm = ET.Element( + 'server', + {'xmlns': NAMESPACE, + 'name': name, + 'imageId': str(image.id), + 'flavorId': str(size.id)} + ) + + metadata_elm = self._metadata_to_xml(kwargs.get("ex_metadata", {})) + if metadata_elm: + server_elm.append(metadata_elm) + + files_elm = self._files_to_xml(kwargs.get("ex_files", {})) + if files_elm: + server_elm.append(files_elm) + + shared_ip_elm = self._shared_ip_group_to_xml( + kwargs.get("ex_shared_ip_group", None)) + if shared_ip_elm: + server_elm.append(shared_ip_elm) + + resp = self.connection.request("/servers", + method='POST', + data=ET.tostring(server_elm)) + return self._to_node(resp.object) + + def ex_rebuild(self, node_id, image_id): + elm = ET.Element( + 'rebuild', + {'xmlns': NAMESPACE, + 'imageId': image_id, + } + ) + resp = self.connection.request("/servers/%s/action" % node_id, + method='POST', + data=ET.tostring(elm)) + return resp.status == 202 + + def ex_create_ip_group(self, group_name, node_id=None): + group_elm = ET.Element( + 'sharedIpGroup', + {'xmlns': NAMESPACE, + 'name': group_name, + } + ) + if node_id: + ET.SubElement(group_elm, + 'server', + {'id': node_id} + ) + + resp = self.connection.request('/shared_ip_groups', + method='POST', + data=ET.tostring(group_elm)) + return self._to_shared_ip_group(resp.object) + + def ex_list_ip_groups(self, details=False): + uri = '/shared_ip_groups/detail' if details else '/shared_ip_groups' + resp = self.connection.request(uri, + method='GET') + groups = self._findall(resp.object, 'sharedIpGroup') + return [self._to_shared_ip_group(el) for el in groups] + + def ex_delete_ip_group(self, group_id): + uri = '/shared_ip_groups/%s' % group_id + resp = self.connection.request(uri, 
method='DELETE') + return resp.status == 204 + + def ex_share_ip(self, group_id, node_id, ip, configure_node=True): + if configure_node: + str_configure = 'true' + else: + str_configure = 'false' + + elm = ET.Element( + 'shareIp', + {'xmlns': NAMESPACE, + 'sharedIpGroupId' : group_id, + 'configureServer' : str_configure} + ) + + uri = '/servers/%s/ips/public/%s' % (node_id, ip) + + resp = self.connection.request(uri, + method='PUT', + data=ET.tostring(elm)) + return resp.status == 202 + + def ex_unshare_ip(self, node_id, ip): + uri = '/servers/%s/ips/public/%s' % (node_id, ip) + + resp = self.connection.request(uri, + method='DELETE') + return resp.status == 202 + + def ex_list_ip_addresses(self, node_id): + uri = '/servers/%s/ips' % node_id + resp = self.connection.request(uri, + method='GET') + return self._to_ip_addresses(resp.object) + + def _metadata_to_xml(self, metadata): + if len(metadata) == 0: + return None + + metadata_elm = ET.Element('metadata') + for k, v in metadata.items(): + meta_elm = ET.SubElement(metadata_elm, 'meta', {'key': str(k) }) + meta_elm.text = str(v) + + return metadata_elm + + def _files_to_xml(self, files): + if len(files) == 0: + return None + + personality_elm = ET.Element('personality') + for k, v in files.items(): + file_elm = ET.SubElement(personality_elm, + 'file', + {'path': str(k)}) + file_elm.text = base64.b64encode(v) + + return personality_elm + + def _reboot_node(self, node, reboot_type='SOFT'): + resp = self._node_action(node, ['reboot', ('type', reboot_type)]) + return resp.status == 202 + + def ex_soft_reboot_node(self, node): + return self._reboot_node(node, reboot_type='SOFT') + + def ex_hard_reboot_node(self, node): + return self._reboot_node(node, reboot_type='HARD') + + def reboot_node(self, node): + return self._reboot_node(node, reboot_type='HARD') + + def destroy_node(self, node): + uri = '/servers/%s' % (node.id) + resp = self.connection.request(uri, method='DELETE') + return resp.status == 202 + + def 
ex_get_node_details(self, node_id): + uri = '/servers/%s' % (node_id) + resp = self.connection.request(uri, method='GET') + if resp.status == 404: + return None + return self._to_node(resp.object) + + def _node_action(self, node, body): + if isinstance(body, list): + attr = ' '.join(['%s="%s"' % (item[0], item[1]) + for item in body[1:]]) + body = '<%s xmlns="%s" %s/>' % (body[0], NAMESPACE, attr) + uri = '/servers/%s/action' % (node.id) + resp = self.connection.request(uri, method='POST', data=body) + return resp + + def _to_nodes(self, object): + node_elements = self._findall(object, 'server') + return [ self._to_node(el) for el in node_elements ] + + def _fixxpath(self, xpath): + # ElementTree wants namespaces in its xpaths, so here we add them. + return "/".join(["{%s}%s" % (NAMESPACE, e) for e in xpath.split("/")]) + + def _findall(self, element, xpath): + return element.findall(self._fixxpath(xpath)) + + def _to_node(self, el): + def get_ips(el): + return [ip.get('addr') for ip in el] + + def get_meta_dict(el): + d = {} + for meta in el: + d[meta.get('key')] = meta.text + return d + + public_ip = get_ips(self._findall(el, + 'addresses/public/ip')) + private_ip = get_ips(self._findall(el, + 'addresses/private/ip')) + metadata = get_meta_dict(self._findall(el, 'metadata/meta')) + + n = Node(id=el.get('id'), + name=el.get('name'), + state=self.NODE_STATE_MAP.get( + el.get('status'), NodeState.UNKNOWN), + public_ip=public_ip, + private_ip=private_ip, + driver=self.connection.driver, + extra={ + 'password': el.get('adminPass'), + 'hostId': el.get('hostId'), + 'imageId': el.get('imageId'), + 'flavorId': el.get('flavorId'), + 'uri': "https://%s%s/servers/%s" % ( + self.connection.host, + self.connection.request_path, el.get('id')), + 'metadata': metadata, + }) + return n + + def _to_sizes(self, object): + elements = self._findall(object, 'flavor') + return [ self._to_size(el) for el in elements ] + + def _to_size(self, el): + s = NodeSize(id=el.get('id'), + 
name=el.get('name'), + ram=int(el.get('ram')), + disk=int(el.get('disk')), + bandwidth=None, # XXX: needs hardcode + price=self._get_size_price(el.get('id')), # Hardcoded, + driver=self.connection.driver) + return s + + def _to_images(self, object): + elements = self._findall(object, "image") + return [ self._to_image(el) + for el in elements + if el.get('status') == 'ACTIVE' ] + + def _to_image(self, el): + i = NodeImage(id=el.get('id'), + name=el.get('name'), + driver=self.connection.driver, + extra={'serverId': el.get('serverId')}) + return i + + def ex_limits(self): + """ + Extra call to get account's limits, such as + rates (for example amount of POST requests per day) + and absolute limits like total amount of available + RAM to be used by servers. + + @return: C{dict} with keys 'rate' and 'absolute' + """ + + def _to_rate(el): + rate = {} + for item in el.items(): + rate[item[0]] = item[1] + + return rate + + def _to_absolute(el): + return {el.get('name'): el.get('value')} + + limits = self.connection.request("/limits").object + rate = [ _to_rate(el) for el in self._findall(limits, 'rate/limit') ] + absolute = {} + for item in self._findall(limits, 'absolute/limit'): + absolute.update(_to_absolute(item)) + + return {"rate": rate, "absolute": absolute} + + def ex_save_image(self, node, name): + """Create an image for node. 
+ + @keyword node: node to use as a base for image + @param node: L{Node} + @keyword name: name for new image + @param name: C{string} + """ + + image_elm = ET.Element( + 'image', + {'xmlns': NAMESPACE, + 'name': name, + 'serverId': node.id} + ) + + return self._to_image(self.connection.request("/images", + method="POST", + data=ET.tostring(image_elm)).object) + + def _to_shared_ip_group(self, el): + servers_el = self._findall(el, 'servers') + if servers_el: + servers = [s.get('id') + for s in self._findall(servers_el[0], 'server')] + else: + servers = None + return RackspaceSharedIpGroup(id=el.get('id'), + name=el.get('name'), + servers=servers) + + def _to_ip_addresses(self, el): + return RackspaceNodeIpAddresses( + [ip.get('addr') for ip in + self._findall(self._findall(el, 'public')[0], 'ip')], + [ip.get('addr') for ip in + self._findall(self._findall(el, 'private')[0], 'ip')] + ) + + def _shared_ip_group_to_xml(self, shared_ip_group): + if not shared_ip_group: + return None + + return ET.Element('sharedIpGroupId', shared_ip_group) + +class RackspaceUKConnection(RackspaceConnection): + """ + Connection class for the Rackspace UK driver + """ + auth_host = AUTH_HOST_UK + +class RackspaceUKNodeDriver(RackspaceNodeDriver): + """Driver for Rackspace in the UK (London) + """ + + name = 'Rackspace (UK)' + connectionCls = RackspaceUKConnection + + def list_locations(self): + return [NodeLocation(0, 'Rackspace UK London', 'UK', self)] + +class OpenStackConnection(RackspaceConnection): + + def __init__(self, user_id, key, secure, host, port): + super(OpenStackConnection, self).__init__(user_id, key, secure=secure) + self.auth_host = host + self.port = (port, port) + +class OpenStackNodeDriver(RackspaceNodeDriver): + name = 'OpenStack' + connectionCls = OpenStackConnection diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/rimuhosting.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/rimuhosting.py new file mode 100644 
index 0000000000000000000000000000000000000000..d0df077ec17ab199404d59f8e624d2926997a9b7 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/rimuhosting.py @@ -0,0 +1,313 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +RimuHosting Driver +""" +try: + import json +except: + import simplejson as json + +from libcloud.common.base import ConnectionKey, Response +from libcloud.common.types import InvalidCredsError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation +from libcloud.compute.base import NodeImage, NodeAuthPassword + +API_CONTEXT = '/r' +API_HOST = 'rimuhosting.com' +API_PORT = (80,443) +API_SECURE = True + +class RimuHostingException(Exception): + """ + Exception class for RimuHosting driver + """ + + def __str__(self): + return self.args[0] + + def __repr__(self): + return "" % (self.args[0]) + +class RimuHostingResponse(Response): + def __init__(self, response): + self.body = response.read() + self.status = response.status + self.headers = dict(response.getheaders()) + self.error = response.reason + + if self.success(): + self.object = self.parse_body() + + def success(self): + if self.status == 403: + 
raise InvalidCredsError() + return True + def parse_body(self): + try: + js = json.loads(self.body) + if js[js.keys()[0]]['response_type'] == "ERROR": + raise RimuHostingException( + js[js.keys()[0]]['human_readable_message'] + ) + return js[js.keys()[0]] + except ValueError: + raise RimuHostingException('Could not parse body: %s' + % (self.body)) + except KeyError: + raise RimuHostingException('Could not parse body: %s' + % (self.body)) + +class RimuHostingConnection(ConnectionKey): + """ + Connection class for the RimuHosting driver + """ + + api_context = API_CONTEXT + host = API_HOST + port = API_PORT + responseCls = RimuHostingResponse + + def __init__(self, key, secure=True): + # override __init__ so that we can set secure of False for testing + ConnectionKey.__init__(self,key,secure) + + def add_default_headers(self, headers): + # We want JSON back from the server. Could be application/xml + # (but JSON is better). + headers['Accept'] = 'application/json' + # Must encode all data as json, or override this header. 
+ headers['Content-Type'] = 'application/json' + + headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key) + return headers; + + def request(self, action, params=None, data='', headers=None, method='GET'): + if not headers: + headers = {} + if not params: + params = {} + # Override this method to prepend the api_context + return ConnectionKey.request(self, self.api_context + action, + params, data, headers, method) + +class RimuHostingNodeDriver(NodeDriver): + """ + RimuHosting node driver + """ + + type = Provider.RIMUHOSTING + name = 'RimuHosting' + connectionCls = RimuHostingConnection + + def __init__(self, key, host=API_HOST, port=API_PORT, + api_context=API_CONTEXT, secure=API_SECURE): + # Pass in some extra vars so that + self.key = key + self.secure = secure + self.connection = self.connectionCls(key ,secure) + self.connection.host = host + self.connection.api_context = api_context + self.connection.port = port + self.connection.driver = self + self.connection.connect() + + def _order_uri(self, node,resource): + # Returns the order uri with its resourse appended. + return "/orders/%s/%s" % (node.id,resource) + + # TODO: Get the node state. 
+ def _to_node(self, order): + n = Node(id=order['slug'], + name=order['domain_name'], + state=NodeState.RUNNING, + public_ip=( + [order['allocated_ips']['primary_ip']] + + order['allocated_ips']['secondary_ips'] + ), + private_ip=[], + driver=self.connection.driver, + extra={'order_oid': order['order_oid'], + 'monthly_recurring_fee': order.get('billing_info').get('monthly_recurring_fee')}) + return n + + def _to_size(self,plan): + return NodeSize( + id=plan['pricing_plan_code'], + name=plan['pricing_plan_description'], + ram=plan['minimum_memory_mb'], + disk=plan['minimum_disk_gb'], + bandwidth=plan['minimum_data_transfer_allowance_gb'], + price=plan['monthly_recurring_amt']['amt_usd'], + driver=self.connection.driver + ) + + def _to_image(self,image): + return NodeImage(id=image['distro_code'], + name=image['distro_description'], + driver=self.connection.driver) + + def list_sizes(self, location=None): + # Returns a list of sizes (aka plans) + # Get plans. Note this is really just for libcloud. + # We are happy with any size. + if location == None: + location = ''; + else: + location = ";dc_location=%s" % (location.id) + + res = self.connection.request('/pricing-plans;server-type=VPS%s' % (location)).object + return map(lambda x : self._to_size(x), res['pricing_plan_infos']) + + def list_nodes(self): + # Returns a list of Nodes + # Will only include active ones. + res = self.connection.request('/orders;include_inactive=N').object + return map(lambda x : self._to_node(x), res['about_orders']) + + def list_images(self, location=None): + # Get all base images. + # TODO: add other image sources. (Such as a backup of a VPS) + # All Images are available for use at all locations + res = self.connection.request('/distributions').object + return map(lambda x : self._to_image(x), res['distro_infos']) + + def reboot_node(self, node): + # Reboot + # PUT the state of RESTARTING to restart a VPS. 
+ # All data is encoded as JSON + data = {'reboot_request':{'running_state':'RESTARTING'}} + uri = self._order_uri(node,'vps/running-state') + self.connection.request(uri,data=json.dumps(data),method='PUT') + # XXX check that the response was actually successful + return True + + def destroy_node(self, node): + # Shutdown a VPS. + uri = self._order_uri(node,'vps') + self.connection.request(uri,method='DELETE') + # XXX check that the response was actually successful + return True + + def create_node(self, **kwargs): + """Creates a RimuHosting instance + + See L{NodeDriver.create_node} for more keyword args. + + @keyword name: Must be a FQDN. e.g example.com. + @type name: C{string} + + @keyword ex_billing_oid: If not set, a billing method is automatically picked. + @type ex_billing_oid: C{string} + + @keyword ex_host_server_oid: The host server to set the VPS up on. + @type ex_host_server_oid: C{string} + + @keyword ex_vps_order_oid_to_clone: Clone another VPS to use as the image for the new VPS. + @type ex_vps_order_oid_to_clone: C{string} + + @keyword ex_num_ips: Number of IPs to allocate. Defaults to 1. + @type ex_num_ips: C{int} + + @keyword ex_extra_ip_reason: Reason for needing the extra IPs. + @type ex_extra_ip_reason: C{string} + + @keyword ex_memory_mb: Memory to allocate to the VPS. + @type ex_memory_mb: C{int} + + @keyword ex_disk_space_mb: Diskspace to allocate to the VPS. Defaults to 4096 (4GB). + @type ex_disk_space_mb: C{int} + + @keyword ex_disk_space_2_mb: Secondary disk size allocation. Disabled by default. + @type ex_disk_space_2_mb: C{int} + + @keyword ex_control_panel: Control panel to install on the VPS. + @type ex_control_panel: C{string} + """ + # Note we don't do much error checking in this because we + # expect the API to error out if there is a problem. 
+ name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + data = { + 'instantiation_options':{ + 'domain_name': name, 'distro': image.id + }, + 'pricing_plan_code': size.id, + } + + if kwargs.has_key('ex_control_panel'): + data['instantiation_options']['control_panel'] = kwargs['ex_control_panel'] + + if kwargs.has_key('auth'): + auth = kwargs['auth'] + if not isinstance(auth, NodeAuthPassword): + raise ValueError('auth must be of NodeAuthPassword type') + data['instantiation_options']['password'] = auth.password + + if kwargs.has_key('ex_billing_oid'): + #TODO check for valid oid. + data['billing_oid'] = kwargs['ex_billing_oid'] + + if kwargs.has_key('ex_host_server_oid'): + data['host_server_oid'] = kwargs['ex_host_server_oid'] + + if kwargs.has_key('ex_vps_order_oid_to_clone'): + data['vps_order_oid_to_clone'] = kwargs['ex_vps_order_oid_to_clone'] + + if kwargs.has_key('ex_num_ips') and int(kwargs['ex_num_ips']) > 1: + if not kwargs.has_key('ex_extra_ip_reason'): + raise RimuHostingException('Need an reason for having an extra IP') + else: + if not data.has_key('ip_request'): + data['ip_request'] = {} + data['ip_request']['num_ips'] = int(kwargs['ex_num_ips']) + data['ip_request']['extra_ip_reason'] = kwargs['ex_extra_ip_reason'] + + if kwargs.has_key('ex_memory_mb'): + if not data.has_key('vps_parameters'): + data['vps_parameters'] = {} + data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb'] + + if kwargs.has_key('ex_disk_space_mb'): + if not data.has_key('ex_vps_parameters'): + data['vps_parameters'] = {} + data['vps_parameters']['disk_space_mb'] = kwargs['ex_disk_space_mb'] + + if kwargs.has_key('ex_disk_space_2_mb'): + if not data.has_key('vps_parameters'): + data['vps_parameters'] = {} + data['vps_parameters']['disk_space_2_mb'] = kwargs['ex_disk_space_2_mb'] + + res = self.connection.request( + '/orders/new-vps', + method='POST', + data=json.dumps({"new-vps":data}) + ).object + node = self._to_node(res['about_order']) + 
node.extra['password'] = res['new_order_request']['instantiation_options']['password'] + return node + + def list_locations(self): + return [ + NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self), + NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self), + NodeLocation('DCLONDON', "RimuHosting London", 'GB', self), + NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self), + ] + + features = {"create_node": ["password"]} diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/slicehost.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/slicehost.py new file mode 100644 index 0000000000000000000000000000000000000000..49c15ffb825953ed76b6092ac084985832fa1b72 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/slicehost.py @@ -0,0 +1,255 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Slicehost Driver +""" +import base64 +import socket + +from xml.etree import ElementTree as ET +from xml.parsers.expat import ExpatError + +from libcloud.common.base import ConnectionKey, Response +from libcloud.compute.types import ( + NodeState, Provider, InvalidCredsError, MalformedResponseError) +from libcloud.compute.base import NodeSize, NodeDriver, NodeImage, NodeLocation +from libcloud.compute.base import Node, is_private_subnet + +class SlicehostResponse(Response): + + def parse_body(self): + # length of 1 can't be valid XML, but on destroy node, + # slicehost returns a 1 byte response with a "Content-Type: + # application/xml" header. booya. + if not self.body or len(self.body) <= 1: + return None + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError( + "Failed to parse XML", + body=self.body, + driver=SlicehostNodeDriver) + return body + + def parse_error(self): + if self.status == 401: + raise InvalidCredsError(self.body) + + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError( + "Failed to parse XML", + body=self.body, + driver=SlicehostNodeDriver) + try: + return "; ".join([ err.text + for err in + body.findall('error') ]) + except ExpatError: + return self.body + + +class SlicehostConnection(ConnectionKey): + """ + Connection class for the Slicehost driver + """ + + host = 'api.slicehost.com' + responseCls = SlicehostResponse + + def add_default_headers(self, headers): + headers['Authorization'] = ('Basic %s' + % (base64.b64encode('%s:' % self.key))) + return headers + + +class SlicehostNodeDriver(NodeDriver): + """ + Slicehost node driver + """ + + connectionCls = SlicehostConnection + + type = Provider.SLICEHOST + name = 'Slicehost' + + features = {"create_node": ["generates_password"]} + + NODE_STATE_MAP = { 'active': NodeState.RUNNING, + 'build': NodeState.PENDING, + 'reboot': NodeState.REBOOTING, + 'hard_reboot': NodeState.REBOOTING, + 'terminated': NodeState.TERMINATED } + + def list_nodes(self): 
+ return self._to_nodes(self.connection.request('/slices.xml').object) + + def list_sizes(self, location=None): + return self._to_sizes(self.connection.request('/flavors.xml').object) + + def list_images(self, location=None): + return self._to_images(self.connection.request('/images.xml').object) + + def list_locations(self): + return [ + NodeLocation(0, 'Slicehost St. Louis (STL-A)', 'US', self), + NodeLocation(0, 'Slicehost St. Louis (STL-B)', 'US', self), + NodeLocation(0, 'Slicehost Dallas-Fort Worth (DFW-1)', 'US', self) + ] + + def create_node(self, **kwargs): + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + uri = '/slices.xml' + + # create a slice obj + root = ET.Element('slice') + el_name = ET.SubElement(root, 'name') + el_name.text = name + flavor_id = ET.SubElement(root, 'flavor-id') + flavor_id.text = str(size.id) + image_id = ET.SubElement(root, 'image-id') + image_id.text = str(image.id) + xml = ET.tostring(root) + + node = self._to_nodes( + self.connection.request( + uri, + method='POST', + data=xml, + headers={'Content-Type': 'application/xml'} + ).object + )[0] + return node + + def reboot_node(self, node): + """Reboot the node by passing in the node object""" + + # 'hard' could bubble up as kwarg depending on how reboot_node + # turns out. Defaulting to soft reboot. 
+ #hard = False + #reboot = self.api.hard_reboot if hard else self.api.reboot + #expected_status = 'hard_reboot' if hard else 'reboot' + + uri = '/slices/%s/reboot.xml' % (node.id) + node = self._to_nodes( + self.connection.request(uri, method='PUT').object + )[0] + return node.state == NodeState.REBOOTING + + def destroy_node(self, node): + """Destroys the node + + Requires 'Allow Slices to be deleted or rebuilt from the API' to be + ticked at https://manage.slicehost.com/api, otherwise returns:: + + You must enable slice deletes in the SliceManager + Permission denied + + """ + uri = '/slices/%s/destroy.xml' % (node.id) + self.connection.request(uri, method='PUT') + return True + + def _to_nodes(self, object): + if object.tag == 'slice': + return [ self._to_node(object) ] + node_elements = object.findall('slice') + return [ self._to_node(el) for el in node_elements ] + + def _to_node(self, element): + + attrs = [ 'name', 'image-id', 'progress', 'id', 'bw-out', 'bw-in', + 'flavor-id', 'status', 'ip-address', 'root-password' ] + + node_attrs = {} + for attr in attrs: + node_attrs[attr] = element.findtext(attr) + + # slicehost does not determine between public and private, so we + # have to figure it out + public_ip = [] + private_ip = [] + + ip_address = element.findtext('ip-address') + if is_private_subnet(ip_address): + private_ip.append(ip_address) + else: + public_ip.append(ip_address) + + for addr in element.findall('addresses/address'): + ip = addr.text + try: + socket.inet_aton(ip) + except socket.error: + # not a valid ip + continue + if is_private_subnet(ip): + private_ip.append(ip) + else: + public_ip.append(ip) + + public_ip = list(set(public_ip)) + + try: + state = self.NODE_STATE_MAP[element.findtext('status')] + except: + state = NodeState.UNKNOWN + + # for consistency with other drivers, we put this in two places. 
+ node_attrs['password'] = node_attrs['root-password'] + extra = {} + for k in node_attrs.keys(): + ek = k.replace("-", "_") + extra[ek] = node_attrs[k] + n = Node(id=element.findtext('id'), + name=element.findtext('name'), + state=state, + public_ip=public_ip, + private_ip=private_ip, + driver=self.connection.driver, + extra=extra) + return n + + def _to_sizes(self, object): + if object.tag == 'flavor': + return [ self._to_size(object) ] + elements = object.findall('flavor') + return [ self._to_size(el) for el in elements ] + + def _to_size(self, element): + s = NodeSize(id=int(element.findtext('id')), + name=str(element.findtext('name')), + ram=int(element.findtext('ram')), + disk=None, # XXX: needs hardcode + bandwidth=None, # XXX: needs hardcode + price=float(element.findtext('price'))/(100*24*30), + driver=self.connection.driver) + return s + + def _to_images(self, object): + if object.tag == 'image': + return [ self._to_image(object) ] + elements = object.findall('image') + return [ self._to_image(el) for el in elements ] + + def _to_image(self, element): + i = NodeImage(id=int(element.findtext('id')), + name=str(element.findtext('name')), + driver=self.connection.driver) + return i diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/softlayer.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/softlayer.py new file mode 100644 index 0000000000000000000000000000000000000000..ac22c16ef46805215c576944e9f4bc41594f3af8 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/softlayer.py @@ -0,0 +1,442 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Softlayer driver +""" + +import time +import xmlrpclib + +import libcloud + +from libcloud.common.types import InvalidCredsError, LibcloudError +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, NodeImage + +DATACENTERS = { + 'sea01': {'country': 'US'}, + 'wdc01': {'country': 'US'}, + 'dal01': {'country': 'US'} +} + +NODE_STATE_MAP = { + 'RUNNING': NodeState.RUNNING, + 'HALTED': NodeState.TERMINATED, + 'PAUSED': NodeState.TERMINATED, +} + +DEFAULT_PACKAGE = 46 + +SL_IMAGES = [ + {'id': 1684, 'name': 'CentOS 5 - Minimal Install (32 bit)'}, + {'id': 1685, 'name': 'CentOS 5 - Minimal Install (64 bit)'}, + {'id': 1686, 'name': 'CentOS 5 - LAMP Install (32 bit)'}, + {'id': 1687, 'name': 'CentOS 5 - LAMP Install (64 bit)'}, + {'id': 1688, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (32 bit)'}, + {'id': 1689, 'name': 'Red Hat Enterprise Linux 5 - Minimal Install (64 bit)'}, + {'id': 1690, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (32 bit)'}, + {'id': 1691, 'name': 'Red Hat Enterprise Linux 5 - LAMP Install (64 bit)'}, + {'id': 1692, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (32 bit)'}, + {'id': 1693, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - Minimal Install (64 bit)'}, + {'id': 1694, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (32 bit)'}, + {'id': 1695, 'name': 'Ubuntu Linux 8 LTS Hardy Heron - LAMP Install (64 bit)'}, + {'id': 1696, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - Minimal Install (32 bit)'}, + {'id': 1697, 'name': 'Debian 
GNU/Linux 5.0 Lenny/Stable - Minimal Install (64 bit)'}, + {'id': 1698, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (32 bit)'}, + {'id': 1699, 'name': 'Debian GNU/Linux 5.0 Lenny/Stable - LAMP Install (64 bit)'}, + {'id': 1700, 'name': 'Windows Server 2003 Standard SP2 with R2 (32 bit)'}, + {'id': 1701, 'name': 'Windows Server 2003 Standard SP2 with R2 (64 bit)'}, + {'id': 1703, 'name': 'Windows Server 2003 Enterprise SP2 with R2 (64 bit)'}, + {'id': 1705, 'name': 'Windows Server 2008 Standard Edition (64bit)'}, + {'id': 1715, 'name': 'Windows Server 2003 Datacenter SP2 (64 bit)'}, + {'id': 1716, 'name': 'Windows Server 2003 Datacenter SP2 (32 bit)'}, + {'id': 1742, 'name': 'Windows Server 2008 Standard Edition SP2 (32bit)'}, + {'id': 1752, 'name': 'Windows Server 2008 Standard Edition SP2 (64bit)'}, + {'id': 1756, 'name': 'Windows Server 2008 Enterprise Edition SP2 (32bit)'}, + {'id': 1761, 'name': 'Windows Server 2008 Enterprise Edition SP2 (64bit)'}, + {'id': 1766, 'name': 'Windows Server 2008 Datacenter Edition SP2 (32bit)'}, + {'id': 1770, 'name': 'Windows Server 2008 Datacenter Edition SP2 (64bit)'}, + {'id': 1857, 'name': 'Windows Server 2008 R2 Standard Edition (64bit)'}, + {'id': 1860, 'name': 'Windows Server 2008 R2 Enterprise Edition (64bit)'}, + {'id': 1863, 'name': 'Windows Server 2008 R2 Datacenter Edition (64bit)'}, +] + +""" +The following code snippet will print out all available "prices" + mask = { 'items': '' } + res = self.connection.request( + "SoftLayer_Product_Package", + "getObject", + res, + id=46, + object_mask=mask + ) + + from pprint import pprint; pprint(res) +""" +SL_TEMPLATES = { + 'sl1': { + 'imagedata': { + 'name': '2 x 2.0 GHz, 1GB ram, 100GB', + 'ram': 1024, + 'disk': 100, + 'bandwidth': None + }, + 'prices': [ + {'id': 1644}, # 1 GB + {'id': 1639}, # 100 GB (SAN) + {'id': 1963}, # Private 2 x 2.0 GHz Cores + {'id': 21}, # 1 IP Address + {'id': 55}, # Host Ping + {'id': 58}, # Automated Notification + {'id': 1800}, # 
0 GB Bandwidth + {'id': 57}, # Email and Ticket + {'id': 274}, # 1000 Mbps Public & Private Networks + {'id': 905}, # Reboot / Remote Console + {'id': 418}, # Nessus Vulnerability Assessment & Reporting + {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account + ], + }, + 'sl2': { + 'imagedata': { + 'name': '2 x 2.0 GHz, 4GB ram, 350GB', + 'ram': 4096, + 'disk': 350, + 'bandwidth': None + }, + 'prices': [ + {'id': 1646}, # 4 GB + {'id': 1639}, # 100 GB (SAN) - This is the only available "First Disk" + {'id': 1638}, # 250 GB (SAN) + {'id': 1963}, # Private 2 x 2.0 GHz Cores + {'id': 21}, # 1 IP Address + {'id': 55}, # Host Ping + {'id': 58}, # Automated Notification + {'id': 1800}, # 0 GB Bandwidth + {'id': 57}, # Email and Ticket + {'id': 274}, # 1000 Mbps Public & Private Networks + {'id': 905}, # Reboot / Remote Console + {'id': 418}, # Nessus Vulnerability Assessment & Reporting + {'id': 420}, # Unlimited SSL VPN Users & 1 PPTP VPN User per account + ], + } +} + +class SoftLayerException(LibcloudError): + """ + Exception class for SoftLayer driver + """ + pass + +class SoftLayerSafeTransport(xmlrpclib.SafeTransport): + pass + +class SoftLayerTransport(xmlrpclib.Transport): + pass + +class SoftLayerProxy(xmlrpclib.ServerProxy): + transportCls = (SoftLayerTransport, SoftLayerSafeTransport) + API_PREFIX = "http://api.service.softlayer.com/xmlrpc/v3" + + def __init__(self, service, user_agent, verbose=0): + cls = self.transportCls[0] + if SoftLayerProxy.API_PREFIX[:8] == "https://": + cls = self.transportCls[1] + t = cls(use_datetime=0) + t.user_agent = user_agent + xmlrpclib.ServerProxy.__init__( + self, + uri="%s/%s" % (SoftLayerProxy.API_PREFIX, service), + transport=t, + verbose=verbose + ) + +class SoftLayerConnection(object): + """ + Connection class for the SoftLayer driver + """ + + proxyCls = SoftLayerProxy + driver = None + + def __init__(self, user, key): + self.user = user + self.key = key + self.ua = [] + + def request(self, service, 
method, *args, **kwargs): + sl = self.proxyCls(service, self._user_agent()) + + headers = {} + headers.update(self._get_auth_headers()) + headers.update(self._get_init_params(service, kwargs.get('id'))) + headers.update(self._get_object_mask(service, kwargs.get('object_mask'))) + params = [{'headers': headers}] + list(args) + + try: + return getattr(sl, method)(*params) + except xmlrpclib.Fault, e: + if e.faultCode == "SoftLayer_Account": + raise InvalidCredsError(e.faultString) + raise SoftLayerException(e) + + def _user_agent(self): + return 'libcloud/%s (%s)%s' % ( + libcloud.__version__, + self.driver.name, + "".join([" (%s)" % x for x in self.ua])) + + def user_agent_append(self, s): + self.ua.append(s) + + def _get_auth_headers(self): + return { + 'authenticate': { + 'username': self.user, + 'apiKey': self.key + } + } + + def _get_init_params(self, service, id): + if id is not None: + return { + '%sInitParameters' % service: {'id': id} + } + else: + return {} + + def _get_object_mask(self, service, mask): + if mask is not None: + return { + '%sObjectMask' % service: {'mask': mask} + } + else: + return {} + +class SoftLayerNodeDriver(NodeDriver): + """ + SoftLayer node driver + + Extra node attributes: + - password: root password + - hourlyRecurringFee: hourly price (if applicable) + - recurringFee : flat rate (if applicable) + - recurringMonths : The number of months in which the recurringFee will be incurred. 
+ """ + connectionCls = SoftLayerConnection + name = 'SoftLayer' + type = Provider.SOFTLAYER + + features = {"create_node": ["generates_password"]} + + def __init__(self, key, secret=None, secure=False): + self.key = key + self.secret = secret + self.connection = self.connectionCls(key, secret) + self.connection.driver = self + + def _to_node(self, host): + try: + password = host['softwareComponents'][0]['passwords'][0]['password'] + except (IndexError, KeyError): + password = None + + hourlyRecurringFee = host.get('billingItem', {}).get('hourlyRecurringFee', 0) + recurringFee = host.get('billingItem', {}).get('recurringFee', 0) + recurringMonths = host.get('billingItem', {}).get('recurringMonths', 0) + + return Node( + id=host['id'], + name=host['hostname'], + state=NODE_STATE_MAP.get( + host['powerState']['keyName'], + NodeState.UNKNOWN + ), + public_ip=[host['primaryIpAddress']], + private_ip=[host['primaryBackendIpAddress']], + driver=self, + extra={ + 'password': password, + 'hourlyRecurringFee': hourlyRecurringFee, + 'recurringFee': recurringFee, + 'recurringMonths': recurringMonths, + } + ) + + def _to_nodes(self, hosts): + return [self._to_node(h) for h in hosts] + + def destroy_node(self, node): + billing_item = self.connection.request( + "SoftLayer_Virtual_Guest", + "getBillingItem", + id=node.id + ) + + if billing_item: + res = self.connection.request( + "SoftLayer_Billing_Item", + "cancelService", + id=billing_item['id'] + ) + return res + else: + return False + + def _get_order_information(self, order_id, timeout=1200, check_interval=5): + mask = { + 'orderTopLevelItems': { + 'billingItem': { + 'resource': { + 'softwareComponents': { + 'passwords': '' + }, + 'powerState': '', + } + }, + } + } + + for i in range(0, timeout, check_interval): + try: + res = self.connection.request( + "SoftLayer_Billing_Order", + "getObject", + id=order_id, + object_mask=mask + ) + item = res['orderTopLevelItems'][0]['billingItem']['resource'] + if 
item['softwareComponents'][0]['passwords']: + return item + + except (KeyError, IndexError): + pass + + time.sleep(check_interval) + + return None + + def create_node(self, **kwargs): + """Create a new SoftLayer node + + See L{NodeDriver.create_node} for more keyword args. + @keyword ex_domain: e.g. libcloud.org + @type ex_domain: C{string} + """ + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + domain = kwargs.get('ex_domain') + location = kwargs['location'] + if domain == None: + if name.find(".") != -1: + domain = name[name.find('.')+1:] + + if domain == None: + # TODO: domain is a required argument for the Sofylayer API, but it + # it shouldn't be. + domain = "exmaple.com" + + res = {'prices': SL_TEMPLATES[size.id]['prices']} + res['packageId'] = DEFAULT_PACKAGE + res['prices'].append({'id': image.id}) # Add OS to order + res['location'] = location.id + res['complexType'] = 'SoftLayer_Container_Product_Order_Virtual_Guest' + res['quantity'] = 1 + res['useHourlyPricing'] = True + res['virtualGuests'] = [ + { + 'hostname': name, + 'domain': domain + } + ] + + res = self.connection.request( + "SoftLayer_Product_Order", + "placeOrder", + res + ) + + order_id = res['orderId'] + raw_node = self._get_order_information(order_id) + + return self._to_node(raw_node) + + def _to_image(self, img): + return NodeImage( + id=img['id'], + name=img['name'], + driver=self.connection.driver + ) + + def list_images(self, location=None): + return [self._to_image(i) for i in SL_IMAGES] + + def _to_size(self, id, size): + return NodeSize( + id=id, + name=size['name'], + ram=size['ram'], + disk=size['disk'], + bandwidth=size['bandwidth'], + price=None, + driver=self.connection.driver, + ) + + def list_sizes(self, location=None): + return [self._to_size(id, s['imagedata']) for id, s in SL_TEMPLATES.iteritems()] + + def _to_loc(self, loc): + return NodeLocation( + id=loc['id'], + name=loc['name'], + country=DATACENTERS[loc['name']]['country'], + driver=self + ) 
+ + def list_locations(self): + res = self.connection.request( + "SoftLayer_Location_Datacenter", + "getDatacenters" + ) + + # checking "in DATACENTERS", because some of the locations returned by getDatacenters are not useable. + return [self._to_loc(l) for l in res if l['name'] in DATACENTERS] + + def list_nodes(self): + mask = { + 'virtualGuests': { + 'powerState': '', + 'softwareComponents': { + 'passwords': '' + }, + 'billingItem': '', + }, + } + res = self.connection.request( + "SoftLayer_Account", + "getVirtualGuests", + object_mask=mask + ) + nodes = self._to_nodes(res) + return nodes + + def reboot_node(self, node): + res = self.connection.request( + "SoftLayer_Virtual_Guest", + "rebootHard", + id=node.id + ) + return res diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/vcloud.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/vcloud.py new file mode 100644 index 0000000000000000000000000000000000000000..870a9491a7457fe810f92da1b94bbc00e878bb96 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/vcloud.py @@ -0,0 +1,624 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +VMware vCloud driver. 
+""" +import base64 +import httplib +import time + +from urlparse import urlparse +from xml.etree import ElementTree as ET +from xml.parsers.expat import ExpatError + +from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver, NodeLocation +from libcloud.compute.base import NodeSize, NodeImage, NodeAuthPassword + +""" +From vcloud api "The VirtualQuantity element defines the number of MB +of memory. This should be either 512 or a multiple of 1024 (1 GB)." +""" +VIRTUAL_MEMORY_VALS = [512] + [1024 * i for i in range(1,9)] + +DEFAULT_TASK_COMPLETION_TIMEOUT = 600 + +def get_url_path(url): + return urlparse(url.strip()).path + +def fixxpath(root, xpath): + """ElementTree wants namespaces in its xpaths, so here we add them.""" + namespace, root_tag = root.tag[1:].split("}", 1) + fixed_xpath = "/".join(["{%s}%s" % (namespace, e) + for e in xpath.split("/")]) + return fixed_xpath + +class InstantiateVAppXML(object): + + def __init__(self, name, template, net_href, cpus, memory, + password=None, row=None, group=None): + self.name = name + self.template = template + self.net_href = net_href + self.cpus = cpus + self.memory = memory + self.password = password + self.row = row + self.group = group + + self._build_xmltree() + + def tostring(self): + return ET.tostring(self.root) + + def _build_xmltree(self): + self.root = self._make_instantiation_root() + + self._add_vapp_template(self.root) + instantionation_params = ET.SubElement(self.root, + "InstantiationParams") + + # product and virtual hardware + self._make_product_section(instantionation_params) + self._make_virtual_hardware(instantionation_params) + + network_config_section = ET.SubElement(instantionation_params, + "NetworkConfigSection") + + network_config = ET.SubElement(network_config_section, + "NetworkConfig") 
+ self._add_network_association(network_config) + + def _make_instantiation_root(self): + return ET.Element( + "InstantiateVAppTemplateParams", + {'name': self.name, + 'xml:lang': 'en', + 'xmlns': "http://www.vmware.com/vcloud/v0.8", + 'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance"} + ) + + def _add_vapp_template(self, parent): + return ET.SubElement( + parent, + "VAppTemplate", + {'href': self.template} + ) + + def _make_product_section(self, parent): + prod_section = ET.SubElement( + parent, + "ProductSection", + {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8", + 'xmlns:ovf': "http://schemas.dmtf.org/ovf/envelope/1"} + ) + + if self.password: + self._add_property(prod_section, 'password', self.password) + + if self.row: + self._add_property(prod_section, 'row', self.row) + + if self.group: + self._add_property(prod_section, 'group', self.group) + + return prod_section + + def _add_property(self, parent, ovfkey, ovfvalue): + return ET.SubElement( + parent, + "Property", + {'xmlns': 'http://schemas.dmtf.org/ovf/envelope/1', + 'ovf:key': ovfkey, + 'ovf:value': ovfvalue} + ) + + def _make_virtual_hardware(self, parent): + vh = ET.SubElement( + parent, + "VirtualHardwareSection", + {'xmlns:q1': "http://www.vmware.com/vcloud/v0.8"} + ) + + self._add_cpu(vh) + self._add_memory(vh) + + return vh + + def _add_cpu(self, parent): + cpu_item = ET.SubElement( + parent, + "Item", + {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"} + ) + self._add_instance_id(cpu_item, '1') + self._add_resource_type(cpu_item, '3') + self._add_virtual_quantity(cpu_item, self.cpus) + + return cpu_item + + def _add_memory(self, parent): + mem_item = ET.SubElement( + parent, + "Item", + {'xmlns': "http://schemas.dmtf.org/ovf/envelope/1"} + ) + self._add_instance_id(mem_item, '2') + self._add_resource_type(mem_item, '4') + self._add_virtual_quantity(mem_item, self.memory) + + return mem_item + + def _add_instance_id(self, parent, id): + elm = ET.SubElement( + parent, + "InstanceID", + 
{'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + ) + elm.text = id + return elm + + def _add_resource_type(self, parent, type): + elm = ET.SubElement( + parent, + "ResourceType", + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + ) + elm.text = type + return elm + + def _add_virtual_quantity(self, parent, amount): + elm = ET.SubElement( + parent, + "VirtualQuantity", + {'xmlns': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData'} + ) + elm.text = amount + return elm + + def _add_network_association(self, parent): + return ET.SubElement( + parent, + "NetworkAssociation", + {'href': self.net_href} + ) + +class VCloudResponse(Response): + + def parse_body(self): + if not self.body: + return None + try: + return ET.XML(self.body) + except ExpatError, e: + raise Exception("%s: %s" % (e, self.parse_error())) + + def parse_error(self): + return self.error + + def success(self): + return self.status in (httplib.OK, httplib.CREATED, + httplib.NO_CONTENT, httplib.ACCEPTED) + +class VCloudConnection(ConnectionUserAndKey): + """ + Connection class for the vCloud driver + """ + + responseCls = VCloudResponse + token = None + host = None + + def request(self, *args, **kwargs): + self._get_auth_token() + return super(VCloudConnection, self).request(*args, **kwargs) + + def check_org(self): + # the only way to get our org is by logging in. 
+ self._get_auth_token() + + def _get_auth_headers(self): + """Some providers need different headers than others""" + return { + 'Authorization': + "Basic %s" + % base64.b64encode('%s:%s' % (self.user_id, self.key)), + 'Content-Length': 0 + } + + def _get_auth_token(self): + if not self.token: + conn = self.conn_classes[self.secure](self.host, + self.port[self.secure]) + conn.request(method='POST', url='/api/v0.8/login', + headers=self._get_auth_headers()) + + resp = conn.getresponse() + headers = dict(resp.getheaders()) + body = ET.XML(resp.read()) + + try: + self.token = headers['set-cookie'] + except KeyError: + raise InvalidCredsError() + + self.driver.org = get_url_path( + body.find(fixxpath(body, 'Org')).get('href') + ) + + def add_default_headers(self, headers): + headers['Cookie'] = self.token + return headers + +class VCloudNodeDriver(NodeDriver): + """ + vCloud node driver + """ + + type = Provider.VCLOUD + name = "vCloud" + connectionCls = VCloudConnection + org = None + _vdcs = None + + NODE_STATE_MAP = {'0': NodeState.PENDING, + '1': NodeState.PENDING, + '2': NodeState.PENDING, + '3': NodeState.PENDING, + '4': NodeState.RUNNING} + + @property + def vdcs(self): + if not self._vdcs: + self.connection.check_org() # make sure the org is set. 
+ res = self.connection.request(self.org) + self._vdcs = [ + get_url_path(i.get('href')) + for i + in res.object.findall(fixxpath(res.object, "Link")) + if i.get('type') == 'application/vnd.vmware.vcloud.vdc+xml' + ] + + return self._vdcs + + @property + def networks(self): + networks = [] + for vdc in self.vdcs: + res = self.connection.request(vdc).object + networks.extend( + [network + for network in res.findall( + fixxpath(res, "AvailableNetworks/Network") + )] + ) + + return networks + + def _to_image(self, image): + image = NodeImage(id=image.get('href'), + name=image.get('name'), + driver=self.connection.driver) + return image + + def _to_node(self, name, elm): + state = self.NODE_STATE_MAP[elm.get('status')] + public_ips = [] + private_ips = [] + + # Following code to find private IPs works for Terremark + connections = elm.findall('{http://schemas.dmtf.org/ovf/envelope/1}NetworkConnectionSection/{http://www.vmware.com/vcloud/v0.8}NetworkConnection') + for connection in connections: + ips = [ip.text + for ip + in connection.findall(fixxpath(elm, "IpAddress"))] + if connection.get('Network') == 'Internal': + private_ips.extend(ips) + else: + public_ips.extend(ips) + + node = Node(id=elm.get('href'), + name=name, + state=state, + public_ip=public_ips, + private_ip=private_ips, + driver=self.connection.driver) + + return node + + def _get_catalog_hrefs(self): + res = self.connection.request(self.org) + catalogs = [ + get_url_path(i.get('href')) + for i in res.object.findall(fixxpath(res.object, "Link")) + if i.get('type') == 'application/vnd.vmware.vcloud.catalog+xml' + ] + + return catalogs + + def _wait_for_task_completion(self, task_href, + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + start_time = time.time() + res = self.connection.request(task_href) + status = res.object.get('status') + while status != 'success': + if status == 'error': + raise Exception("Error status returned by task %s." 
+ % task_href) + if status == 'canceled': + raise Exception("Canceled status returned by task %s." + % task_href) + if (time.time() - start_time >= timeout): + raise Exception("Timeout while waiting for task %s." + % task_href) + time.sleep(5) + res = self.connection.request(task_href) + status = res.object.get('status') + + def destroy_node(self, node): + node_path = get_url_path(node.id) + # blindly poweroff node, it will throw an exception if already off + try: + res = self.connection.request('%s/power/action/poweroff' + % node_path, + method='POST') + self._wait_for_task_completion(res.object.get('href')) + except Exception: + pass + + try: + res = self.connection.request('%s/action/undeploy' % node_path, + method='POST') + self._wait_for_task_completion(res.object.get('href')) + except ExpatError: + # The undeploy response is malformed XML atm. + # We can remove this when the providers fix the problem. + pass + except Exception: + # Some vendors don't implement undeploy at all yet, + # so catch this and move on.
+ pass + + res = self.connection.request(node_path, method='DELETE') + return res.status == 202 + + def reboot_node(self, node): + res = self.connection.request('%s/power/action/reset' + % get_url_path(node.id), + method='POST') + return res.status == 202 or res.status == 204 + + def list_nodes(self): + nodes = [] + for vdc in self.vdcs: + res = self.connection.request(vdc) + elms = res.object.findall(fixxpath( + res.object, "ResourceEntities/ResourceEntity") + ) + vapps = [ + (i.get('name'), get_url_path(i.get('href'))) + for i in elms + if i.get('type') + == 'application/vnd.vmware.vcloud.vApp+xml' + and i.get('name') + ] + + for vapp_name, vapp_href in vapps: + res = self.connection.request( + vapp_href, + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.vApp+xml' + } + ) + nodes.append(self._to_node(vapp_name, res.object)) + + return nodes + + def _to_size(self, ram): + ns = NodeSize( + id=None, + name="%s Ram" % ram, + ram=ram, + disk=None, + bandwidth=None, + price=None, + driver=self.connection.driver + ) + return ns + + def list_sizes(self, location=None): + sizes = [self._to_size(i) for i in VIRTUAL_MEMORY_VALS] + return sizes + + def _get_catalogitems_hrefs(self, catalog): + """Given a catalog href returns contained catalog item hrefs""" + res = self.connection.request( + catalog, + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.catalog+xml' + } + ).object + + cat_items = res.findall(fixxpath(res, "CatalogItems/CatalogItem")) + cat_item_hrefs = [i.get('href') + for i in cat_items + if i.get('type') == + 'application/vnd.vmware.vcloud.catalogItem+xml'] + + return cat_item_hrefs + + def _get_catalogitem(self, catalog_item): + """Given a catalog item href returns elementree""" + res = self.connection.request( + catalog_item, + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.catalogItem+xml' + } + ).object + + return res + + def list_images(self, location=None): + images = [] + for vdc in self.vdcs: + res = 
self.connection.request(vdc).object + res_ents = res.findall(fixxpath( + res, "ResourceEntities/ResourceEntity") + ) + images += [ + self._to_image(i) + for i in res_ents + if i.get('type') == + 'application/vnd.vmware.vcloud.vAppTemplate+xml' + ] + + for catalog in self._get_catalog_hrefs(): + for cat_item in self._get_catalogitems_hrefs(catalog): + res = self._get_catalogitem(cat_item) + res_ents = res.findall(fixxpath(res, 'Entity')) + images += [ + self._to_image(i) + for i in res_ents + if i.get('type') == + 'application/vnd.vmware.vcloud.vAppTemplate+xml' + ] + + return images + + def create_node(self, **kwargs): + """Creates and returns node. + + + See L{NodeDriver.create_node} for more keyword args. + + Non-standard optional keyword arguments: + @keyword ex_network: link to a "Network" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/network/7" + @type ex_network: C{string} + + @keyword ex_vdc: link to a "VDC" e.g., "https://services.vcloudexpress.terremark.com/api/v0.8/vdc/1" + @type ex_vdc: C{string} + + @keyword ex_cpus: number of virtual cpus (limit depends on provider) + @type ex_cpus: C{int} + + @keyword row: ???? + @type row: C{????} + + @keyword group: ???? + @type group: C{????} + """ + name = kwargs['name'] + image = kwargs['image'] + size = kwargs['size'] + + # Some providers don't require a network link + try: + network = kwargs.get('ex_network', self.networks[0].get('href')) + except IndexError: + network = '' + + password = None + if kwargs.has_key('auth'): + auth = kwargs['auth'] + if isinstance(auth, NodeAuthPassword): + password = auth.password + else: + raise ValueError('auth must be of NodeAuthPassword type') + + instantiate_xml = InstantiateVAppXML( + name=name, + template=image.id, + net_href=network, + cpus=str(kwargs.get('ex_cpus', 1)), + memory=str(size.ram), + password=password, + row=kwargs.get('ex_row', None), + group=kwargs.get('ex_group', None) + ) + + # Instantiate VM and get identifier. 
+ res = self.connection.request( + '%s/action/instantiateVAppTemplate' + % kwargs.get('vdc', self.vdcs[0]), + data=instantiate_xml.tostring(), + method='POST', + headers={ + 'Content-Type': + 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml' + } + ) + vapp_name = res.object.get('name') + vapp_href = get_url_path(res.object.get('href')) + + # Deploy the VM from the identifier. + res = self.connection.request('%s/action/deploy' % vapp_href, + method='POST') + + self._wait_for_task_completion(res.object.get('href')) + + # Power on the VM. + res = self.connection.request('%s/power/action/powerOn' % vapp_href, + method='POST') + + res = self.connection.request(vapp_href) + node = self._to_node(vapp_name, res.object) + + return node + + features = {"create_node": ["password"]} + +class HostingComConnection(VCloudConnection): + """ + vCloud connection subclass for Hosting.com + """ + + host = "vcloud.safesecureweb.com" + + def _get_auth_headers(self): + """hosting.com doesn't follow the standard vCloud authentication API""" + return { + 'Authentication': + base64.b64encode('%s:%s' % (self.user_id, self.key)), + 'Content-Length': 0 + } + +class HostingComDriver(VCloudNodeDriver): + """ + vCloud node driver for Hosting.com + """ + connectionCls = HostingComConnection + +class TerremarkConnection(VCloudConnection): + """ + vCloud connection subclass for Terremark + """ + + host = "services.vcloudexpress.terremark.com" + +class TerremarkDriver(VCloudNodeDriver): + """ + vCloud node driver for Terremark + """ + + connectionCls = TerremarkConnection + + def list_locations(self): + return [NodeLocation(0, "Terremark Texas", 'US', self)] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/voxel.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/voxel.py new file mode 100644 index 0000000000000000000000000000000000000000..1e6659a1fbb20a37ea73b3bfdf79cdeff47bfcd1 --- /dev/null +++ 
b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/voxel.py @@ -0,0 +1,308 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Voxel VoxCloud driver +""" +import datetime +import hashlib + +from xml.etree import ElementTree as ET + +from libcloud.common.base import Response, ConnectionUserAndKey +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +VOXEL_API_HOST = "api.voxel.net" + +class VoxelResponse(Response): + + def __init__(self, response): + self.parsed = None + super(VoxelResponse, self).__init__(response) + + def parse_body(self): + if not self.body: + return None + if not self.parsed: + self.parsed = ET.XML(self.body) + return self.parsed + + def parse_error(self): + err_list = [] + if not self.body: + return None + if not self.parsed: + self.parsed = ET.XML(self.body) + for err in self.parsed.findall('err'): + code = err.get('code') + err_list.append("(%s) %s" % (code, err.get('msg'))) + # From voxel docs: + # 1: Invalid login or password + # 9: Permission denied: user lacks access 
rights for this method + if code == "1" or code == "9": + # sucks, but only way to detect + # bad authentication tokens so far + raise InvalidCredsError(err_list[-1]) + return "\n".join(err_list) + + def success(self): + if not self.parsed: + self.parsed = ET.XML(self.body) + stat = self.parsed.get('stat') + if stat != "ok": + return False + return True + +class VoxelConnection(ConnectionUserAndKey): + """ + Connection class for the Voxel driver + """ + + host = VOXEL_API_HOST + responseCls = VoxelResponse + + def add_default_params(self, params): + params["key"] = self.user_id + params["timestamp"] = datetime.datetime.utcnow().isoformat()+"+0000" + + for param in params.keys(): + if params[param] is None: + del params[param] + + keys = params.keys() + keys.sort() + + md5 = hashlib.md5() + md5.update(self.key) + for key in keys: + if params[key]: + if not params[key] is None: + md5.update("%s%s"% (key, params[key])) + else: + md5.update(key) + params['api_sig'] = md5.hexdigest() + return params + +VOXEL_INSTANCE_TYPES = {} +RAM_PER_CPU = 2048 + +NODE_STATE_MAP = { + 'IN_PROGRESS': NodeState.PENDING, + 'QUEUED': NodeState.PENDING, + 'SUCCEEDED': NodeState.RUNNING, + 'shutting-down': NodeState.TERMINATED, + 'terminated': NodeState.TERMINATED, + 'unknown': NodeState.UNKNOWN, +} + +class VoxelNodeDriver(NodeDriver): + """ + Voxel VoxCLOUD node driver + """ + + connectionCls = VoxelConnection + type = Provider.VOXEL + name = 'Voxel VoxCLOUD' + + def _initialize_instance_types(): + for cpus in range(1,14): + if cpus == 1: + name = "Single CPU" + else: + name = "%d CPUs" % cpus + id = "%dcpu" % cpus + ram = cpus * RAM_PER_CPU + + VOXEL_INSTANCE_TYPES[id]= { + 'id': id, + 'name': name, + 'ram': ram, + 'disk': None, + 'bandwidth': None, + 'price': None} + + features = {"create_node": [], + "list_sizes": ["variable_disk"]} + + _initialize_instance_types() + + def list_nodes(self): + params = {"method": "voxel.devices.list"} + result = self.connection.request('/', 
params=params).object + return self._to_nodes(result) + + def list_sizes(self, location=None): + return [ NodeSize(driver=self.connection.driver, **i) + for i in VOXEL_INSTANCE_TYPES.values() ] + + def list_images(self, location=None): + params = {"method": "voxel.images.list"} + result = self.connection.request('/', params=params).object + return self._to_images(result) + + def create_node(self, **kwargs): + """Create Voxel Node + + @keyword name: the name to assign the node (mandatory) + @type name: C{str} + + @keyword image: distribution to deploy + @type image: L{NodeImage} + + @keyword size: the plan size to create (mandatory) + Requires size.disk (GB) to be set manually + @type size: L{NodeSize} + + @keyword location: which datacenter to create the node in + @type location: L{NodeLocation} + + @keyword ex_privateip: Backend IP address to assign to node; + must be chosen from the customer's + private VLAN assignment. + @type ex_privateip: C{str} + + @keyword ex_publicip: Public-facing IP address to assign to node; + must be chosen from the customer's + public VLAN assignment. + @type ex_publicip: C{str} + + @keyword ex_rootpass: Password for root access; generated if unset. + @type ex_rootpass: C{str} + + @keyword ex_consolepass: Password for remote console; + generated if unset. + @type ex_consolepass: C{str} + + @keyword ex_sshuser: Username for SSH access + @type ex_sshuser: C{str} + + @keyword ex_sshpass: Password for SSH access; generated if unset. + @type ex_sshpass: C{str} + + @keyword ex_voxel_access: Allow access Voxel administrative access. + Defaults to False. 
+ @type ex_voxel_access: C{bool} + """ + + # assert that disk > 0 + if not kwargs["size"].disk: + raise ValueError("size.disk must be non-zero") + + # convert voxel_access to string boolean if needed + voxel_access = kwargs.get("ex_voxel_access", None) + if voxel_access is not None: + voxel_access = "true" if voxel_access else "false" + + params = { + 'method': 'voxel.voxcloud.create', + 'hostname': kwargs["name"], + 'disk_size': int(kwargs["size"].disk), + 'facility': kwargs["location"].id, + 'image_id': kwargs["image"].id, + 'processing_cores': kwargs["size"].ram / RAM_PER_CPU, + 'backend_ip': kwargs.get("ex_privateip", None), + 'frontend_ip': kwargs.get("ex_publicip", None), + 'admin_password': kwargs.get("ex_rootpass", None), + 'console_password': kwargs.get("ex_consolepass", None), + 'ssh_username': kwargs.get("ex_sshuser", None), + 'ssh_password': kwargs.get("ex_sshpass", None), + 'voxel_access': voxel_access, + } + + object = self.connection.request('/', params=params).object + + if self._getstatus(object): + return Node( + id = object.findtext("device/id"), + name = kwargs["name"], + state = NODE_STATE_MAP[object.findtext("device/status")], + public_ip = kwargs.get("publicip", None), + private_ip = kwargs.get("privateip", None), + driver = self.connection.driver + ) + else: + return None + + def reboot_node(self, node): + """ + Reboot the node by passing in the node object + """ + params = {'method': 'voxel.devices.power', + 'device_id': node.id, + 'power_action': 'reboot'} + return self._getstatus(self.connection.request('/', params=params).object) + + def destroy_node(self, node): + """ + Destroy node by passing in the node object + """ + params = {'method': 'voxel.voxcloud.delete', + 'device_id': node.id} + return self._getstatus(self.connection.request('/', params=params).object) + + def list_locations(self): + params = {"method": "voxel.voxcloud.facilities.list"} + result = self.connection.request('/', params=params).object + nodes = 
self._to_locations(result) + return nodes + + def _getstatus(self, element): + status = element.attrib["stat"] + return status == "ok" + + + def _to_locations(self, object): + return [NodeLocation(element.attrib["label"], + element.findtext("description"), + element.findtext("description"), + self) + for element in object.findall('facilities/facility')] + + def _to_nodes(self, object): + nodes = [] + for element in object.findall('devices/device'): + if element.findtext("type") == "Virtual Server": + try: + state = self.NODE_STATE_MAP[element.attrib['status']] + except KeyError: + state = NodeState.UNKNOWN + + public_ip = private_ip = None + ipassignments = element.findall("ipassignments/ipassignment") + for ip in ipassignments: + if ip.attrib["type"] =="frontend": + public_ip = ip.text + elif ip.attrib["type"] == "backend": + private_ip = ip.text + + nodes.append(Node(id= element.attrib['id'], + name=element.attrib['label'], + state=state, + public_ip= public_ip, + private_ip= private_ip, + driver=self.connection.driver)) + return nodes + + def _to_images(self, object): + images = [] + for element in object.findall("images/image"): + images.append(NodeImage(id = element.attrib["id"], + name = element.attrib["summary"], + driver = self.connection.driver)) + return images diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/vpsnet.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/vpsnet.py new file mode 100644 index 0000000000000000000000000000000000000000..96664284a29de1dcb1a67d204d26265ed21125fc --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/drivers/vpsnet.py @@ -0,0 +1,184 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +VPS.net driver +""" +import base64 + +try: + import json +except: + import simplejson as json + +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.types import InvalidCredsError +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.base import Node, NodeDriver +from libcloud.compute.base import NodeSize, NodeImage, NodeLocation + +API_HOST = 'api.vps.net' +API_VERSION = 'api10json' + +RAM_PER_NODE = 256 +DISK_PER_NODE = 10 +BANDWIDTH_PER_NODE = 250 + + +class VPSNetResponse(Response): + + def parse_body(self): + try: + js = json.loads(self.body) + return js + except ValueError: + return self.body + + def success(self): + # vps.net wrongly uses 406 for invalid auth creds + if self.status == 406 or self.status == 403: + raise InvalidCredsError() + return True + + def parse_error(self): + try: + errors = json.loads(self.body)['errors'][0] + except ValueError: + return self.body + else: + return "\n".join(errors) + +class VPSNetConnection(ConnectionUserAndKey): + """ + Connection class for the VPS.net driver + """ + + host = API_HOST + responseCls = VPSNetResponse + + def add_default_headers(self, headers): + user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key)) + headers['Authorization'] = 'Basic %s' % (user_b64) + return headers + +class VPSNetNodeDriver(NodeDriver): + """ + VPS.net node 
driver + """ + + type = Provider.VPSNET + api_name = 'vps_net' + name = "vps.net" + connectionCls = VPSNetConnection + + def _to_node(self, vm): + if vm['running']: + state = NodeState.RUNNING + else: + state = NodeState.PENDING + + n = Node(id=vm['id'], + name=vm['label'], + state=state, + public_ip=[vm.get('primary_ip_address', None)], + private_ip=[], + extra={'slices_count':vm['slices_count']}, # Number of nodes consumed by VM + driver=self.connection.driver) + return n + + def _to_image(self, image, cloud): + image = NodeImage(id=image['id'], + name="%s: %s" % (cloud, image['label']), + driver=self.connection.driver) + + return image + + def _to_size(self, num): + size = NodeSize(id=num, + name="%d Node" % (num,), + ram=RAM_PER_NODE * num, + disk=DISK_PER_NODE, + bandwidth=BANDWIDTH_PER_NODE * num, + price=self._get_price_per_node(num) * num, + driver=self.connection.driver) + return size + + def _get_price_per_node(self, num): + single_node_price = self._get_size_price(size_id='1') + return num * single_node_price + + def create_node(self, name, image, size, **kwargs): + """Create a new VPS.net node + + See L{NodeDriver.create_node} for more keyword args. 
+ @keyword ex_backups_enabled: Enable automatic backups + @type ex_backups_enabled: C{bool} + + @keyword ex_fqdn: Fully Qualified domain of the node + @type ex_fqdn: C{string} + """ + headers = {'Content-Type': 'application/json'} + request = {'virtual_machine': + {'label': name, + 'fqdn': kwargs.get('ex_fqdn', ''), + 'system_template_id': image.id, + 'backups_enabled': kwargs.get('ex_backups_enabled', 0), + 'slices_required': size.id}} + + res = self.connection.request('/virtual_machines.%s' % (API_VERSION,), + data=json.dumps(request), + headers=headers, + method='POST') + node = self._to_node(res.object['virtual_machine']) + return node + + def reboot_node(self, node): + res = self.connection.request('/virtual_machines/%s/%s.%s' % + (node.id, 'reboot', API_VERSION), + method="POST") + node = self._to_node(res.object['virtual_machine']) + return True + + def list_sizes(self, location=None): + res = self.connection.request('/nodes.%s' % (API_VERSION,)) + available_nodes = len([size for size in res.object + if size['slice']['virtual_machine_id']]) + sizes = [self._to_size(i) for i in range(1, available_nodes + 1)] + return sizes + + def destroy_node(self, node): + res = self.connection.request('/virtual_machines/%s.%s' + % (node.id, API_VERSION), + method='DELETE') + return res.status == 200 + + def list_nodes(self): + res = self.connection.request('/virtual_machines.%s' % (API_VERSION,)) + return [self._to_node(i['virtual_machine']) for i in res.object] + + def list_images(self, location=None): + res = self.connection.request('/available_clouds.%s' % (API_VERSION,)) + + images = [] + for cloud in res.object: + label = cloud['cloud']['label'] + templates = cloud['cloud']['system_templates'] + images.extend([self._to_image(image, label) + for image in templates]) + + return images + + def list_locations(self): + return [NodeLocation(0, "VPS.net Western US", 'US', self)] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/providers.py 
b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/providers.py new file mode 100644 index 0000000000000000000000000000000000000000..b5833f2f20baab35dc367eeb4eb31dd0d7b0ec40 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/providers.py @@ -0,0 +1,91 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Provider related utilities +""" + +from libcloud.utils import get_driver as _get_provider_driver +from libcloud.compute.types import Provider + +__all__ = [ + "Provider", + "DRIVERS", + "get_driver"] + +DRIVERS = { + Provider.DUMMY: + ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'), + Provider.EC2_US_EAST: + ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'), + Provider.EC2_EU_WEST: + ('libcloud.compute.drivers.ec2', 'EC2EUNodeDriver'), + Provider.EC2_US_WEST: + ('libcloud.compute.drivers.ec2', 'EC2USWestNodeDriver'), + Provider.EC2_AP_SOUTHEAST: + ('libcloud.compute.drivers.ec2', 'EC2APSENodeDriver'), + Provider.EC2_AP_NORTHEAST: + ('libcloud.compute.drivers.ec2', 'EC2APNENodeDriver'), + Provider.ECP: + ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'), + Provider.ELASTICHOSTS_UK1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK1NodeDriver'), + Provider.ELASTICHOSTS_UK2: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUK2NodeDriver'), + Provider.ELASTICHOSTS_US1: + ('libcloud.compute.drivers.elastichosts', 'ElasticHostsUS1NodeDriver'), + Provider.CLOUDSIGMA: + ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaZrhNodeDriver'), + Provider.GOGRID: + ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'), + Provider.RACKSPACE: + ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), + Provider.RACKSPACE_UK: + ('libcloud.compute.drivers.rackspace', 'RackspaceUKNodeDriver'), + Provider.SLICEHOST: + ('libcloud.compute.drivers.slicehost', 'SlicehostNodeDriver'), + Provider.VPSNET: + ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), + Provider.LINODE: + ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'), + Provider.RIMUHOSTING: + ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'), + Provider.VOXEL: + ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'), + Provider.SOFTLAYER: + ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'), + Provider.EUCALYPTUS: + ('libcloud.compute.drivers.ec2', 
'EucNodeDriver'), + Provider.IBM: + ('libcloud.compute.drivers.ibm_sbc', 'IBMNodeDriver'), + Provider.OPENNEBULA: + ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'), + Provider.DREAMHOST: + ('libcloud.compute.drivers.dreamhost', 'DreamhostNodeDriver'), + Provider.BRIGHTBOX: + ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'), + Provider.NIMBUS: + ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'), + Provider.BLUEBOX: + ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'), + Provider.GANDI: + ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'), + Provider.OPSOURCE: + ('libcloud.compute.drivers.opsource', 'OpsourceNodeDriver'), + Provider.OPENSTACK: + ('libcloud.compute.drivers.rackspace', 'OpenStackNodeDriver'), +} + +def get_driver(provider): + return _get_provider_driver(DRIVERS, provider) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/ssh.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/ssh.py new file mode 100644 index 0000000000000000000000000000000000000000..e39685b382a59e1e87632bbf5adc3161b440cfcb --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/ssh.py @@ -0,0 +1,194 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Wraps multiple ways to communicate over SSH +""" +have_paramiko = False + +try: + import paramiko + have_paramiko = True +except ImportError: + pass + +# Depending on your version of Paramiko, it may cause a deprecation +# warning on Python 2.6. +# Ref: https://bugs.launchpad.net/paramiko/+bug/392973 + +from os.path import split as psplit + +class BaseSSHClient(object): + """ + Base class representing a connection over SSH/SCP to a remote node. + """ + + def __init__(self, hostname, port=22, username='root', password=None, + key=None, timeout=None): + """ + @type hostname: C{str} + @keyword hostname: Hostname or IP address to connect to. + + @type port: C{int} + @keyword port: TCP port to communicate on, defaults to 22. + + @type username: C{str} + @keyword username: Username to use, defaults to root. + + @type password: C{str} + @keyword password: Password to authenticate with. + + @type key: C{list} + @keyword key: Private SSH keys to authenticate with. + """ + self.hostname = hostname + self.port = port + self.username = username + self.password = password + self.key = key + self.timeout = timeout + + def connect(self): + """ + Connect to the remote node over SSH. + + @return: C{bool} + """ + raise NotImplementedError, \ + 'connect not implemented for this ssh client' + + def put(self, path, contents=None, chmod=None): + """ + Upload a file to the remote node. + + @type path: C{str} + @keyword path: File path on the remote node. + + @type contents: C{str} + @keyword contents: File Contents. + + @type chmod: C{int} + @keyword chmod: chmod file to this after creation. + """ + raise NotImplementedError, \ + 'put not implemented for this ssh client' + + def delete(self, path): + """ + Delete/Unlink a file on the remote node. + + @type path: C{str} + @keyword path: File path on the remote node. + """ + raise NotImplementedError, \ + 'delete not implemented for this ssh client' + + def run(self, cmd): + """ + Run a command on a remote node. 
+ + @type cmd: C{str} + @keyword cmd: Command to run. + + @return C{list} of [stdout, stderr, exit_status] + """ + raise NotImplementedError, \ + 'run not implemented for this ssh client' + + def close(self): + """ + Shutdown connection to the remote node. + """ + raise NotImplementedError, \ + 'close not implemented for this ssh client' + +class ParamikoSSHClient(BaseSSHClient): + """ + A SSH Client powered by Paramiko. + """ + def __init__(self, hostname, port=22, username='root', password=None, + key=None, timeout=None): + super(ParamikoSSHClient, self).__init__(hostname, port, username, + password, key, timeout) + self.client = paramiko.SSHClient() + self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + def connect(self): + conninfo = {'hostname': self.hostname, + 'port': self.port, + 'username': self.username, + 'password': self.password, + 'allow_agent': False, + 'look_for_keys': False} + + if self.timeout: + conninfo['timeout'] = self.timeout + + self.client.connect(**conninfo) + return True + + def put(self, path, contents=None, chmod=None): + sftp = self.client.open_sftp() + # less than ideal, but we need to mkdir stuff otherwise file() fails + head, tail = psplit(path) + if path[0] == "/": + sftp.chdir("/") + for part in head.split("/"): + if part != "": + try: + sftp.mkdir(part) + except IOError: + # so, there doesn't seem to be a way to + # catch EEXIST consistently *sigh* + pass + sftp.chdir(part) + ak = sftp.file(tail, mode='w') + ak.write(contents) + if chmod is not None: + ak.chmod(chmod) + ak.close() + sftp.close() + + def delete(self, path): + sftp = self.client.open_sftp() + sftp.unlink(path) + sftp.close() + + def run(self, cmd): + # based on exec_command() + bufsize = -1 + t = self.client.get_transport() + chan = t.open_session() + chan.exec_command(cmd) + stdin = chan.makefile('wb', bufsize) + stdout = chan.makefile('rb', bufsize) + stderr = chan.makefile_stderr('rb', bufsize) + #stdin, stdout, stderr = 
self.client.exec_command(cmd) + stdin.close() + status = chan.recv_exit_status() + so = stdout.read() + se = stderr.read() + return [so, se, status] + + def close(self): + self.client.close() + +class ShellOutSSHClient(BaseSSHClient): + # TODO: write this one + pass + +SSHClient = ParamikoSSHClient +if not have_paramiko: + SSHClient = ShellOutSSHClient diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/types.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/types.py new file mode 100644 index 0000000000000000000000000000000000000000..573038629d4b2210fca49caee5366cbbb4eb3364 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/compute/types.py @@ -0,0 +1,122 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Base types used by other parts of libcloud +""" + +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.common.types import InvalidCredsError, InvalidCredsException +__all__ = [ + "Provider", + "NodeState", + "DeploymentError", + "DeploymentException", + + # @@TR: should the unused imports below be exported? 
+ "LibcloudError", + "MalformedResponseError", + "InvalidCredsError", + "InvalidCredsException" + ] +class Provider(object): + """ + Defines for each of the supported providers + + @cvar DUMMY: Example provider + @cvar EC2_US_EAST: Amazon AWS US N. Virgina + @cvar EC2_US_WEST: Amazon AWS US N. California + @cvar EC2_EU_WEST: Amazon AWS EU Ireland + @cvar RACKSPACE: Rackspace Cloud Servers + @cvar RACKSPACE_UK: Rackspace UK Cloud Servers + @cvar SLICEHOST: Slicehost.com + @cvar GOGRID: GoGrid + @cvar VPSNET: VPS.net + @cvar LINODE: Linode.com + @cvar VCLOUD: vmware vCloud + @cvar RIMUHOSTING: RimuHosting.com + @cvar ECP: Enomaly + @cvar IBM: IBM Developer Cloud + @cvar OPENNEBULA: OpenNebula.org + @cvar DREAMHOST: DreamHost Private Server + @cvar CLOUDSIGMA: CloudSigma + @cvar NIMBUS: Nimbus + @cvar BLUEBOX: Bluebox + @cvar OPSOURCE: Opsource Cloud + """ + DUMMY = 0 + EC2 = 1 # deprecated name + EC2_US_EAST = 1 + EC2_EU = 2 # deprecated name + EC2_EU_WEST = 2 + RACKSPACE = 3 + SLICEHOST = 4 + GOGRID = 5 + VPSNET = 6 + LINODE = 7 + VCLOUD = 8 + RIMUHOSTING = 9 + EC2_US_WEST = 10 + VOXEL = 11 + SOFTLAYER = 12 + EUCALYPTUS = 13 + ECP = 14 + IBM = 15 + OPENNEBULA = 16 + DREAMHOST = 17 + ELASTICHOSTS = 18 + ELASTICHOSTS_UK1 = 19 + ELASTICHOSTS_UK2 = 20 + ELASTICHOSTS_US1 = 21 + EC2_AP_SOUTHEAST = 22 + RACKSPACE_UK = 23 + BRIGHTBOX = 24 + CLOUDSIGMA = 25 + EC2_AP_NORTHEAST = 26 + NIMBUS = 27 + BLUEBOX = 28 + GANDI = 29 + OPSOURCE = 30 + OPENSTACK = 31 + +class NodeState(object): + """ + Standard states for a node + + @cvar RUNNING: Node is running + @cvar REBOOTING: Node is rebooting + @cvar TERMINATED: Node is terminated + @cvar PENDING: Node is pending + @cvar UNKNOWN: Node state is unknown + """ + RUNNING = 0 + REBOOTING = 1 + TERMINATED = 2 + PENDING = 3 + UNKNOWN = 4 + +class DeploymentError(LibcloudError): + """ + Exception used when a Deployment Task failed. 
+ + @ivar node: L{Node} on which this exception happened, you might want to call L{Node.destroy} + """ + def __init__(self, node, original_exception=None): + self.node = node + self.value = original_exception + def __str__(self): + return repr(self.value) + +"""Deprecated alias of L{DeploymentException}""" +DeploymentException = DeploymentError diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/data/pricing.json b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/data/pricing.json new file mode 100644 index 0000000000000000000000000000000000000000..31554b9c1bfb3db168f445b02e6c5cc9ba41b7ed --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/data/pricing.json @@ -0,0 +1,132 @@ +{ + "bluebox": { + "1gb": 0.15, + "2gb": 0.25, + "4gb": 0.35, + "8gb": 0.45 + }, + + "rackspace": { + "1": 0.015, + "2": 0.030, + "3": 0.060, + "4": 0.120, + "5": 0.240, + "6": 0.480, + "7": 0.960 + }, + + "dreamhost": { + "minimum": 15, + "maximum": 200, + "default": 115, + "low": 50, + "high": 150 + }, + + "ec2_us_east": { + "t1.micro": 0.02, + "m1.small": 0.085, + "m1.large": 0.34, + "m1.xlarge": 0.68, + "c1.medium": 0.17, + "c1.xlarge": 0.68, + "m2.xlarge": 0.50, + "m2.2xlarge": 1.0, + "m2.4xlarge": 2.0, + "cg1.4xlarge": 2.1, + "cc1.4xlarge": 1.6 + }, + + "ec2_us_west": { + "t1.micro": 0.025, + "m1.small": 0.095, + "m1.large": 0.38, + "m1.xlarge": 0.76, + "c1.medium": 0.19, + "c1.xlarge": 0.76, + "m2.xlarge": 0.57, + "m2.2xlarge": 1.14, + "m2.4xlarge": 2.28 + }, + + "ec2_eu_west": { + "t1.micro": 0.025, + "m1.small": 0.095, + "m1.large": 0.38, + "m1.xlarge": 0.76, + "c1.medium": 0.19, + "c1.xlarge": 0.76, + "m2.xlarge": 0.57, + "m2.2xlarge": 1.14, + "m2.4xlarge": 2.28 + }, + + "ec2_ap_southeast": { + "t1.micro": 0.025, + "m1.small": 0.095, + "m1.large": 0.38, + "m1.xlarge": 0.76, + "c1.medium": 0.19, + "c1.xlarge": 0.76, + "m2.xlarge": 0.57, + "m2.2xlarge": 1.14, + "m2.4xlarge": 2.28 + }, + + "ec2_ap_northeast": { + "t1.micro": 0.027, + "m1.small": 0.10, 
+ "m1.large": 0.40, + "m1.xlarge": 0.80, + "c1.medium": 0.20, + "c1.xlarge": 0.80, + "m2.xlarge": 0.60, + "m2.2xlarge": 1.20, + "m2.4xlarge": 2.39 + }, + + "nimbus" : { + "m1.small": 0.0, + "m1.large": 0.0, + "m1.xlarge": 0.0 + }, + + "cloudsigma_zrh": { + "micro-regular": 0.0548, + "micro-high-cpu": 0.381, + "standard-small": 0.0796, + "standard-large": 0.381, + "standard-extra-large": 0.762, + "high-memory-extra-large": 0.642, + "high-memory-double-extra-large": 1.383, + "high-cpu-medium": 0.211, + "high-cpu-extra-large": 0.780 + }, + + "elastichosts": { + "small": 0.100, + "medium": 0.223, + "large": 0.378, + "extra-large": 0.579, + "high-cpu-medium": 0.180, + "high-cpu-extra-large": 0.770 + }, + + "gogrid": { + "512MB": 0.095, + "1GB": 0.19, + "2GB": 0.38, + "4GB": 0.76, + "8GB": 1.52, + "16GB": 3.04 + }, + + "gandi": { + "1": 0.02 + }, + + "vps_net": { + "1": 0.416 + } +} diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/deployment.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/deployment.py new file mode 100644 index 0000000000000000000000000000000000000000..cbf51c826714c3041b52f74f57fc3c688b05b1fc --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/deployment.py @@ -0,0 +1,31 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.deployment import ( # pylint: disable-msg=W0611 + Deployment, + SSHKeyDeployment, + ScriptDeployment, + MultiStepDeployment + ) + +__all__ = [ + "Deployment", + "SSHKeyDeployment", + "ScriptDeployment", + "MultiStepDeployment" + ] + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b4fc782dc8b4bdd7e7d26273d5edf327f9a87782 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/__init__.py @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Drivers for working with different providers +""" + +__all__ = [ + 'brightbox', + 'dummy', + 'ec2', + 'ecp', + 'elastichosts', + 'cloudsigma', + 'gogrid', + 'ibm_sbc', + 'linode', + 'opennebula', + 'rackspace', + 'rimuhosting', + 'slicehost', + 'softlayer', + 'vcloud', + 'voxel', + 'vpsnet' +] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/brightbox.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/brightbox.py new file mode 100644 index 0000000000000000000000000000000000000000..ea99d035967cf61554a862a0f5657c59b9940add --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/brightbox.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.brightbox import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/cloudsigma.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/cloudsigma.py new file mode 100644 index 0000000000000000000000000000000000000000..f4587fc32dbdf435d2e162c2cf92751885776aa3 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/cloudsigma.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning + +from libcloud.compute.drivers.cloudsigma import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/dreamhost.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/dreamhost.py new file mode 100644 index 0000000000000000000000000000000000000000..beeb37c67725151b8b1bf27b65c9f37b0dd9950c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/dreamhost.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.dreamhost import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/dummy.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..2b1a645623bc090e563ba778c4a75c96f730ff45 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/dummy.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.dummy import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ec2.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ec2.py new file mode 100644 index 0000000000000000000000000000000000000000..f654704b9c86804c2bab2484e881f149be1fdda1 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ec2.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.ec2 import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ecp.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ecp.py new file mode 100644 index 0000000000000000000000000000000000000000..570fad98cee02aad7930101cbd7a15e81d308e5f --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ecp.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.ecp import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/elastichosts.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/elastichosts.py new file mode 100644 index 0000000000000000000000000000000000000000..e5804f7eaa8a226cfe3043f0b6ec8b37f2405fbd --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/elastichosts.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.elastichosts import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/gogrid.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/gogrid.py new file mode 100644 index 0000000000000000000000000000000000000000..d4327f18174ef3f4a6127774d5ef9b039498db54 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/gogrid.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.gogrid import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ibm_sbc.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ibm_sbc.py new file mode 100644 index 0000000000000000000000000000000000000000..f9a51df03084d98b9bef313ebe3cf2ae9288aca7 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/ibm_sbc.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.ibm_sbc import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/linode.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/linode.py new file mode 100644 index 0000000000000000000000000000000000000000..0d61de3ca8a618c3aee68e2ad0b4f0439bef0351 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/linode.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.linode import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/opennebula.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/opennebula.py new file mode 100644 index 0000000000000000000000000000000000000000..3ae3a25e4c0404a9c012c58d660b80403aa511a8 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/opennebula.py @@ -0,0 +1,22 @@ +# Copyright 2002-2009, Distributed Systems Architecture Group, Universidad +# Complutense de Madrid (dsa-research.org) +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.opennebula import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/rackspace.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/rackspace.py new file mode 100644 index 0000000000000000000000000000000000000000..e5c4e1bbfcedb5db22b65fd839970d799a6f6105 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/rackspace.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.rackspace import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/rimuhosting.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/rimuhosting.py new file mode 100644 index 0000000000000000000000000000000000000000..f55ed823ffbbdfe858229f2a134ba9fd944a53ee --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/rimuhosting.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.rimuhosting import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/slicehost.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/slicehost.py new file mode 100644 index 0000000000000000000000000000000000000000..fb4e46682f41019f44f7df2a82c0e734f1f502fc --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/slicehost.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.slicehost import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/softlayer.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/softlayer.py new file mode 100644 index 0000000000000000000000000000000000000000..6c279625d0cd4d7c1bad179fb1e2202d1df7f56e --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/softlayer.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.softlayer import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/vcloud.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/vcloud.py new file mode 100644 index 0000000000000000000000000000000000000000..e24af6fc001757c50bb604290f9e03b14655cd07 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/vcloud.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.vcloud import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/voxel.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/voxel.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad05692ca3395422aa9161c9e3c1afcba2f6b6c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/voxel.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.voxel import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/vpsnet.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/vpsnet.py new file mode 100644 index 0000000000000000000000000000000000000000..747dc1536d812a314433ba193d703f179d394799 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/drivers/vpsnet.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.drivers.vpsnet import * + +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/httplib_ssl.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/httplib_ssl.py new file mode 100644 index 0000000000000000000000000000000000000000..b0bccd569ea24cbf72a76873a3f5abefc015492d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/httplib_ssl.py @@ -0,0 +1,157 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Subclass for httplib.HTTPSConnection with optional certificate name +verification, depending on libcloud.security settings. +""" +import httplib +import os +import re +import socket +import ssl +import warnings + +import libcloud.security + +class LibcloudHTTPSConnection(httplib.HTTPSConnection): + """LibcloudHTTPSConnection + + Subclass of HTTPSConnection which verifies certificate names + if and only if CA certificates are available. + """ + verify = False # does not verify + ca_cert = None # no default CA Certificate + + def __init__(self, *args, **kwargs): + """Constructor + """ + self._setup_verify() + httplib.HTTPSConnection.__init__(self, *args, **kwargs) + + def _setup_verify(self): + """Setup Verify SSL or not + + Reads security module's VERIFY_SSL_CERT and toggles whether + the class overrides the connect() class method or runs the + inherited httplib.HTTPSConnection connect() + """ + self.verify = libcloud.security.VERIFY_SSL_CERT + + if self.verify: + self._setup_ca_cert() + else: + warnings.warn(libcloud.security.VERIFY_SSL_DISABLED_MSG) + + def _setup_ca_cert(self): + """Setup CA Certs + + Search in CA_CERTS_PATH for valid candidates and + return first match. Otherwise, complain about certs + not being available. 
+ """ + if not self.verify: + return + + ca_certs_available = [cert + for cert in libcloud.security.CA_CERTS_PATH + if os.path.exists(cert)] + if ca_certs_available: + # use first available certificate + self.ca_cert = ca_certs_available[0] + else: + # no certificates found; toggle verify to False + warnings.warn(libcloud.security.CA_CERTS_UNAVAILABLE_MSG) + self.ca_cert = None + self.verify = False + + def connect(self): + """Connect + + Checks if verification is toggled; if not, just call + httplib.HTTPSConnection's connect + """ + if not self.verify: + return httplib.HTTPSConnection.connect(self) + + # otherwise, create a connection and verify the hostname + # use socket.create_connection (in 2.6+) if possible + if getattr(socket, 'create_connection', None): + sock = socket.create_connection((self.host, self.port), + self.timeout) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((self.host, self.port)) + self.sock = ssl.wrap_socket(sock, + self.key_file, + self.cert_file, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=self.ca_cert, + ssl_version=ssl.PROTOCOL_TLSv1) + cert = self.sock.getpeercert() + if not self._verify_hostname(self.host, cert): + raise ssl.SSLError('Failed to verify hostname') + + def _verify_hostname(self, hostname, cert): + """Verify hostname against peer cert + + Check both commonName and entries in subjectAltName, using a + rudimentary glob to dns regex check to find matches + """ + common_name = self._get_common_name(cert) + alt_names = self._get_subject_alt_names(cert) + + # replace * with alphanumeric and dash + # replace . with literal . + valid_patterns = [ + re.compile( + pattern.replace( + r".", r"\." 
+ ).replace( + r"*", r"[0-9A-Za-z]+" + ) + ) + for pattern + in (set(common_name) | set(alt_names)) + ] + + return any( + pattern.search(hostname) + for pattern in valid_patterns + ) + + def _get_subject_alt_names(self, cert): + """Get SubjectAltNames + + Retrieve 'subjectAltName' attributes from cert data structure + """ + if 'subjectAltName' not in cert: + values = [] + else: + values = [value + for field, value in cert['subjectAltName'] + if field == 'DNS'] + return values + + def _get_common_name(self, cert): + """Get Common Name + + Retrieve 'commonName' attribute from cert data structure + """ + if 'subject' not in cert: + return None + values = [value[0][1] + for value in cert['subject'] + if value[0][0] == 'commonName'] + return values diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3831bc27569510094d6be96cd7b408b2e5b6d6b --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/__init__.py @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__all__ = [ + 'base', + 'providers', + 'types', + 'drivers' +] + diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/base.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/base.py new file mode 100644 index 0000000000000000000000000000000000000000..45b6f1691d789876601103497e5f0b101e3a8941 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/base.py @@ -0,0 +1,226 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.common.base import ConnectionKey +from libcloud.common.types import LibcloudError + +__all__ = [ + "Member", + "LoadBalancer", + "Driver", + "Algorithm" + ] + +class Member(object): + + def __init__(self, id, ip, port): + self.id = str(id) if id else None + self.ip = ip + self.port = port + + def __repr__(self): + return ('' % (self.id, + self.ip, self.port)) + +class Algorithm(object): + RANDOM = 0 + ROUND_ROBIN = 1 + LEAST_CONNECTIONS = 2 + +DEFAULT_ALGORITHM = Algorithm.ROUND_ROBIN + +class LoadBalancer(object): + """ + Provide a common interface for handling Load Balancers. 
+ """ + + def __init__(self, id, name, state, ip, port, driver): + self.id = str(id) if id else None + self.name = name + self.state = state + self.ip = ip + self.port = port + self.driver = driver + + def attach_compute_node(self, node): + return self.driver.balancer_attach_compute_node(node) + + def attach_member(self, member): + return self.driver.balancer_attach_member(self, member) + + def detach_member(self, member): + return self.driver.balancer_detach_member(self, member) + + def list_members(self): + return self.driver.balancer_list_members(self) + + def __repr__(self): + return ('' % (self.id, + self.name, self.state)) + + +class Driver(object): + """ + A base LBDriver class to derive from + + This class is always subclassed by a specific driver. + + """ + + connectionCls = ConnectionKey + _ALGORITHM_TO_VALUE_MAP = {} + _VALUE_TO_ALGORITHM_MAP = {} + + def __init__(self, key, secret=None, secure=True): + self.key = key + self.secret = secret + args = [self.key] + + if self.secret is not None: + args.append(self.secret) + + args.append(secure) + + self.connection = self.connectionCls(*args) + self.connection.driver = self + self.connection.connect() + + def list_protocols(self): + """ + Return a list of supported protocols. + """ + + raise NotImplementedError, \ + 'list_protocols not implemented for this driver' + + def list_balancers(self): + """ + List all loadbalancers + + @return: C{list} of L{LoadBalancer} objects + + """ + + raise NotImplementedError, \ + 'list_balancers not implemented for this driver' + + def create_balancer(self, name, port, protocol, algorithm, members): + """ + Create a new load balancer instance + + @keyword name: Name of the new load balancer (required) + @type name: C{str} + @keyword members: C{list} ofL{Member}s to attach to balancer + @type: C{list} of L{Member}s + @keyword protocol: Loadbalancer protocol, defaults to http. 
+ @type: C{str} + @keyword port: Port the load balancer should listen on, defaults to 80 + @type port: C{str} + @keyword algorithm: Load balancing algorithm, defaults to + LBAlgorithm.ROUND_ROBIN + @type algorithm: C{LBAlgorithm} + + """ + + raise NotImplementedError, \ + 'create_balancer not implemented for this driver' + + def destroy_balancer(self, balancer): + """Destroy a load balancer + + @return: C{bool} True if the destroy was successful, otherwise False + + """ + + raise NotImplementedError, \ + 'destroy_balancer not implemented for this driver' + + def get_balancer(self, balancer_id): + """ + Return a C{LoadBalancer} object. + + @keyword balancer_id: id of a load balancer you want to fetch + @type balancer_id: C{str} + + @return: C{LoadBalancer} + """ + + raise NotImplementedError, \ + 'get_balancer not implemented for this driver' + + def balancer_attach_compute_node(self, balancer, node): + """ + Attach a compute node as a member to the load balancer. + + @keyword node: Member to join to the balancer + @type member: C{libcloud.compute.base.Node} + @return {Member} Member after joining the balancer. + """ + + return self.attach_member(Member(None, node.public_ip[0], balancer.port)) + + def balancer_attach_member(self, balancer, member): + """ + Attach a member to balancer + + @keyword member: Member to join to the balancer + @type member: C{Member} + @return {Member} Member after joining the balancer. 
+ """ + + raise NotImplementedError, \ + 'balancer_attach_member not implemented for this driver' + + def balancer_detach_member(self, balancer, member): + """ + Detach member from balancer + + @return: C{bool} True if member detach was successful, otherwise False + + """ + + raise NotImplementedError, \ + 'balancer_detach_member not implemented for this driver' + + def balancer_list_members(self, balancer): + """ + Return list of members attached to balancer + + @return: C{list} of L{Member}s + + """ + + raise NotImplementedError, \ + 'balancer_list_members not implemented for this driver' + + def _value_to_algorithm(self, value): + """ + Return C{LBAlgorithm} based on the value. + """ + try: + return self._VALUE_TO_ALGORITHM_MAP[value] + except KeyError: + raise LibcloudError(value='Invalid value: %s' % (value), + driver=self) + + def _algorithm_to_value(self, algorithm): + """ + Return value based in the algorithm (C{LBAlgorithm}). + """ + try: + return self._ALGORITHM_TO_VALUE_MAP[algorithm] + except KeyError: + raise LibcloudError(value='Invalid algorithm: %s' % (algorithm), + driver=self) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f4fdb8666a5c8eab2422f0a82a00e3b155148f9c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/__init__.py @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'rackspace', + 'gogrid' +] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/gogrid.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/gogrid.py new file mode 100644 index 0000000000000000000000000000000000000000..6ecc4c0e4a2221680671226a832c45b919fc7fc1 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/gogrid.py @@ -0,0 +1,217 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +import httplib + +try: + import json +except ImportError: + import simplejson as json + +from libcloud.common.types import LibcloudError +from libcloud.utils import reverse_dict +from libcloud.common.gogrid import GoGridConnection, GoGridResponse, BaseGoGridDriver +from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm +from libcloud.loadbalancer.base import DEFAULT_ALGORITHM +from libcloud.loadbalancer.types import State, LibcloudLBImmutableError + +class GoGridLBResponse(GoGridResponse): + def success(self): + if self.status == httplib.INTERNAL_SERVER_ERROR: + # Hack, but at least this error message is more useful than + # "unexpected server error" + body = json.loads(self.body) + if body['method'] == '/grid/loadbalancer/add' and \ + len(body['list']) >= 1 and \ + body['list'][0]['message'].find('unexpected server error') != -1: + raise LibcloudError(value='You mostly likely tried to add a ' + + 'member with an IP address not assigned ' + + 'to your account', driver=self) + return super(GoGridLBResponse, self).success() + +class GoGridLBConnection(GoGridConnection): + """ + Connection class for the GoGrid load-balancer driver. 
+ """ + responseCls = GoGridLBResponse + +class GoGridLBDriver(BaseGoGridDriver, Driver): + connectionCls = GoGridLBConnection + api_name = 'gogrid_lb' + name = 'GoGrid LB' + + LB_STATE_MAP = { 'On': State.RUNNING, + 'Unknown': State.UNKNOWN } + _VALUE_TO_ALGORITHM_MAP = { + 'round robin': Algorithm.ROUND_ROBIN, + 'least connect': Algorithm.LEAST_CONNECTIONS + } + _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) + + def list_protocols(self): + # GoGrid only supports http + return [ 'http' ] + + def list_balancers(self): + return self._to_balancers( + self.connection.request('/api/grid/loadbalancer/list').object) + + def ex_create_balancer_nowait(self, name, members, protocol='http', port=80, + algorithm=DEFAULT_ALGORITHM): + algorithm = self._algorithm_to_value(algorithm) + + params = {'name': name, + 'loadbalancer.type': algorithm, + 'virtualip.ip': self._get_first_ip(), + 'virtualip.port': port} + params.update(self._members_to_params(members)) + + resp = self.connection.request('/api/grid/loadbalancer/add', + method='GET', + params=params) + return self._to_balancers(resp.object)[0] + + def create_balancer(self, name, members, protocol='http', port=80, + algorithm=DEFAULT_ALGORITHM): + balancer = self.ex_create_balancer_nowait(name, members, protocol, + port, algorithm) + + timeout = 60 * 20 + waittime = 0 + interval = 2 * 15 + + if balancer.id is not None: + return balancer + else: + while waittime < timeout: + balancers = self.list_balancers() + + for i in balancers: + if i.name == balancer.name and i.id is not None: + return i + + waittime += interval + time.sleep(interval) + + raise Exception('Failed to get id') + + def destroy_balancer(self, balancer): + try: + resp = self.connection.request('/api/grid/loadbalancer/delete', + method='POST', params={'id': balancer.id}) + except Exception as err: + if "Update request for LoadBalancer" in str(err): + raise LibcloudLBImmutableError("Cannot delete immutable object", + GoGridLBDriver) + else: + 
raise + + return resp.status == 200 + + def get_balancer(self, **kwargs): + params = {} + + try: + params['name'] = kwargs['ex_balancer_name'] + except KeyError: + balancer_id = kwargs['balancer_id'] + params['id'] = balancer_id + + resp = self.connection.request('/api/grid/loadbalancer/get', + params=params) + + return self._to_balancers(resp.object)[0] + + def balancer_attach_member(self, balancer, member): + members = self.balancer_list_members(balancer) + members.append(member) + + params = {"id": balancer.id} + + params.update(self._members_to_params(members)) + + resp = self._update_balancer(params) + + return [ m for m in + self._to_members(resp.object["list"][0]["realiplist"]) + if m.ip == member.ip ][0] + + def balancer_detach_member(self, balancer, member): + members = self.balancer_list_members(balancer) + + remaining_members = [n for n in members if n.id != member.id] + + params = {"id": balancer.id} + params.update(self._members_to_params(remaining_members)) + + resp = self._update_balancer(params) + + return resp.status == 200 + + def balancer_list_members(self, balancer): + resp = self.connection.request('/api/grid/loadbalancer/get', + params={'id': balancer.id}) + return self._to_members(resp.object["list"][0]["realiplist"]) + + def _update_balancer(self, params): + try: + return self.connection.request('/api/grid/loadbalancer/edit', + method='POST', + params=params) + except Exception as err: + if "Update already pending" in str(err): + raise LibcloudLBImmutableError("Balancer is immutable", GoGridLBDriver) + + raise LibcloudError(value='Exception: %s' % str(err), driver=self) + + def _members_to_params(self, members): + """ + Helper method to convert list of L{Member} objects + to GET params. 
+ + """ + + params = {} + + i = 0 + for member in members: + params["realiplist.%s.ip" % i] = member.ip + params["realiplist.%s.port" % i] = member.port + i += 1 + + return params + + def _to_balancers(self, object): + return [ self._to_balancer(el) for el in object["list"] ] + + def _to_balancer(self, el): + lb = LoadBalancer(id=el.get("id"), + name=el["name"], + state=self.LB_STATE_MAP.get( + el["state"]["name"], State.UNKNOWN), + ip=el["virtualip"]["ip"]["ip"], + port=el["virtualip"]["port"], + driver=self.connection.driver) + return lb + + def _to_members(self, object): + return [ self._to_member(el) for el in object ] + + def _to_member(self, el): + member = Member(id=el["ip"]["id"], + ip=el["ip"]["ip"], + port=el["port"]) + return member diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/rackspace.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/rackspace.py new file mode 100644 index 0000000000000000000000000000000000000000..e82e91ed79af84a6b120e6480ba0ae5cefa8ff30 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/drivers/rackspace.py @@ -0,0 +1,178 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os + +try: + import json +except ImportError: + import simplejson as json + +from libcloud.utils import reverse_dict +from libcloud.common.base import Response +from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm +from libcloud.loadbalancer.base import DEFAULT_ALGORITHM +from libcloud.loadbalancer.types import Provider, State +from libcloud.common.rackspace import (AUTH_HOST_US, + RackspaceBaseConnection) + +class RackspaceResponse(Response): + + def success(self): + return 200 <= int(self.status) <= 299 + + def parse_body(self): + if not self.body: + return None + else: + return json.loads(self.body) + +class RackspaceConnection(RackspaceBaseConnection): + responseCls = RackspaceResponse + auth_host = AUTH_HOST_US + _url_key = "lb_url" + + def __init__(self, user_id, key, secure=True): + super(RackspaceConnection, self).__init__(user_id, key, secure) + self.api_version = 'v1.0' + self.accept_format = 'application/json' + + def request(self, action, params=None, data='', headers=None, method='GET'): + if not headers: + headers = {} + if not params: + params = {} + if self.lb_url: + action = self.lb_url + action + if method in ('POST', 'PUT'): + headers['Content-Type'] = 'application/json' + if method == 'GET': + params['cache-busing'] = os.urandom(8).encode('hex') + + return super(RackspaceConnection, self).request(action=action, + params=params, data=data, method=method, headers=headers) + + +class RackspaceLBDriver(Driver): + connectionCls = RackspaceConnection + api_name = 'rackspace_lb' + name = 'Rackspace LB' + + LB_STATE_MAP = { 'ACTIVE': State.RUNNING, + 'BUILD': State.PENDING } + _VALUE_TO_ALGORITHM_MAP = { + 'RANDOM': Algorithm.RANDOM, + 'ROUND_ROBIN': Algorithm.ROUND_ROBIN, + 'LEAST_CONNECTIONS': Algorithm.LEAST_CONNECTIONS + } + _ALGORITHM_TO_VALUE_MAP = reverse_dict(_VALUE_TO_ALGORITHM_MAP) + + def list_protocols(self): + return self._to_protocols( + self.connection.request('/loadbalancers/protocols').object) + + def 
list_balancers(self): + return self._to_balancers( + self.connection.request('/loadbalancers').object) + + def create_balancer(self, name, members, protocol='http', + port=80, algorithm=DEFAULT_ALGORITHM): + algorithm = self._algorithm_to_value(algorithm) + + balancer_object = {"loadBalancer": + {"name": name, + "port": port, + "algorithm": algorithm, + "protocol": protocol.upper(), + "virtualIps": [{"type": "PUBLIC"}], + "nodes": [{"address": member.ip, + "port": member.port, + "condition": "ENABLED"} for member in members], + } + } + + resp = self.connection.request('/loadbalancers', + method='POST', + data=json.dumps(balancer_object)) + return self._to_balancer(resp.object["loadBalancer"]) + + def destroy_balancer(self, balancer): + uri = '/loadbalancers/%s' % (balancer.id) + resp = self.connection.request(uri, method='DELETE') + + return resp.status == 202 + + def get_balancer(self, balancer_id): + uri = '/loadbalancers/%s' % (balancer_id) + resp = self.connection.request(uri) + + return self._to_balancer(resp.object["loadBalancer"]) + + def balancer_attach_member(self, balancer, member): + ip = member.ip + port = member.port + + member_object = {"nodes": + [{"port": port, + "address": ip, + "condition": "ENABLED"}] + } + + uri = '/loadbalancers/%s/nodes' % (balancer.id) + resp = self.connection.request(uri, method='POST', + data=json.dumps(member_object)) + return self._to_members(resp.object)[0] + + def balancer_detach_member(self, balancer, member): + # Loadbalancer always needs to have at least 1 member. + # Last member cannot be detached. You can only disable it or destroy the + # balancer. 
+ uri = '/loadbalancers/%s/nodes/%s' % (balancer.id, member.id) + resp = self.connection.request(uri, method='DELETE') + + return resp.status == 202 + + def balancer_list_members(self, balancer): + uri = '/loadbalancers/%s/nodes' % (balancer.id) + return self._to_members( + self.connection.request(uri).object) + + def _to_protocols(self, object): + protocols = [] + for item in object["protocols"]: + protocols.append(item['name'].lower()) + return protocols + + def _to_balancers(self, object): + return [ self._to_balancer(el) for el in object["loadBalancers"] ] + + def _to_balancer(self, el): + lb = LoadBalancer(id=el["id"], + name=el["name"], + state=self.LB_STATE_MAP.get( + el["status"], State.UNKNOWN), + ip=el["virtualIps"][0]["address"], + port=el["port"], + driver=self.connection.driver) + return lb + + def _to_members(self, object): + return [ self._to_member(el) for el in object["nodes"] ] + + def _to_member(self, el): + lbmember = Member(id=el["id"], + ip=el["address"], + port=el["port"]) + return lbmember diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/providers.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/providers.py new file mode 100644 index 0000000000000000000000000000000000000000..fb12e82813b5772ec95af57b2daffcca466c1237 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/providers.py @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import get_driver as get_provider_driver +from libcloud.loadbalancer.types import Provider + +__all__ = [ + "Provider", + "DRIVERS", + "get_driver", + ] + +DRIVERS = { + Provider.RACKSPACE_US: + ('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'), + Provider.GOGRID: + ('libcloud.loadbalancer.drivers.gogrid', 'GoGridLBDriver'), +} + +def get_driver(provider): + return get_provider_driver(DRIVERS, provider) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/types.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/types.py new file mode 100644 index 0000000000000000000000000000000000000000..79c214436b80e59e4938543fb69fa3ee75149d73 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/loadbalancer/types.py @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + "Provider", + "State", + "LibcloudLBError", + "LibcloudLBImmutableError", + ] + +from libcloud.common.types import LibcloudError + +class LibcloudLBError(LibcloudError): pass + +class LibcloudLBImmutableError(LibcloudLBError): pass + +class Provider(object): + RACKSPACE_US = 0 + GOGRID = 1 + +class State(object): + """ + Standart states for a loadbalancer + + @cvar RUNNING: loadbalancer is running and ready to use + @cvar UNKNOWN: loabalancer state is unknown + """ + + RUNNING = 0 + PENDING = 1 + UNKNOWN = 2 diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/pricing.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/pricing.py new file mode 100644 index 0000000000000000000000000000000000000000..6283bdb580073aa4177ad4b76f36fe04c58d3755 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/pricing.py @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import with_statement +""" +A class which handles loading the pricing files. 
+""" + +try: + import json +except: + import simplejson as json + +import os.path +from os.path import join as pjoin + +PRICING_FILE_PATH = 'data/pricing.json' + +PRICING_DATA = { + 'compute': {}, + 'storage': {} +} + +def get_pricing_file_path(file_path=None): + pricing_directory = os.path.dirname(os.path.abspath(__file__)) + pricing_file_path = pjoin(pricing_directory, PRICING_FILE_PATH) + + return pricing_file_path + +def get_pricing(driver_type, driver_name, pricing_file_path=None): + """ + Return pricing for the provided driver. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + + @return C{dict} Dictionary with pricing where a key name iz size ID and + the value is a price. + """ + if not driver_type in [ 'compute', 'storage' ]: + raise AttributeError('Invalid driver type: %s', driver_type) + + if driver_name in PRICING_DATA[driver_type]: + return PRICING_DATA[driver_type][driver_name] + + if not pricing_file_path: + pricing_file_path = get_pricing_file_path(file_path=pricing_file_path) + + with open(pricing_file_path) as fp: + content = fp.read() + + pricing = json.loads(content)[driver_name] + + PRICING_DATA[driver_type][driver_name] = pricing + return pricing + +def set_pricing(driver_type, driver_name, pricing): + """ + Populate the driver pricing dictionary. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + + @type pricing: C{dict} + @param pricing: Dictionary where a key is a size ID and a value is a price. + """ + + PRICING_DATA[driver_type][driver_name] = pricing + +def get_size_price(driver_type, driver_name, size_id): + """ + Return price for the provided size. 
+ + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + + @type size_id: C{int/str} + @param size_id: Unique size ID (can be an integer or a string - depends on + the driver) + + @return C{int} Size price. + """ + pricing = get_pricing(driver_type=driver_type, driver_name=driver_name) + price = float(pricing[size_id]) + return price + +def invalidate_pricing_cache(): + """ + Invalidate the cache for all the drivers. + """ + PRICING_DATA['compute'] = {} + PRICING_DATA['storage'] = {} + +def invalidate_module_pricing_cache(driver_type, driver_name): + """ + Invalidate the cache for the specified driver. + + @type driver_type: C{str} + @param driver_type: Driver type ('compute' or 'storage') + + @type driver_name: C{str} + @param driver_name: Driver name + """ + if driver_name in PRICING_DATA[driver_type]: + del PRICING_DATA[driver_type][driver_name] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/providers.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/providers.py new file mode 100644 index 0000000000000000000000000000000000000000..e27a31fdf719039302eb49f18245963bf5dc1890 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/providers.py @@ -0,0 +1,27 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.providers import ( + DRIVERS, + Provider, + get_driver, + ) +__all__ = [ + "DRIVERS", + "Provider", + "get_driver", + ] +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/security.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/security.py new file mode 100644 index 0000000000000000000000000000000000000000..9ac72f51df5fa73a2da768e36e88654dd621c684 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/security.py @@ -0,0 +1,54 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Security (SSL) Settings + +Usage: + import libcloud.security + libcloud.security.VERIFY_SSL_CERT = True + + # optional + libcloud.security.CA_CERTS_PATH.append("/path/to/cacert.txt") +""" +# For backward compatibility this option is disabled by default +VERIFY_SSL_CERT = False + +# File containing one or more PEM-encoded CA certificates +# concatenated together +CA_CERTS_PATH = [ + # centos/fedora: openssl + '/etc/pki/tls/certs/ca-bundle.crt', + + # debian/ubuntu/arch/gentoo: ca-certificates + '/etc/ssl/certs/ca-certificates.crt', + + # freebsd: ca_root_nss + '/usr/local/share/certs/ca-root-nss.crt', + + # macports: curl-ca-bundle + '/opt/local/share/curl/curl-ca-bundle.crt', +] + +CA_CERTS_UNAVAILABLE_MSG = ( + 'Warning: No CA Certificates were found in CA_CERTS_PATH. ' + 'Toggling VERIFY_SSL_CERT to False.' +) + +VERIFY_SSL_DISABLED_MSG = ( + 'SSL certificate verification is disabled, this can pose a ' + 'security risk. For more information how to enable the SSL ' + 'certificate verification, please visit the libcloud ' + 'documentation.' +) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/ssh.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/ssh.py new file mode 100644 index 0000000000000000000000000000000000000000..8901d7cab4ed330f25292eaede5f415f47c7d60e --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/ssh.py @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from libcloud.utils import deprecated_warning +from libcloud.compute.ssh import ( + BaseSSHClient, + ParamikoSSHClient, + ShellOutSSHClient, + SSHClient, + have_paramiko) + +__all__ = [ + "BaseSSHClient", + "ParamikoSSHClient", + "ShellOutSSHClient", + "SSHClient", + "have_paramiko"] +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/base.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/base.py new file mode 100644 index 0000000000000000000000000000000000000000..b7dd09e99c55a27240c867c3c29431577d9f5fcd --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/base.py @@ -0,0 +1,627 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Backward compatibility for Python 2.5 +from __future__ import with_statement + +import httplib +import os.path # pylint: disable-msg=W0404 +import hashlib +from os.path import join as pjoin + +from libcloud import utils +from libcloud.common.types import LibcloudError +from libcloud.common.base import ConnectionKey +from libcloud.storage.types import ObjectDoesNotExistError + +CHUNK_SIZE = 8096 + +class Object(object): + """ + Represents an object (BLOB). + """ + + def __init__(self, name, size, hash, extra, meta_data, container, + driver): + """ + @type name: C{str} + @param name: Object name (must be unique per container). + + @type size: C{int} + @param size: Object size in bytes. + + @type hash: C{string} + @param hash Object hash. + + @type container: C{Container} + @param container: Object container. + + @type extra: C{dict} + @param extra: Extra attributes. + + @type meta_data: C{dict} + @param meta_data: Optional object meta data. + + @type driver: C{StorageDriver} + @param driver: StorageDriver instance. 
+ """ + + self.name = name + self.size = size + self.hash = hash + self.container = container + self.extra = extra or {} + self.meta_data = meta_data or {} + self.driver = driver + + def get_cdn_url(self): + return self.driver.get_object_cdn_url(obj=self) + + def enable_cdn(self): + return self.driver.enable_object_cdn(obj=self) + + def download(self, destination_path, overwrite_existing=False, + delete_on_failure=True): + return self.driver.download_object(self, destination_path, + overwrite_existing, + delete_on_failure) + + def as_stream(self, chunk_size=None): + return self.driver.download_object_as_stream(self, chunk_size) + + def delete(self): + return self.driver.delete_object(self) + + def __repr__(self): + return ('' % + (self.name, self.size, self.hash, self.driver.name)) + +class Container(object): + """ + Represents a container (bucket) which can hold multiple objects. + """ + + def __init__(self, name, extra, driver): + """ + @type name: C{str} + @param name: Container name (must be unique). + + @type extra: C{dict} + @param extra: Extra attributes. + + @type driver: C{StorageDriver} + @param driver: StorageDriver instance. 
+ """ + + self.name = name + self.extra = extra or {} + self.driver = driver + + def list_objects(self): + return self.driver.list_container_objects(container=self) + + def get_cdn_url(self): + return self.driver.get_container_cdn_url(container=self) + + def enable_cdn(self): + return self.driver.enable_container_cdn(container=self) + + def get_object(self, object_name): + return self.driver.get_object(container_name=self.name, + object_name=object_name) + + def upload_object(self, file_path, object_name, extra=None, verify_hash=True): + return self.driver.upload_object( + file_path, self, object_name, extra, verify_hash) + + def upload_object_via_stream(self, iterator, object_name, extra=None): + return self.driver.upload_object_via_stream( + iterator, self, object_name, extra) + + def download_object(self, obj, destination_path, overwrite_existing=False, + delete_on_failure=True): + return self.driver.download_object(obj, destination_path) + + def download_object_as_stream(self, obj, chunk_size=None): + return self.driver.download_object_as_stream(obj, chunk_size) + + def delete_object(self, obj): + return self.driver.delete_object(obj) + + def delete(self): + return self.driver.delete_container(self) + + def __repr__(self): + return ('' + % (self.name, self.driver.name)) + +class StorageDriver(object): + """ + A base StorageDriver to derive from. 
+ """ + + connectionCls = ConnectionKey + name = None + hash_type = 'md5' + + def __init__(self, key, secret=None, secure=True, host=None, port=None): + self.key = key + self.secret = secret + self.secure = secure + args = [self.key] + + if self.secret != None: + args.append(self.secret) + + args.append(secure) + + if host != None: + args.append(host) + + if port != None: + args.append(port) + + self.connection = self.connectionCls(*args) + + self.connection.driver = self + self.connection.connect() + + def list_containters(self): + raise NotImplementedError( + 'list_containers not implemented for this driver') + + def list_container_objects(self, container): + """ + Return a list of objects for the given container. + + @type container: C{Container} + @param container: Container instance + + @return A list of Object instances. + """ + raise NotImplementedError( + 'list_objects not implemented for this driver') + + def get_container(self, container_name): + """ + Return a container instance. + + @type container_name: C{str} + @param container_name: Container name. + + @return: C{Container} instance. + """ + raise NotImplementedError( + 'get_object not implemented for this driver') + + def get_container_cdn_url(self, container): + """ + Return a container CDN URL. + + @type container: C{Container} + @param container: Container instance + + @return A CDN URL for this container. + """ + raise NotImplementedError( + 'get_container_cdn_url not implemented for this driver') + + def get_object(self, container_name, object_name): + """ + Return an object instance. + + @type container_name: C{str} + @param container_name: Container name. + + @type object_name: C{str} + @param object_name: Object name. + + @return: C{Object} instance. + """ + raise NotImplementedError( + 'get_object not implemented for this driver') + + def get_object_cdn_url(self, obj): + """ + Return a container CDN URL. 
+ + @type obj: C{Object} + @param obj: Object instance + + @return A CDN URL for this object. + """ + raise NotImplementedError( + 'get_object_cdn_url not implemented for this driver') + + def enable_container_cdn(self, container): + raise NotImplementedError( + 'enable_container_cdn not implemented for this driver') + + def enable_object_cdn(self, obj): + raise NotImplementedError( + 'enable_object_cdn not implemented for this driver') + + def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): + """ + Download an object to the specified destination path. + + @type obj; C{Object} + @param obj: Object instance. + + @type destination_path: C{str} + @type destination_path: Full path to a file or a directory where the + incoming file will be saved. + + @type overwrite_existing: C{bool} + @type overwrite_existing: True to overwrite an existing file, defaults to False. + + @type delete_on_failure: C{bool} + @param delete_on_failure: True to delete a partially downloaded file if + the download was not successful (hash mismatch / file size). + + @return C{bool} True if an object has been successfully downloaded, False + otherwise. + """ + raise NotImplementedError( + 'download_object not implemented for this driver') + + def download_object_as_stream(self, obj, chunk_size=None): + """ + Return a generator which yields object data. + + @type obj: C{Object} + @param obj: Object instance + + @type chunk_size: C{int} + @param chunk_size: Optional chunk size (in bytes). + """ + raise NotImplementedError( + 'download_object_as_stream not implemented for this driver') + + def upload_object(self, file_path, container, object_name, extra=None, + verify_hash=True): + """ + Upload an object. + + @type file_path: C{str} + @param file_path: Path to the object on disk. + + @type container: C{Container} + @param container: Destination container. + + @type object_name: C{str} + @param object_name: Object name. 
+ + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + + @type verify_hash: C{boolean} + @param verify_hash: True to do a file integrity check. + """ + raise NotImplementedError( + 'upload_object not implemented for this driver') + + def upload_object_via_stream(self, iterator, container, + object_name, + extra=None): + """ + @type iterator: C{object} + @param iterator: An object which implements the iterator interface. + + @type container: C{Container} + @param container: Destination container. + + @type object_name: C{str} + @param object_name: Object name. + + @type extra: C{dict} + @param extra: (optional) Extra attributes (driver specific). + """ + raise NotImplementedError( + 'upload_object_via_stream not implemented for this driver') + + def delete_object(self, obj): + """ + Delete an object. + + @type obj: C{Object} + @param obj: Object instance. + + @return: C{bool} True on success. + """ + raise NotImplementedError( + 'delete_object not implemented for this driver') + + def create_container(self, container_name): + """ + Create a new container. + + @type container_name: C{str} + @param container_name: Container name. + + @return C{Container} instance on success. + """ + raise NotImplementedError( + 'create_container not implemented for this driver') + + def delete_container(self, container): + """ + Delete a container. + + @type container: C{Container} + @param container: Container instance + + @return C{bool} True on success, False otherwise. + """ + raise NotImplementedError( + 'delete_container not implemented for this driver') + + def _get_object(self, obj, callback, callback_kwargs, response, + success_status_code=None): + """ + Call passed callback and start transfer of the object' + + @type obj: C{Object} + @param obj: Object instance. 
+ + @type callback: C{Function} + @param callback: Function which is called with the passed callback_kwargs + + @type callback_kwargs: C{dict} + @param callback_kwargs: Keyword arguments which are passed to the callback. + + @typed response: C{Response} + @param response: Response instance. + + @type success_status_code: C{int} + @param success_status_code: Status code which represents a successful + transfer (defaults to httplib.OK) + + @return C{bool} True on success, False otherwise. + """ + success_status_code = success_status_code or httplib.OK + + if response.status == success_status_code: + return callback(**callback_kwargs) + elif response.status == httplib.NOT_FOUND: + raise ObjectDoesNotExistError(object_name=obj.name, + value='', driver=self) + + raise LibcloudError(value='Unexpected status code: %s' % + (response.status), + driver=self) + + def _save_object(self, response, obj, destination_path, + overwrite_existing=False, delete_on_failure=True, + chunk_size=None): + """ + Save object to the provided path. + + @type response: C{RawResponse} + @param response: RawResponse instance. + + @type obj: C{Object} + @param obj: Object instance. + + @type destination_path: C{Str} + @param destination_path: Destination directory. + + @type delete_on_failure: C{bool} + @param delete_on_failure: True to delete partially downloaded object if + the download fails. + @type overwrite_existing: C{bool} + @param overwrite_existing: True to overwrite a local path if it already + exists. + + @type chunk_size: C{int} + @param chunk_size: Optional chunk size (defaults to L{libcloud.storage.base.CHUNK_SIZE}, 8kb) + + @return C{bool} True on success, False otherwise. 
+ """ + + chunk_size = chunk_size or CHUNK_SIZE + + base_name = os.path.basename(destination_path) + + if not base_name and not os.path.exists(destination_path): + raise LibcloudError( + value='Path %s does not exist' % (destination_path), + driver=self) + + if not base_name: + file_path = pjoin(destination_path, obj.name) + else: + file_path = destination_path + + if os.path.exists(file_path) and not overwrite_existing: + raise LibcloudError( + value='File %s already exists, but ' % (file_path) + + 'overwrite_existing=False', + driver=self) + + stream = utils.read_in_chunks(response, chunk_size) + + try: + data_read = stream.next() + except StopIteration: + # Empty response? + return False + + bytes_transferred = 0 + + with open(file_path, 'wb') as file_handle: + while len(data_read) > 0: + file_handle.write(data_read) + bytes_transferred += len(data_read) + + try: + data_read = stream.next() + except StopIteration: + data_read = '' + + if int(obj.size) != int(bytes_transferred): + # Transfer failed, support retry? + if delete_on_failure: + try: + os.unlink(file_path) + except Exception: + pass + + return False + + return True + + def _upload_object(self, object_name, content_type, upload_func, + upload_func_kwargs, request_path, request_method='PUT', + headers=None, file_path=None, iterator=None): + """ + Helper function for setting common request headers and calling the + passed in callback which uploads an object. 
+ """ + headers = headers or {} + + if file_path and not os.path.exists(file_path): + raise OSError('File %s does not exist' % (file_path)) + + if not content_type: + if file_path: + name = file_path + else: + name = object_name + content_type, _ = utils.guess_file_mime_type(name) + + if not content_type: + raise AttributeError( + 'File content-type could not be guessed and' + + ' no content_type value provided') + + if iterator: + headers['Transfer-Encoding'] = 'chunked' + upload_func_kwargs['chunked'] = True + else: + file_size = os.path.getsize(file_path) + headers['Content-Length'] = file_size + upload_func_kwargs['chunked'] = False + + headers['Content-Type'] = content_type + response = self.connection.request(request_path, + method=request_method, data=None, + headers=headers, raw=True) + + upload_func_kwargs['response'] = response + success, data_hash, bytes_transferred = upload_func(**upload_func_kwargs) + + if not success: + raise LibcloudError(value='Object upload failed, Perhaps a timeout?', + driver=self) + + result_dict = { 'response': response, 'data_hash': data_hash, + 'bytes_transferred': bytes_transferred } + return result_dict + + def _stream_data(self, response, iterator, chunked=False, + calculate_hash=True, chunk_size=None): + """ + Stream a data over an http connection. + + @type response: C{RawResponse} + @param response: RawResponse object. + + @type iterator: C{} + @param response: An object which implements an iterator interface + or a File like object with read method. + + @type chunk_size: C{int} + @param chunk_size: Optional chunk size (defaults to CHUNK_SIZE) + + @return C{tuple} First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. 
+ """ + + chunk_size = chunk_size or CHUNK_SIZE + + data_hash = None + if calculate_hash: + data_hash = hashlib.md5() + + generator = utils.read_in_chunks(iterator, chunk_size) + + bytes_transferred = 0 + try: + chunk = generator.next() + except StopIteration: + # No data? + return False, None, None + + while len(chunk) > 0: + try: + if chunked: + response.connection.connection.send('%X\r\n' % + (len(chunk))) + response.connection.connection.send(chunk) + response.connection.connection.send('\r\n') + else: + response.connection.connection.send(chunk) + except Exception: + # TODO: let this exception propagate + # Timeout, etc. + return False, None, bytes_transferred + + bytes_transferred += len(chunk) + if calculate_hash: + data_hash.update(chunk) + + try: + chunk = generator.next() + except StopIteration: + chunk = '' + + if chunked: + response.connection.connection.send('0\r\n\r\n') + + if calculate_hash: + data_hash = data_hash.hexdigest() + + return True, data_hash, bytes_transferred + + def _upload_file(self, response, file_path, chunked=False, + calculate_hash=True): + """ + Upload a file to the server. + + @type response: C{RawResponse} + @param response: RawResponse object. + + @type file_path: C{str} + @param file_path: Path to a local file. + + @type iterator: C{} + @param response: An object which implements an iterator interface (File + object, etc.) + + @return C{tuple} First item is a boolean indicator of success, second + one is the uploaded data MD5 hash and the third one + is the number of transferred bytes. 
+ """ + with open (file_path, 'rb') as file_handle: + success, data_hash, bytes_transferred = ( + self._stream_data( + response=response, + iterator=iter(file_handle), + chunked=chunked, + calculate_hash=calculate_hash)) + + return success, data_hash, bytes_transferred diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe8b04f38895763c7e97f00c17be610babdcc15f --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/__init__.py @@ -0,0 +1,23 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Drivers for working with different providers +""" + +__all__ = [ + 'dummy', + 'cloudfiles' +] diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/cloudfiles.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/cloudfiles.py new file mode 100644 index 0000000000000000000000000000000000000000..0f882f1e3552a7be0570ad34950583c9ec75f29b --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/cloudfiles.py @@ -0,0 +1,504 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import httplib
import urllib

try:
    import json
except ImportError:
    import simplejson as json

from libcloud.utils import read_in_chunks
from libcloud.common.types import MalformedResponseError, LibcloudError
from libcloud.common.base import Response, RawResponse

from libcloud.storage.providers import Provider
from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.types import InvalidContainerNameError

from libcloud.common.rackspace import (
    AUTH_HOST_US, AUTH_HOST_UK, RackspaceBaseConnection)

CDN_HOST = 'cdn.clouddrive.com'
API_VERSION = 'v1.0'


class CloudFilesResponse(Response):
    """
    Response class for the Cloud Files API.
    """

    # 404 and 409 carry driver-level meaning (missing / conflicting
    # resources) and are mapped to typed exceptions by the driver, so they
    # count as "successful" HTTP exchanges here.
    valid_response_codes = [ httplib.NOT_FOUND, httplib.CONFLICT ]

    def success(self):
        i = int(self.status)
        return i >= 200 and i <= 299 or i in self.valid_response_codes

    def parse_body(self):
        """
        Parse the response body based on its content-type.

        @return: Decoded JSON for C{application/json} responses, the raw
                 body for everything else, or C{None} for an empty body.
        """
        if not self.body:
            return None

        if 'content-type' in self.headers:
            key = 'content-type'
        elif 'Content-Type' in self.headers:
            key = 'Content-Type'
        else:
            raise LibcloudError('Missing content-type header')

        content_type = self.headers[key]
        # Strip any parameters, e.g. "application/json; charset=utf-8".
        if content_type.find(';') != -1:
            content_type = content_type.split(';')[0]

        if content_type == 'application/json':
            try:
                data = json.loads(self.body)
            except Exception:
                raise MalformedResponseError('Failed to parse JSON',
                                             body=self.body,
                                             driver=CloudFilesStorageDriver)
        else:
            # text/plain and any other content type is returned verbatim.
            data = self.body

        return data

class CloudFilesRawResponse(CloudFilesResponse, RawResponse):
    pass

class CloudFilesConnection(RackspaceBaseConnection):
    """
    Base connection class for the Cloudfiles driver.
    """

    responseCls = CloudFilesResponse
    rawResponseCls = CloudFilesRawResponse
    auth_host = None
    _url_key = "storage_url"

    def __init__(self, user_id, key, secure=True):
        super(CloudFilesConnection, self).__init__(user_id, key, secure=secure)
        self.api_version = API_VERSION
        self.accept_format = 'application/json'

    def request(self, action, params=None, data='', headers=None, method='GET',
                raw=False, cdn_request=False):
        if not headers:
            headers = {}
        if not params:
            params = {}

        # CDN requests go to the CDN management endpoint instead of the
        # regular storage endpoint.
        if cdn_request:
            host = self._get_host(url_key='cdn_management_url')
        else:
            host = None

        # Due to first-run authentication request, we may not have a path
        if self.request_path:
            action = self.request_path + action
            params['format'] = 'json'
        if method in [ 'POST', 'PUT' ]:
            headers.update({'Content-Type': 'application/json; charset=UTF-8'})

        return super(CloudFilesConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers,
            raw=raw, host=host
        )


class CloudFilesUSConnection(CloudFilesConnection):
    """
    Connection class for the Cloudfiles US endpoint.
    """

    auth_host = AUTH_HOST_US


class CloudFilesUKConnection(CloudFilesConnection):
    """
    Connection class for the Cloudfiles UK endpoint.
    """

    auth_host = AUTH_HOST_UK


class CloudFilesStorageDriver(StorageDriver):
    """
    Base CloudFiles driver.

    You should never create an instance of this class directly but use the
    US/UK subclasses instead.
    """
    name = 'CloudFiles'
    connectionCls = CloudFilesConnection
    hash_type = 'md5'

    def list_containers(self):
        response = self.connection.request('')

        if response.status == httplib.NO_CONTENT:
            return []
        elif response.status == httplib.OK:
            return self._to_container_list(json.loads(response.body))

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def list_container_objects(self, container):
        response = self.connection.request('/%s' % (container.name))

        if response.status == httplib.NO_CONTENT:
            # Empty or inexistent container
            return []
        elif response.status == httplib.OK:
            return self._to_object_list(json.loads(response.body), container)

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def get_container(self, container_name):
        response = self.connection.request('/%s' % (container_name),
                                           method='HEAD')

        if response.status == httplib.NO_CONTENT:
            container = self._headers_to_container(
                container_name, response.headers)
            return container
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(None, self, container_name)

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def get_object(self, container_name, object_name):
        # Also validates that the container exists (raises otherwise).
        container = self.get_container(container_name)
        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='HEAD')
        if response.status in [ httplib.OK, httplib.NO_CONTENT ]:
            obj = self._headers_to_object(
                object_name, container, response.headers)
            return obj
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(None, self, object_name)

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def get_container_cdn_url(self, container):
        container_name = container.name
        response = self.connection.request('/%s' % (container_name),
                                           method='HEAD',
                                           cdn_request=True)

        if response.status == httplib.NO_CONTENT:
            cdn_url = response.headers['x-cdn-uri']
            return cdn_url
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value='',
                                             container_name=container_name,
                                             driver=self)

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def get_object_cdn_url(self, obj):
        container_cdn_url = self.get_container_cdn_url(container=obj.container)
        return '%s/%s' % (container_cdn_url, obj.name)

    def enable_container_cdn(self, container):
        container_name = container.name
        response = self.connection.request('/%s' % (container_name),
                                           method='PUT',
                                           cdn_request=True)

        if response.status in [ httplib.CREATED, httplib.ACCEPTED ]:
            return True

        return False

    def create_container(self, container_name):
        container_name = self._clean_container_name(container_name)
        response = self.connection.request(
            '/%s' % (container_name), method='PUT')

        if response.status == httplib.CREATED:
            extra = { 'object_count': 0 }
            container = Container(name=container_name, extra=extra, driver=self)

            return container
        elif response.status == httplib.ACCEPTED:
            # Accepted means the container already exists.
            error = ContainerAlreadyExistsError(None, self, container_name)
            raise error

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def delete_container(self, container):
        name = self._clean_container_name(container.name)

        # Only empty container can be deleted
        response = self.connection.request('/%s' % (name), method='DELETE')

        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value='',
                                             container_name=name, driver=self)
        elif response.status == httplib.CONFLICT:
            # @TODO: Add "delete_all_objects" parameter?
            raise ContainerIsNotEmptyError(value='',
                                           container_name=name, driver=self)

        # BUG FIX: previously fell through and returned None on any other
        # status; raise like every sibling method does.
        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        container_name = obj.container.name
        object_name = obj.name
        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='GET', raw=True)

        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={'obj': obj,
                                 'response': response.response,
                                 'destination_path': destination_path,
                                 'overwrite_existing': overwrite_existing,
                                 'delete_on_failure': delete_on_failure},
                                success_status_code=httplib.OK)

    def download_object_as_stream(self, obj, chunk_size=None):
        container_name = obj.container.name
        object_name = obj.name
        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='GET', raw=True)

        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={ 'iterator': response.response,
                                                  'chunk_size': chunk_size},
                                success_status_code=httplib.OK)

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True):
        """
        Upload an object.

        Note: This will override file with a same name if it already exists.
        """
        upload_func = self._upload_file
        upload_func_kwargs = { 'file_path': file_path }

        return self._put_object(container=container, object_name=object_name,
                                upload_func=upload_func,
                                upload_func_kwargs=upload_func_kwargs,
                                extra=extra, file_path=file_path,
                                verify_hash=verify_hash)

    def upload_object_via_stream(self, iterator,
                                 container, object_name, extra=None):
        if isinstance(iterator, file):
            iterator = iter(iterator)

        upload_func = self._stream_data
        upload_func_kwargs = { 'iterator': iterator }

        return self._put_object(container=container, object_name=object_name,
                                upload_func=upload_func,
                                upload_func_kwargs=upload_func_kwargs,
                                extra=extra, iterator=iterator)

    def delete_object(self, obj):
        container_name = self._clean_container_name(obj.container.name)
        object_name = self._clean_object_name(obj.name)

        response = self.connection.request(
            '/%s/%s' % (container_name, object_name), method='DELETE')

        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(value='', object_name=object_name,
                                          driver=self)

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def ex_get_meta_data(self):
        """
        Return account-level usage counters reported by the server.
        """
        response = self.connection.request('', method='HEAD')

        if response.status == httplib.NO_CONTENT:
            container_count = response.headers.get(
                'x-account-container-count', 'unknown')
            object_count = response.headers.get(
                'x-account-object-count', 'unknown')
            bytes_used = response.headers.get(
                'x-account-bytes-used', 'unknown')

            return { 'container_count': int(container_count),
                     'object_count': int(object_count),
                     'bytes_used': int(bytes_used) }

        raise LibcloudError('Unexpected status code: %s' % (response.status))

    def _put_object(self, container, object_name, upload_func,
                    upload_func_kwargs, extra=None, file_path=None,
                    iterator=None, verify_hash=True):
        extra = extra or {}
        container_name_cleaned = self._clean_container_name(container.name)
        object_name_cleaned = self._clean_object_name(object_name)
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)

        headers = {}
        if meta_data:
            for key, value in meta_data.iteritems():
                key = 'X-Object-Meta-%s' % (key)
                headers[key] = value

        request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned)
        result_dict = self._upload_object(object_name=object_name,
                                          content_type=content_type,
                                          upload_func=upload_func,
                                          upload_func_kwargs=upload_func_kwargs,
                                          request_path=request_path,
                                          request_method='PUT',
                                          headers=headers, file_path=file_path,
                                          iterator=iterator)

        response = result_dict['response'].response
        bytes_transferred = result_dict['bytes_transferred']
        server_hash = result_dict['response'].headers.get('etag', None)

        if response.status == httplib.EXPECTATION_FAILED:
            raise LibcloudError(value='Missing content-type header',
                                driver=self)
        elif verify_hash and not server_hash:
            raise LibcloudError(value='Server didn\'t return etag',
                                driver=self)
        elif (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value=('MD5 hash checksum does not match (expected=%s, ' +
                       'actual=%s)') % (result_dict['data_hash'], server_hash),
                object_name=object_name, driver=self)
        elif response.status == httplib.CREATED:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra=None, meta_data=meta_data, container=container,
                driver=self)

            return obj
        else:
            # @TODO: Add test case for this condition (probably 411)
            raise LibcloudError('status_code=%s' % (response.status),
                                driver=self)

    def _clean_container_name(self, name):
        """
        Clean container name.
        """
        if name.startswith('/'):
            name = name[1:]
        name = urllib.quote(name)

        if name.find('/') != -1:
            raise InvalidContainerNameError(value='Container name cannot'
                                                  ' contain slashes',
                                            container_name=name, driver=self)

        if len(name) > 256:
            raise InvalidContainerNameError(value='Container name cannot be'
                                                  ' longer than 256 bytes',
                                            container_name=name, driver=self)

        return name

    def _clean_object_name(self, name):
        name = urllib.quote(name)
        return name

    def _to_container_list(self, response):
        # @TODO: Handle more then 10k containers - use "lazy list"?
        containers = []

        for container in response:
            extra = { 'object_count': int(container['count']),
                      'size': int(container['bytes'])}
            containers.append(Container(name=container['name'], extra=extra,
                                        driver=self))

        return containers

    def _to_object_list(self, response, container):
        objects = []

        for obj in response:
            name = obj['name']
            size = int(obj['bytes'])
            hash = obj['hash']
            extra = { 'content_type': obj['content_type'],
                      'last_modified': obj['last_modified'] }
            objects.append(Object(
                name=name, size=size, hash=hash, extra=extra,
                meta_data=None, container=container, driver=self))

        return objects

    def _headers_to_container(self, name, headers):
        size = int(headers.get('x-container-bytes-used', 0))
        object_count = int(headers.get('x-container-object-count', 0))

        extra = { 'object_count': object_count,
                  'size': size }
        container = Container(name=name, extra=extra, driver=self)
        return container

    def _headers_to_object(self, name, container, headers):
        size = int(headers.pop('content-length', 0))
        last_modified = headers.pop('last-modified', None)
        etag = headers.pop('etag', None)
        content_type = headers.pop('content-type', None)

        # Remaining x-object-meta-* headers carry user meta data.
        meta_data = {}
        for key, value in headers.iteritems():
            if key.find('x-object-meta-') != -1:
                key = key.replace('x-object-meta-', '')
                meta_data[key] = value

        extra = { 'content_type': content_type,
                  'last_modified': last_modified }

        obj = Object(name=name, size=size, hash=etag, extra=extra,
                     meta_data=meta_data, container=container, driver=self)
        return obj

class CloudFilesUSStorageDriver(CloudFilesStorageDriver):
    """
    Cloudfiles storage driver for the US endpoint.
    """

    type = Provider.CLOUDFILES_US
    name = 'CloudFiles (US)'
    connectionCls = CloudFilesUSConnection

class CloudFilesUKStorageDriver(CloudFilesStorageDriver):
    """
    Cloudfiles storage driver for the UK endpoint.
    """

    type = Provider.CLOUDFILES_UK
    name = 'CloudFiles (UK)'
    connectionCls = CloudFilesUKConnection
import os.path
import random
import hashlib

from libcloud.common.types import LibcloudError

from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerAlreadyExistsError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import ObjectDoesNotExistError


class DummyFileObject(file):
    """
    File-like object which produces random data, used as a stand-in for a
    real download/upload stream in tests.
    """

    def __init__(self, yield_count=5, chunk_len=10):
        self._yield_count = yield_count
        self._chunk_len = chunk_len

    def read(self, size):
        # Generator which yields `yield_count` random chunks of
        # `chunk_len` bytes each.
        i = 0

        while i < self._yield_count:
            yield self._get_chunk(self._chunk_len)
            i += 1

        raise StopIteration

    def _get_chunk(self, chunk_len):
        # BUG FIX: the original iterated over the result of
        # random.randint(97, 120) — an int, so this raised TypeError —
        # and would have produced a list instead of a string. Build a
        # string of `chunk_len` random characters instead.
        chunk = ''.join([chr(random.randint(97, 120))
                         for _ in range(chunk_len)])
        return chunk

    def __len__(self):
        return self._yield_count * self._chunk_len

class DummyIterator(object):
    """
    Iterator over a list of chunks which also maintains a running MD5 hash
    of everything it has yielded.
    """

    def __init__(self, data=None):
        self.hash = hashlib.md5()
        self._data = data or []
        self._current_item = 0

    def get_md5_hash(self):
        return self.hash.hexdigest()

    def next(self):
        if self._current_item == len(self._data):
            raise StopIteration

        value = self._data[self._current_item]
        self.hash.update(value)
        self._current_item += 1
        return value

class DummyStorageDriver(StorageDriver):
    """
    Dummy Storage driver.

    >>> from libcloud.storage.drivers.dummy import DummyStorageDriver
    >>> driver = DummyStorageDriver('key', 'secret')
    >>> container = driver.create_container(container_name='test container')
    >>> container
    <Container: name=test container, provider=Dummy Storage Provider>
    >>> container.name
    'test container'
    >>> container.extra['object_count']
    0
    """

    name = 'Dummy Storage Provider'

    def __init__(self, api_key, api_secret):
        # Credentials are ignored; all state lives in this in-memory dict
        # mapping container name -> {'container', 'objects', 'cdn_url'}.
        self._containers = {}

    def get_meta_data(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_meta_data()
        {'object_count': 0, 'container_count': 0, 'bytes_used': 0}
        >>> container = driver.create_container(container_name='test container 1')
        >>> container = driver.create_container(container_name='test container 2')
        >>> obj = container.upload_object_via_stream(
        ...  object_name='test object', iterator=DummyFileObject(5, 10), extra={})
        >>> driver.get_meta_data()
        {'object_count': 1, 'container_count': 2, 'bytes_used': 50}
        """

        container_count = len(self._containers)
        object_count = sum([ len(self._containers[container]['objects']) for
                             container in self._containers ])

        bytes_used = 0
        for container in self._containers:
            objects = self._containers[container]['objects']
            for _, obj in objects.iteritems():
                bytes_used += obj.size

        return { 'container_count': int(container_count),
                 'object_count': int(object_count),
                 'bytes_used': int(bytes_used) }

    def list_containers(self):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.list_containers()
        []
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container.name
        'test container 1'
        >>> container = driver.create_container(container_name='test container 2')
        >>> container
        <Container: name=test container 2, provider=Dummy Storage Provider>
        >>> container = driver.create_container(
        ...  container_name='test container 2') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerAlreadyExistsError:
        >>> container_list=driver.list_containers()
        >>> sorted([container.name for container in container_list])
        ['test container 1', 'test container 2']
        """

        return [container['container'] for container in
                self._containers.values()]

    def list_container_objects(self, container):
        container = self.get_container(container.name)

        # NOTE(review): Container instances are not given an 'objects'
        # attribute anywhere in this module — this looks unimplemented;
        # confirm against libcloud.storage.base.Container.
        return container.objects

    def get_container(self, container_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container.name
        'test container 1'
        >>> driver.get_container('test container 1')
        <Container: name=test container 1, provider=Dummy Storage Provider>
        """

        if container_name not in self._containers:
            raise ContainerDoesNotExistError(driver=self, value=None,
                                             container_name=container_name)

        return self._containers[container_name]['container']

    def get_container_cdn_url(self, container):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_container('unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container.name
        'test container 1'
        >>> container.get_cdn_url()
        'http://www.test.com/container/test_container_1'
        """

        if container.name not in self._containers:
            raise ContainerDoesNotExistError(driver=self, value=None,
                                             container_name=container.name)

        return self._containers[container.name]['cdn_url']

    def get_object(self, container_name, object_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> driver.get_object('unknown', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> driver.get_object(
        ...  'test container 1', 'unknown') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...      iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test object, size=50, ...>
        """

        self.get_container(container_name)
        container_objects = self._containers[container_name]['objects']
        if object_name not in container_objects:
            raise ObjectDoesNotExistError(object_name=object_name, value=None,
                                          driver=self)

        return container_objects[object_name]

    def get_object_cdn_url(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> obj = container.upload_object_via_stream(object_name='test object 5',
        ...      iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test object 5, size=50, ...>
        >>> obj.get_cdn_url()
        'http://www.test.com/object/test_object_5'
        """

        container_name = obj.container.name
        container_objects = self._containers[container_name]['objects']
        if obj.name not in container_objects:
            raise ObjectDoesNotExistError(object_name=obj.name, value=None,
                                          driver=self)

        return container_objects[obj.name].meta_data['cdn_url']

    def create_container(self, container_name):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(container_name='test container 1')
        >>> container
        <Container: name=test container 1, provider=Dummy Storage Provider>
        >>> container = driver.create_container(
        ...  container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerAlreadyExistsError:
        """

        if container_name in self._containers:
            raise ContainerAlreadyExistsError(container_name=container_name,
                                              value=None, driver=self)

        extra = { 'object_count': 0 }
        container = Container(name=container_name, extra=extra, driver=self)

        self._containers[container_name] = { 'container': container,
                                             'objects': {},
                                             'cdn_url':
                                             'http://www.test.com/container/%s' %
                                             (container_name.replace(' ', '_'))
                                            }
        return container

    def delete_container(self, container):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = Container(name = 'test container',
        ...    extra={'object_count': 0}, driver=driver)
        >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerDoesNotExistError:
        >>> container = driver.create_container(
        ...      container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> len(driver._containers)
        1
        >>> driver.delete_container(container=container)
        True
        >>> len(driver._containers)
        0
        >>> container = driver.create_container(
        ...    container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...   object_name='test object', iterator=DummyFileObject(5, 10), extra={})
        >>> driver.delete_container(container=container)#doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ContainerIsNotEmptyError:
        """

        container_name = container.name
        if container_name not in self._containers:
            raise ContainerDoesNotExistError(container_name=container_name,
                                             value=None, driver=self)

        container = self._containers[container_name]
        if len(container['objects']) > 0:
            raise ContainerIsNotEmptyError(container_name=container_name,
                                           value=None, driver=self)

        del self._containers[container_name]
        return True

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        kwargs_dict = {'obj': obj,
                       'response': DummyFileObject(),
                       'destination_path': destination_path,
                       'overwrite_existing': overwrite_existing,
                       'delete_on_failure': delete_on_failure}

        return self._save_object(**kwargs_dict)

    def download_object_as_stream(self, obj, chunk_size=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...   container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...    iterator=DummyFileObject(5, 10), extra={})
        >>> stream = container.download_object_as_stream(obj)
        >>> stream #doctest: +ELLIPSIS
        <closed file '<uninitialized file>', mode '<uninitialized file>' at 0x...>
        """

        return DummyFileObject()

    def upload_object(self, file_path, container, object_name, extra=None,
                      file_hash=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(container_name='test container 1')
        >>> container.upload_object(file_path='/tmp/inexistent.file',
        ...     object_name='test') #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        LibcloudError:
        >>> file_path = path = os.path.abspath(__file__)
        >>> file_size = os.path.getsize(file_path)
        >>> obj = container.upload_object(file_path=file_path, object_name='test')
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test, size=...>
        >>> obj.size == file_size
        True
        """

        # NOTE(review): parameter is named 'file_hash' here but the base
        # class uses 'verify_hash' — confirm whether callers rely on either
        # keyword before renaming.
        if not os.path.exists(file_path):
            raise LibcloudError(value='File %s does not exist' % (file_path),
                                driver=self)

        size = os.path.getsize(file_path)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def upload_object_via_stream(self, iterator, container,
                                 object_name, extra=None):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...    container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(
        ...   object_name='test object', iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test object, size=50, ...>
        """

        size = len(iterator)
        return self._add_object(container=container, object_name=object_name,
                                size=size, extra=extra)

    def delete_object(self, obj):
        """
        >>> driver = DummyStorageDriver('key', 'secret')
        >>> container = driver.create_container(
        ...   container_name='test container 1') #doctest: +IGNORE_EXCEPTION_DETAIL
        >>> obj = container.upload_object_via_stream(object_name='test object',
        ...   iterator=DummyFileObject(5, 10), extra={})
        >>> obj #doctest: +ELLIPSIS
        <Object: name=test object, size=50, ...>
        >>> container.delete_object(obj=obj)
        True
        >>> obj = Object(name='test object 2',
        ...    size=1000, hash=None, extra=None,
        ...    meta_data=None, container=container,driver=None)
        >>> container.delete_object(obj=obj) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ObjectDoesNotExistError:
        """

        container_name = obj.container.name
        object_name = obj.name
        # Raises ObjectDoesNotExistError if the object is unknown.
        obj = self.get_object(container_name=container_name,
                              object_name=object_name)

        del self._containers[container_name]['objects'][object_name]
        return True

    def _add_object(self, container, object_name, size, extra=None):
        container = self.get_container(container.name)

        extra = extra or {}
        meta_data = extra.get('meta_data', {})
        meta_data.update({'cdn_url': 'http://www.test.com/object/%s' %
                          (object_name.replace(' ', '_'))})
        obj = Object(name=object_name, size=size, extra=extra, hash=None,
                     meta_data=meta_data, container=container, driver=self)

        self._containers[container.name]['objects'][object_name] = obj
        return obj

if __name__ == "__main__":
    import doctest
    doctest.testmod()
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/s3.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/s3.py new file mode 100644 index 0000000000000000000000000000000000000000..12cd9a817c849609f7d1f799b9014313a781f6da --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/drivers/s3.py @@ -0,0 +1,476 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import time
import httplib
import urllib
import copy
import base64
import hmac

from hashlib import sha1
from xml.etree.ElementTree import Element, SubElement, tostring

from libcloud.utils import fixxpath, findtext, in_development_warning
from libcloud.utils import read_in_chunks
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.base import ConnectionUserAndKey, RawResponse
from libcloud.common.aws import AWSBaseResponse

from libcloud.storage.base import Object, Container, StorageDriver
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError

in_development_warning('libcloud.storage.drivers.s3')

# How long (seconds) a signed request URL remains valid.
EXPIRATION_SECONDS = 15 * 60

S3_US_STANDARD_HOST = 's3.amazonaws.com'
S3_US_WEST_HOST = 's3-us-west-1.amazonaws.com'
S3_EU_WEST_HOST = 's3-eu-west-1.amazonaws.com'
S3_AP_SOUTHEAST_HOST = 's3-ap-southeast-1.amazonaws.com'
S3_AP_NORTHEAST_HOST = 's3-ap-northeast-1.amazonaws.com'

API_VERSION = '2006-03-01'
NAMESPACE = 'http://s3.amazonaws.com/doc/%s/' % (API_VERSION)


class S3Response(AWSBaseResponse):
    """
    Response class which also treats a handful of error status codes as
    "successful" so the driver can translate them to typed exceptions.
    """

    valid_response_codes = [ httplib.NOT_FOUND, httplib.CONFLICT,
                             httplib.BAD_REQUEST ]

    def success(self):
        i = int(self.status)
        return (200 <= i <= 299) or i in self.valid_response_codes

    def parse_error(self):
        if self.status in [ httplib.UNAUTHORIZED, httplib.FORBIDDEN ]:
            raise InvalidCredsError(self.body)
        elif self.status == httplib.MOVED_PERMANENTLY:
            raise LibcloudError('This bucket is located in a different ' +
                                'region. Please use the correct driver.',
                                driver=S3StorageDriver)
        raise LibcloudError('Unknown error. Status code: %d' % (self.status),
                            driver=S3StorageDriver)

class S3RawResponse(S3Response, RawResponse):
    pass

class S3Connection(ConnectionUserAndKey):
    """
    Represents a single connection to the Amazon S3 endpoint.
    """
    # NOTE: fixed docstring -- previous text said "EC2 Endpoint" (copy-paste).

    host = 's3.amazonaws.com'
    responseCls = S3Response
    rawResponseCls = S3RawResponse

    def add_default_params(self, params):
        # Every request carries the access key id and an expiry timestamp;
        # the signature added in pre_connect_hook covers the expiry.
        expires = str(int(time.time()) + EXPIRATION_SECONDS)
        params['AWSAccessKeyId'] = self.user_id
        params['Expires'] = expires
        return params

    def pre_connect_hook(self, params, headers):
        params['Signature'] = self._get_aws_auth_param(method=self.method,
                                                       headers=headers,
                                                       params=params,
                                                       expires=params['Expires'],
                                                       secret_key=self.key,
                                                       path=self.action)
        return params, headers

    def _get_aws_auth_param(self, method, headers, params, expires,
                            secret_key, path='/'):
        """
        Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ) );

        StringToSign = HTTP-VERB + "\n" +
            Content-MD5 + "\n" +
            Content-Type + "\n" +
            Expires + "\n" +
            CanonicalizedAmzHeaders +
            CanonicalizedResource;
        """
        special_header_keys = [ 'content-md5', 'content-type', 'date' ]
        special_header_values = { 'date': '' }
        amz_header_values = {}

        # Partition the headers into the "special" ones that are part of
        # StringToSign and the canonicalized x-amz-* headers.
        headers_copy = copy.deepcopy(headers)
        for key, value in headers_copy.items():
            key_lower = key.lower()
            if key_lower in special_header_keys:
                special_header_values[key_lower] = value.lower().strip()
            elif key_lower.startswith('x-amz-'):
                amz_header_values[key_lower] = value.strip()

        # Absent Content-MD5 / Content-Type headers sign as empty strings.
        if 'content-md5' not in special_header_values:
            special_header_values['content-md5'] = ''

        if 'content-type' not in special_header_values:
            special_header_values['content-type'] = ''

        if expires:
            # When using query-string auth, the Expires value takes the
            # place of the Date header in the string to sign.
            special_header_values['date'] = str(expires)

        buf = [ method ]
        for key in sorted(special_header_values.keys()):
            buf.append(special_header_values[key])
        string_to_sign = '\n'.join(buf)

        amz_header_string = []
        for key in sorted(amz_header_values.keys()):
            amz_header_string.append('%s:%s' % (key, amz_header_values[key]))
        amz_header_string = '\n'.join(amz_header_string)

        values_to_sign = []
        for value in [ string_to_sign, amz_header_string, path ]:
            if value:
                values_to_sign.append(value)

        string_to_sign = '\n'.join(values_to_sign)
        b64_hmac = base64.b64encode(
            hmac.new(secret_key, string_to_sign, digestmod=sha1).digest()
        )
        return b64_hmac

class S3StorageDriver(StorageDriver):
    """
    Driver for the Amazon S3 "US standard" region.  Region-specific
    subclasses below only override the connection host and location name.
    """
    name = 'Amazon S3 (standard)'
    connectionCls = S3Connection
    hash_type = 'md5'
    ex_location_name = ''

    def list_containers(self):
        response = self.connection.request('/')
        if response.status == httplib.OK:
            containers = self._to_containers(obj=response.object,
                                             xpath='Buckets/Bucket')
            return containers

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)

    def list_container_objects(self, container):
        response = self.connection.request('/%s' % (container.name))
        if response.status == httplib.OK:
            objects = self._to_objs(obj=response.object,
                                    xpath='Contents', container=container)
            return objects

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)

    def get_container(self, container_name):
        # This is very inefficient, but afaik it's the only way to do it
        containers = self.list_containers()

        try:
            container = [ c for c in containers if c.name == container_name ][0]
        except IndexError:
            raise ContainerDoesNotExistError(value=None, driver=self,
                                             container_name=container_name)

        return container

    def get_object(self, container_name, object_name):
        # TODO: Figure out what is going on when the object or container
        # does not exist - it seems that Amazon just keeps the connection
        # open and doesn't return a response.
        container = self.get_container(container_name=container_name)
        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='HEAD')
        if response.status == httplib.OK:
            obj = self._headers_to_object(object_name=object_name,
                                          container=container,
                                          headers=response.headers)
            return obj

        raise ObjectDoesNotExistError(value=None, driver=self,
                                      object_name=object_name)

    def create_container(self, container_name):
        if self.ex_location_name:
            # A region-pinned bucket needs an explicit location constraint.
            root = Element('CreateBucketConfiguration')
            child = SubElement(root, 'LocationConstraint')
            child.text = self.ex_location_name
            data = tostring(root)
        else:
            data = ''

        response = self.connection.request('/%s' % (container_name),
                                           data=data,
                                           method='PUT')

        if response.status == httplib.OK:
            container = Container(name=container_name, extra=None, driver=self)
            return container
        elif response.status == httplib.CONFLICT:
            raise InvalidContainerNameError(value='Container with this name ' +
                                'already exists. The name must be unique among '
                                'all the containers in the system',
                                container_name=container_name, driver=self)
        elif response.status == httplib.BAD_REQUEST:
            raise InvalidContainerNameError(value='Container name contains ' +
                                            'invalid characters.',
                                            container_name=container_name,
                                            driver=self)

        raise LibcloudError('Unexpected status code: %s' % (response.status),
                            driver=self)

    def delete_container(self, container):
        # Note: All the objects in the container must be deleted first
        response = self.connection.request('/%s' % (container.name),
                                           method='DELETE')
        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.CONFLICT:
            raise ContainerIsNotEmptyError(value='Container must be empty' +
                                           ' before it can be deleted.',
                                           container_name=container.name,
                                           driver=self)
        elif response.status == httplib.NOT_FOUND:
            raise ContainerDoesNotExistError(value=None,
                                             driver=self,
                                             container_name=container.name)

        return False

    def download_object(self, obj, destination_path, overwrite_existing=False,
                        delete_on_failure=True):
        container_name = self._clean_object_name(obj.container.name)
        object_name = self._clean_object_name(obj.name)

        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='GET',
                                           raw=True)

        return self._get_object(obj=obj, callback=self._save_object,
                                response=response,
                                callback_kwargs={'obj': obj,
                                 'response': response.response,
                                 'destination_path': destination_path,
                                 'overwrite_existing': overwrite_existing,
                                 'delete_on_failure': delete_on_failure},
                                success_status_code=httplib.OK)

    def download_object_as_stream(self, obj, chunk_size=None):
        container_name = self._clean_object_name(obj.container.name)
        object_name = self._clean_object_name(obj.name)
        response = self.connection.request('/%s/%s' % (container_name,
                                                       object_name),
                                           method='GET', raw=True)

        return self._get_object(obj=obj, callback=read_in_chunks,
                                response=response,
                                callback_kwargs={ 'iterator': response.response,
                                                  'chunk_size': chunk_size},
                                success_status_code=httplib.OK)

    def upload_object(self, file_path, container, object_name, extra=None,
                      verify_hash=True, ex_storage_class=None):
        upload_func = self._upload_file
        upload_func_kwargs = { 'file_path': file_path }

        return self._put_object(container=container, object_name=object_name,
                                upload_func=upload_func,
                                upload_func_kwargs=upload_func_kwargs,
                                extra=extra, file_path=file_path,
                                verify_hash=verify_hash,
                                storage_class=ex_storage_class)

    def upload_object_via_stream(self, iterator, container, object_name,
                                 extra=None, ex_storage_class=None):
        # Amazon S3 does not support chunked transfer encoding.
        # Using multipart upload to "emulate" it would mean unnecessary
        # buffering of data in memory.
        raise NotImplementedError(
            'upload_object_via_stream not implemented for this driver')

    def delete_object(self, obj):
        object_name = self._clean_object_name(name=obj.name)
        response = self.connection.request('/%s/%s' % (obj.container.name,
                                                       object_name),
                                           method='DELETE')
        if response.status == httplib.NO_CONTENT:
            return True
        elif response.status == httplib.NOT_FOUND:
            raise ObjectDoesNotExistError(value=None, driver=self,
                                          object_name=obj.name)

        return False

    def _clean_object_name(self, name):
        # Percent-encode the object name so it is safe in a request path.
        name = urllib.quote(name)
        return name

    def _put_object(self, container, object_name, upload_func,
                    upload_func_kwargs, extra=None, file_path=None,
                    iterator=None, verify_hash=True, storage_class=None):
        headers = {}
        extra = extra or {}
        storage_class = storage_class or 'standard'
        if storage_class not in ['standard', 'reduced_redundancy']:
            raise ValueError('Invalid storage class value: %s' % (storage_class))

        headers['x-amz-storage-class'] = storage_class.upper()

        container_name_cleaned = container.name
        object_name_cleaned = self._clean_object_name(object_name)
        content_type = extra.get('content_type', None)
        meta_data = extra.get('meta_data', None)

        if meta_data:
            # User metadata travels as x-amz-meta-* request headers.
            for key, value in meta_data.items():
                key = 'x-amz-meta-%s' % (key)
                headers[key] = value

        request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned)
        # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE
        # here.
        # SIGPIPE is thrown if the provided container does not exist or the
        # user does not have correct permission
        result_dict = self._upload_object(object_name=object_name,
                                          content_type=content_type,
                                          upload_func=upload_func,
                                          upload_func_kwargs=upload_func_kwargs,
                                          request_path=request_path,
                                          request_method='PUT',
                                          headers=headers, file_path=file_path,
                                          iterator=iterator)

        response = result_dict['response']
        bytes_transferred = result_dict['bytes_transferred']
        headers = response.headers
        response = response.response
        # S3 wraps the ETag value in double quotes.
        server_hash = headers['etag'].replace('"', '')

        if (verify_hash and result_dict['data_hash'] != server_hash):
            raise ObjectHashMismatchError(
                value='MD5 hash checksum does not match',
                object_name=object_name, driver=self)
        elif response.status == httplib.OK:
            obj = Object(
                name=object_name, size=bytes_transferred, hash=server_hash,
                extra=None, meta_data=meta_data, container=container,
                driver=self)

            return obj
        else:
            raise LibcloudError('Unexpected status code, status_code=%s' % (response.status),
                                driver=self)

    def _to_containers(self, obj, xpath):
        return [ self._to_container(element) for element in \
                 obj.findall(fixxpath(xpath=xpath, namespace=NAMESPACE))]

    def _to_objs(self, obj, xpath, container):
        return [ self._to_obj(element, container) for element in \
                 obj.findall(fixxpath(xpath=xpath, namespace=NAMESPACE))]

    def _to_container(self, element):
        extra = {
            'creation_date': findtext(element=element, xpath='CreationDate',
                                      namespace=NAMESPACE)
        }

        container = Container(
            name=findtext(element=element, xpath='Name',
                          namespace=NAMESPACE),
            extra=extra,
            driver=self
        )

        return container

    def _headers_to_object(self, object_name, container, headers):
        meta_data = { 'content_type': headers['content-type'] }
        hash = headers['etag'].replace('"', '')

        # Fixed: headers are strings -- convert content-length to int so
        # Object.size has the same type as everywhere else in the driver.
        obj = Object(name=object_name, size=int(headers['content-length']),
                     hash=hash, extra=None,
                     meta_data=meta_data,
                     container=container,
                     driver=self)
        return obj

    def _to_obj(self, element, container):
        owner_id = findtext(element=element, xpath='Owner/ID',
                            namespace=NAMESPACE)
        owner_display_name = findtext(element=element,
                                      xpath='Owner/DisplayName',
                                      namespace=NAMESPACE)
        meta_data = { 'owner': { 'id': owner_id,
                                 'display_name': owner_display_name }}

        obj = Object(name=findtext(element=element, xpath='Key',
                                   namespace=NAMESPACE),
                     size=int(findtext(element=element, xpath='Size',
                                       namespace=NAMESPACE)),
                     hash=findtext(element=element, xpath='ETag',
                                   namespace=NAMESPACE).replace('"', ''),
                     extra=None,
                     meta_data=meta_data,
                     container=container,
                     driver=self
                     )

        return obj

class S3USWestConnection(S3Connection):
    host = S3_US_WEST_HOST

class S3USWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (us-west-1)'
    connectionCls = S3USWestConnection
    ex_location_name = 'us-west-1'

class S3EUWestConnection(S3Connection):
    host = S3_EU_WEST_HOST

class S3EUWestStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (eu-west-1)'
    connectionCls = S3EUWestConnection
    ex_location_name = 'EU'

class S3APSEConnection(S3Connection):
    host = S3_AP_SOUTHEAST_HOST

class S3APSEStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-southeast-1)'
    connectionCls = S3APSEConnection
    ex_location_name = 'ap-southeast-1'

class S3APNEConnection(S3Connection):
    host = S3_AP_NORTHEAST_HOST

class S3APNEStorageDriver(S3StorageDriver):
    name = 'Amazon S3 (ap-northeast-1)'
    connectionCls = S3APNEConnection
    ex_location_name = 'ap-northeast-1'
b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/providers.py new file mode 100644 index 0000000000000000000000000000000000000000..43d9b32775ce79795643a679facf63a088137e0b --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/storage/providers.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from libcloud.utils import get_driver as get_provider_driver
from libcloud.storage.types import Provider

# Registry mapping a Provider id to (module path, driver class name).
# Driver modules are imported lazily by get_driver, so listing a provider
# here costs nothing until it is actually requested.
DRIVERS = {
    Provider.DUMMY:
        ('libcloud.storage.drivers.dummy', 'DummyStorageDriver'),
    Provider.CLOUDFILES_US:
        ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUSStorageDriver'),
    Provider.CLOUDFILES_UK:
        ('libcloud.storage.drivers.cloudfiles', 'CloudFilesUKStorageDriver'),
    Provider.S3:
        ('libcloud.storage.drivers.s3', 'S3StorageDriver'),
    Provider.S3_US_WEST:
        ('libcloud.storage.drivers.s3', 'S3USWestStorageDriver'),
    Provider.S3_EU_WEST:
        ('libcloud.storage.drivers.s3', 'S3EUWestStorageDriver'),
    Provider.S3_AP_SOUTHEAST:
        ('libcloud.storage.drivers.s3', 'S3APSEStorageDriver'),
    Provider.S3_AP_NORTHEAST:
        ('libcloud.storage.drivers.s3', 'S3APNEStorageDriver'),
}

def get_driver(provider):
    """Return the storage driver class registered for the given provider id."""
    return get_provider_driver(DRIVERS, provider)
from libcloud.common.types import LibcloudError

__all__ = ['Provider',
           'ContainerError',
           'ObjectError',
           'ContainerAlreadyExistsError',
           'ContainerDoesNotExistError',
           'ContainerIsNotEmptyError',
           'ObjectDoesNotExistError',
           'ObjectHashMismatchError',
           'InvalidContainerNameError']

class Provider(object):
    """
    Defines for each of the supported providers

    @cvar DUMMY: Example provider
    @cvar CLOUDFILES_US: CloudFiles US
    @cvar CLOUDFILES_UK: CloudFiles UK
    @cvar S3: Amazon S3 US
    @cvar S3_US_WEST: Amazon S3 US West (Northern California)
    @cvar S3_EU_WEST: Amazon S3 EU West (Ireland)
    @cvar S3_AP_SOUTHEAST: Amazon S3 Asia Pacific South East (Singapore)
    @cvar S3_AP_NORTHEAST: Amazon S3 Asia Pacific North East (Tokyo)
    """
    DUMMY = 0
    CLOUDFILES_US = 1
    CLOUDFILES_UK = 2
    S3 = 3
    S3_US_WEST = 4
    S3_EU_WEST = 5
    S3_AP_SOUTHEAST = 6
    S3_AP_NORTHEAST = 7

class ContainerError(LibcloudError):
    # Base class for all container-related errors; subclasses only override
    # error_type so __str__ reports the concrete error name.
    error_type = 'ContainerError'

    def __init__(self, value, driver, container_name):
        self.container_name = container_name
        super(ContainerError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return ('<%s in %s, container=%s, value=%s>' %
                (self.error_type, repr(self.driver),
                 self.container_name, self.value))

class ObjectError(LibcloudError):
    # Fixed: error_type was 'ContainerError' (copy-paste bug), which made
    # every object error render with the wrong error name in __str__.
    error_type = 'ObjectError'

    def __init__(self, value, driver, object_name):
        self.object_name = object_name
        super(ObjectError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return '<%s in %s, value=%s, object = %s>' % (self.error_type, repr(self.driver),
                                                      self.value, self.object_name)

class ContainerAlreadyExistsError(ContainerError):
    error_type = 'ContainerAlreadyExistsError'

class ContainerDoesNotExistError(ContainerError):
    error_type = 'ContainerDoesNotExistError'

class ContainerIsNotEmptyError(ContainerError):
    error_type = 'ContainerIsNotEmptyError'

class ObjectDoesNotExistError(ObjectError):
    error_type = 'ObjectDoesNotExistError'

class ObjectHashMismatchError(ObjectError):
    error_type = 'ObjectHashMismatchError'

class InvalidContainerNameError(ContainerError):
    error_type = 'InvalidContainerNameError'
+ +from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.common.types import InvalidCredsError, InvalidCredsException +from libcloud.compute.types import Provider, NodeState, DeploymentError +from libcloud.compute.types import DeploymentException + +from libcloud.utils import deprecated_warning + +__all__ = ["LibcloudError", "MalformedResponseError", + "InvalidCredsError", "InvalidCredsException", + "Provider", "NodeState", "DeploymentError", + "DeploymentException" + ] +deprecated_warning(__name__) diff --git a/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/utils.py b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ab20befffd90afa4b8b866001c35d95f572cf5 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/contrib/libcloud/utils.py @@ -0,0 +1,195 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import mimetypes +import warnings +from httplib import HTTPResponse + +SHOW_DEPRECATION_WARNING = True +SHOW_IN_DEVELOPMENT_WARNING = True +OLD_API_REMOVE_VERSION = '0.6.0' + +def read_in_chunks(iterator, chunk_size=None): + """ + Return a generator which yields data in chunks. 
def guess_file_mime_type(file_path):
    """
    Guess the (mime_type, encoding) pair for a file, based on its name only.

    @type file_path: C{str}
    @param file_path: Path (or bare name) of the file.

    @return: Tuple of (mime_type, encoding); either element may be None
             when it cannot be guessed.
    """
    filename = os.path.basename(file_path)
    (mimetype, encoding) = mimetypes.guess_type(filename)
    return mimetype, encoding

def deprecated_warning(module):
    # Emit a DeprecationWarning pointing users at the new module location.
    if SHOW_DEPRECATION_WARNING:
        warnings.warn('This path has been deprecated and the module'
                      ' is now available at "libcloud.compute.%s".'
                      ' This path will be fully removed in libcloud %s.' %
                      (module, OLD_API_REMOVE_VERSION),
                      category=DeprecationWarning)

def in_development_warning(module):
    # Emit a FutureWarning for modules that are not yet production-ready.
    # Fixed typo in the user-visible message: "your are" -> "you are".
    if SHOW_IN_DEVELOPMENT_WARNING:
        warnings.warn('The module %s is in development and you are advised '
                      'against using it in production.' % (module),
                      category=FutureWarning)

def str2dicts(data):
    """
    Create a list of dictionaries from a whitespace and newline delimited text.

    For example, this:
    cpu 1100
    ram 640

    cpu 2200
    ram 1024

    becomes:
    [{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}]
    """
    list_data = []
    list_data.append({})
    d = list_data[-1]

    lines = data.split('\n')
    for line in lines:
        line = line.strip()

        if not line:
            # Blank line starts a new record.
            d = {}
            list_data.append(d)
            d = list_data[-1]
            continue

        whitespace = line.find(' ')

        # Fixed: find() returns -1 when there is no separator at all, and
        # "if not whitespace" only skipped index 0 -- so a separator-less
        # line used to produce a bogus {line[:-1]: line} entry. Skip both
        # the no-separator (-1) and empty-key (0) cases.
        if whitespace < 1:
            continue

        key = line[0:whitespace]
        value = line[whitespace + 1:]
        d.update({key: value})

    list_data = [value for value in list_data if value != {}]
    return list_data

def str2list(data):
    """
    Create a list of values from a whitespace and newline delimited text (keys are ignored).

    For example, this:
    ip 1.2.3.4
    ip 1.2.3.5
    ip 1.2.3.6

    becomes:
    ['1.2.3.4', '1.2.3.5', '1.2.3.6']
    """
    list_data = []

    for line in data.split('\n'):
        line = line.strip()

        if not line:
            continue

        try:
            splitted = line.split(' ')
            # key = splitted[0]
            value = splitted[1]
        except Exception:
            # Lines without a second field are silently skipped.
            continue

        list_data.append(value)

    return list_data

def dict2str(data):
    """
    Create a string with a whitespace and newline delimited text from a dictionary.

    For example, this:
    {'cpu': '1100', 'ram': '640', 'smp': 'auto'}

    becomes:
    cpu 1100
    ram 640
    smp auto
    """
    result = ''
    for k in data:
        if data[k] is not None:
            result += '%s %s\n' % (str(k), str(data[k]))
        else:
            # A None value renders as a bare key.
            result += '%s\n' % str(k)

    return result

def fixxpath(xpath, namespace):
    # ElementTree wants namespaces in its xpaths, so here we add them.
    return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')])

def findtext(element, xpath, namespace):
    return element.findtext(fixxpath(xpath=xpath, namespace=namespace))

def findattr(element, xpath, namespace):
    # NOTE(review): identical to findtext -- presumably intentional (the
    # "attributes" are modelled as child elements); confirm against callers.
    return element.findtext(fixxpath(xpath=xpath, namespace=namespace))

def findall(element, xpath, namespace):
    return element.findall(fixxpath(xpath=xpath, namespace=namespace))

def reverse_dict(dictionary):
    # Swap keys and values; .items() works on both Python 2 and 3
    # (iteritems() was Python 2 only).
    return dict([ (value, key) for key, value in dictionary.items() ])

def get_driver(drivers, provider):
    """
    Get a driver.

    @param drivers: Dictionary containing valid providers.
    @param provider: Id of provider to get driver
    @type provider: L{libcloud.types.Provider}

    @raise AttributeError: If the provider is not present in C{drivers}.
    """
    if provider in drivers:
        mod_name, driver_name = drivers[provider]
        _mod = __import__(mod_name, globals(), locals(), [driver_name])
        return getattr(_mod, driver_name)

    raise AttributeError('Provider %s does not exist' % (provider))
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." 
+ +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ConPaaSSQLServer.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ConPaaSSQLServer.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/ConPaaSSQLServer" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ConPaaSSQLServer" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + make -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
+ +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Client_agent.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Client_agent.rst new file mode 100644 index 0000000000000000000000000000000000000000..c66e2e319ff4c2ba2721dc20bd8ec48697683359 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Client_agent.rst @@ -0,0 +1,136 @@ +============ +Client agent +============ +.. py:function:: AgentException(Exception) + + Receives Exception. + +.. py:function:: __check_reply(body) + + Raises :py:func:`AgentException` if receiveda object is not JSON or if response does not contain "opState". :py:func:`AgentException` is also raised if opState doesnt have value OK. + + :param body: should contain opState + :type body: JSON object + :rtype: Returns body + +.. py:function:: getMySQLServerState(host, port) + + :py:func:`_http_get` is used for getting state of MySQL Server on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: createMySQLServer(host, port) + + :py:func:`_http_post` is used for sending command for starting MySQL Server on given :py:attr:`host` and :py:attr:`port`. 
Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: printUsage() + + Prints instructions for use of Client agent. + +.. py:function:: restartMySQLServer(host, port) + + :py:func:`_http_post` is used for sending command for restarting MySQL Server on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: stopMySQLServer(host, port) + + :py:func:`_http_post` is used for sending command for stoping MySQL Server on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response + +.. py:function:: configure_user(host, port, username, password) + + :py:func:`_http_post` is used for sending :py:attr:`username` and :py:attr:`password` using :py:attr:`params` to create new MySQL user on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :param username: Username that new user will have + :param password: Password that new user will have + :rtype: Returns :py:func:`__check_reply` response + +.. 
py:function:: get_all_users(host, port) + + :py:func:`_http_get` is used for sending :py:attr:`params`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. Reply should be list of all MySQL users on given :py:attr:`host` and :py:attr:`port`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: remove_user(host,port,name) + + :py:func:`_http_post` is used for sending :py:attr:`username` using :py:attr:`params` that are used to remove MySQL user on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :param name: Username of the user that will be removed + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: setMySQLServerConfiguration(host,port, param_id, val) + + :py:func:`_http_post` is used for sending :py:attr:`param_id` using params that are used to change MySQL server configuration on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :param param_id: Identifier of the parameter that has to be changed + :param val: Value to which parameter has to be changed. + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: send_mysqldump(host,port,location) + + :py:func:`_http_post` is used for sending :py:attr:`params` and files located by :py:attr:`location`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. 
+ + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :param location: Location on the computer for the MySQL dump file + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: _http_post(host, port, uri, params, files=[]) + + Constructs a HTTP POST request + +.. py:function:: _http_get(host, port, uri, params=None) + + Constructs a HTTP GET request + +.. py:attribute:: host + + Attribute used to identify host name that MySQL server uses. + +.. py:attribute:: port + + Attribute used to identify port that MySQL server uses. + +.. py:attribute:: username + + Attribute used to set username that will be used in adding or removing user. + +.. py:attribute:: password + + Attribute used to set password for a user. + +.. py:attribute:: param_id + + Attribute used to identify parameter that will be changed on MySQL server. Changeable parameters are: data directory (datadir), port (port), bind address (bind-address). + +.. py:attribute:: location + + Location of the MySQL dump file on the computer. + +.. py:attribute:: httplib.OK + + Http response code 200 + +.. py:attribute:: params + + JSON object used to send actions and attributes using _http_post or _http_get diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Client_manager.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Client_manager.rst new file mode 100644 index 0000000000000000000000000000000000000000..cca3a6b47daae1623782460a7204be0ac9e8119b --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Client_manager.rst @@ -0,0 +1,80 @@ +============== +Client manager +============== +.. py:function:: AgentException(Exception) + + Receives Exception. + +.. py:function:: __check_reply(body) + + Raises :py:func:`AgentException` if receiveda object is not JSON or if response does not contain "opState". :py:func:`AgentException` is also raised if opState doesnt have value OK. 
+ + :param body: should contain opState + :type body: JSON object + :rtype: Returns body + +.. py:function:: printUsage() + + Prints instructions for use of Client manger. + +.. py:function:: getListServiceNodes(host, port) + + :py:func:`_http_get` is used for getting list of service nodes supplied by server manager on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. Reply from the server manager should be a list of all raised-by-server nodes. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: getMySQLServerState(host, port) + + :py:func:`_http_get` is used for getting current state of server manager on given :py:attr:`host` and :py:attr:`port`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: addServiceNode(host, port, function) + + :py:func:`_http_post` is used for sending command to Server manager on given :py:attr:`host` and :py:attr:`port` to create a new service node. Function of the new node is defined by :py:attr:`function`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :param function: What function will the new service node have: manager or agent + :rtype: Returns :py:func:`__check_reply` response. + +.. 
py:function:: deleteServiceNode(host, port, id) + + :py:func:`_http_post` is used for sending command to server manager on given :py:attr:`host` and :py:attr:`port` to delete existing service node. Service node that will be deleted is defined by :py:attr:`id`. Raises Exception if return code is not :py:attr:`httplib.OK` else sends body to :py:func:`__check_reply`. + + :param host: Host that will be used to connect to MySQL Server + :param port: Port that will be used to connect to MySQL Server + :param id: ID of service node that will be removed. + :rtype: Returns :py:func:`__check_reply` response. + +.. py:function:: _http_post(host, port, uri, params, files=[]) + + Constructs a HTTP POST request. + +.. py:function:: _http_get(host, port, uri, params=None) + + Constructs a HTTP GET request. + +.. py:attribute:: host + + Attribute used to identify host name that MySQL server uses. + +.. py:attribute:: port + + Attribute used to identify port that MySQL server uses. + +.. py:attribute:: function + + Attribute used to define which function will the new node have. + +.. py:attribute:: id + + Attribute used to identify service node that will be deleted. + +.. py:attribute:: httplib.OK + + Http response code 200 diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/IaaS.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/IaaS.rst new file mode 100644 index 0000000000000000000000000000000000000000..8a0df753b73dd4926251ac49a18461db464acd91 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/IaaS.rst @@ -0,0 +1,221 @@ +==== +IaaS +==== +Core for opearations with open nebula. + +.. py:class:: OneXmlrpcNode() + + .. py:method:: __init__(self, node): + + Sets parameters from input parameter node. + + .. py:attribute:: id + + Id of service node. + + .. py:attribute:: state + + State of service node. + + .. py:attribute:: name + + Name of service node. + + .. py:attribute:: template + + Template of service node. + + .. 
py:attribute:: public_ip + + Ip of service node. + + :param node: Information about node. + +.. py:class:: OneXmlrpc(NodeDriver) + + XMLRPC driver for OpenNebula. + + .. py:method:: __init__(self, uname, password, scheme, host, port): + + Uses input parameters for connection to OpenNebula + + :param uname: Username used when connecting to OpenNebula. + :param password: Password used when connecting to OpenNebula. + :param scheme: Scheme used when connecting to OpenNebula. + :param host: Host used when connecting to OpenNebula. + :param port: Port used when connecting to OpenNebula. + + .. py:method:: list_nodes(self): + + :rtype: Returns dictionary of all nodes. + + .. py:method:: create_node(self, **kwargs): + + Using inpute parameters determines which template to use and which id to use for image and network. + + :param kwargs: Determines which template will be used and with what parameters. + :rtype: returns response of allocating new service node. + + .. py:method:: destroy_node(self, id): + + Removes service node idenfied by parameter id + + :param id: Id of service node that will be removed. + + .. py:method:: list_sizes(self, location=None) + + Returns different NodeSize. Atm does not have usefull function. + + :param location: + :rtype: Returns information about 3 different Node sizes. + +.. py:class:: IaaSClient + + .. py:attribute:: NodeStates: + + In the begining states are defined + + .. code-block:: python + + RUNNING = NodeState.RUNNING + REBOOTING = NodeState.REBOOTING + TERMINATED = NodeState.TERMINATED + PENDING = NodeState.PENDING + UNKNOWN = NodeState.UNKNOWN + + .. py:method:: __config_opennebula_xmlrpc(self, iaas_config) + + Sets scheme, host, port, path, username, password, img_id, on_ex_network_id one_context_manager_script, one_context_agent_script and driver according to values in iaas_config file. + + .. py:attribute:: scheme + + Scheme that will be used when connectiong to OpenNebula. + + .. 
py:attribute:: host + + Host that will be used when connecting to OpenNebula. + + .. py:attribute:: port + + Port that will be used when connecting to OpenNebula. + + .. py:attribute:: path + + Path that will be used when connecting to OpenNebula. + + .. py:attribute:: username + + Username that will be used when connecting to OpenNebula. + + .. py:attribute:: password + + Password that will be used when connecting to OpenNebula. + + .. py:attribute:: img_id + + Id of image on OpenNebula that will be used when creating new service node. + + .. py:attribute:: on_ex_network_id + + Id of network on OpenNebula that will be used when creating new service node. + + .. py:attribute:: one_context_manager_script + + Path to manager script that will be used in template. + + .. py:attribute:: one_context_agent_script + + Path to agent script that will be used in template. + + .. py:attribute:: driver + + Calls :py:meth:`OneXmlrpc`. + + :param iaas_config: Configuration file containing everything needed for connecting to OpenNebula. + + .. py:method:: __config_opennebula(self, iaas_config) + + .. py:attribute:: scheme + + .. py:attribute:: hostname + + .. py:attribute:: port + + .. py:attribute:: path + + .. py:attribute:: username + + .. py:attribute:: password + + .. py:attribute:: img_id + + .. py:attribute:: size_id + + .. py:attribute:: on_ex_network_id + + .. py:attribute:: on_ex_network_gateawy + + .. py:attribute:: driver + + :param iaas_config: + + .. py:method:: __config_ec2(self, iaas_config) + + .. py:attribute:: username + + .. py:attribute:: password + + .. py:attribute:: ec2_ex_securitygroup + + .. py:attribute:: ec2_ex_keyname + + .. py:attribute:: img_id + + .. py:attribute:: size_id + + .. py:attribute:: driver + + :param iaas_config: + + .. py:method:: __setdriver(self, iaas_config) + + Raises Exception if iaas_config doesnt have name of driver. Calls appropriate method according to driver name. 
If drivername is OPENNEBULA :py:meth:`__config_opennebula` is called. If drivername is OPENNEBULA_XMLRPC :py:meth:`__config_opennebula_xmlrpc` is called and if drivername is EC2 :py:meth:`__config_ec2` is called + + :param iaas_config: Configuration file containing everything needed for connecting to OpenNebula. + + .. py:method:: __init__(self, iaas_config) + + Calls :py:meth:`__setdriver` with iaas_config as a parameter. + + :param iaas_config: Configuration file containing everything needed for connecting to OpenNebula. + + .. py:method:: listVMs(self) + + Constructs dictionary with information about all service nodes. + + :rtype: Returns information about all service nodes. + + .. py:method:: getVMInfo(self, vm_id) + + Gets information about service node identified by vm_id. + + :param vm_id: Used to identifie service node + :rtype: Returns information about service node. + + .. py:method:: newInstance(self, function) + + Creates new service node using appropriate driver. Returns id, state, name and ip of created service ndoe. + + :param function: Function of service node. Manager or agent. + :rtype: Returns information about service node. + + .. py:method:: killInstance(self, vm_id) + + Deletes service node using appropriate driver. + + :param vm_id: Id of service node that will be deleted. + :rtype: if service node is not found returns False + +.. py:attribute:: logger + + Used for logging information. diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_agent.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_agent.rst new file mode 100644 index 0000000000000000000000000000000000000000..0e048a0df65df30d8d81ae8f2b0338c0b01abb24 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_agent.rst @@ -0,0 +1,14 @@ +============ +Server agent +============ + +.. py:class:: AgentServer(HTTPServer, ThreadingMixIn) + + .. 
py:method:: __init__(self, server_address, RequestHandlerClass=AbstractRequestHandler) + + Initializes HTTP server. Calls :py:meth:`register_method` for every element inside dictionary exposed_function + + .. py:method:: register_method(self, http_method, func_name, callback) + + Registers a POST or GET method. + diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_agent_internals.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_agent_internals.rst new file mode 100644 index 0000000000000000000000000000000000000000..fd6835f08ea56abe5b9f308a3735264c78e3a73e --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_agent_internals.rst @@ -0,0 +1,421 @@ +====================== +Server agent Internals +====================== + +.. py:class:: MySQLServerConfiguration + + Class holds MySQL server configuration. + + .. py:method:: __init__(self) + + Initializes attributes. And calls :py:meth:`read_config` + + .. py:attribute:: hostname + + Gets hostname from socket. + + .. code-block:: python + + self.hostname = socket.gethostname() + + .. py:attribute:: restart_count + + Initializes variable restart_count with value zero. + + .. code-block:: python + + self.restart_count = 0 + + .. py:attribute:: pid_file + + Constructs pid file location. + + .. code-block:: python + + self.pid_file = "/var/lib/mysql/" + self.hostname + ".pid" + + .. py:attribute:: config_dir + + Set's configuration directory. + + .. code-block:: python + + self.config_dir = os.getcwd() + + .. py:attribute:: access_log + + Sets location of access_log. + + .. code-block:: python + + self.access_log = os.getcwd() + '/access.log' + + .. py:attribute:: error_log + + Sets location of error log. + + .. code-block:: python + + self.error_log = os.getcwd() + '/error.log' + + .. py:attribute:: conn_location + + Defines location where to connect. Initialized as empty string. + + .. py:attribute:: conn_username + + Defines username to use in connection to MySQL. 
Initialized as empty string. + + .. py:attribute:: conn_password + + Defines password to use in connection to MySQL. Initialized as empty string. + + .. py:attribute:: mycnf_filepath + + Location of MySQL configuration file. Initialized as empty string. + + .. py:attribute:: path_mysql_ssr + + Location of file that controls MySQL. It's used for starting stopping and restarting MySQL server. Initialized as empty string. + + .. py:attribute:: port_client + + Initialized as empty string. + + .. py:attribute:: port_mysqld + + Initialized as empty string. + + .. py:attribute:: bind_address + + Initialized as empty string. + + .. py:attribute:: data_dir + + Initialized as empty string. + + .. py:method:: read_config(self) + + Reads the configuration file and sets the value of :py:attr:`MySQLServerConfiguration.conn_location`, :py:attr:`MySQLServerConfiguration.conn_password` and :py:attr:`MySQLServerConfiguration.conn_location`. + + .. code-block:: python + + self.conn_location = config.get("MySQL_root_connection", "location") + self.conn_password = config.get("MySQL_root_connection", "password") + self.conn_username = config.get("MySQL_root_connection", "username") + + From configuration file also gets values for :py:attr:`MySQLServerConfiguration.mycnf_filepath` and :py:attr:`MySQLServerConfiguration.path_mysql_ssr`. + + .. code-block:: python + + self.mycnf_filepath = config.get("MySQL_configuration","my_cnf_file") + self.path_mysql_ssr = config.get("MySQL_configuration","path_mysql_ssr") + + After obtaining :py:attr:`MySQLServerConfiguration.mycnf_filepath` MySQL server configuration file is read and parsed with :py:meth:`MySQLServerConfiguration.MySQLConfigParser`. Values for :py:attr:`MySQLServerConfiguration.port_mysqld`, :py:attr:`MySQLServerConfiguration.bind_address` and :py:attr:`MySQLServerConfiguration.data_dir` are defined from parsed file. Before exiting temporary file that was created by :py:meth:`MySQLServerConfiguration.MySQLConfigParser` is deleted. 
+ + .. code-block:: python + + config.readfp( self.MySQLConfigParser(my_cnf_text)) + self.port_mysqld = config.get ("mysqld", "port") + self.bind_address = config.get ("mysqld", "bind-address") + self.data_dir = config.get ("mysqld", "datadir") + + .. py:method:: change_config(self, id_param, param) + + Changes the values in MySQL configuration file. Value that has to be changed is identified by id_param and value it changes into is defined by param + + :param id_param: should one of these values: datadir, port or bind-address + :param param: defines the new value + + .. py:method:: MySQLConfigParser(self, text) + + Receives text as a parameter. Text is parsed so it has appropriate form. After the parsing is complete the new text is written to temporary file. File handler is then returned. + + :param text: Text that has to be parsed so it has the right form. + + .. py:method:: add_user_to_MySQL(self, new_username, new_password) + + Adds a new user to MySQL. Username and password are defined with parameters. + + :param new_username: Username that new user will have. + :param new_password: Password that new user will have. + + .. py:method:: remove_user_to_MySQL(self, username) + + :param username: Username of user that will be removed. + + .. py:method:: get_users_in_MySQL(self) + + Queries the list of all users from MySQL and returns it. + + :rtype: returns list of all users in MySQL + + .. py:method:: create_MySQL_with_dump(self, f) + + Creates MySQL database with dump file. + + :param f: Dump file that will be used to create MySQL database + +.. py:class:: MySQLServer + + .. py:method:: __init__(self) + + Initializes instance of :py:class:`MySQLServerConfiguration` and :py:attr:`MySQLServer.state` + + .. py:attribute:: state + + At initializing sets the state to :py:attr:`S_INIT` + + .. py:method:: post_restart(self) + + Not yet implemented. Things to do after restart. + + .. 
py:method:: start(self) + + sets :py:attr:`MySQLServer.state` to :py:attr:`S_STARTING` and tries to start MySQL server. If starting failed :py:attr:`MySQLServer.state` is set to :py:attr:`S_STOPPED`. If starting succeeded :py:attr:`MySQLServer.state` is set to :py:attr:`S_RUNNING`. + + .. py:method:: stop(self) + + If server is running sets :py:attr:`MySQLServer.state` to :py:attr:`S_STOPPING`. If it succeeded :py:attr:`MySQLServer.state` is set to :py:attr:`S_STOPPED`. If it fails to stop :py:attr:`MySQLServer.state` is set to :py:attr:`S_RUNNING`. Method also checks for pid file in :py:attr:`MySQLServerConfiguration.pid_file`. If it doesnt exist :py:attr:`MySQLServer.state` is set to :py:attr:`S_STOPPED`. + + .. py:method:: restart(self) + + Increases :py:attr:`MySQLServerConfiguration.restart_count` by one. If restarting succeeded :py:attr:`MySQLServer.state` is set to :py:attr:`S_RUNNING` if not it is set to :py:attr:`S_STOPPED` + + .. py:method:: status(self) + + returns :py:attr:`MySQLServerConfiguration.port_mysqld` and :py:attr:`MySQLServer.state` of MySQL . + +.. py:class:: AgentException(Exception) + + Class used to format Exceptions. + + .. code-block:: python + + class AgentException(Exception): + def __init__(self, code, *args, **kwargs): + self.code = code + self.args = args + if 'detail' in kwargs: + self.message = '%s DETAIL:%s' % ( (E_STRINGS[code] % args), str(kwargs['detail']) ) + else: + self.message = E_STRINGS[code] % args + +.. py:attribute:: exposed_functions + + Dictionary that is populated with functions that are registered in :py:func:`AgentServer.__init__` + +.. py:function:: createMySQLServer + + Calls :py:meth:`MySQLServer.start`. If no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + if Exception was raised returns: + + .. code-block:: python + + return {'opState': 'ERROR', 'error': str(e)} + +.. py:function:: stopMySQLServer(params) + + Calls :py:meth:`MySQLServer.stop`. 
If no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + if Exception was raised returns: + + .. code-block:: python + + return {'opState': 'ERROR', 'error': str(e)} + +.. py:function:: restartMySQLServer(params) + + Calls :py:meth:`MySQLServer.restart`. If no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + if Exception was raised returns: + + .. code-block:: python + + return {'opState': 'ERROR', 'error': str(e)} + +.. py:function:: getMySQLServerState(params) + + Calls :py:meth:`MySQLServer.status`. If no Exception was raised returns: + + .. code-block:: python + + return {'opState':'OK', 'return': status} + + if Exception was raised returns: + + .. code-block:: python + + return {'opState': 'ERROR', 'error': str(e)} + +.. py:function:: setMySQLServerConfiguration(params) + + Calls :py:meth:`MySQLServer.restart`. If no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + if Exception was raised returns: + + .. code-block:: python + + return {'opState': 'ERROR', 'error': str(e)} + +.. py:function:: createNewMySQLuser(params) + + Calls :py:meth:`MySQLServerConfiguration.add_user_to_MySQL` + + .. code-block:: python + + niam.config.add_user_to_MySQL(params['username'], params['password']) + + and if no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + If Exception was raised returns: + + .. code-block:: python + + ex = AgentException(E_MYSQL, 'error "%d, %s' %(e.args[0], e.args[1])) + return {'opState': 'ERROR', 'error': ex.message} + +.. py:function:: removeMySQLuser(params) + + Calls :py:meth:`MySQLServerConfiguration.remove_user_to_MySQL` + + .. code-block:: python + + niam.config.remove_user_to_MySQL(params['username']) + + and if no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + If Exception was raised returns: + + .. 
code-block:: python + + ex = AgentException(E_MYSQL, 'error "%d, %s' %(e.args[0], e.args[1])) + return {'opState': 'ERROR', 'error': ex.message} + +.. py:function:: listAllMySQLusers(params) + + Calls :py:meth:`MySQLServerConfiguration.get_users_from_MYSQL` + + .. code-block:: python + + niam.config.remove_user_to_MySQL(params['username']) + + and if no Exception was raised returns: + + .. code-block:: python + + return {'opState': 'OK'} + + If Exception was raised returns: + + .. code-block:: python + + ex = AgentException(E_MYSQL, 'error "%d, %s' %(e.args[0], e.args[1])) + return {'opState': 'ERROR', 'error': ex.message} + +.. py:function:: create_with_MySQLdump(params) + + Calls :py:meth:`MySQLServerConfiguration.create_MySQL_with_dump` + + .. code-block:: python + + ret = niam.config.create_MySQL_with_dump(f) + +.. py:attribute:: S_INIT + + Contains string 'INIT' that describes server state. + +.. py:attribute:: S_STARTING + + Contains string 'STARTING' that describes server state. + +.. py:attribute:: S_RUNNING + + Contains string 'RUNNING' that describes server state. + +.. py:attribute:: S_STOPPING + + Contains string 'STOPPING' that describes server state. + +.. py:attribute:: S_STOPPED + + Contains string 'STOPPED' that describes server state. + +.. py:attribute:: E_STRINGS + + List of error strings indexed by rows: + + .. code-block:: python + + E_STRINGS = [ + 'Unexpected arguments %s', # 1 param (a list) + 'Unable to open configuration file: %s', + 'Failed to parse configuration file error: %s', + 'Configuration file already exists', + 'Invalid arguments', + 'Unknown error. Description: %s', + 'Failed to commit configuration', + 'Missing argument: %s', + 'MySQL reported an error: %s' + ] + +.. py:attribute:: E_ARGS_UNEXPECTED + + Index 0 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_CONFIG_NOT_EXIST + + Index 1 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_CONFIG_READ_FAILED + + Index 2 at :py:attr:`E_STRINGS` + +.. 
py:attribute:: E_CONFIG_EXISTS + + Index 3 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_ARGS_INVALID + + Index 4 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_UNKNOWN + + Index 5 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_CONFIG_COMMIT_FAILED + + Index 6 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_ARGS_MISSING + + Index 7 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_MYSQL + + Index 8 at :py:attr:`E_STRINGS` diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager.rst new file mode 100644 index 0000000000000000000000000000000000000000..2dfb3676b55fb5128041d56c454f0a0c4c5339f8 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager.rst @@ -0,0 +1,20 @@ +============== +Server manager +============== + +.. py:class:: SQLServerRequestHandler(AbstractRequestHandler): + + .. py:method:: _dispatch(self, method, params) + + Sends back appropriate response code. + +.. py:class:: ManagerServer(HTTPServer, ThreadingMixIn) + + .. py:method:: __init__(self, server_address, RequestHandlerClass=AbstractRequestHandler) + + Initializes HTTP server. Calls :py:meth:`register_method` for every element inside exposed_function + + .. py:method:: register_method(self, http_method, func_name, callback) + + Registers a POST or GET method. + diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager_config.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager_config.rst new file mode 100644 index 0000000000000000000000000000000000000000..85ddd098f590e6e6f648be0b565bbfded8feaaa9 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager_config.rst @@ -0,0 +1,155 @@ +====== +Config +====== + +Holds configuration for the ConPaaS SQL Server Manager. + +.. py:class:: ManagerException(Exception) + + Handles exceptions + + .. 
py:method:: __init__(self, code, *args, **kwargs) + + Gets information about exception and formats apropriate message. + + .. py:attribute:: code + + Exception code. + + .. py:attribute:: args + + Exception arguments. + + .. py:attribute:: message + + Formated with parameters. + + :param code: Exception code. + :param args: Exception arguments. + :param kwargs: If it contains detail then it's used in formatting message. + +.. py:class:: ServiceNode(object) + + .. py:method:: __init__(self, vmid, runMySQL=False) + + Initializes service node. + + .. py:attribute:: vmid + + Service node id. + + .. py:attribute:: isRunningMySQL + + Indicator if service node is running MySQL + + :param vmid: Id that will be set. + :param runMySQL: Indicator if service node is running MySQL + + .. py:method:: __repr__(self) + + :rtype: Returns service node's information. Id ip and if mysql is running on this service node. + + .. py:method:: __cmp__(self, other) + + :param other: Service node whos id will be compared to. + :rtype: Returns 0 if id of other and self.id are the same. Returns -1 if other has higher id. If other has lower id returns 1. + +.. py:class:: Configuration(object) + + .. py:method:: __read_config(self,config) + + Reads the configuration file and defines attributes with values from configuration file. + + .. py:attribute:: driver + + Name of driver that will be used to communicate with open nebula. + + .. py:attribute:: xmlrpc_conn_location + + Connection information. + + .. py:attribute:: conn_password + + Connection password + + .. py:attribute:: conn_username + + Connection username + + :param config: Configuration file. + + .. py:method:: __init__(self, configuration) + + Initializes :py:attr:`mysql_count` with value zero and empty dictionary :py:attr:`serviceNodes`. Calls :py:meth:`__read_config` + + .. py:attribute:: mysql_count + + Nomber of MySQL service nodes. + + .. py:attribute:: serviceNodes + + Dictionary of all service nodes. 
+ + :param configuration: Configuration file. + + .. py:method:: getMySQLServiceNodes(self) + + :rtype: Returns :py:attr:`serviceNodes` + + .. py:method:: getMySQLTuples(self) + + :rtype: Returns service node ip and MySQL port for each service node that is running MySQL. + + .. py:method:: getMySQLIPs(self) + + :rtype: Returns ip address for each virtual machine that runs MySQL. + + .. py:method:: addMySQLServiceNode(self, vmid, accesspoint) + + Adds new service node to :py:attr:`serviceNodes` defined by vmid. Also increases :py:attr:`mysql_count` + + :param vmid: Id that will be used for service node. + :param accesspoint: + + .. py:method:: removeMySQLServiceNode(self, vmid) + + Removes service node from :py:attr:`serviceNodes` identified by parameter vmid. + + :param vmid: Input parameter used to find service node that in :py:attr:`serviceNodes` that will be removed. + +.. py:attribute:: CONFIGURATION_FILE + + Holds the path to the configuration file + +.. py:attribute:: logger + + Used for logging information. + +.. py:attribute:: E_STRINGS + + List of error strings indexed by rows: + + .. code-block:: python + + E_STRINGS = [ + 'Unexpected arguments %s', + 'Unable to open configuration file: %s', + 'Configuration file does not exist: %s', + 'Unknown error.' + ] + +.. py:attribute:: E_ARGS_UNEXPECTED + + Index 0 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_CONFIG_READ_FAILED + + Index 1 at :py:attr:`E_STRINGS` + +.. py:attribute:: E_CONFIG_NOT_EXIST + + Index 2 at :py:attr:`E_STRINGS` + +.. 
py:attribute:: E_UNKNOWN + + Index 3 at :py:attr:`E_STRINGS` diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager_internals.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager_internals.rst new file mode 100644 index 0000000000000000000000000000000000000000..ed2031678dbabc02408c6f332775cd5bf71e2376 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/Server_manager_internals.rst @@ -0,0 +1,79 @@ +======================== +Server manager internals +======================== + +.. py:class:: MySQLServerManager + + .. py:method:: __init__(self, conf) + + Initializes :py:attr:`config` using Config and sets :py:attr:`state` to :py:attr:`S_INIT` + + .. py:attribute:: state + + State of MySQLServerManager. + + :param conf: Configuration file. + +.. py:function:: listServiceNodes(kwargs) + + Uses :py:meth:`IaaSClient.listVMs()` to get list of all Service nodes. For each service node it gets it checks if it is in servers list. If some of them are missing they are removed from the list. Returns list of all service nodes. + +.. py:function:: createServiceNode(kwargs) + + Creates a new service node using :py:meth:`IaaSClient.newInstance()`. Calls :py:func:`createServiceNodeThread`. + +.. py:function:: deleteServiceNode(kwargs) + + Using :py:meth:`IaaSClient.killInstance()` removes service node from OpenNebula and after removing calls :py:meth:`Configuration.removeMySQLServiceNode()` + +.. py:function:: createServiceNodeThread(function, new_vm) + + Calls :py:func:`wait_for_nodes`. And after completing calls :py:meth:`Configuration.addMySQLServiceNode()` to add new service node. + +.. py:function:: getMySQLServerManagerState(params) + + :rtype: Returns state of Server manager. + +.. py:function:: wait_for_nodes(nodes, poll_interval=10) + + Every poll_interval seconds checks if node is up. Calls function getserverstate so it gets state of agent. + +.. py:attribute:: S_INIT + + Contains string 'INIT' that describes server state. 
+ +.. py:attribute:: S_PROLOGUE + + Contains string 'STARTING' that describes server state. + +.. py:attribute:: S_RUNNING + + Contains string 'RUNNING' that describes server state. + +.. py:attribute:: S_ADAPTING + + Contains string 'ADAPTING' that describes server state. + +.. py:attribute:: S_EPILOGUE + + Contains string 'EPILOGUE' that describes server state. + +.. py:attribute:: S_STOPPED + + Contains string 'STOPPED' that describes server state. + +.. py:attribute:: S_ERROR + + Contains string 'ERROR' that describes server state. + +.. py:attribute:: config + + After initialization used for methods found in :py:class:`Configuration()` + +.. py:attribute:: dstate + +.. py:attribute:: exposed_functions + + Dictionary that is populated with functions that are registered in :py:func:`ManagerServer.__init__` + +.. py:attribute:: iaas diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/age_tree.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/age_tree.rst new file mode 100644 index 0000000000000000000000000000000000000000..aafbfc196b8d645ac4863ea3d33a13429af9277c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/age_tree.rst @@ -0,0 +1,9 @@ +===== +Agent +===== +.. toctree:: + + Server_agent_internals + Server_agent + Client_agent + diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/conf.py b/conpaas/branches/Y1DEMO-conpaassql/doc/source/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..83d4d16915eaabc14d1436e56aef5a9822bfd09c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/conf.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +# +# ConPaaS SQL Server documentation build configuration file, created by +# sphinx-quickstart on Thu Aug 18 16:01:28 2011. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. 
+# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath(os.path.join('..','src'))) + + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'ConPaaS SQL Server' +copyright = u'2011, Aleš Černivec' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.1' +# The full version, including alpha/beta/rc tags. +release = '0.1' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. 
+#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. 
+#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ConPaaSSQLServerdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). 
+#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'ConPaaSSQLServer.tex', u'ConPaaS SQL Server Documentation', + u'Aleš Černivec', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'conpaassqlserver', u'ConPaaS SQL Server Documentation', + [u'Aleš Černivec'], 1) +] diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/index.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..af97f787391b661ea5f78d0c383744357bc7ba49 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/index.rst @@ -0,0 +1,23 @@ +.. ConPaaS SQL Server documentation master file, created by + sphinx-quickstart on Thu Aug 18 16:01:28 2011. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to ConPaaS SQL Server's documentation! +============================================== + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + age_tree + man_tree + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/source/man_tree.rst b/conpaas/branches/Y1DEMO-conpaassql/doc/source/man_tree.rst new file mode 100644 index 0000000000000000000000000000000000000000..1132d74c279dfe4b847edb9472b86bc6c2c9144f --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/source/man_tree.rst @@ -0,0 +1,10 @@ +======= +Manager +======= +.. toctree:: + + Server_manager_config + Server_manager_internals + Server_manager + Client_manager + IaaS diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/tex/Makefile b/conpaas/branches/Y1DEMO-conpaassql/doc/tex/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..428f98988bcaea73f05d7dbf2fb339af1c4f794c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/tex/Makefile @@ -0,0 +1,5 @@ +mysql_conpaas_doc : mysql_conpaas_doc.tex + pdflatex mysql_conpaas_doc.tex + +clean : + 'rm' -fr *.aux *.dvi *.lof *.log *.pdf *.ps *.spl *.toc *~ diff --git a/conpaas/branches/Y1DEMO-conpaassql/doc/tex/mysql_conpaas_doc.tex b/conpaas/branches/Y1DEMO-conpaassql/doc/tex/mysql_conpaas_doc.tex new file mode 100644 index 0000000000000000000000000000000000000000..2ad20f3c4d40bd42d8e8a14c6680b6e1187ff0dc --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/doc/tex/mysql_conpaas_doc.tex @@ -0,0 +1,79 @@ +\documentclass[a4paper,10pt]{article} +\usepackage[cp1250]{inputenc} % �umnike lahko vna�amo s tipkovnico +\usepackage{epsfig} +\usepackage[T1]{fontenc} % kodiranje pisave +\usepackage{eurosym} +\usepackage {algorithmicx} +\usepackage{algpseudocode} +\usepackage{algorithm} +\usepackage{amsfonts} % dodatni matemati�ni simboli +\usepackage{amsmath} % za sklice na oznake +\usepackage{graphicx} +\usepackage{listings} + +\bibliographystyle{plain} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\begin{document} +\title{Technical document on ConPaaS 
services: ConPaaS MySQL Server} +\vspace{15pt} +\author{Ale{\v s} {\v C}ernivec} +\vspace{50pt} +\maketitle +\vspace{15pt} +\setlength{\parindent}{15pt} +\newpage +\tableofcontents +\newpage +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section{Introduction} + +\section{Architecture} + +\section{Conclusion} + +%% \begin{figure}[htb] +%% \begin{center} +%% \includegraphics[scale=0.6]{../slike/StrukturaOCSP.jpeg} +%% \end{center} +%% \caption[Shematski prikaz OCSP]{Shematski prikaz protokola OCSP.} +%% \label{fig:structure_OCSP} +%%\end{figure} + +%%\begin{small} +%%\begin{verbatim} +%% PKCS10CertificationRequest certRequest = new +%%PKCS10CertificationRequest( +%% "SHA256withRSA", +%% new X500Principal(""), +%% keyPair.getPublic(), +%% null, +%% keyPair.getPrivate()); +%%\end{verbatim} +%%\end{small} + +\newpage +\vspace{4mm} + +\begin{thebibliography}{99} + \bibitem{seriesX}Series X: Data Networks, Open System Communications and +Security, X.509, 08/2005 + \bibitem{rfcCertAndCRL} R. Housley et al, Internet X.509 Public Key +Infrastructure Certificate and Certificate Revocation List (CRL) Profile, RSA +Laboratories, April 2002 + \bibitem{federal_office}The Office of the Federal Privacy Commissioner, +Privacy and Public Key Infrastructure: Guidelines for Agencies using PKI to +communicate or transact with individuals, 21 December 2001 + \bibitem {ten_risks} Carl Ellison and Bruce Schneier, Ten Risks of PKI: +What You are not Being Told about Public Key Infrastructure, Computer Security +Journal, Volume XVI, Number 1, 2000 + \bibitem {rfcOCSP} X.509 Internet Public Key Infrastructure Online +Certificate Status Protocol - OCSP http://www.ietf.org/rfc/rfc2560.txt + \bibitem {applied}Alfred J. Menezes, Paul C. Van Oorschot, Scott A. 
+Vanstone, Handbook of Applied Cryptography, 5th ED + \bibitem{beginning}Hook D., Beginning Cryptography with Java, 2005 + \bibitem{securityplus}Pastore M., Dulaney M., Security+, Second +Edition, Exam SYO-101, 2004 +\end{thebibliography} + +\end{document} \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/agent-install.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/agent-install.sh new file mode 100755 index 0000000000000000000000000000000000000000..5ebe0fd5a5bf1b18ec35a0b93105f36edd492ebc --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/agent-install.sh @@ -0,0 +1,46 @@ +#!/bin/bash +SERVER=172.16.93.108 +PACKAGE_NAME=conpaas-sql.tar +DEST_DIR=/home/contrail/conpaassql + +apt-get -y update + +apt-get install -y unzip +apt-get install -y python +apt-get install -y python-mysqldb +apt-get install -y python-pycurl + +mkdir -p ${DEST_DIR} +cd ${DEST_DIR} + +wget ${SERVER}/${PACKAGE_NAME} + +wget https://github.com/lukaszo/python-oca/zipball/0.2.3 +wget http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz#md5=7df2a529a074f613b509fb44feefe74e + +tar xvfz setuptools-0.6c11.tar.gz +cd setuptools-0.6c11 +python setup.py build +python setup.py install +cd .. +rm -rf setuptools +rm setuptools-0.6c11.tar.gz + +unzip 0.2.3 +cd lukaszo-python-oca-61992c1 +python setup.py build +python setup.py install +cd .. 
+rm -rf lukaszo-python-oca-61992c1 +rm 0.2.3 + +#tar xvfz testpackage.tar.gz +tar xvf ${PACKAGE_NAME} + +#rm testpackage.tar.gz +rm ${PACKAGE_NAME} + +cd src +#PYTHONPATH=$PWD python conpaas/mysql/server/agent/server.py +PYTHONPATH=${DEST_DIR}/src python conpaas/mysql/server/agent/server.py -c ${DEST_DIR}/src/conpaas/mysql/server/agent/configuration.cnf + diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/deploy.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..20182255bb2954e9994b489a5f7bc69b7c9eb6cb --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/deploy.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ $# -ne 2 ] +then + echo "Usage: `basename $0` user server command" + echo " first argument: server name" + echo " second argument: server to deploy to" + echo " third argument: command after the deployment (start-agent, start-manager)" + exit 1 +fi + +PACKAGE_DIR="~/conpaassql" +PACKAGE_NAME="conpaassql.tar.gz" +SOURCE="../../conpaassql" +TAR_DEST="../../conpaassql.tar.gz" +CONPAASROOT="/tmp" +CONPAASSQLPID="${CONPAASROOT}/conpaassql.pid" +CONPAASSQLERR="${CONPAASROOT}/conpaassql.err" +CONPAASSQLOUT="${CONPAASROOT}/conpaassql.out" + +USER=$1 +DEST_SERVER_IP=$2 +COMMAND=$3 + +echo "tar-ing new code on local node - code to be deployed" +tar cvfz ${TAR_DEST} --exclude=.svn ${SOURCE} 1> /dev/null 2> /dev/null +echo "scp-ing new code to remote node" +scp ${TAR_DEST} ${USER}@${DEST_SERVER_IP}:~ +#echo "Installing new and killing old remote existing instances." +echo "Installing new instance." 
+#ssh ${USER}@${DEST_SERVER_IP} "rm -fr ${PACKAGE_DIR}; tar xvfz ~/${PACKAGE_NAME} 1> /dev/null 2> /dev/null; if [ -e ${CONPAASSQLPID} ]; then kill -9 \`cat ${CONPAASSQLPID}\`; else exit 0; fi" +ssh ${USER}@${DEST_SERVER_IP} "rm -fr ${PACKAGE_DIR}; tar xvfz ~/${PACKAGE_NAME} 1> /dev/null 2> /dev/null" +#if [ ${COMMAND} == "start-agent" ] +#then +# ssh ${USER}@${DEST_SERVER_IP} "cd ${PACKAGE_DIR}/scripts/; ./run-agent-server.sh" +#fi diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/manager-install.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/manager-install.sh new file mode 100755 index 0000000000000000000000000000000000000000..c23d2d954defe5dd2ead08ed31ffc09f5191cf08 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/manager-install.sh @@ -0,0 +1,44 @@ +#!/bin/bash +SERVER=172.16.93.108 +PACKAGE_NAME=conpaas-sql.tar +DEST_DIR=/home/contrail/conpaassql + +apt-get -y update + +apt-get install -y unzip +apt-get install -y python +apt-get install -y python-mysqldb +apt-get install -y python-pycurl + +mkdir -p ${DEST_DIR} +cd ${DEST_DIR} + +wget ${SERVER}/${PACKAGE_NAME} + +wget https://github.com/lukaszo/python-oca/zipball/0.2.3 +wget http://pypi.python.org/packages/source/s/setuptools/setuptools-0.6c11.tar.gz#md5=7df2a529a074f613b509fb44feefe74e + +tar xvfz setuptools-0.6c11.tar.gz +cd setuptools-0.6c11 +python setup.py build +python setup.py install +cd .. +rm -rf setuptools +rm setuptools-0.6c11.tar.gz + +unzip 0.2.3 +cd lukaszo-python-oca-61992c1 +python setup.py build +python setup.py install +cd .. 
+rm -rf lukaszo-python-oca-61992c1 +rm 0.2.3 + +#tar xvfz testpackage.tar.gz +tar xvf ${PACKAGE_NAME} + +#rm testpackage.tar.gz +rm ${PACKAGE_NAME} + +cd src +PYTHONPATH=${DEST_DIR}/src:${DEST_DIR}/contrib/ python conpaas/mysql/server/manager/server.py -c ${DEST_DIR}/src/conpaas/mysql/server/manager/configuration.cnf diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/one-vm-rc.local b/conpaas/branches/Y1DEMO-conpaassql/scripts/one-vm-rc.local new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/one-vm-start-template.tpl b/conpaas/branches/Y1DEMO-conpaassql/scripts/one-vm-start-template.tpl new file mode 100644 index 0000000000000000000000000000000000000000..e4a001b203d3d5fa2e617a4412f193ce323e9af7 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/one-vm-start-template.tpl @@ -0,0 +1,19 @@ +NAME = conpaassql-manager +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "80", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = 24 ] +GRAPHICS = [ + type="vnc" + ] +CONTEXT = [ + target=sdc, + files = /home/contrail/manager/conpaassql-install.sh + ] \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/opennebula.conf b/conpaas/branches/Y1DEMO-conpaassql/scripts/opennebula.conf new file mode 100644 index 0000000000000000000000000000000000000000..86ba3be10a86ddc374c6fc50539f22836f675886 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/opennebula.conf @@ -0,0 +1,3 @@ +OPENNEBULA_URL http://172.16.120.228:4566 +OPENNEBULA_USER oneadmin +OPENNEBULA_PASSWORD oneadmin diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/pack.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/pack.sh new file mode 100755 index 0000000000000000000000000000000000000000..3ebf39ce110741f97e39fae43a5241ec87cd9bf4 --- /dev/null +++ 
b/conpaas/branches/Y1DEMO-conpaassql/scripts/pack.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +tar cvfz ../../conpaassql.tar.gz --exclude=.svn ../../conpaassql diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/run-agent-client.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-agent-client.sh new file mode 100755 index 0000000000000000000000000000000000000000..e83c3309b7da353a00f5c60ffabdee97de5266c4 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-agent-client.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +PWD=`pwd` +PYTHONPATH=${PWD}/../src +PYTHONPATH=${PYTHONPATH} python ${PWD}/../src/conpaas/mysql/client/agent_client.py $* & diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/run-agent-server.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-agent-server.sh new file mode 100755 index 0000000000000000000000000000000000000000..160fc44c553a2287de67c9c1097286bfc8ddfe8f --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-agent-server.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +PIDFILE="/tmp/conpaassql.pid" +touch ${PIDFILE} +rm ${PIDFILE} +PWD=`pwd` +PYTHONPATH=${PWD}/../src +PYTHONPATH=${PYTHONPATH} python ${PWD}/../src/conpaas/mysql/server/agent/server.py -c ${PWD}/../src/conpaas/mysql/server/agent/configuration.cnf +#PYTHONPATH=${PYTHONPATH} python ${PWD}/../src/conpaas/mysql/server/agent/server.py & +#echo $! 
> ${PIDFILE} diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/run-manager-client.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-manager-client.sh new file mode 100755 index 0000000000000000000000000000000000000000..35e0abe96080f3937730b1d5ce4ccd6b1afc9b11 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-manager-client.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +PWD=`pwd` +PYTHONPATH=${PWD}/../src:${PWD}/../contrib +PYTHONPATH=${PYTHONPATH} python ${PWD}/../src/conpaas/mysql/client/manager_client.py $* & diff --git a/conpaas/branches/Y1DEMO-conpaassql/scripts/run-manager-server.sh b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-manager-server.sh new file mode 100755 index 0000000000000000000000000000000000000000..cdb4738185504a58c5d0550ce03dd574cba755a2 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/scripts/run-manager-server.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +PWD=`pwd` +PYTHONPATH=${PWD}/../src:${PWD}/../contrib +PYTHONPATH=${PYTHONPATH} python ${PWD}/../src/conpaas/mysql/server/manager/server.py -c ${PWD}/../src/conpaas/mysql/server/manager/configuration.cnf diff --git a/conpaas/branches/Y1DEMO-conpaassql/setup.py b/conpaas/branches/Y1DEMO-conpaassql/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..3de8fe288aae21d450b876b5fc718b609e98347d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/setup.py @@ -0,0 +1,18 @@ +#from distutils.core import setup +from setuptools import setup, find_packages + +setup(name = 'conpaassql-server', + version = '0.1', + description = 'Contrail ConPaaS SQL Server.', + author = 'Contrail', + author_email = 'ales.cernivec@xlab.si', + url = 'http://contrail.eu/', + #packages = ['conpaas', 'conpaas.mysql','conpaas.mysql.server','conpaas.mysql.server.agent', 'conpaas.mysql.server.manager', 
'conpaas.mysql.client','contrib','contrib.libcloud','contrib.libcloud.drivers','contrib.libcloud.storage','contrib.libcloud.storage.drivers','contrib.libcloud.compute','contrib.libcloud.compute.drivers','contrib.libcloud.common'], + #package_dir = { 'src': 'src/conpaas','contrib' : 'contrib' }, + package_dir={'': 'src', 'contrib' : 'contrib'}, + packages=find_packages('src', 'contrib'), + include_package_data=True, + classifiers=['Operating System :: POSIX :: Linux', + 'Programming Language :: Python'], + zip_safe=False + ) diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/iaas.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/iaas.py new file mode 100644 index 0000000000000000000000000000000000000000..581a54c6e3b49bd3c8e83610d9f020f7bde1876d --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/iaas.py @@ -0,0 +1,513 @@ +''' +Created on Jan 21, 2011 + +@author: ielhelw +''' + +import urlparse +import oca + +from socket import gethostbyname + +from libcloud.compute.types import Provider, NodeState +from libcloud.compute.providers import get_driver +from libcloud.compute.base import NodeImage, NodeSize +from libcloud.compute.drivers.opennebula import OpenNebulaNodeDriver +from libcloud.compute.drivers.ec2 import EC2NodeDriver + +import libcloud.security +from libcloud.compute.base import NodeDriver +from conpaas.log import create_logger +from random import Random +import random +libcloud.security.VERIFY_SSL_CERT = False + +logger = create_logger(__name__) + +''' +For Unit testing +''' +class DummyNode(): + + class DummyNic(): + + def __init__(self, ip): + self.ip = ip + + class DummyTemplate(): + + def __init__(self): + self.nics = {} + self.nics[0] = DummyNode.DummyNic("127.0.0.1") + + def 
__init__(self, id, str_state, name, template, ip): + logger.debug("Entering __init__") + self.id = id + self.vmid=id + self.str_state = str_state + self.name = name + self.template = self.DummyTemplate() + self.isRunningMySQL = True + self.ip = ip + self.port = 60000 + self.state = str_state + +class OneXmlrpcNode(): + + def __init__(self, node): + self.id = node.id + self.state = node.str_state + self.name = node.name + self.template = node.template + self.public_ip = node.template.nics[0].ip + +''' + Dummy driver for ONE - used for testing. +''' +class DummyONEDriver(NodeDriver): + + nodes = {} + + def get_dummy_list(self): + return self.nodes + + def __init__(self, uname, password, scheme, host, port): + self.client = None + node1 = DummyNode(1, "conpaas01", NodeState.RUNNING, "UnitTest", "127.0.0.1"); + node2 = DummyNode(2, "conpaas01", NodeState.RUNNING, "UnitTest", "127.0.0.1"); + node3 = DummyNode(3, "conpaas01", NodeState.RUNNING, "UnitTest", "127.0.0.1"); + self.nodes[node1.id] = node1 + self.nodes[node2.id] = node2 + self.nodes[node3.id] = node3 + + def list_nodes(self): + logger.debug("Dummy VMs:") + #nodes={} + retnodes={} + for i in self.nodes: + logger.debug( str(self.nodes[i].id) + ": " +str(self.nodes[i].name) + ", " +str(self.nodes[i].str_state)) + retnodes[self.nodes[i].id]=OneXmlrpcNode(self.nodes[i]) + return retnodes + + ''' + Create new node. 
+ ''' + def create_node(self, **kwargs): + logger.debug("Entering create_node") + if kwargs['function'] == 'agent': + logger.debug("creating agent") + template='''NAME = conpaassql-server +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "''' + str(kwargs['image'].id) + '''", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = '''+str(kwargs['ex_network_id'])+''' ] +GRAPHICS = [ + type="vnc" + ] +CONTEXT = [ + target=sdc, + files = '''+str(kwargs['ex_userdata_agent'])+''' + ] +RANK = "- RUNNING_VMS" +''' + elif kwargs['function'] == 'manager': + logger.debug("creating manager") + template='''NAME = conpaassql-server +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "''' + str(kwargs['image'].id) + '''", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = '''+str(kwargs['ex_network_id'])+''' ] +GRAPHICS = [ + type="vnc" + ] +CONTEXT = [ + target=sdc, + files = '''+str(kwargs['ex_userdata_manager'])+'''] +RANK = "- RUNNING_VMS" +''' + else: + logger.debug("creating") + template='''NAME = conpaassql_server +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "''' + str(kwargs['image'].id) + '''", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = '''+str(kwargs['ex_network_id'])+''' ] +GRAPHICS = [ + type="vnc" + ] +RANK = "- RUNNING_VMS" +''' + logger.debug('Provisioning VM:' + template) + randid = random.randint(1,100) + newnode = DummyNode(randid, "conpaas"+str(randid), NodeState.RUNNING, "UnitTest", "127.0.0.1"); + self.nodes[newnode.id] = newnode + logger.debug("Exiting create_node") + return newnode.id + + ''' + Destroying ONE node with XML RPC. 
+ ''' + def destroy_node(self, id): + logger.debug("Entering destroy_node with id " + str(id)) + del self.nodes[id] + logger.debug("Exiting destroy_node") + + def list_sizes(self, location=None): + return [ + NodeSize(id=1, + name="small", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=2, + name="medium", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=3, + name="large", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + +''' + XMLRPC driver for OpenNebula +''' +class OneXmlrpc(NodeDriver): + + def __init__(self, uname, password, scheme, host, port): + self.client = oca.Client(uname+":"+password, scheme+"://"+host+":"+str(port)+"/RPC2") + + def list_nodes(self): + vm_pool=oca.VirtualMachinePool(self.client) + vm_pool.info(-2) + logger.debug("All VMs:") + nodes={} + for i in vm_pool: + logger.debug( str(i.id) + ": " +str(i.name) + ", " +str(i.str_state)) + nodes[i.id]=OneXmlrpcNode(i) + return nodes + + ''' + Create new node. 
+ ''' + def create_node(self, **kwargs): + logger.debug("Entering create_node") + if kwargs['function'] == 'agent': + logger.debug("creating agent") + template='''NAME = conpaassql-server +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "''' + str(kwargs['image'].id) + '''", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = '''+str(kwargs['ex_network_id'])+''' ] +GRAPHICS = [ + type="vnc" + ] +CONTEXT = [ + target=sdc, + files = '''+str(kwargs['ex_userdata_agent'])+''' + ] +RANK = "- RUNNING_VMS" +''' + elif kwargs['function'] == 'manager': + logger.debug("creating manager") + template='''NAME = conpaassql-server +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "''' + str(kwargs['image'].id) + '''", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = '''+str(kwargs['ex_network_id'])+''' ] +GRAPHICS = [ + type="vnc" + ] +CONTEXT = [ + target=sdc, + files = '''+str(kwargs['ex_userdata_manager'])+'''] +RANK = "- RUNNING_VMS" +''' + else: + logger.debug("creating") + template='''NAME = conpaassql_server +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image_id = "''' + str(kwargs['image'].id) + '''", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK_ID = '''+str(kwargs['ex_network_id'])+''' ] +GRAPHICS = [ + type="vnc" + ] +RANK = "- RUNNING_VMS" +''' + logger.debug('Provisioning VM:' + template) + rez=oca.VirtualMachine.allocate(self.client, template) + logger.debug('Result:' + str(rez)) + logger.debug("Exiting create_node") + return rez + + ''' + Destroying ONE node with XML RPC. 
+ ''' + def destroy_node(self, id): + logger.debug("Entering destroy_node") + #oca.VirtualMachine.finalize(self.client.id) + vm_pool=oca.VirtualMachinePool(self.client) + vm_pool.info(-2) + vm = vm_pool.get_by_id(id) + vm.finalize() + logger.debug("Exiting destroy_node") + + def list_sizes(self, location=None): + return [ + NodeSize(id=1, + name="small", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=2, + name="medium", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + NodeSize(id=3, + name="large", + ram=None, + disk=None, + bandwidth=None, + price=None, + driver=self), + ] + +class IaaSClient: + + RUNNING = NodeState.RUNNING + REBOOTING = NodeState.REBOOTING + TERMINATED = NodeState.TERMINATED + PENDING = NodeState.PENDING + UNKNOWN = NodeState.UNKNOWN + + driver = None + + def __config_opennebula_dummy(self, iaas_config): + if not iaas_config.has_option('iaas', 'OPENNEBULA_URL'): raise Exception('Configuration error: [iaas] OPENNEBULA_URL not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_USER'): raise Exception('Configuration error: [iaas] OPENNEBULA_USER not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_PASSWORD'): raise Exception('Configuration error: [iaas] OPENNEBULA_PASSWORD not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_NETWORK_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_NETWORK_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_SIZE_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_SIZE_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_IMAGE_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_IMAGE_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_MANAGER'): raise Exception('Configuration error: [iaas] OPENNEBULA_CONTEXT_SCRIPT_MANAGER not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_AGENT'): raise Exception('Configuration 
error: [iaas] OPENNEBULA_CONTEXT_SCRIPT_AGENT not set') + + parsed = urlparse.urlparse(iaas_config.get('iaas', 'OPENNEBULA_URL')) + self.scheme = parsed.scheme + self.host = parsed.hostname + self.port = parsed.port + self.path = parsed.path + self.username = iaas_config.get('iaas', 'OPENNEBULA_USER') + self.password = iaas_config.get('iaas', 'OPENNEBULA_PASSWORD') + self.img_id = iaas_config.get('iaas', 'OPENNEBULA_IMAGE_ID') + self.size_id = iaas_config.get('iaas', 'OPENNEBULA_SIZE_ID') + self.on_ex_network_id = iaas_config.get('iaas', 'OPENNEBULA_NETWORK_ID') + self.one_context_manager_script = iaas_config.get('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_MANAGER') + self.one_context_agent_script = iaas_config.get('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_AGENT') + self.driver = DummyONEDriver(self.username, self.password, self.scheme, self.host, self.port); + + def __config_opennebula_xmlrpc(self, iaas_config): + if not iaas_config.has_option('iaas', 'OPENNEBULA_URL'): raise Exception('Configuration error: [iaas] OPENNEBULA_URL not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_USER'): raise Exception('Configuration error: [iaas] OPENNEBULA_USER not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_PASSWORD'): raise Exception('Configuration error: [iaas] OPENNEBULA_PASSWORD not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_NETWORK_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_NETWORK_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_SIZE_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_SIZE_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_IMAGE_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_IMAGE_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_MANAGER'): raise Exception('Configuration error: [iaas] OPENNEBULA_CONTEXT_SCRIPT_MANAGER not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_AGENT'): raise 
Exception('Configuration error: [iaas] OPENNEBULA_CONTEXT_SCRIPT_AGENT not set') + + parsed = urlparse.urlparse(iaas_config.get('iaas', 'OPENNEBULA_URL')) + self.scheme = parsed.scheme + self.host = parsed.hostname + self.port = parsed.port + self.path = parsed.path + self.username = iaas_config.get('iaas', 'OPENNEBULA_USER') + self.password = iaas_config.get('iaas', 'OPENNEBULA_PASSWORD') + self.img_id = iaas_config.get('iaas', 'OPENNEBULA_IMAGE_ID') + self.size_id = iaas_config.get('iaas', 'OPENNEBULA_SIZE_ID') + self.on_ex_network_id = iaas_config.get('iaas', 'OPENNEBULA_NETWORK_ID') + self.one_context_manager_script = iaas_config.get('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_MANAGER') + self.one_context_agent_script = iaas_config.get('iaas', 'OPENNEBULA_CONTEXT_SCRIPT_AGENT') + + self.driver = OneXmlrpc(self.username, self.password, self.scheme, self.host, self.port) + + def __config_opennebula(self, iaas_config): + if not iaas_config.has_option('iaas', 'OPENNEBULA_URL'): raise Exception('Configuration error: [iaas] OPENNEBULA_URL not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_USER'): raise Exception('Configuration error: [iaas] OPENNEBULA_USER not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_PASSWORD'): raise Exception('Configuration error: [iaas] OPENNEBULA_PASSWORD not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_IMAGE_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_IMAGE_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_SIZE_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_SIZE_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_NETWORK_ID'): raise Exception('Configuration error: [iaas] OPENNEBULA_NETWORK_ID not set') + if not iaas_config.has_option('iaas', 'OPENNEBULA_NETWORK_GATEWAY'): raise Exception('Configuration error: [iaas] OPENNEBULA_NETWORK_GATEWAY not set') + + parsed = urlparse.urlparse(iaas_config.get('iaas', 'OPENNEBULA_URL')) + self.scheme = parsed.scheme 
+ self.host = parsed.hostname + self.port = parsed.port + self.path = parsed.path + self.username = iaas_config.get('iaas', 'OPENNEBULA_USER') + self.password = iaas_config.get('iaas', 'OPENNEBULA_PASSWORD') + + self.img_id = iaas_config.get('iaas', 'OPENNEBULA_IMAGE_ID') + self.size_id = iaas_config.get('iaas', 'OPENNEBULA_SIZE_ID') + + self.on_ex_network_id = iaas_config.get('iaas', 'OPENNEBULA_NETWORK_ID') + self.on_ex_network_gateawy = iaas_config.get('iaas', 'OPENNEBULA_NETWORK_GATEWAY') + + ONDriver = get_driver(Provider.OPENNEBULA) + self.driver = ONDriver(self.username, secret=self.password, secure=(self.scheme == 'https'), host=self.host, port=self.port) + + def __config_ec2(self, iaas_config): + if not iaas_config.has_option('iaas', 'EC2_USER'): raise Exception('Configuration error: [iaas] EC2_USER not set') + if not iaas_config.has_option('iaas', 'EC2_PASSWORD'): raise Exception('Configuration error: [iaas] EC2_PASSWORD not set') + if not iaas_config.has_option('iaas', 'EC2_IMAGE_ID'): raise Exception('Configuration error: [iaas] EC2_IMAGE_ID not set') + if not iaas_config.has_option('iaas', 'EC2_SIZE_ID'): raise Exception('Configuration error: [iaas] EC2_SIZE_ID not set') + + if not iaas_config.has_option('iaas', 'EC2_SECURITY_GROUP_NAME'): raise Exception('Configuration error: [iaas] EC2_SECURITY_GROUP_NAME not set') + if not iaas_config.has_option('iaas', 'EC2_KEY_NAME'): raise Exception('Configuration error: [iaas] EC2_KEY_NAME not set') + + self.username = iaas_config.get('iaas', 'EC2_USER') + self.password = iaas_config.get('iaas', 'EC2_PASSWORD') + + self.ec2_ex_securitygroup = iaas_config.get('iaas', 'EC2_SECURITY_GROUP_NAME') + self.ec2_ex_keyname = iaas_config.get('iaas', 'EC2_KEY_NAME') + + self.img_id = iaas_config.get('iaas', 'EC2_IMAGE_ID') + self.size_id = iaas_config.get('iaas', 'EC2_SIZE_ID') + + EC2Driver = get_driver(Provider.EC2_US_EAST) + self.driver = EC2Driver(self.username, self.password) + + def __setdriver(self, iaas_config): + 
if not iaas_config.has_option('iaas', 'DRIVER'): raise Exception('Configuration error: [iaas] DRIVER not set') + drivername = iaas_config.get('iaas', 'DRIVER') + if drivername == 'OPENNEBULA': + self.__config_opennebula(iaas_config) + if drivername == 'OPENNEBULA_XMLRPC': + self.__config_opennebula_xmlrpc(iaas_config) + if drivername == 'OPENNEBULA_DUMMY': + self.__config_opennebula_dummy(iaas_config) + elif drivername == 'EC2': + self.__config_ec2(iaas_config) + + def __init__(self, iaas_config): + self.__setdriver(iaas_config) + + '''List VMs which are part of my configuration. + ''' + def listVMs(self): + nodes = self.driver.list_nodes() + vms = {} + for node in nodes.values(): + vms[node.id] = {'id': node.id, + 'state': node.state, + 'name': node.name, + 'ip': node.public_ip} + return vms + + def getVMInfo(self, vm_id): + return self.listVMs()[vm_id] + + def newInstance(self, function): + size_one = [ i for i in self.driver.list_sizes() if i.id == self.size_id ] + size = size_one[0] + img = NodeImage(self.img_id, '', None) + kwargs = {'size': size, + 'image': img, + 'function' : function + } + if isinstance(self.driver, OneXmlrpc): + kwargs['ex_network_id'] = self.on_ex_network_id + kwargs['ex_userdata_manager'] = self.one_context_manager_script + kwargs['ex_userdata_agent'] = self.one_context_agent_script + if isinstance(self.driver, DummyONEDriver): + kwargs['ex_network_id'] = self.on_ex_network_id + kwargs['ex_userdata_manager'] = self.one_context_manager_script + kwargs['ex_userdata_agent'] = self.one_context_agent_script + if isinstance(self.driver, OpenNebulaNodeDriver): + kwargs['ex_network_id'] = self.on_ex_network_id + kwargs['ex_network_gateawy'] = self.on_ex_network_gateawy + if isinstance(self.driver, EC2NodeDriver): + kwargs['ex_securitygroup'] = self.ec2_ex_securitygroup + kwargs['ex_keyname'] = self.ec2_ex_keyname + kwargs['ex_userdata'] = '''#!/bin/bash + wget -P /root/ http://hppc644.few.vu.nl/contrail/ConPaaSWeb.tar.gz + wget -P /root/ 
http://hppc644.few.vu.nl/contrail/agent-start + wget -P /root/ http://hppc644.few.vu.nl/contrail/agent-stop + + chmod 755 /root/agent-start + chmod 755 /root/agent-stop + + /root/agent-start + ''' + + node = self.driver.create_node(name='conpaas', **kwargs) + nodes = self.driver.list_nodes() + + return {'id': nodes[node].id, + 'state': nodes[node].state, + 'name': nodes[node].name, + 'ip': nodes[node].public_ip} + + def killInstance(self, vm_id): + nodes = self.driver.list_nodes() + for i in nodes: + if int(i) == int(vm_id): + return self.driver.destroy_node(i) + return False diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/log.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/log.py new file mode 100644 index 0000000000000000000000000000000000000000..d6e1dfaa00cf94d588bc462ff83de825332d36f4 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/log.py @@ -0,0 +1,41 @@ +''' +Created on Feb 9, 2011 + +@author: ielhelw +''' + +import logging +import os + +logging_level = logging.DEBUG + +log_formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(lineno)d %(message)s') + +'''Where to put logs (directory). 
+''' +log_dir_path = None + +stream_handler = logging.StreamHandler() +stream_handler.setFormatter(log_formatter) +stream_handler.setLevel(logging_level) + +#file_handler = logging.FileHandler(filename, mode, encoding, delay) +#file_handler.setFormatter(log_formatter) + +def set_logging_level(level): + logging_level = level + stream_handler.setLevel(level) + +def register_logger(logger): + logger.addHandler(stream_handler) +# logger.addHandler(file_handler) + +def create_logger(name): + logger = logging.getLogger(name) + logger.setLevel(logging_level) + register_logger(logger) + #hdlr = logging.FileHandler(os.getcwd() + '/contrail.log') + hdlr = logging.FileHandler('/tmp/conpaassql-contrail.log') + + logger.addHandler(hdlr) + return logger \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/agent_client.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/agent_client.py new file mode 100644 index 0000000000000000000000000000000000000000..43e90fff8bc4e1629b3ace72f02148340ca78767 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/agent_client.py @@ -0,0 +1,179 @@ +''' +Created on Jun 8, 2011 + +@author: ales +''' +from conpaas.web.http import _http_get, _http_post, _jsonrpc_get, _jsonrpc_post +import httplib, json +import sys + + +class AgentException(Exception): pass + +def _check(response): + code, body = response + if code != httplib.OK: raise 
AgentException('Received http response code %d' % (code)) + try: data = json.loads(body) + except Exception as e: raise AgentException(*e.args) + if data['error']: raise AgentException(data['error']) + else: return True + +#=============================================================================== +# def __check_reply(body): +# try: +# ret = json.loads(body) +# except Exception as e: raise AgentException(*e.args) +# if not isinstance(ret, dict): raise AgentException('Response not a JSON object') +# if 'opState' not in ret: raise AgentException('Response does not contain "opState"') +# if ret['opState'] != 'OK': +# if 'ERROR' in ret['opState']: raise AgentException(ret['opState'], ret['error']) +# else: raise AgentException(ret['opState']) +# return ret +#=============================================================================== + +def get_server_state(host, port): + method = "get_server_state" + result = _jsonrpc_get(host, port, '/', method) + if _check(result): + return result + else: + return False + +def create_server(host, port): + method = "create_server" + result = _jsonrpc_post(host, port, '/', method) + if _check(result): + return result + else: + return False + +def printUsage(): + print 'Usage: agent_ip agent_port function function_params\n\ +Functions: get_server_state - no params\n \ + createMySQLServer - no params\n \ + restartMySQLServer - no params\n \ + stopMySQLServer - no params\n \ + configure_user - username, port \n \ + get_all_users - no params\n \ + remove_user - name \n \ + setMySQLServerConfiguration - paramid value\n \ + send_mysqldump - location on disc\n' + pass + +def restartMySQLServer(host, port): + method = "restartMySQLServer" + result = _jsonrpc_post(host, port, '/', method) + if _check(result): + return result + else: + return False + +def stop_server(host, port): + method = "stop_server" + result = _jsonrpc_post(host, port, '/', method) + if _check(result): + return result + else: + return False + +def 
configure_user(host, port, username, password): + method = 'configure_user' + params = {'username': username, + 'password': password} + return _check(_jsonrpc_post(host, port, '/', method, params=params)) + +def get_all_users(host, port): + method = "get_all_users" + result = _jsonrpc_get(host, port, '/', method) + if _check(result): + return result + else: + return False + +def remove_user(host,port,name): + method = 'remove_user' + params = {'username': name} + return _check(_jsonrpc_get(host, port, '/', method, params=params)) + +def setMySQLServerConfiguration(host,port, param_id, val): + params = {'action': 'setMySQLServerConfiguration', + 'id_param': param_id, + 'value': val + } + code, body = _http_post(host, port, '/', params= params) + if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) + return __check_reply(body) + +def send_mysqldump(host,port,location): + params = {'method': 'create_with_MySQLdump'} + files = {'mysqldump': location} + return _check(_http_post(host, port, '/', params, files=files)) + + #method = 'create_with_MySQLdump' + #params = { + # 'action': 'create_with_MySQLdump'} + #_jsonrpc_post(host, port, '/', method, params=params) + + #code, body = _http_post(host, port, '/', params= params, files={'mysqldump':location}) + #if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) + #return __check_reply(body) + +def set_up_replica_master(host,port): + params = { + 'action': 'set_up_replica_master'} + code, body = _http_post(host, port, '/', params= params) + if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) + return __check_reply(body) + +''' + @param master_host: hostname of the master node. + @param master_log_file: filename of the master log. + @param master_log_pos: position of the master log file. + @param slave_server_id: id which will be written into my.cnf. 
+ +''' +def set_up_replica_slave(host,port, master_host, master_log_file, master_log_pos, slave_server_id): + params = { + 'action': 'set_up_replica_slave', + 'master_host': master_host, + 'master_log_file': master_log_file, + 'master_log_pos': master_log_pos, + 'slave_server_id': slave_server_id + } + code, body = _http_post(host, port, '/', params= params) + if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) + return __check_reply(body) + +if __name__ == '__main__': + if sys.argv.__len__() > 3: + host = sys.argv[1] + port = sys.argv[2] + if sys.argv[3] == 'get_server_state': + ret = get_server_state(host, port) + print ret + if sys.argv[3] == 'create_server': + ret = create_server(host, port) + print ret + if sys.argv[3] == 'restartMySQLServer': + ret = restartMySQLServer(host, port) + print ret + if sys.argv[3] == 'stop_server': + ret = stop_server(host, port) + print ret + if sys.argv[3] == 'configure_user': + ret = configure_user(host, port, sys.argv[4], sys.argv[5]) + print ret + if sys.argv[3] == 'get_all_users': + ret =get_all_users(host, port) + print ret + if sys.argv[3] == 'remove_user': + ret = remove_user(host,port,sys.argv[4]) + print ret + if sys.argv[3] == 'setMySQLServerConfiguration': + ret = setMySQLServerConfiguration(host,port, sys.argv[4], sys.argv[5]) + print ret + if sys.argv[3] == 'send_mysqldump': + ret = send_mysqldump(host,port,sys.argv[4]) + print ret + else: + printUsage() \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/manager_client.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/manager_client.py new file mode 100644 index 0000000000000000000000000000000000000000..e527d1eb2489bffdf28924447ea2cd7dad5b1984 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/manager_client.py @@ -0,0 +1,141 @@ +''' +Created on Jun 8, 2011 + +@author: ales +''' +from conpaas.web.http import _http_get, _http_post, HttpError, 
_jsonrpc_get,\ + _jsonrpc_post +import httplib, json +import sys +from threading import Thread +from conpaas.mysql.server.manager.internals import get_node_info + +class ManagerException(Exception): pass + +class ClientError(Exception): pass + +def _check(response): + code, body = response + if code != httplib.OK: raise HttpError('Received http response code %d' % (code)) + try: data = json.loads(body) + except Exception as e: raise ClientError(*e.args) + if data['error']: raise ClientError(data['error']) + else: return data['result'] + +def __check_reply(body): + try: + ret = json.loads(body) + except Exception as e: raise ManagerException(*e.args) + if not isinstance(ret, dict): raise ManagerException('Response not a JSON object') + if 'opState' not in ret: raise ManagerException('Response does not contain "opState"') + if ret['opState'] != 'OK': + if 'ERROR' in ret['opState']: raise ManagerException(ret['opState'], ret['error']) + else: raise ManagerException(ret['opState']) + return ret + +def printUsage(): + print 'Usage: service_ip service_port function function_params\n\ +Functions: list_nodes - no params\n \ + get_node_info - no params\n \ + get_service_info - no params\n \ + add_nodes - no params\n \ + remove_nodes - username, port \n \ + get_service_performance - no params\n' + pass + +#=============================================================================== +# def getListServiceNodes(host, port): +# params = {'action': 'listServiceNodes'} +# code, body = _http_get(host, port, '/', params=params) +# if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) +# return __check_reply(body) +#=============================================================================== + +def list_nodes(host, port): + method = 'list_nodes' + return _check(_jsonrpc_get(host, port, '/', method)) + +def get_node_info(host, port, serviceNodeId): + method = 'get_node_info' + params = {'serviceNodeId': serviceNodeId} + return 
_check(_jsonrpc_get(host, port, '/', method, params=params)) + +def get_service_info(host, port): + method = 'get_service_info' + return _check(_jsonrpc_get(host, port, '/', method)) + +def add_nodes(host, port, function): + params = {} + params['function'] = function + method = 'add_nodes' + return _check(_jsonrpc_post(host, port, '/', method, params=params)) + +#=============================================================================== +# def getMySQLServerState(host, port): +# params = {'action': 'getMySQLServerManagerState'} +# code, body = _http_get(host, port, '/', params=params) +# if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) +# return __check_reply(body) +# +# def addServiceNode(host, port, function): +# params = {'action': 'createServiceNode', 'function':function} +# #Thread(target=wait_for_reply(params)).start() +# code, body = _http_post(host, port, '/', params=params) +# if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) +# return __check_reply(body) +#=============================================================================== + + + +#def wait_for_reply(prms): +# code, body = _http_post(host, port, '/', params=prms) +# if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) +# return __check_reply(body) + +#=============================================================================== +# def deleteServiceNode(host, port, id): +# params = {'action': 'deleteServiceNode','id':str(id)} +# code, body = _http_post(host, port, '/', params=params) +# if code != httplib.OK: raise Exception('Received HTTP response code %d' % (code)) +# return __check_reply(body) +#=============================================================================== + +def remove_nodes(host, port, serviceNodeId): + method = 'remove_nodes' + params={} + params['serviceNodeId'] = serviceNodeId + return _check(_jsonrpc_post(host, port, '/', method, params=params)) + +def 
get_service_performance(host, port): + method = 'get_service_performance' + return _check(_jsonrpc_get(host, port, '/', method)) + +if __name__ == '__main__': + if sys.argv.__len__() in (4, 5): + host = sys.argv[1] + port = sys.argv[2] + if sys.argv[3] in ("list_nodes"): + ret = list_nodes(host, port) + print ret + if sys.argv[3] in ("add_nodes"): + ret = add_nodes(host, port, sys.argv[4]) + print ret + if sys.argv[3] in ("get_service_info"): + ret = get_service_info(host, port) + print ret + if sys.argv[3] in ("remove_nodes"): + id = sys.argv[4] + ret = remove_nodes(host, port, id) + print ret + if sys.argv[3] in ("list_nodes"): + ret = list_nodes(host, port) + print ret + if sys.argv[3] in ("get_node_info"): + serviceNodeId = sys.argv[4] + ret = get_node_info(host, port, serviceNodeId) + print ret + if sys.argv[3] in ("get_service_performance"): + ret = get_service_performance(host, port) + print ret + else: + printUsage() \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/test.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/test.py new file mode 100644 index 0000000000000000000000000000000000000000..996b7f3e6c9ae186a925dec10deda5907eacf745 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/test.py @@ -0,0 +1,46 @@ + +from conpaas.mysql.client.agent_client import getMySQLServerState, createMySQLServer, stopMySQLServer, setMySQLServerConfiguration,\ + remove_user, get_all_users, configure_user +import unittest + +class test_startup (unittest.TestCase): + def test1_is_shutdown(self): + dict = getMySQLServerState("127.0.0.1", 60000) + self.assertEqual(dict['return']['state'], 'STOPPED') + #we create Mysql server and check if it is really up and what's the server state + def test2_starting_server(self): + dict = createMySQLServer('127.0.0.1', 60000) + self.assertEqual(dict['opState'], 'OK') + def test3_is_running(self): + dictionari = getMySQLServerState("127.0.0.1", 
60000) + self.assertEqual(dictionari['return']['port'], "3306") + self.assertEqual(dictionari['return']['state'],"RUNNING") + #def test4_adding_user(self): + # dict = get_all_users('127.0.0.1',60000) + # self.assertEqual(dict['opState'], 'OK') + # dict = configure_user('127.0.0.1', 60000, 'janez4','rekar') + # self.assertEqual(dict['opState'],'OK') + #def test5_removing_user(self): + # dict = remove_user('127.0.0.1', 60000, 'janez4') + # self.assertEqual(dict['opState'],'OK') + def test6_changing_port_number(self): + dict = setMySQLServerConfiguration('127.0.0.1', 60000, "port", 50000) + self.assertEqual(dict['opState'], 'OK'); + dictionari = getMySQLServerState("127.0.0.1", 60000) + self.assertNotEqual(dictionari['return']['port'], "3306", "port wasn't changed") + self.assertEqual(dictionari['return']['port'], "50000", "wrong port") + dict = setMySQLServerConfiguration('127.0.0.1', 60000, "port", 3306) + def test6_stoping_server(self): + dict = stopMySQLServer("127.0.0.1", 60000) + self.assertEqual(dict['opState'], "OK") + dict = getMySQLServerState("127.0.0.1", 60000) + self.assertEqual(dict['return']['state'], 'STOPPED') + test_startup.test1_is_shutdown(self) + +if __name__ == '__main__': +#server mora biti ugasnen predno se zacnejo testi + if (getMySQLServerState('127.0.0.1', 60000)['return'] != 'shutdown'): + if (getMySQLServerState('127.0.0.1', 60000)['return']['state'] == 'RUNNING') : + stopMySQLServer('127.0.0.1', 60000) + unittest.main() + \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/test_pb.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/test_pb.py new file mode 100644 index 0000000000000000000000000000000000000000..08ea06b27fd86bf1b10b4909c303b5d7966d27a3 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/client/test_pb.py @@ -0,0 +1,11 @@ +import MySQLdb + + +db = MySQLdb.connect("localhost","root","ErtPoi") +exc = db.cursor() +exc.execute("show databases;") 
+ret = exc.fetchall() +for retr in ret: + for retrt in retr: + print retrt +db.close() \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/configuration.cnf b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/configuration.cnf new file mode 100644 index 0000000000000000000000000000000000000000..c0fcdc2202a62231503eb3c891c3e5f5aab86bb6 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/configuration.cnf @@ -0,0 +1,8 @@ +[MySQL_root_connection] +location= +password=topole48 +username=root + +[MySQL_configuration] +my_cnf_file=/etc/mysql/my.cnf +path_mysql_ssr=/etc/init.d/mysql \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/internals.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/internals.py new file mode 100644 index 0000000000000000000000000000000000000000..9a24643f832fd0f7501307572d81507293b3f9f1 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/internals.py @@ -0,0 +1,689 @@ +from threading import Lock +from string import Template +from os import kill, makedirs, remove +from os.path import join, devnull, exists +from subprocess import Popen + +from conpaas.log import create_logger +from subprocess import Popen +from os.path import devnull, exists + +import socket +import os +import ConfigParser +import 
MySQLdb +import pickle +import io +from conpaas.web.http import HttpJsonResponse, HttpErrorResponse + +exposed_functions = {} + +CONFIGURATION_FILE='configuration.cnf' +DATABASE_DUMP_LOCATION='/tmp/contrail_dbdump.db' + +E_ARGS_UNEXPECTED = 0 +E_CONFIG_NOT_EXIST = 1 +E_CONFIG_READ_FAILED = 2 +E_CONFIG_EXISTS = 3 +E_ARGS_INVALID = 4 +E_UNKNOWN = 5 +E_CONFIG_COMMIT_FAILED = 6 +E_ARGS_MISSING = 7 +E_MYSQL = 8 + +E_STRINGS = [ + 'Unexpected arguments %s', # 1 param (a list) + 'Unable to open configuration file: %s', + 'Failed to parse configuration file error: %s', + 'Configuration file already exists', + 'Invalid arguments: %s', + 'Unknown error. Description: %s', + 'Failed to commit configuration', + 'Missing argument: %s', + 'MySQL reported an error: %s' +] + +web_lock = Lock() +logger = create_logger(__name__) +mysql_file = "/tmp/conpaassql" + +S_PREFIX = "/tmp/" +S_INIT = 'INIT' +S_STARTING = 'STARTING' +S_RUNNING = 'RUNNING' +S_STOPPING = 'STOPPING' +S_STOPPED = 'STOPPED' +S_MYSQL_HOME = S_PREFIX + 'conpaas/conf/mysql' + +agent = None + +class MySQLServerConfiguration: + + pid_file = None + + dummy_config = False + + def __init__(self, config, _dummy_config=False): + self.dummy_config = _dummy_config + '''holds the configuration of the server. + ''' + logger.debug("Entering init MySQLServerConfiguration") + self.hostname = socket.gethostname() + self.restart_count = 0 + '''Default mysql pid file. + ''' + try: + '''This is always like that. 
+ ''' + self.pid_file = "/var/lib/mysql/" + self.hostname + ".pid" + logger.debug("Trying to get params from configuration file ") + self.conn_location = config.get("MySQL_root_connection", "location") + self.conn_username = config.get("MySQL_root_connection", "username") + self.conn_password = config.get("MySQL_root_connection", "password") + logger.debug("Got parameters for root connection to MySQL") + self.mycnf_filepath = config.get("MySQL_configuration","my_cnf_file") + self.path_mysql_ssr = config.get("MySQL_configuration","path_mysql_ssr") + file = open(self.mycnf_filepath) + my_cnf_text = file.read() + mysqlconfig = ConfigParser.ConfigParser() + #mysqlconfig = ConfigParser.RawConfigParser(allow_no_value=True) + mysqlconfig.readfp( self.MySQLConfigParser(my_cnf_text)) + #mysqlconfig.readfp(io.BytesIO(my_cnf_text)) + self.port_mysqld = mysqlconfig.get ("mysqld", "port") + self.bind_address = mysqlconfig.get ("mysqld", "bind-address") + self.data_dir = mysqlconfig.get ("mysqld", "datadir") + #mysqlconfig.set("mysqld", "newOption", value=None) + #newfile=open("/tmp/contrial.tmp","w") + #mysqlconfig.write(newfile) + #newfile.close() + logger.debug("Got configuration parameters") + '''Removing temporary file created in MySQLConfigParser + ''' + os.system("rm temp.cnf") + except ConfigParser.Error, err: + ex = AgentException(E_CONFIG_READ_FAILED, str(err)) + logger.critical(ex.message) + except IOError, err: + ex = AgentException(E_CONFIG_NOT_EXIST, str(err)) + logger.critical(ex.message) + logger.debug("Leaving init MySQLServerConfiguration") + + def change_config(self, id_param, param): + if id_param == 'datadir': + os.system("sed -i 's\datadir\t\t= " + self.data_dir +"|datadir\t\t= " + param + "|g' " + self.mycnf_filepath) + self.data_dir = param + elif id_param == 'port': + os.system("sed -i 's/port\t\t= " + self.port_mysqld +"/port\t\t= " + param + "/g' " + self.mycnf_filepath) + self.port_mysqld = param + elif id_param == 'bind-address': + os.system("sed -i 
's/bind-address\t\t= " + self.bind_address +"/bind-address\t\t= " + param + "/g' " + self.mycnf_filepath) + self.bind_address = param + else: + ex = AgentException(E_CONFIG_READ_FAILED, "cant find id: " + id_param) + raise Exception(ex.message) + + '''Read mysqld configuration. Creates a temporary file in the working directory. Needs to be erased. + ''' + def MySQLConfigParser(self, text): + #comments inside + while text.count("#")>0: + text = text[0:text.index("#")] + text[text.index("\n",text.index("#")):] + zac = 0 + while text.count("\n",zac)>1: + if ( text[text.index("\n",zac)+1:text.index("\n",text.index("\n",zac+1))].find("[") == -1 + & text[text.index("\n",zac)+1:text.index("\n",text.index("\n",zac+1))].find("=") == -1): + text = text[0:text.index("\n",zac)+1] + text[text.index("\n",text.index("\n",zac+1)):] + zac = text.index("\n", zac+1) + # \n inside + while text.count("\t")>0: + text = text[0:text.index("\t")] + text[text.index("\t")+1:] + while text.count(" ")>0: + text = text[0:text.index(" ")] + text[text.index(" ")+1:] + while text.count("\n\n")>0: + text = text[0:text.index("\n\n")] + text[text.index("\n\n")+1:] + file = open(os.getcwd()+"/temp.cnf", "w") + file.write(text) + file.close() + file = open(os.getcwd()+"/temp.cnf","r") + return file + + def add_user_to_MySQL(self, new_username, new_password): + db = MySQLdb.connect(self.conn_location, self.conn_username, self.conn_password) + exc = db.cursor() + exc.execute ("create user '" + new_username + "'@'localhost' identified by '" + new_password + "'") + exc.execute ("grant all privileges on *.* TO '" + new_username + "'@'localhost' with grant option;") + exc.execute ("create user '" + new_username + "'@'%' identified by '" + new_password + "'") + exc.execute ("grant all privileges on *.* TO '" + new_username + "'@'%' with grant option;") + db.close() + + def remove_user_to_MySQL(self, username): + db = MySQLdb.connect(self.conn_location, self.conn_username, self.conn_password) + exc = 
db.cursor() + exc.execute ("drop user '" + username +"'@'localhost'") + exc.execute ("drop user '" + username +"'@'%'") + db.close() + + def get_users_in_MySQL(self): + db = MySQLdb.connect(self.conn_location, self.conn_username, self.conn_password) + exc = db.cursor() + exc.execute("SELECT user, host FROM mysql.user") + rows = exc.fetchall() + db.close() + ret = {'opState': 'OK'} + i = 0 + for row in rows: + i = i+1 + ret['info' + str(i)] = {'location': row[1], 'username': row[0]} + return ret + + '''Before creating a data snapshot or starting + the replication process, you should record the + position of the binary log on the master. You will + need this information when configuring the slave so + that the slave knows where within the binary log to + start executing events. See Section 15.1.1.4, Obtaining + the Replication Master Binary Log Coordinates. + +1st session +mysql> FLUSH TABLES WITH READ LOCK; + +2nd session +mysql > SHOW MASTER STATUS; +record the values + +close 2nd session + +on the master + mysqldump --all-databases --lock-all-tables >dbdump.db + +1st session +mysql>UNLOCK TABLES; + + ''' + def replication_record_the_position(self): + '''1st session + ''' + db1 = MySQLdb.connect(self.conn_location, self.conn_username, self.conn_password) + exc = db1.cursor() + exc.execute("FLUSH TABLES WITH READ LOCK;") + '''2nd session + ''' + db2 = MySQLdb.connect(self.conn_location, self.conn_username, self.conn_password) + exc = db2.cursor() + exc.execute("SHOW MASTER STATUS;") + rows = exc.fetchall() + db2.close() + i = 0 + ret = {'opState': 'OK'} + for row in rows: + i = i+1 + ret['position' + str(i)] = {'binfile': row[0], 'position': row[1]} + os.system("mysqldump -u " + self.conn_username + " -p" + self.conn_password + "--all-databases --lock-all-tables > " + DATABASE_DUMP_LOCATION) + exc = db1.cursor() + exc.execute("UNLOCK TABLES;") + db1.close() + return ret + + ''' + @param master_host: hostname of the master node. 
+ @param master_log_file: filename of the master log. + @param master_log_pos: position of the master log file. + @param slave_server_id: id which will be written into my.cnf. + + ''' + def set_up_replication_slave(self, master_host, master_log_file, master_log_pos, slave_server_id): + logger.debug('Entering set_up_replication_slave') + logger.debug("Creating sql query for replication slave-master connection") + db = MySQLdb.connect(self.conn_location, self.conn_username, self.conn_password) + exc = db.cursor() + query=("CHANGE MASTER TO MASTER_HOST='%s', " + + "MASTER_USER='%s', " + + "MASTER_PASSWORD='%s', " + "MASTER_LOG_FILE='%s', " + + "MASTER_LOG_POS=%s;" % (master_host, self.conn_username, self.conn_password, master_log_file, master_log_pos)) + logger.debug("Created query: " + query) + exc.execute(query) + db.close() + logger.debug("Adding server-id into my.cnf") + file = open(self.mycnf_filepath) + content = file.read() + mysqlconfig = ConfigParser.RawConfigParser(allow_no_value=True) + mysqlconfig.readfp(io.BytesIO(content)) + mysqlconfig.set("mysqld", "server-id", slave_server_id) + mysqlconfig.set("mysqld", "skip-slave-start", slave_server_id) + file.close() + os.remove(self.mycnf_filepath) + newfile=open(self.mycnf_filepath,"w") + logger.debug("Writing new configuration file.") + mysqlconfig.write(newfile) + newfile.close() + logger.debug("Restarting mysql server due to changed server-id.") + agent.restart() + logger.debug('Exiting set_up_replication_slave') + + def create_MySQL_with_dump(self, f): + logger.debug("Entering create_MySQL_with_dump") + try: + mysqldump = f.read() + logger.debug("temporary writing file to: : " + os.getcwd() + '/mysqldump') + file(os.getcwd() + '/mysqldump' , "wb").write(mysqldump) + os.system("mysql -u " + self.conn_username + " -p" + self.conn_password + " < " + os.getcwd() + '/mysqldump') + os.system("rm mysqldump") + logger.debug("Leaving create_MySQL_with_dump") + return HttpJsonResponse({'return':'OK'}) + except 
Exception as e: + ex = AgentException(E_UNKNOWN,e.message) + logger.exception(ex.message) + return HttpJsonResponse({'return': 'ERROR', 'error': e.message}) + +class MySQLServer: + + dummy_backend = False + + def __init__(self, configInput, _dummy_backend=False): + logger.debug("Entering MySQLServer initialization") + self.config = MySQLServerConfiguration(configInput, _dummy_backend) + self.state = S_RUNNING + self.dummy_backend = _dummy_backend + logger.debug("Leaving MySQLServer initialization") + + + def post_restart(self): pass + ''' TODO: things that are done after restart + ''' + + def start(self): + #TODO: could look for PID file ? + logger.debug("Entering MySQLServer.start") + self.state = S_STARTING + if self.dummy_backend == False: + devnull_fd = open(devnull, 'w') + logger.debug('Starting with arguments:' + self.config.path_mysql_ssr + " start") + proc = Popen([self.config.path_mysql_ssr, "start"], stdout=devnull_fd, stderr=devnull_fd, close_fds=True) + logger.debug("MySQL started") + proc.wait() + logger.debug("Server started.") + if exists(self.config.pid_file) == False: + logger.critical('Failed to start mysql server.)') + self.state = S_STOPPED + raise OSError('Failed to start mysql server.') + if proc.wait() != 0: + logger.critical('Failed to start mysql server (code=%d)' % proc.returncode) + self.state = S_STOPPED + raise OSError('Failed to start mysql server (code=%d)' % proc.returncode) + else: + logger.debug("Running with dummy backend") + self.state = S_RUNNING + logger.info('MySql started') + logger.debug('Leaving MySQLServer.start') + + def stop(self): + logger.debug('Entering MySQLServer.stop') + if self.dummy_backend: + if self.state == S_RUNNING: + logger.debug("Stopping server") + self.state = S_STOPPING + self.state = S_STOPPED + logger.debug('Leaving MySQLServer.stop') + else: + if self.state == S_RUNNING: + self.state = S_STOPPING + if exists(self.config.pid_file): + try: + int(open(self.config.pid_file, 'r').read().strip()) + 
devnull_fd = open(devnull, 'w') + logger.debug('Stopping with arguments:' + self.config.path_mysql_ssr + " stop") + proc = Popen([self.config.path_mysql_ssr, "stop"], stdout=devnull_fd, stderr=devnull_fd, close_fds=True) + logger.debug("Stopping server") + proc.wait() + if exists(self.config.pid_file) == True: + logger.critical('Failed to stop mysql server.)') + self.state = S_RUNNING + raise OSError('Failed to stop mysql server.') + self.state = S_STOPPED + except IOError as e: + self.state = S_STOPPED + logger.exception('Failed to open PID file "%s"' % (self.pid_file)) + raise e + except (ValueError, TypeError) as e: + self.state = S_STOPPED + logger.exception('PID in "%s" is invalid' % (self.pid_file)) + raise e + else: + logger.critical('Could not find PID file %s to kill WebServer' % (self.pid_file)) + self.state = S_STOPPED + logger.debug('Leaving MySQLServer.stop') + raise IOError('Could not find PID file %s to kill WebServer' % (self.pid_file)) + else: + logger.warning('Request to kill WebServer while it is not running') + logger.debug('Leaving MySQLServer.stop') + + def restart(self): + logger.debug("Entering MySQLServer restart") + self.config.restart_count += 1 + logger.debug("Restart count just increased to: " + str(self.config.restart_count)) + if self.dummy_backend: + logger.debug('Restarting with arguments:' + self.config.path_mysql_ssr + " restart") + logger.debug("Restarting mysql server") + self.state = S_RUNNING + logger.info('MySQL restarted') + else: + try: + #int(open(self.config.pid_file, 'r').read().strip()) + devnull_fd = open(devnull, 'w') + logger.debug('Restarting with arguments:' + self.config.path_mysql_ssr + " restart") + proc = Popen([self.config.path_mysql_ssr, "restart"] , stdout=devnull_fd, stderr=devnull_fd, close_fds=True) + logger.debug("Restarting mysql server") + proc.wait() + if exists(self.config.pid_file) == False: + logger.critical('Failed to restart mysql server.)') + raise OSError('Failed to restart mysql server.') + 
except IOError as e: + logger.exception('Failed to open PID file "%s"' % (self._current_pid_file(increment=-1))) + self.state = S_STOPPED + raise e + except (ValueError, TypeError) as e: + logger.exception('PID in "%s" is invalid' % (self._current_pid_file(increment=-1))) + self.state = S_STOPPED + raise e + else: + self.post_restart() + self.state = S_RUNNING + logger.info('MySQL restarted') + logger.debug("Leaving MySQLServer restart") + + def status(self): + logger.debug('Entering MySQLServer.status') + logger.debug('Leaving MySQLServer.status') + return {'state': self.state, + 'port': self.config.port_mysqld} + +def expose(http_method): + def decorator(func): + if http_method not in exposed_functions: + exposed_functions[http_method] = {} + exposed_functions[http_method][func.__name__] = func + def wrapped(*args, **kwargs): + return func(*args, **kwargs) + return wrapped + return decorator + +class AgentException(Exception): + def __init__(self, code, *args, **kwargs): + self.code = code + self.args = args + if 'detail' in kwargs: + self.message = '%s DETAIL:%s' % ( (E_STRINGS[code] % args), str(kwargs['detail']) ) + else: + self.message = E_STRINGS[code] % args + +def _mysqlserver_get_params(post_params): + '''TODO: check for file inclusion. Add aditional parameters. 
''' + ret = {} + logger.debug('Got post_params %s' % post_params) + if 'port' not in post_params: + raise AgentException(E_ARGS_MISSING, 'port') + if not post_params['port'].isdigit(): + raise AgentException(E_ARGS_INVALID, detail='Invalid "port" value') + ret['port'] = int(post_params.pop('port')) + if len(post_params) != 0: + raise AgentException(E_ARGS_UNEXPECTED, post_params.keys()) + return ret + +def getMySQLServerState_old(kwargs): + """GET state of WebServer""" + if len(kwargs) != 0: + return {'opState': 'ERROR', 'error': AgentException(E_ARGS_UNEXPECTED, kwargs.keys()).message} + with web_lock: + try: + if os.path.exists(mysql_file): + fd = open(mysql_file, 'r') + p = pickle.load(fd) + fd.close() + else: + return {'opState':'OK','return': 'shutdown'} + except Exception as e: + ex = AgentException(E_CONFIG_READ_FAILED, MySQLServer.__name__, mysql_file, detail=e) + logger.exception(ex.message) + return {'opState': 'ERROR', 'error': ex.message} + else: + return {'opState': 'OK', 'return': p.status()} + + +@expose('POST') +def create_server(post_params): + logger.debug("Entering create_server") + try: + agent.start() + logger.debug("Leaving create_server") + return HttpJsonResponse({'return': 'OK'}) + except Exception as e: + logger.exception("Error: " + str(e)) + return HttpJsonResponse({'return': 'ERROR', 'error': str(e)}) + +def createMySQLServer_old(post_params): + """Create the MySQLServer""" + logger.debug('Entering createMySQLServer') + try: post_params = _mysqlserver_get_params(post_params) + except AgentException as e: + return {'opState': 'ERROR', 'error': e.message} + else: + with web_lock: + if exists(mysql_file): + logger.debug('Leaving createMySQLServer') + return {'opState': 'ERROR', 'error': AgentException(E_CONFIG_EXISTS).message} + try: + if type(post_params) != dict: raise TypeError() + p = MySQLServer(**post_params) + except (ValueError, TypeError) as e: + ex = AgentException(E_ARGS_INVALID, detail=str(e)) + logger.debug('Leaving 
createMySQLServer') + return {'opState': 'ERROR', 'error': ex.message} + except Exception as e: + ex = AgentException(E_UNKNOWN, detail=e) + logger.exception(e) + logger.debug('Leaving createMySQLServer') + return {'opState': 'ERROR', 'error': ex.message} + else: + try: + fd = open(mysql_file, 'w') + pickle.dump(p, fd) + fd.close() + except Exception as e: + ex = AgentException(E_CONFIG_COMMIT_FAILED, detail=e) + logger.exception(ex.message) + return {'opState': 'ERROR', 'error': ex.message} + else: + return {'opState': 'OK'} + +def stopMySQLServer_old(kwargs): + """KILL the WebServer""" + if len(kwargs) != 0: + return {'opState': 'ERROR', 'error': AgentException(E_ARGS_UNEXPECTED, kwargs.keys()).message} + with web_lock: + try: + try: + fd = open(mysql_file, 'r') + p = pickle.load(fd) + fd.close() + except Exception as e: + ex = AgentException(E_CONFIG_READ_FAILED, 'stopMySQLServer',str(mysql_file), detail=e) + logger.exception(ex.message) + return {'opState': 'ERROR', 'error': ex.message} + p.stop() + remove(mysql_file) + return {'opState': 'OK'} + except Exception as e: + ex = AgentException(E_UNKNOWN, detail=e) + logger.exception(e) + return {'opState': 'ERROR', 'error': ex.message} + +''' + Shuts down the whole Agent together with MySQL server. 
+''' +def shutdownMySQLServerAgent(kwargs): + """Shutdown the Agent""" + agent.stop() + from conpaas.mysql.server.agent.server import agentServer + agentServer.shutdown() + import sys + sys.exit(0) + +@expose('POST') +def stop_server(params): + logger.debug("Entering stop_server") + try: + agent.stop() + logger.debug("Leaving stop_server") + return HttpJsonResponse ({'return':'OK'}) + except Exception as e: + ex = AgentException(E_UNKNOWN, 'stop_server', detail=e) + logger.exception(e) + logger.debug('Leaving stop_server') + return HttpJsonResponse ({'return': 'ERROR', 'error': ex.message}) + +@expose('POST') +def restart_server(params): + logger.debug("Entering restart_server") + try: + agent.restart() + logger.debug("Leaving restart_server") + return HttpJsonResponse ({'return':'OK'}) + except Exception as e: + ex = AgentException(E_UNKNOWN, 'restart_server', detail=e) + logger.exception(e) + logger.debug('Leaving restart_server') + return HttpJsonResponse ({'return': 'ERROR', 'error': ex.message}) + +@expose('GET') +def get_server_state(params): + logger.debug("Entering get_server_state") + try: + status = agent.status() + logger.debug("Leaving get_server_state") + return HttpJsonResponse({'return': status}) + except Exception as e: + ex = AgentException(E_UNKNOWN, 'get_server_state', detail=e) + logger.exception(e) + logger.debug('Leaving get_server_state') + return HttpJsonResponse({'return': status}) + +@expose('POST') +def set_server_configuration(params): + logger.debug("Entering set_server_configuration") + try: + agent.config.change_config(params['id_param'], params["value"]) + restart_server(None) + logger.debug("Leaving set_server_configuration") + return HttpJsonResponse ({'return':'OK'}) + except Exception as e: + ex = AgentException(E_UNKNOWN, 'set_server_configuration', detail=e) + logger.exception(e) + logger.debug('Leaving set_server_configuration') + return HttpJsonResponse ({'return': 'ERROR', 'error': ex.message}) + +@expose('POST') +def 
configure_user(params): + logger.debug("Entering configure_user") + if 'username' not in params: return HttpErrorResponse(AgentException(E_ARGS_MISSING,'username missing' ,'configure_user').message) + username = params.pop('username') + if 'password' not in params: return HttpErrorResponse(AgentException(E_ARGS_MISSING,'password missing' ,'configure_user').message) + password = params.pop('password') + if len(params) != 0: + return HttpErrorResponse(AgentException(E_ARGS_UNEXPECTED,'too many parameters', AgentException.keys()).message) + try: + logger.debug("configuring new user " + username) + agent.config.add_user_to_MySQL(username, password) + logger.debug("Leaving configure_user") + return HttpJsonResponse ({'return': 'OK'}) + except MySQLdb.Error, e: + ex = AgentException(E_MYSQL, 'error "%d, %s' %(e.args[0], e.args[1])) + logger.exception(ex.message) + return HttpJsonResponse ({'return': 'ERROR', 'error': ex.message}) + +@expose('POST') +def delete_user(params): + logger.debug("Entering delete_user") + if len(params) != 1: + ex = AgentException(E_ARGS_UNEXPECTED, params) + logger.exception(ex.message) + return HttpJsonResponse({'return': 'ERROR', 'error': ex.message}) + try: + agent.config.remove_user_to_MySQL(params['username']) + logger.debug("Leaving delete_user") + HttpJsonResponse({'return': 'OK'}) + except MySQLdb.Error, e: + ex = AgentException(E_MYSQL, 'error "%d, %s' %(e.args[0], e.args[1])) + logger.exception(ex.message) + return HttpJsonResponse({'return': 'ERROR', 'error': ex.message}) + +@expose('GET') +def get_all_users(params): + logger.debug("Entering get_all_users") + try: + ret = agent.config.get_users_in_MySQL() + logger.debug("Got response: " + str(ret)) + logger.debug("Leaving get_all_users") + return HttpJsonResponse({'users': ret}) + except MySQLdb.Error, e: + ex = AgentException(E_MYSQL, 'error "%d, %s' %(e.args[0], e.args[1])) + logger.exception(ex.message) + return HttpJsonResponse( {'users': 'ERROR', 'error': ex.message}) + 
+@expose('UPLOAD') +def create_with_MySQLdump(params): + logger.debug("Entering create_with_MySQLdump") + file=params['mysqldump'] + f=file.file + #f = params['mysqldump']['file'] + ret = agent.config.create_MySQL_with_dump(f) + return ret + +@expose('POST') +def set_up_replica_master(params): + agent.stop() + path=agent.config.mycnf_filepath + file = open(path) + content = file.read() + mysqlconfig = ConfigParser.RawConfigParser(allow_no_value=True) + mysqlconfig.readfp(io.BytesIO(content)) + mysqlconfig.set("mysqld", "server-id", "1") + mysqlconfig.set("mysqld", "log_bin", "/var/log/mysql/mysql-bin.log") + file.close() + os.remove(path) + newfile=open(path,"w") + mysqlconfig.write(newfile) + newfile.close() + position= agent.config.replication_record_the_position() + return {'opState': 'OK'} + +''' + 1)Change server id in the my.cnf. + 2)You will need to configure the slave with settings + for connecting to the master, such as the host name, login credentials, and binary + log file name and position. See Section 15.1.1.10, Setting the Master Configuration + on the Slave. + +Example: + mysql>CHANGE MASTER TO MASTER_HOST='vm-10-1-0-10', MASTER_USER='root', + MASTER_PASSWORD='topole48', MASTER_LOG_FILE='mysql-bin.000001', MASTER_LOG_POS=106; + + @param master_host: hostname of the master node. + @param master_log_file: filename of the master log. + @param master_log_pos: position of the master log file. + @param slave_server_id: id which will be written into my.cnf. 
+ +''' +@expose('POST') +def set_up_replica_slave(params): + logger.debug('Entering set_up_replica_slave') + if len(params) != 4: + ex = AgentException(E_ARGS_UNEXPECTED, params) + logger.exception(ex.message) + return {'opState': 'ERROR', 'error': ex.message} + agent.config.set_up_replication_slave(params['master_host'], + params['master_log_file'], + params['master_log_pos'], + params['slave_server_id']) + logger.debug('Entering set_up_replica_slave') + return {'opState': 'OK'} \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/params.cnf b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/params.cnf new file mode 100644 index 0000000000000000000000000000000000000000..ff0b85fc7243021e1c92d26baa67ce182f08acb0 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/params.cnf @@ -0,0 +1,4 @@ +[mysql] +location: localhost +username: root +password: topole48 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/server.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/server.py new file mode 100644 index 0000000000000000000000000000000000000000..370a6ca5ca7292eba51e6013c9a39eeda0020235 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/agent/server.py @@ -0,0 +1,53 @@ +''' +Created on Jun 01, 2011 + +@author: ales +''' +from BaseHTTPServer import HTTPServer +from SocketServer import ThreadingMixIn + +from conpaas.web.http import AbstractRequestHandler +from conpaas.log import create_logger +from conpaas.mysql.server.agent.internals import MySQLServer +from ConfigParser import ConfigParser + +logger = create_logger(__name__) +agentServer = None +''' + Holds configuration for the Agent. 
+''' + +''' + Class AgentServer +''' +class AgentServer(HTTPServer, ThreadingMixIn): + + def __init__(self, server_address, config, RequestHandlerClass=AbstractRequestHandler): + HTTPServer.__init__(self, server_address, RequestHandlerClass) + self.callback_dict = {'GET': {}, 'POST': {}, 'UPLOAD': {}} + + from conpaas.mysql.server.agent import internals + + self.whitelist_addresses = [] + + internals.agent = MySQLServer(config) + for http_method in internals.exposed_functions: + for func_name in internals.exposed_functions[http_method]: + logger.debug( 'Going to register ' + " " + http_method + " " +func_name) + self.register_method(http_method, func_name, getattr(internals, func_name)) + + def register_method(self, http_method, func_name, callback): + self.callback_dict[http_method][func_name] = callback + +if __name__ == '__main__': + from optparse import OptionParser + parser = OptionParser() + parser.add_option('-p', '--port', type='int', default=60000, dest='port') + parser.add_option('-b', '--bind', type='string', default='0.0.0.0', dest='address') + parser.add_option('-c', '--config', type='string', default='./configuration.cnf', dest='config') + options, args = parser.parse_args() + config_parser = ConfigParser() + config_parser.read(options.config) + print 'Starting the MySQL server at ', options.address, options.port + agentServer = AgentServer((options.address, options.port), config_parser) + agentServer.serve_forever() \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/config.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/config.py new file mode 100644 index 
0000000000000000000000000000000000000000..6d69c464801ffa8a14632d7ad83583046cc8f5f1 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/config.py @@ -0,0 +1,124 @@ +''' +Created on Mar 9, 2011 + + Holds configuration for the ConPaaS SQL Server Manager. + +@author: ales +''' +from conpaas.log import create_logger + +import ConfigParser +import os +from conpaas.iaas import DummyONEDriver + +MYSQL_PORT = 3306 +CONFIGURATION_FILE=os.getcwd() + "/sql_manager_configuration.cnf" + +E_ARGS_UNEXPECTED = 0 +E_CONFIG_READ_FAILED = 1 +E_CONFIG_NOT_EXIST=2 +E_UNKNOWN=3 +E_ARGS_MISSING = 4 +E_ARGS_INVALID=5 +E_STATE_ERROR=6 + +E_STRINGS = [ + 'Unexpected arguments %s', + 'Unable to open configuration file: %s', + 'Configuration file does not exist: %s', + 'Unknown error.', + 'Missing argument: %s', + 'Invalid argument: %s', + 'Service in wrong state' +] + +iaas = None + +logger = create_logger(__name__) + +class ManagerException(Exception): + + def __init__(self, code, *args, **kwargs): + self.code = code + self.args = args + if 'detail' in kwargs: + self.message = '%s DETAIL:%s' % ( (E_STRINGS[code] % args), str(kwargs['detail']) ) + else: + self.message = E_STRINGS[code] % args + +class ServiceNode(object): + + def __init__(self, vm, runMySQL=False): + self.vmid = vm['id'] + self.ip = vm['ip'] + self.name = vm['name'] + self.state = vm['state'] + self.isRunningMySQL = runMySQL + self.isRunningProxy = False + self.isRunningBackend= False + self.isRunningWeb= False + self.port = 60000 + + def __repr__(self): + return 'ServiceNode(vmid=%s, ip=%s, mysql=%s)' % (str(self.vmid), self.ip, str(self.isRunningMySQL)) + + def __cmp__(self, other): + if self.vmid == other.vmid: return 0 + elif self.vmid < other.vmid: return -1 + else: return 1 + +class Configuration(object): + + dummy_backend = False + + def __read_config(self,config, _dummy_backend = False): + logger.debug("Entering read_config") + try: + logger.debug("Trying to get params from 
configuration file ") + self.driver = config.get("iaas", "DRIVER") + if self.driver == "OPENNEBULA_XMLRPC": + self.xmlrpc_conn_location = config.get("iaas", "OPENNEBULA_URL") + self.conn_password = config.get("iaas", "OPENNEBULA_USER") + self.conn_username = config.get("iaas", "OPENNEBULA_PASSWORD") + logger.debug("Got configuration parameters") + except ConfigParser.Error, err: + ex = ManagerException(E_CONFIG_READ_FAILED, str(err)) + logger.critical(ex.message) + except IOError, err: + ex = ManagerException(E_CONFIG_NOT_EXIST, str(err)) + logger.critical(ex.message) + logger.debug("Leaving read_config") + + '''Representation of the deployment configuration''' + def __init__(self, configuration, _dummy_backend = False): + self.dummy_backend=_dummy_backend + self.mysql_count = 0 + self.serviceNodes = {} + self.__read_config(configuration, _dummy_backend) + + def getMySQLServiceNodes(self): + return [ serviceNode for serviceNode in self.serviceNodes.values() if serviceNode.isRunningMySQL ] + #return self.serviceNodes + + def getMySQLTuples(self): + return [ [serviceNode.ip, MYSQL_PORT] for serviceNode in self.serviceNodes.values() if serviceNode.isRunningMySQL ] + + def getMySQLIPs(self): + return [ serviceNode.ip for serviceNode in self.serviceNodes.values() if serviceNode.isRunningMySQL ] + + ''' + Add new Service Node to the server (configuration). + @param accesspoint: new VM + ''' + def addMySQLServiceNode(self, accesspoint): + logger.debug('Entering addMySQLServiceNode') + self.serviceNodes[accesspoint['id']]=ServiceNode(accesspoint, True) + self.mysql_count+=1 + logger.debug('Exiting addMySQLServiceNode') + + ''' + Remove Service Node to the server (configuration). 
+ ''' + def removeMySQLServiceNode(self, vmid): + del self.serviceNodes[vmid] + self.mysql_count-=1 \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/configuration.cnf b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/configuration.cnf new file mode 100644 index 0000000000000000000000000000000000000000..24dbce873336bde33db05fca02d56eb408a5a743 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/configuration.cnf @@ -0,0 +1,11 @@ +[iaas] +DRIVER=OPENNEBULA_XMLRPC +OPENNEBULA_URL=http://10.30.1.9:2633/RPC2 +#OPENNEBULA_URL=http://127.0.0.1:2633/RPC2 +OPENNEBULA_USER=oneadmin +OPENNEBULA_PASSWORD=oneadmin +OPENNEBULA_IMAGE_ID=166 +OPENNEBULA_NETWORK_ID=195 +OPENNEBULA_SIZE_ID=1 +OPENNEBULA_CONTEXT_SCRIPT_MANAGER=/home/ales/sql/manager/conpaassql-install.sh +OPENNEBULA_CONTEXT_SCRIPT_AGENT=/home/ales/sql/agent/conpaassql-install.sh \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/internals.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/internals.py new file mode 100644 index 0000000000000000000000000000000000000000..323fd289e4277137f2644428979b7acefe320c78 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/internals.py @@ -0,0 +1,401 @@ +''' +Created on Jun 7, 2011 + +@author: ales +''' +from conpaas.log import create_logger +from conpaas.mysql.server.manager.config import Configuration, ManagerException,\ + E_ARGS_UNEXPECTED, ServiceNode, E_UNKNOWN, E_ARGS_MISSING, E_STATE_ERROR, E_ARGS_INVALID +from threading import Thread +from conpaas.mysql.client import agent_client +import time +import conpaas +import conpaas.mysql.server.manager +from conpaas.web.http import HttpErrorResponse, HttpJsonResponse + +S_INIT = 'INIT' +S_PROLOGUE = 'PROLOGUE' +S_RUNNING = 'RUNNING' +S_ADAPTING = 'ADAPTING' +S_EPILOGUE = 'EPILOGUE' +S_STOPPED = 
'STOPPED' +S_ERROR = 'ERROR' + +memcache = None +dstate = None +exposed_functions = {} +config = None +logger = create_logger(__name__) +iaas = None +managerServer = None +dummy_backend = None + +class MySQLServerManager(): + + dummy_backend = False + + def __init__(self, conf, _dummy_backend=False): + logger.debug("Entering MySQLServerManager initialization") + conpaas.mysql.server.manager.internals.config = Configuration(conf, _dummy_backend) + self.state = S_INIT + self.dummy_backend = _dummy_backend + conpaas.mysql.server.manager.internals.dummy_backend = _dummy_backend + # TODO: + self.__findAlreadyRunningInstances() + logger.debug("Leaving MySQLServer initialization") + + ''' + Adds running instances of mysql agents to the list. + ''' + def __findAlreadyRunningInstances(self): + logger.debug("Entering __findAlreadyRunningInstances") + list = iaas.listVMs() + logger.debug('List obtained: ' + str(list)) + if self.dummy_backend: + for i in list.values(): + conpaas.mysql.server.manager.internals.config.addMySQLServiceNode(i) + else: + for i in list.values(): + up = True + try: + if i['ip'] != '': + logger.debug('Probing ' + i['ip'] + ' for state.') + ret = agent_client.get_server_state(i['ip'], 60000) + logger.debug('Returned query:' + str(ret)) + else: + up = False + except agent_client.AgentException as e: logger.error('Exception: ' + str(e)) + except Exception as e: + logger.error('Exception: ' + str(e)) + up = False + if up: + logger.debug('Adding service node ' + i['ip']) + conpaas.mysql.server.manager.internals.config.addMySQLServiceNode(i) + logger.debug("Exiting __findAlreadyRunningInstances") + +def expose(http_method): + def decorator(func): + if http_method not in exposed_functions: + exposed_functions[http_method] = {} + exposed_functions[http_method][func.__name__] = func + def wrapped(*args, **kwargs): + return func(*args, **kwargs) + return wrapped + return decorator + +''' + Wait for nodes to get ready. It tries to call a function of the agent. 
If exception + is thrown, wait for poll_interval seconds. + @param nodes: a list of nodes + @param poll_intervall: how many seconds to wait. +''' +def wait_for_nodes(nodes, poll_interval=10): + logger.debug('wait_for_nodes: going to start polling') + if conpaas.mysql.server.manager.internals.dummy_backend: + pass + else: + done = [] + while len(nodes) > 0: + for i in nodes: + up = True + try: + if i['ip'] != '': + logger.debug('Probing ' + i['ip'] + ' for state.') + agent_client.get_server_state(i['ip'], 60000) + else: + up = False + except agent_client.AgentException: pass + except: up = False + if up: + done.append(i) + nodes = [ i for i in nodes if i not in done] + if len(nodes): + logger.debug('wait_for_nodes: waiting for %d nodes' % len(nodes)) + time.sleep(poll_interval) + no_ip_nodes = [ i for i in nodes if i['ip'] == '' ] + if no_ip_nodes: + logger.debug('wait_for_nodes: refreshing %d nodes' % len(no_ip_nodes)) + refreshed_list = iaas.listVMs() + for i in no_ip_nodes: + i['ip'] = refreshed_list[i['id']]['ip'] + logger.debug('wait_for_nodes: All nodes are ready %s' % str(done)) + +''' + Waits for new VMs to awake. + @param function: None, agent or manager. + @param new_vm: new VM's details. +''' +def createServiceNodeThread (function, new_vm): + node_instances = [] + vm=iaas.listVMs()[new_vm['id']] + node_instances.append(vm) + wait_for_nodes(node_instances) + config.addMySQLServiceNode(new_vm) + +''' + For each of the node from the list of the manager check that it is alive (in the list + returned by the ONE). 
+''' +#=============================================================================== +# @expose('GET') +# def listServiceNodes(kwargs): +# logger.debug("Entering listServiceNode") +# if len(kwargs) != 0: +# return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message} +# #dstate = memcache.get(DEPLOYMENT_STATE) +# vms = iaas.listVMs() +# vms_mysql = config.getMySQLServiceNodes() +# for vm in vms_mysql: +# if not(vm.vmid in vms.keys()): +# logger.debug('Removing instance ' + str(vm.vmid) + ' since it is not in the list returned by the listVMs().') +# config.removeMySQLServiceNode(vm.vmid) +# #if dstate != S_RUNNING and dstate != S_ADAPTING: +# # return {'opState': 'ERROR', 'error': ManagerException(E_STATE_ERROR).message} +# #config = memcache.get(CONFIG) +# logger.debug("Exiting listServiceNode") +# return { +# 'opState': 'OK', +# #'sql': [ serviceNode.vmid for serviceNode in managerServer.config.getMySQLServiceNodes() ] +# #'sql': [ vms.keys() ] +# 'sql': [ [serviceNode.vmid, serviceNode.ip, serviceNode.port, serviceNode.state ] for serviceNode in config.getMySQLServiceNodes() ] +# } +#=============================================================================== + +@expose('GET') +def list_nodes(kwargs): + logger.debug("Entering list_nodes") + if len(kwargs) != 0: + return HttpErrorResponse(ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message) + vms = iaas.listVMs() + vms_mysql = config.getMySQLServiceNodes() + for vm in vms_mysql: + if not(vm.vmid in vms.keys()): + logger.debug('Removing instance ' + str(vm.vmid) + ' since it is not in the list returned by the listVMs().') + config.removeMySQLServiceNode(vm.vmid) + logger.debug("Exiting list_nodes") + _nodes = [ serviceNode.vmid for serviceNode in config.getMySQLServiceNodes() ] + return HttpJsonResponse({ + 'serviceNode': _nodes, + }) + +'''Gets info of a specific node. +@param param: serviceNodeId is a VMID of an existing service node. 
+''' +@expose('GET') +def get_node_info(kwargs): + if 'serviceNodeId' not in kwargs: return HttpErrorResponse(ManagerException(E_ARGS_MISSING, 'serviceNodeId').message) + serviceNodeId = kwargs.pop('serviceNodeId') + if len(kwargs) != 0: + return HttpErrorResponse(ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message) + #for keys in config.serviceNodes.keys(): + # if keys + if int(serviceNodeId) not in config.serviceNodes.keys(): return HttpErrorResponse(ManagerException(E_ARGS_INVALID , "serviceNodeId" , detail='Invalid "serviceNodeId"').message) + serviceNode = config.serviceNodes[int(serviceNodeId)] + return HttpJsonResponse({ + 'serviceNode': { + 'id': serviceNode.vmid, + 'ip': serviceNode.ip, + 'isRunningMySQL': serviceNode.isRunningMySQL + } + }) + +'''Creates a new service node. +@param function: None, "manager" or "agent". If None, empty image is provisioned. If "manager" +new manager is awaken and if the function equals "agent", new instance of the agent is +provisioned. +''' +#=============================================================================== +# @expose('POST') +# def createServiceNode(kwargs): +# if not(len(kwargs) in (0,1, 3)): +# return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message} +# if len(kwargs) == 0: +# new_vm=iaas.newInstance(None) +# Thread(target=createServiceNodeThread(None, new_vm)).start() +# elif len(kwargs) == 1: +# new_vm=iaas.newInstance(kwargs['function']) +# Thread(target=createServiceNodeThread(kwargs['function'], new_vm)).start() +# else: +# pass +# return { +# 'opState': 'OK', +# 'sql': [ new_vm['id'] ] +# } +#=============================================================================== + +@expose('POST') +def add_nodes(kwargs): + function = None + if 'function' in kwargs: + #if not isinstance(kwargs['function'], str): + # logger.error("Expected a string value for function") + # return HttpErrorResponse(ManagerException(E_ARGS_INVALID, detail='Expected a string value 
for "function"').message) + function = str(kwargs.pop('function')) + #if not(len(kwargs) in (0,1, 3)): + # return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message} + new_vm=iaas.newInstance(function) + Thread(target=createServiceNodeThread, args=(function, new_vm)).start() + return HttpJsonResponse({ + 'serviceNode': { + 'id': new_vm['id'], + 'ip': new_vm['ip'], + 'state': new_vm['state'], + 'name': new_vm['name'] + } + }) + +'''Creating a service replication. +''' +@expose('POST') +def create_replica(kwargs): + if len(kwargs) != 2: + return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message} + new_vm=iaas.newInstance('agent') + master_id=kwargs['master_id'] + createServiceNodeThread('agent', new_vm) + '''new_vm is a new replica instance + ''' + '''TODO: insert code for initializing a replica master''' + '''TODO: insert code for initializing a replica client''' + + return { + 'opState': 'OK', + 'sql': [ new_vm['id'] ] + } + +#=============================================================================== +# @expose('POST') +# def deleteServiceNode(kwargs): +# if len(kwargs) != 1: +# return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message} +# logger.debug('deleteServiceNode ' + str(kwargs['id'])) +# if iaas.killInstance(kwargs['id']): +# config.removeMySQLServiceNode(kwargs['id']) +# '''TODO: If false, return false response. 
+# ''' +# return { +# 'opState': 'OK' +# } +#=============================================================================== + +@expose('POST') +def remove_nodes(kwargs): + logger.debug("Entering delete_nodes") + if 'serviceNodeId' not in kwargs: return HttpErrorResponse(ManagerException(E_ARGS_MISSING, 'serviceNodeId').message) + serviceNodeId = int(kwargs.pop('serviceNodeId')) + if len(kwargs) != 0: + return HttpErrorResponse(ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message) + if serviceNodeId not in config.serviceNodes: return HttpErrorResponse(ManagerException(E_ARGS_INVALID, "serviceNodeId", detail='Invalid "serviceNodeId"').message) + serviceNode = config.serviceNodes[serviceNodeId] + logger.debug('deleteServiceNode ' + str(serviceNodeId)) + if iaas.killInstance(serviceNodeId): + config.removeMySQLServiceNode(serviceNodeId) + '''TODO: If false, return false response. + ''' + return HttpJsonResponse({'result': 'OK'}) + +@expose('GET') +def get_service_info(kwargs): + logger.debug("Entering get_service_info") + try: + logger.debug("Leaving get_service_info") + return HttpJsonResponse({ + 'service': { + 'state':managerServer.state + } + }) + except Exception as e: + ex = ManagerException(E_UNKNOWN, detail=e) + logger.exception(e) + logger.debug('Leaving get_service_info') + return HttpJsonResponse({'result': 'OK'}) + +#=============================================================================== +# @expose('GET') +# def get_node_info( kwargs): +# logger.debug("Entering get_node_info") +# if 'serviceNodeId' not in kwargs: return HttpErrorResponse(ManagerException(E_ARGS_MISSING, 'serviceNodeId').message) +# serviceNodeId = kwargs.pop('serviceNodeId') +# if len(kwargs) != 0: +# return HttpErrorResponse(ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message) +# +# config = self._configuration_get() +# if serviceNodeId not in config.serviceNodes: return HttpErrorResponse(ManagerException(E_ARGS_INVALID, detail='Invalid "serviceNodeId"').message) +# 
serviceNode = config.serviceNodes[serviceNodeId] +# return HttpJsonResponse({ +# 'serviceNode': { +# 'id': serviceNode.vmid, +# 'ip': serviceNode.ip, +# 'isRunningProxy': serviceNode.isRunningProxy, +# 'isRunningWeb': serviceNode.isRunningWeb, +# 'isRunningBackend': serviceNode.isRunningBackend, +# 'isRunningMySQL': serviceNode.isRunningBackend, +# } +# }) +#=============================================================================== + +''' + Sets up a replica master node + @param id: new replica master id. + +''' +@expose('POST') +def set_up_replica_master(params): + logger.debug("Entering set_up_replica_master") + if len(params) != 1: + return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, params.keys()).message} + new_master_id = params['id'] + new_master_ip = '' + new_master_port = '' + for node in config.getMySQLServiceNodes(): + if new_master_id == node.vmid: + new_master_ip=node.ip + new_master_port=node.port + agent_client.set_up_replica_master(new_master_ip, new_master_port) + logger.debug("Exiting set_up_replica_master") + pass + +@expose('POST') +def set_up_replica_slave(params): + logger.debug("Entering set_up_replica_slave") + if len(params) != 5: + return {'opState': 'ERROR', 'error': ManagerException(E_ARGS_UNEXPECTED, params.keys()).message} + _id = params['id'] + _host = '' + _port = '' + for node in config.getMySQLServiceNodes(): + if _id == node.vmid: + _host=node.ip + _port=node.port + master_host = params['master_host'] + master_log_file = params['master_log_file'] + master_log_pos = params['master_log_pos'] + slave_server_id = params['slave_server_id'] + agent_client.set_up_replica_slave(_host, _port, master_host, master_log_file, master_log_pos, slave_server_id) + logger.debug("Exiting set_up_replica_slave") + pass + +@expose('POST') +def shutdown(self, kwargs): + if len(kwargs) != 0: + return HttpErrorResponse(ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message) + + dstate = self._state_get() + if dstate != 
self.S_RUNNING: + return HttpErrorResponse(ManagerException(E_STATE_ERROR).message) + + config = self._configuration_get() + self._state_set(self.S_EPILOGUE, msg='Shutting down') + Thread(target=self.do_shutdown, args=[config]).start() + return HttpJsonResponse({'state': self.S_EPILOGUE}) + +@expose('GET') +def get_service_performance(self, kwargs): + if len(kwargs) != 0: + return HttpErrorResponse(ManagerException(E_ARGS_UNEXPECTED, kwargs.keys()).message) + return HttpJsonResponse({ + 'request_rate': 0, + 'error_rate': 0, + 'throughput': 0, + 'response_time': 0, + }) \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/server.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/server.py new file mode 100644 index 0000000000000000000000000000000000000000..19493b6cf5380c4e22a785289bb33532db1c53ef --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/server.py @@ -0,0 +1,101 @@ +''' +Created on Jun 7, 2011 + +@author: ales +''' +from BaseHTTPServer import HTTPServer +from conpaas.web.http import AbstractRequestHandler +import httplib +import json +from conpaas.mysql.server.manager import internals +from conpaas.iaas import IaaSClient +from conpaas.mysql.server.manager.internals import MySQLServerManager +from SocketServer import ThreadingMixIn +from conpaas.log import log_dir_path + +class SQLServerRequestHandler(AbstractRequestHandler): + + + #=========================================================================== + # def _dispatch(self, method, params): + # if 'action' not in params: + # self.send_custom_response(httplib.BAD_REQUEST, 'Did not specify "action"') + # elif params['action'] not in self.server.callback_dict[method]: + # self.send_custom_response(httplib.NOT_FOUND, 'action not found') + # else: + # callback_name = params['action'] + # del params['action'] + # self.send_custom_response(httplib.OK, 
json.dumps(self.server.callback_dict[method][callback_name](params))) + #=========================================================================== + + + def _render_arguments(self, method, params): + ret = '

Arguments:' + ret += '' + for param in params: + if isinstance(params[param], dict): + ret += '' + else: + ret += '' + ret += '
Method' + method + '
' + param + 'Contents of: ' + params[param].filename + '
' + param + '' + params[param] + '

' + return ret + + def send_action_missing(self, method, params): + self.send_custom_response(httplib.BAD_REQUEST, ''' + + BAD REQUEST + + +

ConPaaS MySQL

+

No "action" specified.

+

This URL is used to access the service manager directly. + You may want to copy-paste the URL as a parameter to the 'managerc.py' command-line utility.

+ ''' + self._render_arguments(method, params) + '') + + def send_action_not_found(self, method, params): + self.send_custom_response(httplib.NOT_FOUND, ''' + + ACTION NOT FOUND + + +

ConPaaS MySQL

+

The specified "action" was not found.

+

You may want to review the list of supported actions provided by the 'managerc.py' command-line utility.

+ ''' + self._render_arguments(method, params) + '') + +#class MultithreadedHTTPServer(ThreadingMixIn, HTTPServer): +# pass + +class ManagerServer(ThreadingMixIn, HTTPServer): + + def register_method(self, http_method, func_name, callback): + self.callback_dict[http_method][func_name] = callback + + def __init__(self, server_address, iaas_config, RequestHandlerClass=SQLServerRequestHandler): + HTTPServer.__init__(self, server_address, RequestHandlerClass) + self.callback_dict = {'GET': {}, 'POST': {}} + from conpaas.mysql.server.manager import internals + internals.iaas = IaaSClient(iaas_config) + #internals.config = iaas_config + self.whitelist_addresses = [] + #self.callback_dict = {'GET': {}, 'POST': {}, 'UPLOAD': {}} + + internals.managerServer=MySQLServerManager(iaas_config) + for http_method in internals.exposed_functions: + for func_name in internals.exposed_functions[http_method]: + print 'Going to register ', http_method, func_name + self.register_method(http_method, func_name, getattr(internals, func_name)) + +if __name__ == '__main__': + from optparse import OptionParser + from ConfigParser import ConfigParser + parser = OptionParser() + parser.add_option('-p', '--port', type='int', default=50000, dest='port') + parser.add_option('-b', '--bind', type='string', default='0.0.0.0', dest='address') + parser.add_option('-c', '--config', type='string', default='./config.cfg', dest='config') + options, args = parser.parse_args() + config_parser = ConfigParser() + config_parser.read(options.config) + print options.address, options.port + d = ManagerServer((options.address, options.port), config_parser) + d.serve_forever() \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/sql_manager_configuration.cnf b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/sql_manager_configuration.cnf new file mode 100644 index 
0000000000000000000000000000000000000000..70315e08132617a883fb6075d0aae6aea49560ce --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/server/manager/sql_manager_configuration.cnf @@ -0,0 +1,10 @@ +[iaas] +DRIVER=OPENNEBULA_XMLRPC +OPENNEBULA_URL=http://172.16.120.228:2633/RPC2 +#OPENNEBULA_URL=http://10.30.1.9:2633/RPC2 +OPENNEBULA_USER=oneadmin +OPENNEBULA_PASSWORD=oneadmin +OPENNEBULA_IMAGE_ID=76 +OPENNEBULA_NETWORK_ID=24 +OPENNEBULA_SIZE_ID=1 +OPENNEBULA_CONTEXT_SCRIPT=/home/leo/manager/ts.sh \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/opennebula_test.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/opennebula_test.py new file mode 100644 index 0000000000000000000000000000000000000000..865b73eae8c2025c2fe61f3bd1036e3e82f6f80c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/opennebula_test.py @@ -0,0 +1,62 @@ +''' +Created on Jul 28, 2011 + +@author: ales +''' +#from conpaas.iaas import IaaSClient +import oca + +def xmlRPCtest(): + client = oca.Client('oneadmin:oneadmin', 'http://172.16.120.228:2633/RPC2') + #new_host_id = oca.Host.allocate(client, 'host_name', 'im_xen', 'vmm_xen', 'tm_nfs') + hostpool = oca.HostPool(client) + hostpool.info() + for i in hostpool: + print "Active host:" + print i.name, i.str_state + vm_pool=oca.VirtualMachinePool(client) + vm_pool.info(-2) + print "All VMs:" + for i in vm_pool: + print i.name, i.str_state + print "Allocating new VM..." + #oca.VirtualMachine. 
+ rez=oca.VirtualMachine.allocate(client, '''NAME = conpaassql01 +CPU = 0.2 +MEMORY = 512 + OS = [ + arch = "i686", + boot = "hd", + root = "hda" ] +DISK = [ + image = "Ubu10-10-rmq-3", + bus = "scsi", + readonly = "no" ] +NIC = [ NETWORK = "Private LAN" ] +GRAPHICS = [ + type="vnc" + ] +''') + print rez + +def xmlRPCtest2(): + client = oca.Client('oneadmin:oneadmin', 'http://172.16.120.228:2633/RPC2') + rez=oca.VirtualMachinePool(client) + rez.info(-2) + vm = rez.get_by_id(424) + + #rez=oca.VirtualMachine.info(client, 424) + print rez + +if __name__ == '__main__': + #xmlRPCtest() + xmlRPCtest2() + #CP = ConfigParser() + #ONDriver = get_driver(Provider.OPENNEBULA) + #driver = OpenNebulaNodeDriver("oneadmin", "oneadmin", False , "172.16.120.228", 4566) + #driver.list_images() + #driver.list_nodes() + #driver.list_locations() + #CP.readfp(open("scripts/opennebula.conf")) + #client = IaaSClient(CP) + #print client.listVMs() diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/agent.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..890ee9e46fd6e5bd9e6271150758ea81d74767aa --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/agent.py @@ -0,0 +1,90 @@ +''' +Created on Aug 26, 2011 + +@author: ales +''' +import unittest +from conpaas.mysql.server.agent.server import AgentServer +from conpaas.mysql.client.agent_client import get_server_state +from conpaas.mysql.client.agent_client import create_server +import threading +from ConfigParser import ConfigParser +from conpaas.mysql.server.agent.internals import MySQLServer +from conpaas.mysql.server.agent import 
internals +import os + +config_file=None + +class TestServerAgent(unittest.TestCase): + + #host = '0.0.0.0' + #port = 60000 + a = None + + def setUp(self): + from optparse import OptionParser + #parser = OptionParser() + #parser.add_option('-c', '--config', type='string', default='./configuration.cnf', dest='config') + #options, args = parser.parse_args() + + config_file=os.curdir+"/src/conpaas/mysql/test/unit/configuration.cnf" + # This is for testing locally. + #config_file="configuration.cnf" + + config_parser = ConfigParser() + config_parser.read(config_file) + '''Set up configuration for the parser. + ''' + config_parser.set('MySQL_configuration', 'my_cnf_file', '/etc/mysql/my.cnf') + config_parser.set('MySQL_configuration', 'path_mysql_ssr', '/etc/init.d/mysql') + config_parser.set('MySQL_root_connection', 'location', '') + config_parser.set('MySQL_root_connection', 'username', '') + config_parser.set('MySQL_root_connection', 'password', '') + + # This is for integration testing + #self.a = AgentServer((self.host, self.port), config_parser) + #self.t = threading.Thread(target=self.a.serve_forever) + #self.t.start() + self.a=MySQLServer(config_parser, True) + internals.agent = self.a + self.a.start() + + def tearDown(self): + self.a.stop() + self.a = None + #self.t.join() + #self.t = None + + def testSQLServerState(self): + #self.assertTrue(getMySQLServerState(self.host, self.port)) + ret=internals.get_server_state(False) + self.assertTrue(ret.obj != None) + #self.__check_reply(ret) + + def testStartStopSQLServer(self): + #ret = createMySQLServer(self.host,self.port) + #self.assertTrue(ret) + #self.__check_reply(ret) + #ret = stopMySQLServer(self.host,self.port) + #self.assertTrue(ret) + #self.__check_reply(ret) + ret = internals.create_server(False) + self.assertTrue(ret) + self.__check_reply(ret) + ret = internals.stop_server(False) + self.assertTrue(ret) + self.__check_reply(ret) + + def testRestartSQLServer(self): + ret = internals.restart_server(False) + 
self.__check_reply(ret) + + def __check_reply(self, ret): + self.assertTrue( ret.obj['return'] == 'OK') + +if __name__ == "__main__": + #import sys;sys.argv = ['', 'Test.testName'] + + unittest.main() + #suite = unittest.TestLoader().loadTestsFromTestCase(TestServerAgent) + #unittest.TextTestRunner(verbosity=2).run(suite) \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/configuration.cnf b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/configuration.cnf new file mode 100644 index 0000000000000000000000000000000000000000..0b1c5d28d22d64e012590f5bda1ad4a2f64dd1bd --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/configuration.cnf @@ -0,0 +1,8 @@ +[MySQL_root_connection] +location= +password= +username= + +[MySQL_configuration] +my_cnf_file=/etc/mysql/my.cnf +path_mysql_ssr=/etc/init.d/mysql \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/manager.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/manager.py new file mode 100644 index 0000000000000000000000000000000000000000..d173a1430970c78c9b6eed48a1fc7cdb5bde6107 --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/manager.py @@ -0,0 +1,83 @@ +''' +Created on Aug 31, 2011 + +@author: ales +''' +import unittest +#from conpaas.mysql.server.manager.server import ManagerServer +import threading +from ConfigParser import ConfigParser +from conpaas.iaas import IaaSClient +from conpaas.mysql.server.manager import internals +from conpaas.mysql.server.manager.internals import MySQLServerManager +import os + + +class TestServerManager(unittest.TestCase): + + def setUp(self): + # This is prepared for integration unit testing. 
+ #======================================================================= + # from optparse import OptionParser + # from ConfigParser import ConfigParser + # parser = OptionParser() + # parser.add_option('-p', '--port', type='int', default=50000, dest='port') + # parser.add_option('-b', '--bind', type='string', default='0.0.0.0', dest='address') + # parser.add_option('-c', '--config', type='string', default='./sql_manager_configuration.cnf', dest='config') + # options, args = parser.parse_args() + # config_parser = ConfigParser() + # config_parser.read(options.config) + # print options.address, options.port + # self.managerServer = ManagerServer((options.address, options.port), config_parser) + # self.t = threading.Thread(target=self.managerServer.serve_forever) + # self.managerServer.serve_forever() + # self.t.start() + #======================================================================= + config = os.curdir+'/src/conpaas/mysql/test/unit/sql_manager_configuration.cnf' + #config = 'sql_manager_configuration.cnf' + config_parser = ConfigParser() + config_parser.read(config) + '''Set up configuration for the parser. 
+ ''' + + # This is for integration testing + #self.a = AgentServer((self.host, self.port), config_parser) + #self.t = threading.Thread(target=self.a.serve_forever) + #self.t.start() + self.a=IaaSClient(config_parser) + internals.iaas = self.a + internals.managerServer=MySQLServerManager(config_parser, True) + self.managerServer = internals.managerServer + + def tearDown(self): + #self.managerServer.shutdown() + #self.managerServer = None + #self.t.join() + #self.t = None + self.managerServer = None + self.a = None + + def testAddNodes(self): + nodes = internals.list_nodes({}) + len1 = len(nodes.obj['serviceNode']) + internals.add_nodes({}) + nodes = internals.list_nodes({}) + self.assertTrue(len(nodes.obj['serviceNode'])== len1+1) + + def testRemoveNodes(self): + newnode = internals.add_nodes({}) + nodes = internals.list_nodes({}) + len1 = len(nodes.obj['serviceNode']) + internals.remove_nodes({'serviceNodeId':nodes.obj['serviceNode'][0]}) + nodes = internals.list_nodes({}) + self.assertTrue(len(nodes.obj['serviceNode'])== len1-1) + + def testGetNodes(self): + nodes = internals.list_nodes({}) + self.assertTrue(nodes.obj) + #self.assertTrue(nodes.obj['serviceNode']==[]) + + +if __name__ == "__main__": + #import sys;sys.argv = ['', 'Test.testName'] + unittest.main() \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/sql_manager_configuration.cnf b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/sql_manager_configuration.cnf new file mode 100644 index 0000000000000000000000000000000000000000..b50bf4d6b532cc9e551a264a60a19d52d8085a5c --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/mysql/test/unit/sql_manager_configuration.cnf @@ -0,0 +1,10 @@ +[iaas] +DRIVER=OPENNEBULA_DUMMY +OPENNEBULA_URL=http://localhost:2633/RPC2 +OPENNEBULA_USER=oneadmin +OPENNEBULA_PASSWORD=oneadmin +OPENNEBULA_IMAGE_ID=21 +OPENNEBULA_NETWORK_ID=24 +OPENNEBULA_SIZE_ID=1 
+OPENNEBULA_CONTEXT_SCRIPT_MANAGER=/home/contrail/manager/conpaassql-install.sh +OPENNEBULA_CONTEXT_SCRIPT_AGENT=/home/contrail/agent/conpaassql-install.sh \ No newline at end of file diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/web/__init__.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/web/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/web/http.py b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/web/http.py new file mode 100644 index 0000000000000000000000000000000000000000..8de00b450eb4e1fdb5d110d2158f94b03f98cf5e --- /dev/null +++ b/conpaas/branches/Y1DEMO-conpaassql/src/conpaas/web/http.py @@ -0,0 +1,308 @@ +''' +Copyright (C) 2010-2011 Contrail consortium. + +This file is part of ConPaaS, an integrated runtime environment +for elastic cloud applications. + +ConPaaS is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +ConPaaS is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with ConPaaS. If not, see . 
+ + +Created on Feb 8, 2011 + +@author: ielhelw +''' + +from BaseHTTPServer import BaseHTTPRequestHandler +from StringIO import StringIO +import urlparse, urllib, cgi, httplib, json, pycurl, os +from conpaas.log import create_logger + +logger = create_logger(__name__) + +class HttpError(Exception): pass + + +class FileUploadField(object): + def __init__(self, filename, file): + self.filename = filename + self.file = file + + +class HttpRequest(object): pass +class HttpResponse(object): pass + + +class HttpErrorResponse(HttpResponse): + def __init__(self, error_message): + self.message = error_message + + +class HttpJsonResponse(HttpResponse): + def __init__(self, obj={}): + self.obj = obj + + +class HttpFileDownloadResponse(HttpResponse): + def __init__(self, filename, file): + self.filename = filename + self.file = file + + +class AbstractRequestHandler(BaseHTTPRequestHandler): + '''Minimal HTTP request handler that uses reflection to map requested URIs + to URI handler methods. + + Mapping is as follows: + GET /foo -> self.foo_GET + POST /foo -> self.foo_POST + etc. + + URI handler methods should accept 1 parameter; a dict containing key/value + pairs of GET parameters. + + ''' + + JSON_CONTENT_TYPES = ['application/json-rpc', + 'application/json', + 'application/jsonrequest'] + MULTIPART_CONTENT_TYPE = 'multipart/form-data' + + def handle_one_request(self): + '''Handle a single HTTP request. + + You normally don't need to override this method; see the class + __doc__ string for information on how to handle specific HTTP + commands such as GET and POST. + + ''' + self.raw_requestline = self.rfile.readline() + if not self.raw_requestline: + self.close_connection = 1 + return + if not self.parse_request(): # An error code has been sent, just exit + return + logger.debug("Obtained: " + self.path) + parsed_url = urlparse.urlparse(self.path) + # we allow calls to / only + if parsed_url.path != '/': + logger.debug("we allow calls to / only. 
I got: " + parsed_url.path) + self.send_error(httplib.NOT_FOUND) + return + + ## if whitelist specified + if self.server.whitelist_addresses \ + and self.client_address[0] not in self.server.whitelist_addresses: + self.send_custom_response(httplib.FORBIDDEN) + return + + # require content-type header + if 'content-type' not in self.headers: + self.send_error(httplib.UNSUPPORTED_MEDIA_TYPE) + return + + if self.command == 'GET': + self._handle_get(parsed_url) + elif self.command == 'POST': + self._handle_post() + else: + self.send_error(httplib.METHOD_NOT_ALLOWED) + + def _handle_get(self, parsed_url): + if self.headers['content-type'] in self.JSON_CONTENT_TYPES: + self._dispatch('GET', self._parse_jsonrpc_get_params(parsed_url)) + else: + self.send_error(httplib.UNSUPPORTED_MEDIA_TYPE) + + def _handle_post(self): + if self.headers['content-type'] in self.JSON_CONTENT_TYPES: + self._dispatch('POST', self._parse_jsonrpc_post_params()) + elif self.headers['content-type'].startswith(self.MULTIPART_CONTENT_TYPE): + self._dispatch('UPLOAD', self._parse_upload_params()) + else: + self.send_error(httplib.UNSUPPORTED_MEDIA_TYPE) + + def _parse_jsonrpc_get_params(self, parsed_url): + params = urlparse.parse_qs(parsed_url.query) + # get rid of repeated params, pick the last one + for k in params: + if isinstance(params[k], list): + params[k] = params[k][-1] + if 'params' in params: + params['params'] = json.loads(params['params']) + return params + + def _parse_jsonrpc_post_params(self): + if 'content-length' not in self.headers: + self.send_error(httplib.LENGTH_REQUIRED) + if not self.headers['content-length'].isdigit(): + self.send_error(httplib.BAD_REQUEST) + tp = self.rfile.read(int(self.headers['content-length'])) + params = json.loads(tp) + return params + + def _parse_upload_params(self): + post_data = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST'}) + params = {} + # get rid of repeated params, pick the last one + if 
post_data.list: + for k in post_data.keys(): + if isinstance(post_data[k], list): record = post_data[k][-1] + else: record = post_data[k] + if record.filename == None: + params[record.name] = record.value + else: + params[record.name] = FileUploadField(record.filename, record.file) + return params + + def _dispatch(self, callback_type, params): + if 'method' not in params: + self.send_method_missing(callback_type, params) + elif params['method'] not in self.server.callback_dict[callback_type]: + self.send_method_not_found(callback_type, params) + else: + callback_name = params.pop('method') + callback_params = {} + if callback_type != 'UPLOAD': + if 'params' in params: + callback_params = params['params'] + request_id = params['id'] + else: + callback_params = params + request_id = 1 + try: + response = self._do_dispatch(callback_type, callback_name, callback_params) + if isinstance(response, HttpFileDownloadResponse): + self.send_file_response(httplib.OK, response.file, {'Content-disposition': 'attachement; filename="%s"' % (response.filename)}) + elif isinstance(response, HttpErrorResponse): + self.send_custom_response(httplib.OK, json.dumps({'error': response.message, 'id': request_id})) + elif isinstance(response, HttpJsonResponse): + self.send_custom_response(httplib.OK, json.dumps({'result': response.obj, 'error': None, 'id': request_id})) + except Exception as e: + print "Problem in method call " + callback_name + print e + + def _do_dispatch(self, callback_type, callback_name, params): + return self.server.callback_dict[callback_type][callback_name](params) + + def send_custom_response(self, code, body=None): + '''Convenience method to send a custom HTTP response. + code: HTTP Response code. + body: Optional HTTP response content. 
+ ''' + self.send_response(code) + self.end_headers() + if body != None: + print >>self.wfile, body, + + def send_file_response(self, code, filename, headers=None): + fd = open(filename) + stat = os.fstat(fd.fileno()) + self.send_response(code) + for h in headers: + self.send_header(h, headers[h]) + self.send_header('Content-length', stat.st_size) + self.end_headers() + while fd.tell() != stat.st_size: + print >>self.wfile, fd.read(), + fd.close() + + def send_method_missing(self, method, params): + self.send_custom_response(httplib.BAD_REQUEST, 'Did not specify method') + + def send_method_not_found(self, method, params): + self.send_custom_response(httplib.NOT_FOUND, 'method not found') + + def log_message(self, format, *args): + '''Override logging to disable it.''' + pass + + +def _http_get(host, port, uri, params=None): + try: + buffer = StringIO() + c = pycurl.Curl() + if params != None: + c.setopt(c.URL, 'http://%s:%s%s?%s' % (host, str(port), uri, urllib.urlencode(params))) + else: + c.setopt(c.URL, 'http://%s:%s%s' % (host, str(port), uri)) + c.setopt(c.WRITEFUNCTION, buffer.write) + c.perform() + ret = c.getinfo(c.RESPONSE_CODE), buffer.getvalue() + c.close() + return ret + except pycurl.error as e: + raise HttpError(*e.args) + +def _http_post(host, port, uri, params, files=[]): + try: + values = [] + for key in params: + values.append((key, str(params[key]))) + for key in files: + values.append((key, (pycurl.FORM_FILE, str(files[key])))) + buffer = StringIO() + c = pycurl.Curl() + c.setopt(c.URL, 'http://%s:%s%s' % (host, str(port), uri)) + c.setopt(c.HTTPHEADER, ['Expect: ']) + c.setopt(c.HTTPPOST, values) + c.setopt(c.WRITEFUNCTION, buffer.write) + c.perform() + ret = c.getinfo(c.RESPONSE_CODE), buffer.getvalue() + c.close() + return ret + except pycurl.error as e: + raise HttpError(*e.args) + +def _jsonrpc_get(host, port, uri, method, params=None): + try: + buffer = StringIO() + c = pycurl.Curl() + curl_params = {'method': method, 'id': '1'} + if 
'''
Created on Feb 8, 2011

@author: ielhelw

Validation helpers (ports, IP addresses/hostnames) plus thin wrappers
around the stdlib zipfile/tarfile modules used by the web service code.
'''

import re, zipfile, tarfile, socket

def verify_port(port):
    '''Raise TypeError if port is not an integer.
    Raise ValueError if port is an invalid integer value.
    '''
    if type(port) != int: raise TypeError('port should be an integer')
    if port < 1 or port > 65535: raise ValueError('port should be a valid port number')

def verify_ip_or_domain(ip):
    '''Raise TypeError if ip is not a string.
    Raise ValueError if ip is not an IP address or resolvable hostname.
    '''
    # NOTE(review): 'unicode' exists only on Python 2; on Python 3 the
    # short-circuit keeps str inputs working, but non-str input would
    # raise NameError here rather than TypeError.
    if (type(ip) != str and type(ip) != unicode):
        raise TypeError('IP should be a string')
    try:
        # Resolution doubles as validation; plain dotted IPs resolve locally.
        socket.gethostbyname(ip)
    except:
        # Deliberate best-effort catch-all: any resolution failure is a bad value.
        raise ValueError('Invalid IP string')

def verify_ip_port_list(l):
    '''Check l is a list of [IP, PORT]. Raise the appropriate TypeError or
    ValueError if invalid types or values are found.
    '''
    if type(l) != list:
        raise TypeError('Expected a list of [IP, PORT]')
    for pair in l:
        if len(pair) != 2:
            raise TypeError('List should contain IP,PORT values')
        verify_ip_or_domain(pair[0])
        verify_port(pair[1])

# Supported archive filename extensions, grouped by archive type.
# NOTE(review): 'zip' shadows the builtin at module level; kept for
# backward compatibility since other modules may import misc.zip/misc.tar.
zip = ['.zip']
tar = ['.tar', '.tar.gz', '.tar.bz2']

def archive_supported_extensions():
    '''Return the list of supported archive extensions (tar ones first).'''
    return tar + zip

def archive_supported_name(name):
    '''Return True if name ends with a supported archive extension.'''
    for ext in tar + zip:
        if name.endswith(ext):
            return True
    return False

def archive_get_type(name):
    '''Return 'tar' or 'zip' based on the file's content, or None if neither.'''
    if tarfile.is_tarfile(name):
        return 'tar'
    elif zipfile.is_zipfile(name):
        return 'zip'
    else: return None

def archive_open(name):
    '''Open the file with the matching stdlib module, or None if unsupported.'''
    if tarfile.is_tarfile(name):
        return tarfile.open(name)
    elif zipfile.is_zipfile(name):
        return zipfile.ZipFile(name)
    else: return None

def archive_get_members(arch):
    '''Return the list of member names of an open archive, or None for an
    unrecognized object.
    '''
    if isinstance(arch, zipfile.ZipFile):
        members = arch.namelist()
    elif isinstance(arch, tarfile.TarFile):
        members = [ i.name for i in arch.getmembers() ]
    else:
        # BUGFIX: previously fell through with 'members' unbound and raised
        # UnboundLocalError; None is consistent with archive_open/archive_get_type.
        members = None
    return members

def archive_extract(arch, path):
    '''Extract all members of an open archive into the directory path.'''
    if isinstance(arch, zipfile.ZipFile):
        arch.extractall(path)
    elif isinstance(arch, tarfile.TarFile):
        arch.extractall(path=path)

def archive_close(arch):
    '''Close an open archive of either supported type; no-op otherwise.'''
    if isinstance(arch, zipfile.ZipFile)\
        or isinstance(arch, tarfile.TarFile):
        arch.close()
is developed by Vrije Universiteit +Amsterdam, Zuse Institut Berlin, and XLAB in the Contrail European +research project (http://contrail-project.eu/). Contrail is partially +funded by the FP7 Programme of the European Commission under Grant +Agreement FP7-ICT-257438. + +The main ConPaaS developers are: + +- Frontend: Claudiu-Dan Gheorghe, Ismail El Helw, Guillaume Pierre. + +- Web hosting service: Ismail El Helw. + diff --git a/conpaas/branches/conpaas-dailybuild/BRANCH_README b/conpaas/branches/conpaas-dailybuild/BRANCH_README new file mode 100644 index 0000000000000000000000000000000000000000..ebb60f96e6c7e1c33bb94d5333700a71aed68155 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/BRANCH_README @@ -0,0 +1,2 @@ + +This branch aims to automate the build of conpaas package for the bamboo continuous integration system diff --git a/conpaas/branches/conpaas-dailybuild/LICENSE.txt b/conpaas/branches/conpaas-dailybuild/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/LICENSE.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. 
You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/conpaas/branches/conpaas-dailybuild/Makefile b/conpaas/branches/conpaas-dailybuild/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4d28bbdd684b2286e45e6c07aab9807308b32343 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/Makefile @@ -0,0 +1,51 @@ +# +# Makefile: automate the creation of the conpaas tarball +# + +NAME=conpaas +VERSION:=0.1.0-$(shell date '+%s') + + +WDIR=work + +INCLUDES=$(filter-out $(WDIR) map-reduce scalaris bag-of-tasks sql, $(wildcard *)) + +BIN_DIR=web-servers/scripts +BIN_CONF=$(addprefix $(BIN_DIR)/, ec2-manager-user-data opennebula-manager-user-data) +BIN_CODE=$(filter-out $(BIN_CONF), $(wildcard $(BIN_DIR)/*)) +SDK_URL=http://pear.amazonwebservices.com/get/sdk-latest.zip + +CODE_DIR=frontend/www/code +CONF_DIR=frontend/www/conf +SDK_DIR=frontend/www/lib/aws-sdk + + + +help: + @echo "Usage: make package [VERSION=]" + + +$(WDIR): + mkdir -p $(WDIR) + +$(WDIR)/sdk-latest.zip: + wget -P $(WDIR) $(SDK_URL) + +package: $(WDIR) $(WDIR)/sdk-latest.zip + mkdir -p $(WDIR)/$(CONF_DIR) + install $(BIN_CONF) $(WDIR)/$(CONF_DIR) + mkdir -p $(WDIR)/$(CODE_DIR) + install -m 755 $(BIN_CODE) $(WDIR)/$(CODE_DIR) + tar czf $(WDIR)/$(CODE_DIR)/ConPaaSWeb.tar.gz --exclude-vcs \ + --transform 's/^web-servers/ConPaaSWeb/' web-servers + unzip -u -d $(WDIR) $(WDIR)/sdk-latest.zip + mkdir -p $(WDIR)/$(SDK_DIR) + cp -r $(WDIR)/sdk-*/sdk-*/* $(WDIR)/$(SDK_DIR) + tar czf $(WDIR)/$(NAME)-$(VERSION).tar.gz --exclude-vcs \ + --transform 's|^$(WDIR)/||' \ + $(INCLUDES) $(WDIR)/frontend/ + + +clean: + rm -fr $(WDIR) + diff --git a/conpaas/branches/conpaas-dailybuild/README.txt b/conpaas/branches/conpaas-dailybuild/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb1555e78f4ca3bc3d2be7e1f77b9647390d5f20 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/README.txt @@ -0,0 +1,53 @@ + ConPaaS: an integrated runtime environment + for elastic Cloud applications + http://www.conpaas.eu + + 
+Introduction +============ + +ConPaaS aims at simplifying the deployment and management of +applications in the Cloud. In ConPaaS, an application is defined as a +composition of one or more services. Each service is an elastic +component dedicated to the hosting of a particular type of +functionality. A service can be seen as a standalone component of a +distributed application. + +Each ConPaaS service is self-managed and elastic: it can deploy itself +on the Cloud, monitor its own performance, and increase or decrease +its processing capacity by dynamically (de-)provisioning instances of +itself in the Cloud. Services are designed to be composable: an +application can for example use a Web hosting service, a database +service to store the internal application state, a file storage +service to store access logs, and a MapReduce service to periodically +compute statistics from these logs. Application providers simply need +to submit a manifest file describing the structure of their +application and its performance requirements. + +ConPaaS currently contains: + +- A Web frontend that can be installed within or outside the + Cloud. This is the website that developers can use to create, delete + and manage applications in ConPaaS. + +- One service dedicated to hosting static Web content as well as Web + applications written in PHP. + +- More services are currently being developed. They will be released + as soon as they are reasonably well tested and integrated with the + frontend. + + +Installation +============ + +You will find a small documentation in the "doc" directory. + + +Bugs +==== + +No matter how carefully we have tested this system, it most certainly +still contains a number of remaining bugs. If you encounter any +abnormal behavior, please let us know at info@conpaas.eu. 
+ diff --git a/conpaas/branches/conpaas-dailybuild/doc/Makefile b/conpaas/branches/conpaas-dailybuild/doc/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..bad1538a49f113d17b4b511dfa31a6162f527da9 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/doc/Makefile @@ -0,0 +1,9 @@ + +all: userguide.pdf adminguide.pdf + +%.pdf : %.tex + pdflatex $< + pdflatex $< + +clean: + rm -f *.pdf *.out *.aux *.log *~ diff --git a/conpaas/branches/conpaas-dailybuild/doc/adminguide.pdf b/conpaas/branches/conpaas-dailybuild/doc/adminguide.pdf new file mode 100644 index 0000000000000000000000000000000000000000..5e4db20097c09d720305d8991f1c68002b3b22e4 Binary files /dev/null and b/conpaas/branches/conpaas-dailybuild/doc/adminguide.pdf differ diff --git a/conpaas/branches/conpaas-dailybuild/doc/adminguide.tex b/conpaas/branches/conpaas-dailybuild/doc/adminguide.tex new file mode 100644 index 0000000000000000000000000000000000000000..7777ac24a8eaf406424e44b0614253f9549583eb --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/doc/adminguide.tex @@ -0,0 +1,333 @@ +\documentclass[10pt]{article} + +%\usepackage{listings} +%\usepackage{framed} +\usepackage{hyperref} +\usepackage{url} +\usepackage{ulem} \normalem + +\usepackage{fancyvrb} +\DefineVerbatimEnvironment% + {code}% + {Verbatim}% + {frame=single,framesep=1mm,fontsize=\rm,resetmargins=true} + +% \newenvironment{what} +% {\begin{description} \item [What is happening now?] \hfill \\} +% {\end{description}} + +% \newenvironment{framedbox}[1]% +% {\begin{framed} +% \begingroup +% \fontsize{#1}{#1}\selectfont +% } +% { +% \endgroup +% \end{framed} +% } + + +\begin{document} +\title{ConPaaS -- Administrator guide} +\author{Ismail El Helw \and Guillaume Pierre} +\maketitle + +\vfil +\tableofcontents +\vfil +\newpage + +\section{Creating a ConPaaS image for Amazon EC2} + +The Web Hosting Service is capable of running over the Elastic Compute +Cloud (EC2) of Amazon Web Services (AWS). 
This section describes the +process of configuring an AWS account to run the Web Hosting Service. +You can skip this section if you plan to install ConPaaS over +OpenNebula. + +If you are new to EC2, you will need to create an account at +\url{http://aws.amazon.com/ec2/}. A very good EC2 documentation can be +found at +\url{http://docs.amazonwebservices.com/AWSEC2/latest/GettingStartedGuide/}. + +\subsection{Create an EBS backed AMI on Amazon EC2} + +The Web Hosting Service requires the creation of an Amazon Machine +Image (AMI) to contain the dependencies of its processes. The +easiest method of creating a new Elastic Block Store backed Amazon +Machine Image is to start from an already existing one, customize it +and save the resulting filesystem as a new AMI. The following steps +explain how to set up an AMI using this methodology. + +\begin{enumerate} +\item Search the public AMIs for a Debian squeeze EBS AMI and run an + instance of it. If you are going to use micro-instances then the AMI + with ID \verb+ami-e0e11289+ could be a good choice. + +\item Upload the \verb+web-servers/conpaas_web_deps+ script to the instance: + \begin{code} + chmod 0400 yourpublickey.pem + scp -i yourpublickey.pem web-servers/conpaas_web_deps \ + root@instancename.com: + \end{code} + +\item Now, ssh to your instance: + \begin{code} + ssh -i yourpublickey.pem root@your.instancename.com + \end{code} + Run the \verb+conpaas_web_deps+ script inside the instance. This + script will install all of the dependencies of the manager and agent + processes as well as create the necessary directory structure. At + some point the script requests to accept licenses, accept them. + +\item Clean the filesystem by removing the + \verb+conpaas_web_deps+ file and any other temporary files you might + have created. + +\item Go to the EC2 administration page at the AWS website, right + click on the running instance and select ``\emph{Create Image (EBS + AMI)}''. 
AWS + documentation is available at + \url{http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?Tutorial_CreateImage.html}. + +\item After the image has been fully created, you can return to the + EC2 dashboard, right-click on your instance, and terminate it. +\end{enumerate} + +\subsection{Create a Security Group} + +An AWS security group is an abstraction of a set of firewall rules to +limit inbound traffic. The default policy of a new group is to deny +all inbound traffic. Therefore, one needs to specify a whitelist of +protocols and destination ports that are accessible from the outside. +The Web Hosting Service uses TCP ports 80, 5555, 8080 and 9000. All +four ports should be open for all running instances. AWS +documentation is available at +\url{http://docs.amazonwebservices.com/AWSEC2/latest/UserGuide/index.html?using-network-security.html}. + +\section{Creating a ConPaaS image for OpenNebula} + +The Web Hosting Service is capable of running over an OpenNebula +installation. This section describes the process of configuring +OpenNebula to run ConPaaS. You can skip this section if you plan to +deploy ConPaaS over Amazon Web Services. + +To create an image for OpenNebula you can execute the script\\ +\verb+web-servers/scripts/opennebula-create-new-vm-image+ in any +64-bit Debian or Ubuntu machine. + +\begin{enumerate} +\item Make sure your system has the following executables installed + (they are usually located in \verb+/sbin+ or \verb+/usr/sbin+, so + make sure these directories are in your \verb+$PATH+): % $ + \emph{dd parted losetup kpartx mkfs.ext3 tune2fs mount debootstrap + chroot umount grub-install} +\item It is particularly important that you use Grub version 2. To + install it: + \begin{code} + sudo apt-get install grub2 + \end{code} +\item Edit the first lines of the + \verb+web-servers/scripts/opennebula-create-new-vm-image+ script, + then execute it as root. 
+\item The script generates an image file called \verb+conpaasweb.img+ + by default. You can now register it in OpenNebula: +\begin{code} + oneimage register conpaasweb.img +\end{code} +\end{enumerate} + + +\paragraph{If things go wrong}~\\ + +Note that if anything fails during the image file creation, the script +will stop. However, it will not always reset your system to its +original state. To undo everything the script has done, follow these +instructions: + +\begin{enumerate} +\item The image has been mounted as a separate file system. Find the + mounted directory using command \verb+df -h+. The directory should + be in the form of \verb+/tmp/tmp.X+. + +\item There may be \verb+dev+ and \verb+proc+ directories mounted + inside it. Unmount everything using: + \begin{code} + sudo umount /tmp/tmp.X/dev /tmp/tmp.X/proc /tmp/tmp.X + \end{code} + +\item Find which loop device you are using: + \begin{code} + sudo losetup -a + \end{code} + +\item Remove the device mapping: + \begin{code} + sudo kpartx -d /dev/loopX + \end{code} + +\item Remove the binding of the loop device: + \begin{code} + sudo losetup -d /dev/loopX + \end{code} + +\item Delete the image file + +\item Your system should be back to its original state. +\end{enumerate} + +\subsection{Make sure OpenNebula is properly configured} + +There are two main topics that you should pay attention to: + +\begin{enumerate} +\item Make sure you started OpenNebula's OCCI daemon. ConPaaS relies + on it to communicate with OpenNebula. + +\item Replace the \verb+occi_templates/common.erb+ OCCI profile from + your OpenNebula installation with the one from + \verb+misc/common.erb+. This new version features a number of + improvements from the standard version: + \begin{itemize} + \item The match for \verb+OS TYPE:arch+ allows the caller to specify + the architecture of the machine. + \item The graphics line allows for using vnc to connect to the VM. 
+ This is very useful for debugging purposes and is not necessary + once testing is complete. + \end{itemize} +\end{enumerate} + +\section{Setup ConPaaS's Frontend} + +The ConPaaS frontend is a web application that allows users to manage +their ConPaaS services. Users can create, configure and terminate +services through it. This section describes the process of setting up +a ConPaaS frontend. + +To set up your frontend, you will need a PHP-enabled web server and a +MySQL database. The easiest way to install them on a Debian or Ubuntu +machine is: + +\begin{code} + sudo apt-get install libapache2-mod-php5 php5-curl \ + php5-mysql mysql-server mysql-client +\end{code} + +\subsection{Create a MySQL Database} + +The ConPaaS frontend uses a MySQL database to store data about users +and their services. The script located in +\verb+frontend/scripts/frontend-db.sql+ creates a new user +\verb+DB_USER+ with password \verb+DB_PASSWD+ and a database +\verb+DB_NAME+. It grants all access permissions to user +\verb+DB_USER+ on the new database. Finally, it creates the database +schema. You must update the first four lines to change \verb+DB_USER+, +\verb+DB_PASSWD+ and \verb+DB_NAME+ to reasonable values. + +Install a MySQL database if you don't have one already. You can now +create the database schema using this command, replacing \verb+ADMIN+ +and \verb+ADMINPASSWORD+ with the MySQL administrator's name and +password: + +\begin{code} + mysql -u ADMIN -p < frontend-db.sql +\end{code} + + +You will be prompted for the administrator's password, then the +database schema will be created automatically. + +\subsection{Configure the Front-end} + +The ConPaaS Front-end code is a collection of PHP scripts. It can run +on any PHP-enabled Web server. We recommend using Apache with the +\verb+mod_php+ module. The following instructions detail the +configuration of the frontend once you have a working PHP-enabled Web +server. 
+ +\begin{enumerate} +\item Copy all files from the \verb+frontend/conf+ directory to a + location \uline{\emph{outside}} of the Web server's document root. + This directory contains sensitive configuration parameters which + must not be accessible by external users. A good location could be + for example \verb+/etc/conpaas+. Note that files in this + directory must be readable by the Web server (in Debian and Ubuntu + distributions the Web server runs under username \verb+www-data+). + + Edit each of these files to set up the required configuration + parameters. Each variable should be described in the config file + itself. If you are installing ConPaaS on EC2 you do not need to edit + file \verb+opennebula.ini+. If you are installing ConPaaS on + OpenNebula you do not need to edit file \verb+aws.ini+. + +\item Place the PHP code found in directory \verb+frontend/www+ at the + document root of the frontend web server such that the file named + \verb+__init__.php+ is directly underneath it. + +\item Edit the \verb+CONF_DIR+ variable in \verb+__init__.php+ such + that it points to the configuration directory path chosen in step 1. + +\item (Only if you are installing ConPaaS from the svn repository) + Download the AWS sdk for PHP from + \url{http://aws.amazon.com/sdkforphp/}. Extract the sdk directory + and rename it to \verb+aws-sdk+. Place it under the lib directory of + the front-end source code such that \verb+lib/aws-sdk/+ contains a + file named \verb+config-sample.inc.php+ (among others). + +\item Inside the web document's root, copy + \verb+lib/aws-sdk/config-sample.inc.php+ to + \verb+lib/aws-sdk/config.inc.php+ and fill in \verb+AWS_KEY+, + \verb+AWS_SECRET_KEY+, \verb+AWS_ACCOUNT_ID+ and + \verb+AWS_CANONICAL_ID+ as instructed in the file's documentation. 
+ +\item (Only if you are installing ConPaaS from the svn repository) + Make sure that the Web server's document directory contains a + subdirectory named \verb+code+ and containing the following files: + \verb+agent-start+, \verb+agent-stop+, \verb+ConPaaSWeb.tar.gz+, + \verb+ec2-agent-user-data+ and \verb+manager-start+. These files + contain the entire implementation of the Web hosting service. They + are downloaded by newly created VM instances upon startup. + + Files \verb+ec2-manager-user-data+ and \verb+opennebula-user-data+ + must be placed in the frontend's configuration directory, and edited + with configuration information. + +\end{enumerate} + +At this point, your front-end should be working! + +\section{Miscellaneous} +\subsection{The credit system} + +The frontend is designed to maintain accounting of resources used by +each user. When a new user is created, (s)he receives a number of +credits as specified in the ``main.ini'' configuration file. Later on, +one credit is subtracted each time a VM is executed for (a fraction +of) one hour. The administrator can change the number of credits by +directly editing the frontend's database. + +\subsection{Application sandboxing} + +The default ConPaaS configuration creates strong sandboxing so that +applications cannot open sockets, access the file system, execute +commands, etc. This makes the platform relatively secure against +malicious applications. On the other hand, it strongly restricts the +actions that ConPaaS applications can do. To reduce these security +measures to a more usable level, you need to edit two files: + +\begin{itemize} +\item To change restrictions applied to PHP applications, edit file + \verb+web-servers/etc/fpm.tmpl+ to change the list of + \verb+disable\_functions+. Do not forget to recreate a file + \verb+ConPaaSWeb.tar.gz+ out of the entire \verb+web-servers+ + directory, and to copy it at the URL specified in file + \verb+frontend/conf/manager-user-data+. 
+\item To change restrictions applied to Java applications, edit file + ``web-servers/etc/tomcat-catalina.policy''. Do not forget to + recreate a file ConPaaSWeb.tar.gz out of the entire ``web-servers'' + directory, and to copy it at the URL specified in file + ``frontend/conf/manager-user-data''. +\end{itemize} + + +\end{document} diff --git a/conpaas/branches/conpaas-dailybuild/doc/userguide.pdf b/conpaas/branches/conpaas-dailybuild/doc/userguide.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7893ba79ccffc0248f4dbee4709a400b28db5945 Binary files /dev/null and b/conpaas/branches/conpaas-dailybuild/doc/userguide.pdf differ diff --git a/conpaas/branches/conpaas-dailybuild/doc/userguide.tex b/conpaas/branches/conpaas-dailybuild/doc/userguide.tex new file mode 100644 index 0000000000000000000000000000000000000000..3d2cf37265460f695f531e8254e4dae57a4ba527 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/doc/userguide.tex @@ -0,0 +1,331 @@ +\documentclass[10pt]{article} +\usepackage{listings} +\usepackage{framed} + + +\newenvironment{what} +{\begin{description} \item [What is happening now?] \hfill \\} +{\end{description}} + +\newenvironment{framedbox}[1]% +{\begin{framed} + \begingroup + \fontsize{#1}{#1}\selectfont +} +{ + \endgroup + \end{framed} +} + +\pagestyle{myheadings} +\markright{User Manual:ConPaaS Web Hosting Service} + +\begin{document} +\title{User Manual: ConPaaS Web Hosting Service} +\date{August 15, 2011} +\thispagestyle{empty} + +\begin{center} +\begingroup +\fontsize{20pt}{20pt}\selectfont +\textbf{User Manual: ConPaaS Web Hosting Service} \linebreak +\endgroup + +\begingroup +\fontsize{16pt}{16pt}\selectfont +August 15, 2011 +\endgroup +\end{center} + +\section{Cloud Front-end} +The cloud front-end provides an intuitive web-based user interface that +allows users to register new accounts in order to start using the Web +Hosting Service. Registered users can create as well as terminate +services. 
Additionally, the front-end provides a simplified interface +through which a user can configure his services. + +\subsection{Register a user} +Note: User registration can only be done through the cloud front-end. +\begin{enumerate} +\item Click on the "register" link. +\item Enter your user name and click on the "register" button. +\item You will be automatically logged in. Notice your user name and the "logout" + link at the top right corner of the web page. You are now ready to + experiment with the Web Hosting Service. +\end{enumerate} + +\subsection{Create a Web Hosting Service} +Node: Creating a new service can only be done through the cloud front-end. +\begin{enumerate} +\item Starting from the user home page, click on the "create service" button. +\item Select a type of web hosting service. For example "PHP Service". +\item Select a target cloud platform. For example "Amazon EC2". +\item Click on the "create service" button to start. + \begin{what} + The Web interface requested a new virtual machine from Amazon EC2 and + is waiting for it to boot. This virtual machine will be the managing + node of the new service. The web page will be automatically redirected + and a list of all of the created services will be displayed. + Note that, the service status is "Initializing". When the service is + ready, its status will change to "created" and you can click on it to + configure it further. + \end{what} +\end{enumerate} + +\subsection{Rename the Service} +Renaming a service can only be done through the cloud front-end. +\begin{enumerate} +\item Starting from the user home page, click on the service you intend to + rename. +\item Click on the service name at the top left of the web + page. A dialog box will appear where you can enter the new service + name. +\end{enumerate} + +\subsection{Terminate the service} +Note: terminating the service can only be done through the cloud front-end. 
+\begin{enumerate} +\item Starting from the user home page, click on the service you intend to + terminate. +\item Press on the "terminate" button at the top right of the page. + Terminating the service will release the virtual machine hosting + the manager and will delete all of the service's configuration and + uploaded code. +\end{enumerate} + +\section{Starting and Stopping the Service} + +\subsection{Start the Service} +\subsubsection{Through the Front-end} +\begin{enumerate} +\item Starting from the user home page, click on the service you intend to start. +\item Notice the message "No instances are running". This means that there is no + web server running yet. +\item Click on the "start" button at the top right + of the page to start a web server. Notice the progress message that + appeared at the top of the page. + \begin{what} + The service requested a new virtual machine from the cloud provider. + When the machine is ready, the manager will configure it to run + a web server. When the web server is ready, the web page will + display the running instances' information. + \end{what} +\item Notice the displayed instance information. It is tagged with "proxy", + "web" and "php" which means that this virtual machine is running a proxy + (load balancer) server, a web server and it supports executing PHP + scripts.On the right end you will find the domain name of the virtual + machine. You can use this to access the virtual machine directly. +\item Notice the link labeled "access active version". Click on it to access + the newly created web server. The web servers start with a default + welcome page. +\end{enumerate} + +\subsubsection{Using the Command-line Client} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web http://x-x-x-x/ startup +\end{verbatim}\end{framedbox} + + +\subsection{Stop the Service} +\subsubsection{Through the Front-end} +\begin{enumerate} +\item Starting from the user home page, click on the service you intend to stop. 
+\item Press on the "stop" button at the top right of the web page. +\item Stopping the service would release the web servers but the service + manager will remain active. If you want to permanently destroy the + service, press on the terminate button after you stop the service. +\end{enumerate} +\subsubsection{Using the Command-line Client} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web http://x-x-x-x/ shutdown +\end{verbatim}\end{framedbox} + +\section{Code Management} +The Web Hosting Service can manage and store multiple code archives. You can +upload code archives to it and select which one should be active online. +This section explains how to manage code archives. + +\subsection{Upload Code Version} +\subsubsection{Through the Front-end} +\begin{enumerate} +\item Use the "choose file" button to upload a code archive. When creating an + archive, you need to make sure it expands directly in the same directory. + The upload file must be an archive of type '.zip', '.tar', '.tar.bz2' or + '.tar.gz'. PHP applications should have a file named "index.php" + which will be the default page. Java applications should have a file + named index.jsp. +\item Notice that the "available code versions" list grew. A new code version + appeared in the list but it is not active yet. Hover over the new code + version with the mouse and two more links will appear; "set active" and + "download". 
+\end{enumerate} + + +\subsubsection{Using the Command-line Client} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web http://x-x-x-x/ upload_code_version -h +Usage: upload_code_version + +Options: + -h, --help show this help message and exit + +$ ./cpsclient.web http://x-x-x-x/ upload_code_version path/to/archive.zip +codeVersionId: code-XXXXX +\end{verbatim}\end{framedbox} + +\subsection{Activate Code Version} +\subsubsection{Through the Front-end} +\begin{enumerate} +\item Hover over a code version with the mouse and two links will appear; + "set active" and "download". +\item Click on "set active" to activate this version online. +\item Notice that the selected code version is labeled with "active". +\item If the service is already running, click on "access active version" to + validate that the new code version is running. Your web browser would + normally cache web pages so you may need to refresh the page to view + the latest updates. +\end{enumerate} + +\subsubsection{Using the Command-line Client} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web http://x-x-x-x/ update_java_configuration -h +Usage: update_java_configuration + +Options: + -h, --help show this help message and exit + -c CODEVERSIONID, --code=CODEVERSIONID + +$ ./cpsclient.web http://x-x-x-x/ update_java_configuration -c code-XXXX +\end{verbatim}\end{framedbox} + +\subsection{Download Code Version} +\subsubsection{Through the Front-end} +\begin{enumerate} +\item Hover over a code version with the mouse and two links will appear; + "set active" and "download". +\item Click the "download" link will download the file to your local computer. +\end{enumerate} + +\section{Resource Management} +One of the advantages of ConPaaS is elasticity. The Web Hosting Service can +configure multiple servers and assign them different roles to scale. The +deployment can grow and shrink transparently to the users without any +service disruption. 
+ +\subsection{Scaling Out/In} +\subsubsection{Through the Front-end} +\begin{enumerate} +\item Notice the section labeled "add or remove instances to your deployment" in + the web page were there are 3 boxes labeled "proxy", "web" and "php" with + a 0 to the left of each one. +\item Click on the 0 of any box and a dialog will appear where you can specify +the number of nodes you want to add/remove. +\item Let's add 1 web server, 1 proxy and 1 php. Then press on the "submit" + button to their right. +\item A progress message will show up at the top of the page. + \begin{what} + Requesting new virtual machines will take some time. As soon as the new + virtual machines become available, the manager will configure them and + reconfigure the old nodes as well. If you want to monitor the progress of the + new virtual machines more closely, click on the "raw log" link at the top of + the page to view the log produced by the manager. You will need to refresh + this page to view recent updates. Once the nodes are ready, they will be + displayed on the web page. + \end{what} +\item Notice that the web page is now displaying the newly created nodes as + well. Each node is tagged with its roles (proxy, web or php). 
+\end{enumerate} + +\subsubsection{Using the Command-line Client} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web http://x-x-x-x/ add_nodes -h +Usage: add_nodes + +Options: + -h, --help show this help message and exit + -p PROXY, --proxy=PROXY + -w WEB, --web=WEB + -b BACKEND, --backend=BACKEND + +$ ./cpsclient.web http://x-x-x-x/ add_nodes -w 1 -b 1 + +$ ./cpsclient.web http://x-x-x-x/ remove_nodes -h +Usage: remove_nodes + +Options: + -h, --help show this help message and exit + -p PROXY, --proxy=PROXY + -w WEB, --web=WEB + -b BACKEND, --backend=BACKEND + +$ ./cpsclient.web http://x-x-x-x/ remove_nodes -w 1 -b 1 +\end{verbatim}\end{framedbox} + +\section{Command-line Administration} +You can perform all of the operations provided by the web interface +by using a command-line tool. \textbf{\emph{Prerequisites: python $>=$ 2.6, +python-pycurl and python-simplejson packages.}} +Create a service, go to its web page and copy the URL provided by the +"access manager" link at the top of the page. This URL points to the manager +directly and you can use it with the command-line program "cpsclient.web" to +issue commands. + +\subsection{Prepare Command-line Environment} +\begin{itemize} +\item Download the source code file ConPaaSWeb.tar.gz. +\item Unpack it and prepare your environment as follows: +\end{itemize} +\begin{framedbox}{12pt}\begin{verbatim} +$ tar -zxf ConPaaSWeb.tar.gz # unpack the archive +$ export PYTHONPATH=`pwd`/ConPaaSWeb/src # Set PYTHONPATH +\end{verbatim}\end{framedbox} +\begin{itemize} +\item The PYTHONPATH environment variable needs to be pointing to the + location of the 'src' directory on your file system. +\item Run ConPaaSWeb/bin/cpsclient.web to view a list of supported + operations. 
+\end{itemize} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web +Usage: ./cpsclient.web URL ACTION options + +Action could be one of: + ACTION DESCRIPTION + add_nodes Add more service nodes to a deployment + getLog Get raw logging + get_configuration Get the configuration of a deployment + get_node_info Get information about a single service node + get_service_history Get the state change history of a deployment + get_service_info Get the state of a deployment + get_service_performance Get the average request rate and throughput + help Print the help menu + list_code_versions List identifiers of all code versions stored by a deployment + list_nodes Get a list of service nodes + remove_nodes Remove some service nodes from a deployment + shutdown Shutdown a deployment + startup Startup a deployment + update_java_configuration Update the configuration of a Java deployment + update_php_configuration Update the configuration of a PHP deployment + upload_code_version Upload a new code version +\end{verbatim}\end{framedbox} + +\begin{itemize} +\item Use the "access manager" URL as a first argument to cpsclient.web followed + by one of the operation names to perform it. Use the '-h' option to check + if an operation requires additional arguments. 
+\end{itemize} +\begin{framedbox}{8pt}\begin{verbatim} +$ ./cpsclient.web http://x-x-x-x/ get_node_info -h +Usage: get_node_info + +Options: + -h, --help show this help message and exit + +$ ./cpsclient.web http://x-x-x-x/ get_node_info i-23dffe4 +Service Node Address Role(s) +i-23dffe4d ec2-xxx-xx-xx-xxx.compute-x.amazonaws.com WEB +\end{verbatim}\end{framedbox} + + +\end{document} + diff --git a/conpaas/branches/conpaas-dailybuild/frontend/README.txt b/conpaas/branches/conpaas-dailybuild/frontend/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..210f0a027535611c279ff1766d462eef8e743ea6 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/README.txt @@ -0,0 +1,26 @@ +This directory contains the ConPaaS front-end. This is the Web +application that allows ConPaaS users to easily create, start, stop, +and manage their ConPaaS services. This Web application can run +outside of the Cloud. Detailed installation instructions can be found +in the "../doc" directory. + +In short: + +- All files located in the "www" directory must be made available in a + PHP-enabled Web server. + +- All files located in the "conf" directory must be made available + *out* of the Web server directory. For example one may want to store + them in /etc/conpaas/ or a similar path. These files must be filled + in with configuration details of the local ConPaaS installation. + +- You must edit the file __init__.php in the www directory such that + it points to the location of the configuration files. + +- You must download the AWS JDK for PHP from + http://aws.amazon.com/sdkforphp/ and expand it in the "www/lib" + directory (thereby creating a directory "www/lib/aws-jdk" containing + a number of PHP files and subdirectories). 
+ +Guillaume Pierre +gpierre@cs.vu.nl diff --git a/conpaas/branches/conpaas-dailybuild/frontend/conf/aws.ini b/conpaas/branches/conpaas-dailybuild/frontend/conf/aws.ini new file mode 100644 index 0000000000000000000000000000000000000000..74de54e77a50e64b8794e5c44d0eaaf97af72720 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/conf/aws.ini @@ -0,0 +1,26 @@ +; This configuration file defines access details to Amazon Web Services + +; This variable should contain the identifier of the Amazon Machine +; Image created from the Web hosting service. Your AMIs can be found at +; https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images +ami = "" + +; This variable should contain the created security group from the Web +; hosting service. Your security groups can be found at +; https://console.aws.amazon.com/ec2/home?region=us-east-1#s=SecurityGroups +security_group = "" + +; This variable should contain the Key Pair name to be used. +; Your keypairs can be found at +; https://console.aws.amazon.com/ec2/home?region=us-east-1#s=KeyPairs +keypair = "" + +; This variable should contain the absolute path of file +; "ec2-manager-user-data. This should normally be in your the same +; directory as the current file. +user_data_file = "/path/to/conf/ec2-manager-user-data" + +; This variable should contain the type of EC2 instances to use. A +; good value to use inexpensive, low-performance instances is +; "t1.micro". +instance_type = "t1.micro" diff --git a/conpaas/branches/conpaas-dailybuild/frontend/conf/db.ini b/conpaas/branches/conpaas-dailybuild/frontend/conf/db.ini new file mode 100644 index 0000000000000000000000000000000000000000..2dbbac7adff53c5be4bd786be0d5c8bbb5fbcc2a --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/conf/db.ini @@ -0,0 +1,26 @@ +; This configuration file defines access to a local database +; to store the state of the frontend application. 
+ +[mysql] + +; This variable must contain the IP address where the frontend's +; MySQL database is located. +server = "" + +; This variable must contain the username under which the frontend's +; database can be accessed. This should be set to the same name as the +; one you wrote in the "frontend/scrips/frontend-db.sql" script before +; creating the database schema. +user = "" + +; This variable must contain the password under which the frontend's +; database can be accessed. This should be set to the same password as +; the one you wrote in the "frontend/scrips/frontend-db.sql" script +; before creating the database schema. +pass = "" + +; This variable must contain the database name under which the frontend's +; database can be accessed. This should be set to the same database name +; as the one you wrote in the "frontend/scrips/frontend-db.sql" script +; before creating the database schema. +db = "" diff --git a/conpaas/branches/conpaas-dailybuild/frontend/conf/main.ini b/conpaas/branches/conpaas-dailybuild/frontend/conf/main.ini new file mode 100644 index 0000000000000000000000000000000000000000..71526f6987f590294b0d32c43d7ac5a3a07c214c --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/conf/main.ini @@ -0,0 +1,28 @@ +; This file defines the main parameters of the ConPaaS frontend + +[main] + +; This variable should contain the location of a file where the errors +; will be reported. +logfile = "/path/to/error.log" + +; This variable should be set to the ConPaaS administrator's email +; address. The frontend will issue one email each time a new user +; registers to the frontend. +admin_email = "admin@whatever.com" + +; This variable should be set to the initial credit given to newly +; registered users. If it is set to 0 then new users will not be able +; to use ConPaaS until the administrator gives them credits by +; directly editing the users database. +initial_credit = "5" + +; Set this variable to "yes" to enable the EC2 cloud backend. 
If it is +; set to any other value, then users will not be able to run services +; on Amazon Web Services. +enable_ec2 = "yes" + +; Set this variable to "yes" to enable the OpenNebula cloud +; backend. If it is set to any other value, then users will not be +; able to run services on OpenNebula. +enable_opennebula = "no" diff --git a/conpaas/branches/conpaas-dailybuild/frontend/conf/opennebula.ini b/conpaas/branches/conpaas-dailybuild/frontend/conf/opennebula.ini new file mode 100644 index 0000000000000000000000000000000000000000..f1078298e7a7f0e0506cdc7623fad4058b9d103b --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/conf/opennebula.ini @@ -0,0 +1,38 @@ +; This configuration file defines access details to an OpenNebula installation + + +; This variable should contain the absolute path of file +; "opennebula-manager-user-data". This should normally be in your the +; same directory as the current file. +user_data_file = "" + +; OCCI defines 3 standard instance types: small medium and large. This +; variable should choose one of these. +instance_type = "" + +; Your OpenNebula user name +user = "" + +; Your OpenNebula password +passwd = "" + +; The image ID (an integer). You can list the registered OpenNebula +; images with command "oneimage list" command. +image = "" + +; The network ID (an integer). You can list the registered OpenNebula +; networks with the "onevnet list" command. +network = "" + +; The URL of the OCCI interface at OpenNebula. Note: ConPaaS currently +; supports only the default OCCI implementation that comes together +; with OpenNebula. It does not yet support the full OCCI-0.2 and later +; versions. 
+url = "" + +; The network gateway through which new VMs can route their traffic in +; OpenNebula (an IP address) +gateway = "" + +; The DNS server that VMs should use to resolve DNS names (an IP address) +nameserver = "" diff --git a/conpaas/branches/conpaas-dailybuild/frontend/scripts/frontend-db.sql b/conpaas/branches/conpaas-dailybuild/frontend/scripts/frontend-db.sql new file mode 100644 index 0000000000000000000000000000000000000000..13b4931c45e06bd74eb12887d2e6071d77f19978 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/scripts/frontend-db.sql @@ -0,0 +1,39 @@ +-- This script creates a database schema where the ConPaaS frontend +-- can store its state. Please update the first four lines to replace +-- DB_USER, DB_PASSWD and DB_NAME with reasonable values. In particular, +-- make sure you replace DB_PASSWD with a strong password. You will need +-- to enter these three values in the frontend/cond/db.ini file as well. + +create user 'DB_USER'@'%' identified by 'DB_PASSWD'; +create database DB_NAME; +grant all on DB_NAME.* to 'DB_USER'@'%'; +use DB_NAME; + +CREATE TABLE `services` ( + `sid` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(256) DEFAULT NULL, + `type` varchar(32) DEFAULT NULL, + `state` int(11) DEFAULT NULL, + `creation_date` datetime DEFAULT NULL, + `manager` varchar(512) DEFAULT NULL, + `uid` int(11) DEFAULT NULL, + `vmid` varchar(256) DEFAULT NULL, + `cloud` varchar(32) DEFAULT NULL, + PRIMARY KEY (`sid`), + KEY `searchbystate` (`state`), + KEY `searchbyuser` (`uid`) +) ENGINE=InnoDB AUTO_INCREMENT=301 DEFAULT CHARSET=latin1; + +CREATE TABLE `users` ( + `uid` int(11) NOT NULL AUTO_INCREMENT, + `username` varchar(256) DEFAULT NULL, + `fname` varchar(256) DEFAULT NULL, + `lname` varchar(256) DEFAULT NULL, + `email` varchar(256) DEFAULT NULL, + `affiliation` varchar(256) DEFAULT NULL, + `passwd` varchar(256) DEFAULT NULL, + `created` date DEFAULT NULL, + `credit` int(11) DEFAULT '0', + PRIMARY KEY (`uid`), + KEY `searchname` 
(`username`) +) ENGINE=InnoDB AUTO_INCREMENT=34 DEFAULT CHARSET=latin1; diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/Cluster.php b/conpaas/branches/conpaas-dailybuild/frontend/www/Cluster.php new file mode 100644 index 0000000000000000000000000000000000000000..7c366dfa538a60833929df1e02ebc05f4dea62cd --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/Cluster.php @@ -0,0 +1,72 @@ +. + +require_once('Instance.php'); + +class Cluster { + + private $role; /* web, php or proxy */ + private $nodes = array(); + + public function __construct($role, $roleName=NULL) { + $this->role = $role; + if ($roleName !== NULL) + $this->roleName = $roleName; + else + $this->roleName = $role; + } + + public function addNode($node) { + $this->nodes[] = $node; + } + + private function getRoleColor() { + static $roles = array( + 'backend' => 'purple', + 'web' => 'blue', + 'proxy' => 'orange' + ); + return $roles[$this->role]; + } + + private function getRoleClass() { + return 'cluster-'.$this->role; + } + + public function render() { + $html = + '
'. + '
'. + '
'.$this->roleName.'
'. + '
'; + foreach ($this->nodes as $node) { + $instance = new Instance($node); + $html .= $instance->renderInCluster(); + } + $html .= '
'; + return $html; + } + + public function getSize() { + return count($this->nodes); + } + +} + +?> diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/DB.php b/conpaas/branches/conpaas-dailybuild/frontend/www/DB.php new file mode 100644 index 0000000000000000000000000000000000000000..c8e970fd54f77b2679473ea04efa63e2003bec19 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/DB.php @@ -0,0 +1,60 @@ +. + +class DB { + + private static $conn = null; + + private static function loadConfiguration() { + $conf = parse_ini_file(Conf::CONF_DIR.'/db.ini', true); + if ($conf === false) { + throw new Exception('Could not read db configuration file db.ini'); + } + return $conf['mysql']; + } + + public static function getConn() { + if (self::$conn === null) { + $conf = self::loadConfiguration(); + self::$conn = mysql_connect($conf['server'], $conf['user'], + $conf['pass']); + if (self::$conn === FALSE) { + throw new Exception('Could not connect to the DB'); + } + mysql_select_db($conf['db'], self::$conn); + mysql_set_charset('utf8', self::$conn); + } + return self::$conn; + } + + public static function fetchAssocAll($res) { + $rows = array(); + while ($row = mysql_fetch_assoc($res)) { + $rows[] = $row; + } + return $rows; + } +} + +class DBException extends Exception { + + public function __construct($conn) { + parent::__construct(mysql_error($conn), mysql_errno($conn)); + } +} diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/EC2Service.php b/conpaas/branches/conpaas-dailybuild/frontend/www/EC2Service.php new file mode 100644 index 0000000000000000000000000000000000000000..fa5311b0587320f908f1deb430a9dba70d0d59c2 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/EC2Service.php @@ -0,0 +1,121 @@ +. 
+ +require_once('logging.php'); +require_once('Service.php'); +require_once('DB.php'); +require_once('lib/aws-sdk/sdk.class.php'); + +class EC2 { + + protected $vmid; + + private $ec2; + + private $manager_ami; + private $security_group; + private $keypair; + private $user_data_file; + private $instance_type; + private $service_type; + + public function __construct($data) { + $this->service_type = $data['type']; + $this->sid = $data['sid']; + $this->vmid = $data['vmid']; + $this->ec2 = new AmazonEC2(); + $this->loadConfiguration(); + } + + private function loadConfiguration() { + $conf = parse_ini_file(Conf::CONF_DIR.'/aws.ini', true); + if ($conf === false) { + throw new Exception('Could not read AWS configuration file aws.ini'); + } + $this->manager_ami = $conf['ami']; + $this->security_group = $conf['security_group']; + $this->keypair = $conf['keypair']; + $this->user_data_file = $conf['user_data_file']; + $this->instance_type = $conf['instance_type']; + } + + /** + * Instantiate a virtual image of the Manager. + * @return string id of the virtual instance + * @throws Exception + */ + public function createManagerInstance() { + $user_data = file_get_contents($this->user_data_file); + if ($user_data === false) { + throw new Exception('could not read manager user data: '. 
+ $this->user_data_file); + } + $user_data = str_replace( + array('%CONPAAS_SERVICE_TYPE%', '%CONPAAS_SERVICE_ID%'), + array(strtoupper($this->service_type), $this->sid), + $user_data); + $response = $this->ec2->run_instances($this->manager_ami, 1, 1, array( + 'InstanceType' => $this->instance_type, + 'KeyName' => $this->keypair, + 'SecurityGroup' => $this->security_group, + 'UserData' => base64_encode($user_data), + )); + if (!$response->isOK()) { + dlog($response); + throw new Exception('the EC2 instance was not created'); + } + /* get the instance id */ + $instance = $response->body->instancesSet->item; + return $instance->instanceId; + } + + /** + * @return false if the state is not 'running' + * the address (DNS) of the instance + * @throws Exception + */ + public function getManagerAddress() { + $response = $this->ec2->describe_instances(array( + 'InstanceId' => $this->vmid, + )); + if (!$response->isOK()) { + dlog($response); + throw new Exception('describe_instances call failed'); + } + $instance = $response->body->reservationSet->item->instancesSet->item; + if (!$instance->instanceState->name == 'running') { + return false; + } + if (!isset($instance->dnsName) || $instance->dnsName == '') { + return false; + } + return $instance->dnsName; + } + + public function terminateService() { + $response = $this->ec2->terminate_instances($this->vmid); + if (!$response->isOK()) { + dlog($response); + throw new Exception('terminate_instances('.$this->vmid.') '. + 'failed for service '.$this->name.'['.$this->sid.']'); + } + } + +} +?> diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/InputButton.php b/conpaas/branches/conpaas-dailybuild/frontend/www/InputButton.php new file mode 100644 index 0000000000000000000000000000000000000000..e05a795d8f85ae318dbe9877cc52d4227db82bea --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/InputButton.php @@ -0,0 +1,71 @@ +. 
+ +function InputButton($text) { + return new InputButton($text); +} + +class InputButton { + + protected $id = ''; + protected $text; + protected $visible = true; + protected $disabled = false; + + public function __construct($text) { + $this->text = $text; + } + + public function setVisible($visible) { + $this->visible = $visible; + return $this; + } + + public function setId($id) { + $this->id = $id; + return $this; + } + + public function setDisabled($disabled) { + $this->disabled = $disabled; + return $this; + } + + private function invisibleClass() { + if ($this->visible) { + return ''; + } + return 'invisible'; + } + + private function disabledMarker() { + if ($this->disabled) { + return ' disabled="disabled" '; + } + return ''; + } + + public function __toString() { + return + 'disabledMarker().'/>'; + } +} + diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/Instance.php b/conpaas/branches/conpaas-dailybuild/frontend/www/Instance.php new file mode 100644 index 0000000000000000000000000000000000000000..87b30ca639856b8e008f1a5f1f271ecfa76319a0 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/Instance.php @@ -0,0 +1,76 @@ +. + +class Instance { + + private $info; + + public function __construct($info) { + $this->info = $info; + } + + private function renderCapabs() { + $html = ''; + if ($this->info['isRunningProxy']) { + $html .= '
proxy
'; + } + if ($this->info['isRunningWeb']) { + $html .= '
web
'; + } + if ($this->info['isRunningBackend']) { + $html .= '
' . $this->info['service_type'] . '
'; + } + return $html; + } + + public function render() { + return + '
' + .'
' + .'Instance '.$this->info['id'].'' + .$this->renderCapabs() + .'
running
' + .'
' + .'
' + .''.$this->info['ip'].'' + .'
' + .'
' + .'
'; + } + + public function renderInCluster() { + return + '
' + .'
' + .'Instance '.$this->info['id'].'' + .'running for 6 hours' + .'
' + .'
' + .''.$this->info['ip'].'' + .'
' + .'
' + .'
'; + } + + public function getSize() { + return 1; + } +} + +?> diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/LinkUI.php b/conpaas/branches/conpaas-dailybuild/frontend/www/LinkUI.php new file mode 100644 index 0000000000000000000000000000000000000000..b186b7338fe0d5e19ee44083eb2672dae2182aae --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/LinkUI.php @@ -0,0 +1,59 @@ +. + +function LinkUI($text, $href) { + return new LinkUI($text, $href); +} + +class LinkUI { + + private $text; + private $href; + private $external = false; + private $class = 'link'; + + public function __construct($text, $href) { + $this->text = $text; + $this->href = $href; + } + + public function setExternal($external) { + $this->external = $external; + return $this; + } + + public function addClass($class) { + $this->class .= ' '.$class; + return $this; + } + + private function renderSymbol() { + return + ''; + } + + public function __toString() { + $target = $this->external ? 'target="new"' : ''; + return + '
' + .''.$this->text.'' + .$this->renderSymbol() + .'
'; + } +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/LocalService.php b/conpaas/branches/conpaas-dailybuild/frontend/www/LocalService.php new file mode 100644 index 0000000000000000000000000000000000000000..0421d3605020c3b2ecb5732c42f7dca91183686c --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/LocalService.php @@ -0,0 +1,38 @@ +. + +require_once('Service.php'); + +class LocalCloud { + + public function __construct($service_data) { + } + + public function getManagerAddress() { + return $this->manager; + } + + public function checkManagerInstance() { + return false; + } + + public function needsPolling() { + return false; + } +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/MapredPage.php b/conpaas/branches/conpaas-dailybuild/frontend/www/MapredPage.php new file mode 100644 index 0000000000000000000000000000000000000000..3ca4725f8ebf377cb27dc492cfb9c86100a74efd --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/MapredPage.php @@ -0,0 +1,90 @@ +. 
+ +class MapredPage { + + static $states = array( + 'INIT' => false, + 'RUNNING' => false, + 'PROLOGUE' => true, + 'EPILOGUE' => true, + 'STOPPED' => false + ); + + private $managerAddress; + private $conf = null; + + public function __construct($data) { + $this->managerAddress = $data['manager']; + } + + public function is_transient($state) { + return + !array_key_exists($state, self::$states) || + (self::$states[$state] == true); + } + + public function getUploadURL() { + return $this->managerAddress; + } + + public function fetchState() { + return 'RUNNING'; + } + + public function renderActions($state) { + $startButton = InputButton('start') + ->setId('start'); + $stopButton = InputButton('stop') + ->setId('stop'); + $terminateButton = InputButton('terminate') + ->setDisabled(true); + + switch ($state) { + case 'INIT': + $stopButton->setVisible(false); + $terminateButton->setVisible(false); + break; + case 'RUNNING': + $startButton->setVisible(false); + $terminateButton->setVisible(false); + break; + case 'STOPPED': + $stopButton->setVisible(false); + break; + default: + } + + return $startButton.' '.$stopButton.' '.$terminateButton; + } + + public function renderStateClass($state) { + switch ($state) { + case 'INIT': + case 'RUNNING': + return 'active'; + case 'STOPPED': + return 'stopped'; + default: + return ''; + } + } + +} +?> \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/OpenNebulaService.php b/conpaas/branches/conpaas-dailybuild/frontend/www/OpenNebulaService.php new file mode 100644 index 0000000000000000000000000000000000000000..7091f021e02c0870550032833b3049ad50bc0768 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/OpenNebulaService.php @@ -0,0 +1,156 @@ +. 
+ +require_once('logging.php'); +require_once('Service.php'); +require_once('DB.php'); +require_once('lib/aws-sdk/sdk.class.php'); + +class OpenNebula { + + protected $vmid; + + private $user_data_file; + private $instance_type; + private $service_type; + + public function __construct($data) { + $this->service_type = $data['type']; + $this->sid = $data['sid']; + $this->vmid = $data['vmid']; + $this->loadConfiguration(); + } + + private function loadConfiguration() { + $conf = parse_ini_file(Conf::CONF_DIR.'/opennebula.ini', true); + if ($conf === false) { + throw new Exception('Could not read OpenNebula configuration file opennebula.ini'); + } + $this->user_data_file = $conf['user_data_file']; + $this->instance_type = $conf['instance_type']; + $this->opennebula_url = $conf['url']; + $this->user = $conf['user']; + $this->passwd = $conf['passwd']; + $this->image = $conf['image']; + $this->network = $conf['network']; + $this->gateway = $conf['gateway']; + $this->nameserver = $conf['nameserver']; + } + + public function http_request($method, $resource, $xml=null) { + $ch = curl_init(); + curl_setopt($ch, CURLOPT_URL, $this->opennebula_url . $resource); + curl_setopt($ch, CURLOPT_HEADER, 'Accept: */*'); + curl_setopt($ch, CURLOPT_HTTPAUTH, CURLAUTH_BASIC ) ; + curl_setopt($ch, CURLOPT_USERPWD, $this->user.':'.sha1($this->passwd)); + switch($method) { + case 'POST': + curl_setopt($ch, CURLOPT_POST, 1); + curl_setopt($ch, CURLOPT_POSTFIELDS, $xml); + break; + case 'DELETE': + curl_setopt($ch, CURLOPT_CUSTOMREQUEST, 'DELETE'); + break; + default: + } + curl_setopt($ch, CURLOPT_RETURNTRANSFER, 1); + $body = curl_exec($ch); + return $body; + } + + /** + * Instantiate a virtual image of the Manager. + * @return string id of the virtual instance + * @throws Exception + */ + public function createManagerInstance() { + $user_data = file_get_contents($this->user_data_file); + if ($user_data === false) { + throw new Exception('could not read manager user data: '. 
+ $this->user_data_file); + } + $user_data = str_replace( + array('%CONPAAS_SERVICE_TYPE%', '%CONPAAS_SERVICE_ID%'), + array(strtoupper($this->service_type), $this->sid), + $user_data); + $hex_user_data = bin2hex($user_data); + $response = $this->http_request('POST', '/compute', + ''. + 'conpaas'. + ''. $this->instance_type .''. + ''. + ''. + ''. + ''. + ''. + ''. + ''. + '$NAME'. + '$NIC[IP]'. + ''.$this->gateway.''. + ''.$this->nameserver.''. + ''.$hex_user_data.''. + 'sdb'. + ''. + ''. + ''); + dlog($response); + if ($response === FALSE) { + dlog($response); + throw new Exception('the OpenNebula instance was not created'); + } + + $obj = simplexml_load_string($response); + if ($obj === FALSE) { + throw new Exception('Invalid response from opennebula'); + } + /* get the instance id */ + return (string)$obj->ID; + } + + /** + * @return false if the state is not 'running' + * the address (DNS) of the instance + * @throws Exception + */ + public function getManagerAddress() { + $response = $this->http_request('GET', '/compute/'.$this->vmid); + if ($response === FALSE) { + dlog($response); + throw new Exception('Faile to fetch state of node from OpenNebula'); + } + + $obj = simplexml_load_string($response); + if ($obj === FALSE) { + throw new Exception('Invalid response from opennebula'); + } + /* get the instance id */ + if ( ((string)$obj->STATE) === 'ACTIVE' ) { + return (string)$obj->NIC->IP; + } + else { + return FALSE; + } + } + + public function terminateService() { + $response = $this->http_request('DELETE', '/compute/'.$this->vmid); + } +} +?> diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/Page.php b/conpaas/branches/conpaas-dailybuild/frontend/www/Page.php new file mode 100644 index 0000000000000000000000000000000000000000..6cbfb8f76aba7b083c86e5043cda968cf2dabee6 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/Page.php @@ -0,0 +1,118 @@ +. 
+ +require_once('UserData.php'); +require_once('ServiceData.php'); + +class Page { + + const UFILE = 'services/users.ini'; + + protected $uid; + protected $username; + + protected $browser; + + public static function redirect($toURL) { + header('Location: '.$toURL); + exit(); + } + + private function fetchBrowser() { + $user_agent = $_SERVER['HTTP_USER_AGENT']; + if (strpos($user_agent, 'Firefox') !== false) { + $this->browser = 'firefox'; + } else if (strpos($user_agent, 'WebKit') != false) { + $this->browser = 'webkit'; + } else { + $this->browser = 'other'; + } + } + + public function __construct() { + if (isset($_SESSION['uid'])) { + $this->uid = $_SESSION['uid']; + } else { + self::redirect('login.php'); + } + $uinfo = UserData::getUserById($this->uid); + if ($uinfo === false) { + throw new Exception('User does not exist'); + } + $this->username = $uinfo['username']; + $this->user_credit = $uinfo['credit']; + $this->instances = $uinfo['instances']; + $this->fetchBrowser(); + } + + public function getUserCredit() { + return $this->user_credit; + } + + public function getInstances() { + $instances = 0; + $services_data = ServiceData::getServicesByUser($this->uid); + + foreach ($services_data as $service_data) { + $service = ServiceFactory::createInstance($service_data); + $instances += $service->getNodesCount()+1; + } + return $instances; + } + + public function getBrowserClass() { + return $this->browser; + } + + public function getUsername() { + return $this->username; + } + + public function getUID() { + return $this->uid; + } + + /* render functions */ + public function renderHeader() { + return + '
'. + ''. + '
'. + $this->getUsername().' | '. + ''. + '' . + $this->getInstances() . + ''. + ' active VMs'. + ' |' . + ' '. + ''. + $this->getUserCredit(). + ''. + ' credits'. + ' | help | '. + 'logout'. + '
'. + '
'. + '
'; + } + +} + +?> \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/PageStatus.php b/conpaas/branches/conpaas-dailybuild/frontend/www/PageStatus.php new file mode 100644 index 0000000000000000000000000000000000000000..e729ef3186c6a7fae1ff06f1e375beeb7e776b9f --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/PageStatus.php @@ -0,0 +1,40 @@ +. + +function PageStatus() { + return new PageStatus(); +} + +class PageStatus { + + private $id = ''; + + public function setId($id) { + $this->id = $id; + return $this; + } + + public function __toString() { + return + ''; + } +} diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/Service.php b/conpaas/branches/conpaas-dailybuild/frontend/www/Service.php new file mode 100644 index 0000000000000000000000000000000000000000..f848fc893ef9ebef9d26d218979e2c69baecb94e --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/Service.php @@ -0,0 +1,397 @@ +. + +require_once('logging.php'); +require_once('ServiceData.php'); + +abstract class Service { + + protected $sid, + $name, + $type, + $sw_version, + $state, + $creation_date, + $manager, + $uid, + $cloud, + $cloud_instance; + + private $nodesLists; + private $nodesCount = 0; + + private $reachable = false; + private $stable = true; + + const STATE_RUNNING = 0; + const STATE_STOPPED = 1; + const STATE_TERMINATED = 2; + const STATE_PREINIT = 3; + const STATE_INIT = 4; + const STATE_ERROR = 5; + + static $state_txt = array( + Service::STATE_RUNNING => 'running', + Service::STATE_STOPPED => 'stopped', + Service::STATE_TERMINATED => 'terminated', + Service::STATE_INIT => 'initializing', + Service::STATE_PREINIT => 'preparing', + Service::STATE_ERROR => 'error', + ); + + private static $CURL_OPTS = array( + CURLOPT_CONNECTTIMEOUT => 10, + CURLOPT_RETURNTRANSFER => true, + CURLOPT_TIMEOUT => 60, + ); + + public static function stateIsStable($remoteState) { + return + $remoteState != 'PROLOGUE' && + 
$remoteState != 'EPILOGUE' && + $remoteState != 'ADAPTING' + ; + } + + private function pingManager() { + if (!isset($this->manager)) { + return; + } + try { + $json = $this->fetchState(); + $state = json_decode($json, true); + if ($state !== null && isset($state['result'])) { + $this->reachable = true; + $this->stable = self::stateIsStable($state['result']['state']); + } + } catch (Exception $e) { + // nothing + error_log('error trying to connect to manager'); + } + } + + public function __construct($data, $cloud_instance) { + foreach ($data as $key => $value) { + $this->$key = $value; + } + $this->cloud_instance = $cloud_instance; + $this->pingManager(); + /* fetch the nodes and arrange them */ + if ($this->reachable && $this->state == self::STATE_RUNNING) { + $this->nodesLists = $this->fetchNodesLists(); + /* compute the nodes count */ + if ($this->nodesLists !== false) { + $selected = array(); + foreach ($this->nodesLists as $role => $nodesList) { + foreach ($nodesList as $nodeId) { + if (!array_key_exists($nodeId, $selected)) { + $selected[$nodeId] = true; + $this->nodesCount++; + } + } + } + } + } + } + + public function isReachable() { + return $this->reachable; + } + + public function isStable() { + return $this->stable; + } + + public function isRunning() { + return $this->state == SERVICE::STATE_RUNNING; + } + + public function isConfigurable() { + return + $this->reachable && + $this->state != self::STATE_TERMINATED && + $this->state != self::STATE_PREINIT; + } + + public function needsPolling() { + return (!$this->reachable && + ($this->state == self::STATE_RUNNING || + $this->state == self::STATE_INIT || + $this->state == self::STATE_PREINIT)); + } + + protected function managerRequest($method, $params, $http_method='get', $ping=false, $rpc=true) { + $opts = self::$CURL_OPTS; + if ($rpc) { + $opts[CURLOPT_HTTPHEADER] = array('Expect:', 'Content-Type: application/json'); + } + else { + $opts[CURLOPT_HTTPHEADER] = array('Expect:'); + } + if ($ping) { + 
$opts[CURLOPT_CONNECTTIMEOUT] = 1; + } + + $url = $this->manager; + $http_method = strtolower($http_method); + if ($http_method == 'post') { + $opts[CURLOPT_POST] = 1; + if ($rpc) { + $opts[CURLOPT_POSTFIELDS] = json_encode(array( + 'method' => $method, + 'params' => $params, + 'id' =>1 )); + } + else { + $opts[CURLOPT_POSTFIELDS] = array_merge($params, array('method' => $method)); + } + } else { + /* default is GET */ + $url .= '?'.http_build_query( + array( + 'method' => $method, + 'params' => json_encode($params), + 'id' => 1), + null, '&'); + } + $opts[CURLOPT_URL] = $url; + + $conn = curl_init(); + curl_setopt_array($conn, $opts); + $result = curl_exec($conn); + if ($result === false) { + $e = new Exception( + 'Error sending cURL request to '.$url.' '. + 'Error code: '.curl_errno($conn).' '. + 'Error msg: '.curl_error($conn) + ); + curl_close($conn); + throw $e; + } + curl_close($conn); + return $result; + } + + public function getNodeInfo($node) { + $json_info = $this->managerRequest('get_node_info', array( + 'serviceNodeId' => $node, + )); + $info = json_decode($json_info, true); + if ($info == null) { + return false; + } + return $info['result']['serviceNode']; + } + + private function fetchNodesLists() { + if (!isset($this->manager)) { + return false; + } + $json = $this->managerRequest('list_nodes', array()); + $response = json_decode($json, true); + if ($response == null || $response['error'] != null) { + return false; + } + return $response['result']; + } + + public function fetchState() { + $ret = $this->managerRequest( + 'get_service_info', + array(), + 'get', + true + ); + return $ret; + } + + public function fetchCodeVersions() { + $json = $this->managerRequest('list_code_versions', array()); + $versions = json_decode($json, true); + if ($versions == null) { + return false; + } + return $versions['result']['codeVersions']; + } + + public function fetchHighLevelMonitoringInfo() { + $json = $this->managerRequest('get_service_performance', array()); + 
$monitoring = json_decode($json, true); + if ($monitoring == null) { + return false; + } + return $monitoring['result']; + } + + public function getConfiguration() { + $json_conf = $this->managerRequest('get_configuration', array()); + $responseObj = json_decode($json_conf); + if ($responseObj == null) { + return null; + } + if (!isset($responseObj->result->phpconf)) { + return null; + } + return $this->conf = $responseObj->result->phpconf; + } + + abstract public function sendConfiguration($params); + + public function uploadCodeVersion($params) { + return $this->managerRequest('upload_code_version', $params, 'post', false, false); + } + + public function fetchStateLog() { + $json = $this->managerRequest('get_service_history', array()); + $log = json_decode($json, true); + if ($log != null) { + return $log['result']['state_log']; + } + else return array(); + } + + public function fetchLog() { + $json = $this->managerRequest('getLog', array()); + $log = json_decode($json, true); + return $log['result']['log']; + } + + public function addServiceNodes($params) { + if (isset($params['backend'])) $params['backend'] = intval($params['backend']); + if (isset($params['web'])) $params['web'] = intval($params['web']); + if (isset($params['proxy'])) $params['proxy'] = intval($params['proxy']); + return $this->managerRequest('add_nodes', $params, 'post'); + } + + public function removeServiceNodes($params) { + if (isset($params['backend'])) $params['backend'] = intval($params['backend']); + if (isset($params['web'])) $params['web'] = intval($params['web']); + if (isset($params['proxy'])) $params['proxy'] = intval($params['proxy']); + return $this->managerRequest('remove_nodes', $params, 'post'); + } + + public function requestShutdown() { + return $this->managerRequest('shutdown', array(), 'post'); + } + + public function requestStartup() { + return $this->managerRequest('startup', array(), 'post'); + } + + /** + * Deletes the service entry from the database + */ + public 
function terminateService() { + $this->cloud_instance->terminateService(); + ServiceData::deleteService($this->sid); + } + + public function getAccessLocation() { + $loadbalancer = $this->getNodeInfo($this->nodesLists['proxy'][0]); + return 'http://'.$loadbalancer['ip']; + } + + public function getNodesLists() { + return $this->nodesLists; + } + + public function getNodesCount() { + return $this->nodesCount; + } + + public function getSID() { + return $this->sid; + } + + public function getName() { + return $this->name; + } + + public function getType() { + return $this->type; + } + + public function getManager() { + return $this->manager; + } + + public function getVersion() { + return $this->sw_version; + } + + public function getState() { + return $this->state; + } + + public function getCloud() { + return $this->cloud; + } + + public function getStatusText() { + return self::$state_txt[$this->state]; + } + + public function getDate() { + return $this->creation_date; + } + + public function getUID() { + return $this->uid; + } + + public function getCloudInstance() { + return $this->cloud_instance; + } + + /** + * @return true if updated + */ + public function checkManagerInstance() { + $manager_addr = $this->cloud_instance->getManagerAddress(); + if ($manager_addr !== false) { + $manager_url = 'http://'.$manager_addr.':80'; + if ($manager_url != $this->manager) { + ServiceData::updateManagerAddress($this->sid, $manager_url); + return true; + } + } + return false; + } +} + + +class PHPService extends Service { + public function __construct($data, $cloud_instance) { + parent::__construct($data, $cloud_instance); + } + + public function sendConfiguration($params) { + return $this->managerRequest('update_php_configuration', $params, 'post'); + } +} + +class JavaService extends Service { + public function __construct($data, $cloud_instance) { + parent::__construct($data, $cloud_instance); + } + + public function sendConfiguration($params) { + return 
$this->managerRequest('update_java_configuration', $params, 'post'); + } +} diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceData.php b/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceData.php new file mode 100644 index 0000000000000000000000000000000000000000..c4b349cc583e12ccff456ca72dfb06454d0ff831 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceData.php @@ -0,0 +1,128 @@ +. + +require_once('DB.php'); +require_once('UserData.php'); +require_once('Service.php'); + +class ServiceData { + + public static function createService($default_name, $type, $cloud, $uid) { + if (UserData::updateUserCredit($uid, -1) === false) { + /* not enough credit */ + return false; + } + $query = sprintf("INSERT INTO services ". + "(name, type, cloud, state, creation_date, uid) VALUES ". + "('%s', '%s', '%s', %d, now(), %d)", + mysql_escape_string($default_name), + mysql_escape_string($type), + mysql_escape_string($cloud), + mysql_escape_string(Service::STATE_PREINIT), + mysql_escape_string($uid) + ); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + + /* get the service id */ + $sid = mysql_insert_id(DB::getConn()); + return $sid; + } + + public static function getServicesByUser($uid) { + $query = sprintf("SELECT * FROM services WHERE uid='%s' ". 
+ " ORDER BY sid DESC", + mysql_escape_string($uid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + return DB::fetchAssocAll($res); + } + + public static function getServiceById($sid) { + $query = sprintf("SELECT * FROM services WHERE sid='%s' LIMIT 1", + mysql_escape_string($sid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + $entries = DB::fetchAssocAll($res); + if (count($entries) != 1) { + throw new Exception('Service does not exist'); + } + return $entries[0]; + } + + public static function updateVmid($sid, $vmid) { + $query = sprintf("UPDATE services SET vmid='%s' WHERE sid=%d", + mysql_escape_string($vmid), mysql_escape_string($sid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + } + + public static function updateName($sid, $name) { + $query = sprintf("UPDATE services SET name='%s' WHERE sid=%d", + mysql_escape_string($name), + mysql_escape_string($sid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + } + + + public static function updateManagerAddress($sid, $manager) { + $query = sprintf("UPDATE services SET manager='%s', state=%d ". 
+ " WHERE sid=%d", + mysql_escape_string($manager), + mysql_escape_string(Service::STATE_INIT), + mysql_escape_string($sid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + } + + public static function updateState($sid, $state) { + $query = sprintf("UPDATE services SET state=%d WHERE sid=%d", + mysql_escape_string($state), + mysql_escape_string($sid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + } + + public static function deleteService($sid) { + $query = sprintf("DELETE FROM services WHERE sid=%d", + mysql_escape_string($sid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + } + +} + +?> \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceFactory.php b/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceFactory.php new file mode 100644 index 0000000000000000000000000000000000000000..40db859c28692bea58e87783ba85062408877046 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceFactory.php @@ -0,0 +1,54 @@ +. 
+ +require_once('LocalService.php'); +require_once('EC2Service.php'); +require_once('OpenNebulaService.php'); +require_once('Service.php'); + +class ServiceFactory { + + public static function createInstance($service_data) { + $cloud = $service_data['cloud']; + $type = $service_data['type']; + $cloud_instance = null; + + switch ($cloud) { + case 'local': + $cloud_instance = new LocalCloud($service_data); + break; + case 'ec2': + $cloud_instance = new EC2($service_data); + break; + case 'opennebula': + $cloud_instance = new OpenNebula($service_data); + break; + default: + throw new Exception('Unknown cloud'); + } + switch ($type) { + case 'php': + return new PHPService($service_data, $cloud_instance); + case 'java': + return new JavaService($service_data, $cloud_instance); + default: + throw new Exception('Unknown service type'); + } + } +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceItem.php b/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceItem.php new file mode 100644 index 0000000000000000000000000000000000000000..6487f57ee05b5dce9b21328e672777c98be23b53 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/ServiceItem.php @@ -0,0 +1,157 @@ +. + +require_once('Service.php'); +require_once('StatusLed.php'); +require_once('TimeHelper.php'); + +function ServiceItem($data) { + return new ServiceItem($data); +} + +class ServiceItem { + + private $service; + private $last = false; + + public function __construct(Service $service) { + $this->service = $service; + } + + public function setLast($last=true) { + $this->last = $last; + return $this; + } + + private function renderImage() { + return + '
' + .'' + .'
'; + } + + private function renderActions() { + if (!$this->service->isReachable()) { + $actions = 'service is unreachable'; + if ($this->service->getState() == Service::STATE_INIT) { + $actions .= ': initializing'; + } + } else { + $ts = strtotime($this->service->getDate()); + $actions = 'created '.TimeHelper::timeRelativeDescr($ts).' ago'; + } + + return + '
' + .$actions + .'
'; + } + + private function renderStatistic($content, $note) { + return + '
' + .'
'.$content.'
' + .'
'.$note.'
' + .'
'; + } + + private function renderStats() { + if (!$this->service->isReachable()) { + if ($this->service->getState() == Service::STATE_INIT) { + $imgsrc = 'images/throbber-on-white.gif'; + } else { + $imgsrc = 'images/warning.png'; + } + return $this->renderStatistic('',''); + } + /* is reachable */ + if ($this->service->getState() == Service::STATE_INIT) { + return ''; + } + $monitor = $this->service->fetchHighLevelMonitoringInfo(); + $resources = + ''.$this->service->getNodesCount().'' + .''; + + if ($this->service->getType() == 'php') { + $resptime = + ''.$monitor['throughput'].'ms'. + ''; + + return + $this->renderStatistic( + '' + .$monitor['error_rate'].'%' + .' ', + 'error rate') + .$this->renderStatistic( + ''.$monitor['request_rate'].'/s' + .' ', + 'requests rate') + .$this->renderStatistic($resptime, 'response time') + .$this->renderStatistic($resources, 'virtual instances'); + } else if ($this->service->getType() == 'hadoop') { + return + $this->renderStatistic($resources, 'virtual instances') + .$this->renderStatistic('233GB', + 'data processed'); + } + } + + private function renderColorTag() { + $color_class = $this->service->getState() == Service::STATE_RUNNING ? + 'colortag-active' : 'colortag-stopped'; + return + ''; + } + + private function renderTitle() { + if (!$this->service->isConfigurable()) { + $title = $this->service->getName(); + } else { + $title = + '' + .$this->service->getName() + .''; + } + return + '
' + .StatusLed($this->service) + .$title + .'
'; + } + + public function __toString() { + $lastClass = $this->last ? 'last' : ''; + return + '' + .$this->renderColorTag() + .'' + .$this->renderImage() + .'
' + .$this->renderTitle() + .$this->renderActions() + .'
' + .$this->renderStats() + .'
' + .'' + .''; + } + +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/ServicePage.php b/conpaas/branches/conpaas-dailybuild/frontend/www/ServicePage.php new file mode 100644 index 0000000000000000000000000000000000000000..d568277e9a42a122f8813c084c0fc2efb17390e9 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/ServicePage.php @@ -0,0 +1,430 @@ +. + +require_once('logging.php'); +require_once('Page.php'); +require_once('Instance.php'); +require_once('Cluster.php'); +require_once('StatusLed.php'); +require_once('TimeHelper.php'); +require_once('LinkUI.php'); +require_once('Service.php'); + +class ServicePage extends Page { + + static $states = array( + 'INIT' => false, + 'RUNNING' => false, + 'PROLOGUE' => true, + 'EPILOGUE' => true, + 'STOPPED' => false + ); + + private $service; + private $conf = null; + private $nodes = null; + + public function __construct(Service $service) { + parent::__construct(); + $this->service = $service; + } + + public function is_transient($state) { + return + !array_key_exists($state, self::$states) || + (self::$states[$state] == true); + } + + public function getUploadURL() { + return 'services/uploadCodeVersion.php?sid='.$this->service->getSID(); + } + + public function getState() { + $json_text = $this->service->fetchState(); + $responseObj = json_decode($json_text); + if ($responseObj === null) { + return 'UNREACHABLE'; + } + if ($responseObj->error != null) { + throw new Exception('Something went wrong when fetching the state: `' + .$json_text.'`'); + } + return $responseObj->result->state; + } + + public function renderActions() { + $startButton = InputButton('start') + ->setId('start'); + $stopButton = InputButton('stop') + ->setId('stop'); + $terminateButton = InputButton('terminate') + ->setId('terminate'); + + switch ($this->service->getState()) { + case Service::STATE_INIT: + $stopButton->setVisible(false); + break; + case Service::STATE_RUNNING: + 
$startButton->setVisible(false); + $terminateButton->setVisible(false); + break; + case Service::STATE_STOPPED: + $stopButton->setVisible(false); + break; + default: + } + if (!$this->service->isReachable()) { + $startButton->setVisible(false); + $stopButton->setVisible(false); + } + + return $startButton.' '.$stopButton.' '.$terminateButton; + } + + public function renderStateClass($state) { + switch ($state) { + case 'INIT': + case 'RUNNING': + return 'active'; + case 'STOPPED': + return 'stopped'; + default: + return ''; + } + } + + private function getVersionDownloadURL($versionID) { + return $this->service->getManager() + .'?action=downloadCodeVersion&codeVersionId='.$versionID; + } + + public function renderVersions() { + $versions = $this->service->fetchCodeVersions(); + if ($versions === false) { + return '

No versions available

'; + } + $active = null; + for ($i = 0; $i < count($versions); $i++) { + if (isset($versions[$i]['current'])) { + $active = $i; + } + } + if (count($versions) == 0) { + return '

No versions available

'; + } + $html = '
    '; + for ($i = 0; $i < count($versions); $i++) { + $versions[$i]['downloadURL'] = + $this->getVersionDownloadURL($versions[$i]['codeVersionId']); + $versionUI = Version($versions[$i]) + ->setLinkable($this->service->isRunning()); + if ($active == $i) { + if ($this->service->isRunning()) { + $versionUI->setActive(true, $this->service->getAccessLocation()); + } + else { + $versionUI->setActive(true); + } + } + if ($i == count($versions) - 1) { + $versionUI->setLast(); + } + $html .= $versionUI; + } + $html .= '
'; + return $html; + } + + private function getCurrentExecLimit() { + if ($this->conf == null) { + $this->conf = $this->service->getConfiguration(); + } + if ($this->conf == null || !isset($this->conf->max_execution_time)) { + // default value + return 30; + } + return intval($this->conf->max_execution_time); + } + + public function renderExecTimeOptions() { + static $options = array(30, 60, 90); + $selected = $this->getCurrentExecLimit(); + $html = ''; + return $html; + } + + private function getCurrentMemLimit() { + if ($this->conf == null) { + $this->service->getConfiguration(); + } + if ($this->conf == null || !isset($this->conf->memory_limit)) { + // default value + return '128M'; + } + return $this->conf->memory_limit; + } + + public function renderMemLimitOptions() { + static $options = array('64M', '128M', '256M'); + $selected = $this->getCurrentMemLimit(); + $html = ''; + return $html; + } + + public function getNodes() { + if ($this->nodes !== null) { + return $this->nodes; + } + $nodes = array(); + $nodes_info = array(); + $selected = array(); + $nodesLists = $this->service->getNodesLists(); + foreach ($nodesLists as $role => $nodesList) { + if (count($nodesList) > 1) { + if ($role === 'backend') + $cluster = new Cluster($role, $this->service->getType()); + else + $cluster = new Cluster($role); + foreach ($nodesList as $node) { + $info = $this->service->getNodeInfo($node); + if ($info !== false) { + $info['service_type'] = $this->service->getType(); + $cluster->addNode($info); + } + } + $nodes_info[] = $cluster; + } else { + /* just one node for this role */ + $info = $this->service->getNodeInfo($nodesList[0]); + $id = $info['id']; + if ($info !== false && !array_key_exists($id, $selected)) { + $info['service_type'] = $this->service->getType(); + $nodes_info[] = new Instance($info); + $selected[$id] = true; + } + } + } + $this->nodes = $nodes_info; + return $this->nodes; + } + + public function getNodesCount() { + $this->getNodes(); + $count = 0; + 
foreach ($this->nodes as $node) { + $count += $node->getSize(); + } + return $count; + } + + private function renderCloud() { + static $cloud_providers = array( + 'local' => 'local deployment', + 'ec2' => 'Amazon EC2', + ); + return $cloud_providers[$this->service->getCloud()]; + } + + public function renderInstances() { + $nodes = $this->getNodes(); + if ($nodes === false) { + return 'could not retrieve nodes'; + } + $instances_txt = count($nodes) > 1 ? 'instances' : 'instance'; + $html = + '
' + .$this->getNodesCount().' running '.$instances_txt + .' · on '.$this->renderCloud() + .'
' + .'
'; + + foreach ($nodes as $node) { + $html .= $node->render(); + } + $html .= '
'; + return $html; + } + + private function getTypeImage() { + return $this->service->getType().'.png'; + } + + private function renderEditableName() { + if (!$this->service->isConfigurable()) { + return ''.$this->service->getName().''; + } + return + '' + .$this->service->getName() + .''; + } + + private function renderStateChange() { + $stateLog = $this->service->fetchStateLog(); + // consider state changes in reverse order + foreach (array_reverse($stateLog) as $stateChange) { + if (Service::stateIsStable($stateChange['state'])) { + $ts = TimeHelper::timeRelativeDescr($stateChange['time']); + $state = ($stateChange['state'] == 'RUNNING') ? + 'started' : strtolower($stateChange['state']); + return $state.' '.$ts.' ago'; + } + } + // default + $ts = TimeHelper::timeRelativeDescr( + strtotime($this->service->getDate())); + return 'created '.$ts.' ago'; + } + + private function renderSubname() { + return + '
' + .$this->renderStateChange() + .' · ' + .LinkUI('service manager', $this->service->getManager()) + ->setExternal(true) + .' · ' + .LinkUI('manager log', 'viewlog.php?sid='.$this->service->getSID()) + ->setExternal(true) + .'
'; + } + + private function renderName() { + return + '
' + .$this->renderEditableName() + .$this->renderSubname() + .'
'; + } + + public function renderTopMenu() { + return + ''; + } + + public function renderSettings() { + if ($this->service instanceof PHPService) + return $this->renderPHPSettings(); + elseif ($this->service instanceof JavaService) + return $this->renderJavaSettings(); + else + throw new Exception('Unknown service type'); + } + + private function renderJavaSettings() { + return '
No modifiable settings.
'; + } + + private function renderPHPSettings() { + $html = << + + Software version + + + + + + Maximum script execution time + + {$this->renderExecTimeOptions()} + + + + Memory limit + + {$this->renderMemLimitOptions()} + + + + + + + + + + + + +EOD; + return $html; + } + +} diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/ServicesListUI.php b/conpaas/branches/conpaas-dailybuild/frontend/www/ServicesListUI.php new file mode 100644 index 0000000000000000000000000000000000000000..4d7f8519487c73f676fc730f58c685aeef648359 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/ServicesListUI.php @@ -0,0 +1,86 @@ +. + +require_once('ServiceItem.php'); +require_once('ServiceFactory.php'); + +class ServicesListUI { + + private $services; + + public function __construct(array $services) { + $this->services = array(); + foreach ($services as $service_data) { + $this->services[] = ServiceFactory::createInstance($service_data); + } + } + + public function addService(Service $service) { + $this->services[] = $service; + return $this; + } + + public function isEmpty() { + return count($this->services) == 0; + } + + private function renderItems() { + $html = ''; + for ($i = 0; $i < count($this->services); $i++) { + $serviceUI = new ServiceItem($this->services[$i]); + if ($i == count($this->services) - 1) { + $serviceUI->setLast(); + } + $html .= $serviceUI->__toString(); + } + return $html; + } + + public function needsRefresh() { + foreach ($this->services as $service) { + if ($service->needsPolling()) { + return true; + } + } + return false; + } + + private function generateRefreshScript() { + if (!$this->needsRefresh()) { + return ''; + } + return + ''; + } + + public function render() { + return + '
'. + '
all services
'. + ''. + $this->renderItems(). + '
'. + '
'. + $this->generateRefreshScript(); + } +} diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/StatusLed.php b/conpaas/branches/conpaas-dailybuild/frontend/www/StatusLed.php new file mode 100644 index 0000000000000000000000000000000000000000..59c52a3517fdf22812f39d2a2d25eefb1d25c02f --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/StatusLed.php @@ -0,0 +1,60 @@ +. + +require_once('Service.php'); + +function StatusLed(Service $service) { + return new StatusLed($service); +} + +class StatusLed { + + private $service; + + public function __construct(Service $service) { + $this->service = $service; + } + + private function getImage() { + $state = $this->service->getState(); + if (!$this->service->isReachable()) { + if ($state == Service::STATE_INIT || + $state == Service::STATE_PREINIT) { + return 'images/ledlightblue.png'; + } + return 'images/ledorange.png'; + } + /* for reachable case */ + switch ($state) { + case Service::STATE_INIT: + return 'images/ledlightblue.png'; + case Service::STATE_RUNNING: + return 'images/ledgreen.png'; + case Service::STATE_STOPPED: + return 'images/ledgray.png'; + case Service::STATE_ERROR: + return 'images/ledred.png'; + } + } + + public function __toString() { + return ''; + } +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/TimeHelper.php b/conpaas/branches/conpaas-dailybuild/frontend/www/TimeHelper.php new file mode 100644 index 0000000000000000000000000000000000000000..724cb7b98ca082ab372fbfb19d77759bb64fa667 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/TimeHelper.php @@ -0,0 +1,49 @@ +. + +class TimeHelper { + + public static function timeRelativeDescr($utc_ts) { + $ts = strftime("%s", $utc_ts); + $delta = time() - $ts; + if ($delta < 0) { + // this is in the future + return 'a few moments'; + } + $delta = ($delta > 0) ? 
$delta : - $delta; + static $seconds = array( + 'year' => 31536000, // 365 * days + 'month' => 2592000, // 30 * days + 'week' => 604800, // 7 * days + 'day' => 86400, // 24 * 60 * 60 + 'hour' => 3600, // 60 * 60 + 'minute' => 60 + ); + static $units = array('year', 'month', 'week', 'day', 'hour', 'minute'); + foreach ($units as $unit) { + $count = (int) ($delta / $seconds[$unit]); + if ($count > 0) { + $suffix = ($count > 1) ? 's' : ''; + return $count.' '.$unit.$suffix; + } + } + return 'a few moments'; + } + +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/UserData.php b/conpaas/branches/conpaas-dailybuild/frontend/www/UserData.php new file mode 100644 index 0000000000000000000000000000000000000000..91ed18de2efa432539036a8d5f21a37eb5c5e030 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/UserData.php @@ -0,0 +1,107 @@ +. + +require_once('DB.php'); +require_once('logging.php'); + +class UserData { + public static function createUser($username, $email, $fname, $lname, $affiliation, $passwd, $credit) { + $query = sprintf("INSERT INTO users (username, email, fname, lname, affiliation, passwd, created, credit) ". 
+ "VALUES ('%s', '%s', '%s', '%s', '%s', '%s', now(), '%s')", + mysql_escape_string($username), + mysql_escape_string($email), + mysql_escape_string($fname), + mysql_escape_string($lname), + mysql_escape_string($affiliation), + mysql_escape_string(md5($passwd)), + mysql_escape_string($credit)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + $uid = mysql_insert_id(DB::getConn()); + return $uid; + } + + public static function getUserByName($username) { + $query = sprintf("SELECT * FROM users WHERE username='%s'", mysql_escape_string($username)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + $entries = DB::fetchAssocAll($res); + if (count($entries) != 1) { + return false; + } + return $entries[0]; + } + + public static function getUserById($uid) { + $query = sprintf("SELECT * FROM users WHERE uid='%s'", mysql_escape_string($uid)); + $res = mysql_query($query, DB::getConn()); + if ($res === false) { + throw new DBException(DB::getConn()); + } + $entries = DB::fetchAssocAll($res); + if (count($entries) != 1) { + return false; + } + return $entries[0]; + } + + public static function updateUserCredit($uid, $increment) { + try{ + $conn = DB::getConn(); + if(mysql_query('BEGIN', $conn) === false) { + throw new DBException($conn); + } + $query = sprintf("UPDATE users SET credit=credit+('%s') WHERE uid='%s'", + mysql_escape_string($increment), + mysql_escape_string($uid)); + $res = mysql_query($query, $conn); + if ($res === false) { + throw new DBException($conn); + } + if($increment < 0) { + $query = sprintf("SELECT credit FROM users WHERE uid='%s'", $uid); + $res = mysql_query($query, $conn); + if ($res === false) { + throw new DBException($conn); + } + $entries = DB::fetchAssocAll($res); + if (count($entries) != 1) { + throw new DBException($conn); + } + if ($entries[0]['credit'] < 0) { + mysql_query("ROLLBACK"); + return false; + } + } + 
if(mysql_query("COMMIT") === false) { + throw new DBException($conn); + } + return true; + } catch (DBException $e) { + mysql_query('ROLLBACK', $conn); + throw $e; + } + } +} + +?> \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/Version.php b/conpaas/branches/conpaas-dailybuild/frontend/www/Version.php new file mode 100644 index 0000000000000000000000000000000000000000..fb221c4e0d27b9adcf18e8fcbddf624e0efe1a70 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/Version.php @@ -0,0 +1,122 @@ +. + +require_once('TimeHelper.php'); +require_once('LinkUI.php'); +require_once('logging.php'); + +function Version($data) { + return new Version($data); +} + +class Version { + + private $name; + private $filename; + private $timestamp; + private $downloadURL = ''; + private $active = false; + private $linkAddress = true; + private $address = null; + private $last = false; + + public function __construct($data) { + $this->name = $data['codeVersionId']; + $this->timestamp = $data['time']; + $this->filename = $data['filename']; + $this->downloadURL = $data['downloadURL']; + dlog($data); + } + + public function setLast() { + $this->last = true; + return $this; + } + + public function setActive($active, $address=null) { + $this->active = $active; + $this->address = $address; + return $this; + } + + public function setLinkable($linkable) { + $this->linkAddress = $linkable; + return $this; + } + + private function renderClass() { + $class = $this->active ? 'active' : 'inactive'; + if ($this->last) { + $class .= ' last'; + } + return $class; + } + + private function renderActivateLink() { + $dot = ' · '; + if ($this->active) { + return $dot.'
active
'; + } + return + $dot + .'' + .'set active' + .'' + .''; + } + + private function renderName() { + if (!$this->active || !$this->linkAddress) { + return $this->name; + } + + return LinkUI($this->name, $this->address) + ->setExternal(true) + ->addClass('address'); + } + + private function renderFilename() { + return + ''.$this->filename.''; + } + + private function renderDownloadLink() { + return + ' · download'; + } + + public function __toString() { + return + '
  • ' + .$this->renderName() + .$this->renderFilename() + .'
    ' + .$this->renderActivateLink() + .$this->renderDownloadLink() + .'
    ' + .'
    ' + .TimeHelper::timeRelativeDescr($this->timestamp).' ago' + .'
    ' + .'
  • '; + } + +} \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/__init__.php b/conpaas/branches/conpaas-dailybuild/frontend/www/__init__.php new file mode 100644 index 0000000000000000000000000000000000000000..b54357d21041a221afa9c2d578027b63bd2fa0ed --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/__init__.php @@ -0,0 +1,32 @@ +. + +session_set_cookie_params(60 * 60 * 24 * 15); // expires in 15 days +session_start(); + +class Conf { + + // This variable must be set to the directory where + // configuration files are located. Beware: it is highly + // recommended to keep configuration files *out* of the + // Web server's document directory. + const CONF_DIR = '/etc/conpaas'; +} + +?> \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/callback/decrementUserCredit.php b/conpaas/branches/conpaas-dailybuild/frontend/www/callback/decrementUserCredit.php new file mode 100644 index 0000000000000000000000000000000000000000..87c74682977acd24d1d96bd9acb353e7ff3c7ab8 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/callback/decrementUserCredit.php @@ -0,0 +1,69 @@ +. 
+ +ignore_user_abort(true); + +require_once('../__init__.php'); +require_once('../UserData.php'); +require_once('../ServiceData.php'); +require_once('../ServiceFactory.php'); +require_once('../logging.php'); + + +/* accept POST requests only */ +if ($_SERVER['REQUEST_METHOD'] !== 'POST') { + die(); +} + +$response = 'empty'; + +if(!isset($_POST['sid']) || !isset($_POST['decrement'])) { + $response = array('error' => 'Missing arguments'); +} +else if ($_POST['decrement'] < 1) { + $response = array('error' => 'Invalid arguments'); +} +else { + try { + /* accept requests from manager nodes only */ + $service_data = ServiceData::getServiceById($_POST['sid']); + $service = ServiceFactory::createInstance($service_data); + $manager_host = parse_url($service->getManager(), PHP_URL_HOST); + /* test source of request is from a manager node */ + if (gethostbyname($manager_host) !== $_SERVER['REMOTE_ADDR']) { + $response = array('error' => 'Not allowed'); + } + else { + $ret = UserData::updateUserCredit($service->getUID(), -$_POST['decrement']); + if ($ret === false) { + $response = array('error' => 'Not enough credit'); + } + else { + $response = array('error' => null); + } + } + } catch (Exception $e) { + $response = array( + 'error' => 'Internal error' + ); + } +} + +echo json_encode($response); +?> diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/callback/terminateService.php b/conpaas/branches/conpaas-dailybuild/frontend/www/callback/terminateService.php new file mode 100644 index 0000000000000000000000000000000000000000..9a1241b8b7021d222f75c96a4be9238076342a3a --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/callback/terminateService.php @@ -0,0 +1,58 @@ +. 
+ +ignore_user_abort(true); + +require_once('../__init__.php'); +require_once('../UserData.php'); +require_once('../ServiceData.php'); +require_once('../ServiceFactory.php'); + +/* accept POST requests only */ +if ($_SERVER['REQUEST_METHOD'] !== 'POST') { + die(); +} + +if(!isset($_POST['sid'])) { + $response = array('error' => 'Missing arguments'); +} +else { + try { + /* accept requests from manager node only */ + $service_data = ServiceData::getServiceById($_POST['sid']); + $service = ServiceFactory::createInstance($service_data); + $manager_host = parse_url($service->getManager(), PHP_URL_HOST); + /* test source of request is from a manager node */ + if (gethostbyname($manager_host) !== $_SERVER['REMOTE_ADDR']) { + $response = array('error' => 'Not allowed'); + } + else { + $service->terminateService(); + $response = array('error' => null); + } + } catch (Exception $e) { + $response = array( + 'error' => 'Internal error' + ); + } +} + +echo json_encode($response); + +?> \ No newline at end of file diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/code/README.txt b/conpaas/branches/conpaas-dailybuild/frontend/www/code/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..20024ffcd9680f24826a4fee57fc5edc52aa9fbc --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/code/README.txt @@ -0,0 +1,15 @@ +Please copy into this directory: + +- All scripts found in directory web-servers/scripts + +- A file called ConPaaSWeb.tar.gz containing the web-servers + directory. 
This file must expand into files such as: + +drwxr-xr-x gpierre/gpierre 0 2011-09-05 16:58 ConPaaSWeb/ +drwxr-xr-x gpierre/gpierre 0 2011-08-02 11:02 ConPaaSWeb/test/ +drwxr-xr-x gpierre/gpierre 0 2011-08-19 16:42 ConPaaSWeb/test/unit/ +-rw-r--r-- gpierre/gpierre 10689 2011-08-19 16:35 ConPaaSWeb/test/unit/role.py +-rw-r--r-- gpierre/gpierre 6295 2011-08-04 14:49 ConPaaSWeb/test/unit/agent.py +drwxr-xr-x gpierre/gpierre 0 2011-08-18 13:28 ConPaaSWeb/test/unit/manager/ +-rw-r--r-- gpierre/gpierre 4448 2011-08-04 16:49 ConPaaSWeb/test/unit/manager/java.pyc +-rw-r--r-- gpierre/gpierre 18291 2011-08-04 16:49 ConPaaSWeb/test/unit/manager/provision.pyc diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/configure.php b/conpaas/branches/conpaas-dailybuild/frontend/www/configure.php new file mode 100644 index 0000000000000000000000000000000000000000..456c1200ab16be499dd3438320f8244009fadd8f --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/configure.php @@ -0,0 +1,564 @@ +. + +require_once('__init__.php'); +require_once('Service.php'); +require_once('logging.php'); +require_once('InputButton.php'); +require_once('ServicePage.php'); +require_once('Version.php'); +require_once('ServiceData.php'); +require_once('PageStatus.php'); +require_once('ServiceFactory.php'); +require_once('LinkUI.php'); + +$sid = $_GET['sid']; +$service_data = ServiceData::getServiceById($sid); +$service = ServiceFactory::createInstance($service_data); + +$page = new ServicePage($service); + +if ($service->getUID() !== $page->getUID()) { + $page->redirect('index.php'); +} + +$state = $page->getState(); +$backendType = $service->getType(); + +?> + + + + +ConPaaS - configure PHP Service + + + + + + + + +renderHeader(); ?> +setId('loading'); ?> + +
    + renderTopMenu(); ?> + + + +isConfigurable()): ?> + isRunning()): ?> + + +
    +
    + renderInstances(); ?> +
    +
    add or remove instances to your deployment
    +
    +
    0 proxy
    +
    0 web
    +
    0
    + + +
    +
    + +
    + No instances are running +
    + + +
    +
    +
    Code management
    +
    + isRunning()) { + echo LinkUI('Access the application', + $service->getAccessLocation()) + ->setExternal(true); + } + ?> +
    +
    +
    +
    +
    + you may update the stage by +
    + uploading archive +
    + or by +
    + checking out repository +
    +
    +
    + +
    + + + +
    +
    + + +
    +
    +
    + example: .zip, .tar of your source tree
    (expanding in the current directory)
    or .war for Java servlets +
    + + +
    + +
    +
    + + + +
    available code versions
    +
    + renderVersions(); ?> +
    + +
    + +
    +
    +
    Settings
    +
    +
    + renderSettings(); ?> +
    + + isStable()): ?> + + + +
    + You cannot configure the service in the current state - unreachable. + Please contact the system administrator. +
    + + +
    +
    + + + diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/conpaas.css b/conpaas/branches/conpaas-dailybuild/frontend/www/conpaas.css new file mode 100644 index 0000000000000000000000000000000000000000..f3cd590ceac2755ba5cd0d1de646c035f1c8543c --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/conpaas.css @@ -0,0 +1,920 @@ +@CHARSET "UTF-8"; + +body { + padding: 2px 5px; + margin: 0; + font-size: 12px; + font-weight: bold; + color: #0055A5; + font-family: Verdana, Tahoma, sans serif; + width: 800px; +} + +h1 { + font-size: 24px; + padding: 3px 3px 3px 30px; + font-weight: normal; +} + +a, a:visited { + color: #008cf7; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + +.clear { + clear: both; +} + +.blue { + color: #0055A5; +} + +#logo { + width: 140px; + height: 50px; + background: url('images/conpaas.png') top left no-repeat; + float: left; +} + +.user { + float: right; + padding: 23px 3px 3px 0; +} + +.header { + border-bottom: 1px solid #0055A5; + padding: 0 5px 5px 5px; + background-color: #eee; +} + +.pageheader .info { + float: left; + margin: 10px; +} + +.pageheader h1 { + float: left; + font-size: 22px; +} + +.pageheader .info .nameWrapper, .pageheader .info img.stype { + float: left; +} + +.info .nameWrapper { + margin: 5px; +} + +.pageheader .info .name { + padding: 0 3px 0 3px; + margin: 1px 0 1px 0; + font-size: 16px; + color: #333; + border: 1px solid white; + font-style: normal; +} + +.pageheader .info .subname { + color: #999; + font-size: 12px; + font-weight: normal; + padding-left: 3px; +} + +.subname .link { + display: inline; +} + +.editable { + border: 1px solid white; +} + +.actionsbar .editable { + border: 1px solid #ccc; + padding: 1px; +} + +.editable:hover { + background-color: #fff6d5; + border: 1px solid #ffdd55; +} + +.pageheader .menu { + padding-top: 25px; + float: right; +} + +.menu img.loading { + float: right; +} + +.menu .led { + padding: 3px; +} + +.loadingWrapper { + 
height: 30px; + position: fixed; + top: 0; + left: 320px; + border-left: 2px solid #003380; + border-right: 2px solid #003380; + border-bottom: 2px solid #003380; + background-color: white; + padding: 3px 10px 3px 10px; +} + +.loadingWrapper b { + vertical-align: top; + line-height: 30px; +} + +.status { + display: inline; + padding: 3px; +} + +a.button { + padding: 6px; + margin: 1px; + color: white; + font-size: 12px; + text-shadow: #333 1px 1px 0px; + border: 1px solid #003380; +} + +body.webkit a.button { + background: -webkit-gradient(linear,0% 0,0% 100%,from(#008cf7),to(#003380)); + border-radius: 5px; +} + +body.firefox a.button { + background: -moz-linear-gradient(270deg, #008cf7, #003380) repeat scroll 0 0 transparent; + -moz-border-radius: 5px; +} + +a.button:hover { + text-decoration: none; +} + +.services { + margin-top: 20px; +} + +.brief { + padding: 3px; + font-weight: normal; + color: #999; +} + +.services ul.slist { + list-style: none; + padding-left: 0; + margin-top: 0; + border: 1px solid #eee; +} + +.services ul.slist li.service { + padding:0; +} + +.services ul.slist li.service:hover { + background-color: #eee; + border-top: 1px solid #999; + border-right: 1px solid #ccc; + border-left: 1px solid #ccc; +} + +.services table.slist { + width: 100%; +} + +tr.service { + +} + + +.service .instances { + padding: 5px 0 0px 24px; + margin-top: 7px; + margin-left: 5px; + color: #666; + font-family: Helvetica, Verdana; + font-size: 16px; + line-height: 20px; + background: url('images/server.png') top left no-repeat; + cursor: pointer; +} + +.service:hover .instances { + visibility: visible; +} + +.service .content, .service .icon { + float: left; +} + +.service .statistic { + float: right; +} + +.service .content { + margin-left: 15px; +} + +.service .last { + border-bottom: 1px solid #ccc; +} + +.service .statistic { + margin-left: 15px; + margin-right: 5px; +} + +.service .statistic .statcontent { + text-align: center; + padding: 6px 3px 0 3px; +} + 
+.service .statistic .statcontent .text { + color: #000; + font-size: 24px; + text-shadow: white 1px 1px 0px; + font-family: Helvetica, Tahoma, sans-serif; + font-style: normal; + margin: 5px; +} + +.service .statistic .note { + color: #999; + font-size: 12px; + font-weight: normal; + padding: 3px; +} + +.service .wrapper { + padding: 10px; + border-right: 1px solid #ccc; + border-top: 1px solid #ddd; +} + +.service:hover .wrapper { + background-color: #eee; +} + +.service .content .title { + font-size: 14px; + padding: 5px 3px 2px 3px; + text-shadow: white 1px 1px 0px; +} + +.service .title a { + font-size: 16px; + font-weight: bold; +} + +.service .colortag { + width: 20px; + margin-right: 10px; + border-right: 1px solid white; +} + +.service:hover .colortag-active { + background-color: #008300; +} + +.service .colortag-active { + background-color: #00b100; +} + +.service:hover .colortag-stopped { + background-color: #999; +} + +.service .colortag-stopped { + background-color: #ccc; +} + +.service .icon { + width: 70px; + height: 50px; +} + +.service .led { + padding: 0px 10px 0 0px; +} + +.service .content .link { + margin: 10px 0 0 30px; +} + +.service .content .actions { + font-size: 12px; + color: #999; + font-weight: normal; + padding: 3px; +} + +.services .slist .service .info { + float: right; + text-align: right; +} + +.status { + font-size: 11px; +} + +.created { + color: #999; + font-size: 10px; + font-weight: normal; +} + +.active { + color: #008000; +} + +.stopped { + color: #800000; +} + +.services ul.slist li.service .title img { + padding: 1px 4px 0 4px; + margin: 0; +} + +.link { + color: #666; + font-weight: normal; +} + +.services a, .services a:visited { + color: #008cf7; + font-weight: normal; + font-size: 12px; +} + +.form { + margin: 5px; +} + +.form td { + vertical-align: top; +} + +.form #status { + font-weight: normal; + color: #666; + font-size: 10px; + font-style: normal; + padding: 3px; +} + +.form .description { + color: #666; + 
font-size: 12px; + font-weight: normal; + padding: 5px; + text-align: right; + line-height: 24px; + width: 150px; +} + +.form .info { + color: #999; + font-weight: normal; + font-size: 12px; + padding: 10px; +} + +.form .input { + padding: 8px 5px; + color: #333; +} + +.form .input select, .form .input option { + font-size: 13px; +} + +.form .input input[type="text"] { + padding: 1px; + color: #333; + font-weight: bold; + font-size: 12px; +} + +input.button { + margin: 3px; + font-weight: bold; +} + +.timestamp { + font-weight: normal; + color: #333; + padding: 4px 2px 2px 1px; + margin: 0; +} + +.form-section { + padding: 15px 5px; + color: #333; + font-weight: normal; +} + +.form-section .form-header { + background-color: #e9f1f9; + border-top: 1px solid #0055A5; + padding: 5px 5px 5px 10px; +} + +.form-header .title { + font-size: 14px; + color: #0055A5; + float: left; +} + +.form-header .access-box { + float: right; + font-size: 10px; + text-align: right; +} + +.form-header .access-box a { + font-size: 12px; +} + +.form-header .access { + padding: 3px; +} + +.access .access-type { + color: #999; + display: inline; +} + +.access .access-info { + padding: 2px 0 2px 5px; + font-weight: bold; + text-align:left; + display: inline; + color: #0055A5; +} + +.access .access-info a { + color: inherit; +} + +.tag { + font-size: 10px; + display: inline; + padding: 2px; + margin: 3px; + border: 2px solid; + border-radius: 5px; +} + +.actionsbar .tag { + font-size: 11px; + padding: 5px; +} + +.purple { + color: #800080; + border-color: #800080; +} + +.blue { + color: blue; + border-color: blue; +} + +.orange { + color: #ff6600; + border-color: #ff6600; +} + +ul.versions { + list-style: none; + border: 1px solid #999; + margin: 3px 0 5px 0; + padding: 0; +} + +ul.versions li { + padding: 5px 3px 3px 10px; + line-height: 25px; + border-bottom: 1px solid #999; + color: #333; + font-weight: bold; +} + +.versions li .loading { + margin-bottom: 5px; + margin-left: 10px; +} + 
+ul.versions li.last { + border: none; +} + +ul.versions li.inactive:hover { + background-color: #eee; +} + +ul.versions li.inactive .version-actions { + display: inline; + visibility: hidden; +} + +ul.versions li.inactive:hover .version-actions { + visibility: visible; +} + +ul.versions li.active{ + background-color: #ddd; +} + +ul.versions li.active .version-actions { + display: inline; + visibility: visible; +} + +ul.versions li a.link { + font-size: 11px; + color: #008cf7; +} + +.versions .address { + font-size: 12px; + font-weight: bold; + color: #333; + display: inline; +} + +.versions .address a { + color: #333; +} + +.versions .filename { + font-size: 10px; + font-weight: normal; + color: #999; + margin-left: 5px; +} + +ul.versions li a.activate, ul.versions li .status { +} + +ul.versions li .timestamp { + float: right; + padding: 1px; + color: #999; +} + +.newversion { + margin: 2px 10px 2px 10px; +} + +#deployform { + padding: 15px 10px; + color: #999; +} + +.deployoptions { + width: 40%; +} + +.deployoptions, .deployactions { + float: left; +} + +.deployactions { + padding: 10px; +} + +.deployoption { + margin-left: 10px; + padding: 3px; + color: #333; +} + +.deploycontent { + margin: 5px; +} + +.visibleblock { + display: block; +} + +.invisible { + display: none; +} + +.checkoption { + padding: 5px 10px 5px 5px; + float: left; +} + +.unchecked { + font-weight: normal; + color: #999; +} + +.hint { + color: #999; + font-size: 9px; + padding: 3px; +} + +#createServiceBtn { + margin-top: 5px; +} + +.transient { + text-align: center; +} + +.transient h3 { +} + + +/* two sided layout box */ +.dualbox { + display: block; +} + +.dualbox .left { + float: left; +} + +.dualbox .right { + float: right; +} + +.positive { + font-size: 10px; + font-style: normal; + color: green; +} + +#fileForm, .deployactions .additional { + float: left; +} + +.deployactions .additional { + padding-left: 10px; + padding-top: 5px; +} + +.deployactions .positive { +} + +.settings-form 
{ + margin-left: 5px; +} + +.front-section img.stype, .front-section .form { + float: left; +} + +.front-section img.stype { + margin-left: 20px; +} + +.front-section .form { + margin-left: 0px; +} + + +.form a.link { + font-weight: bold; + font-size: 12px; +} + +/* instances */ + +#instances { + border: 1px solid #ccc; +} + +#instances .instance { + border-bottom: 1px solid #999; + padding: 10px 10px 5px 30px; + background: url('images/server.png') no-repeat 7px 5px; +} + +#instances .instance .title { + font-size: 12px; + font-weight: bold; + color: #333; + font-style: normal; + line-height: 20px; + padding: 3px 20px 3px 3px; +} + +#instances .instance .timestamp { + color: #999; +} + +#instances .instance .address { + font-style: normal; + color: #aaa; +} + +#instances .cluster { + padding: 5px 0 0 0px; + margin-bottom: 1px; + border-left: 10px solid; +} + +#instances .cluster .instance { + border-left: none; +} + +#instances .cluster .cluster-header { + padding: 5px 3px 5px 5px; +} + +#instances .cluster-web { + border-color: blue; +} + +#instances .cluster-backend { + border-color: purple; +} + +#instances .cluster-proxy { + border-color: orange; +} + +#instances .cluster .tag { + margin-left: 3px; +} + +#instances .instance:hover { + background-color: #eee; +} + +/* MapReduce */ + +#jarform { + margin-top: 20px; +} + +#mrapp { +} + +.mrform i { + font-weight: bold; + color: #999; +} + +.mrform i, .mrform select, .mrform input { + margin: 3px; +} + +.mrform .action { + padding: 0 5px 0 5px; + font-weight: bold; + color: #333; +} + +.joblist { + border: 1px solid #ccc; + margin: 5px 0 20px 0; +} + +.joblist .mrjob { + padding: 10px; + border-bottom: 1px solid #999; +} + +.mrjob .title { + font-weight: bold; + font-size: 14px; + color: #333; +} + +.mrjob .subtitle { + font-size: 10px; + color: #999; +} + +.mrjob .jobicon { + margin: 0 10px 0 0; +} + +.mrjob .progress { + margin-top: 5px; + margin-left: 20px; +} + +.mrjob .left { + float: left; +} + +.mrjob 
.right { + float: right; + text-align: right; +} + +.actionsbar { + margin: 10px 3px 10px 0; +} + +.actionstitle { + padding: 10px 3px 3px 3px; + color: #999; +} + +.actionsbar .loading { + margin-left: 10px; +} + +/* login */ + +body.loginbody { + background: url('images/onclouds.jpg') top left no-repeat; +} + +.login { + width: 800px; + margin: auto; + padding-top: 270px; + padding-left: 100px; +} + +.login .form .input { + padding: 3px 3px 0 5px; +} + +.login a, .login #error { + font-weight: normal; + font-size: 10px; + margin: 3px; +} + +.login #error { + color: red; + margin-left: 1px; +} + +.login .title { + padding-left: 85px; +} + +.login input.active { + color: #333; +} + +.login .descr, .login .formwrap { + float: left; +} + +.login .form { + padding: 20px; + background-color: white; + border: 1px solid #214478; + margin: 0; +} + +.login .formwrap { + border-top: 1px solid white; + border-right: 1px solid white; +} + +.login .descr { + width: 400px; + height: 200px; +} + +.login .descr img { + margin-top: 10px; +} + +.login .form .name { + vertical-align: middle; + text-align: right; + padding-right: 5px; +} + +.login .form .hint { + font-weight: normal; +} + +.box { + padding: 10px; + border: 1px solid; + color: #666; + font-size: 12px; + font-weight: normal; +} + +.infobox { + border-color: #ccc; + background-color: #eee; +} diff --git a/conpaas/branches/conpaas-dailybuild/frontend/www/create.php b/conpaas/branches/conpaas-dailybuild/frontend/www/create.php new file mode 100644 index 0000000000000000000000000000000000000000..c09e65da16a146431823aa33e3a7b8c7b3ebd7c4 --- /dev/null +++ b/conpaas/branches/conpaas-dailybuild/frontend/www/create.php @@ -0,0 +1,176 @@ +. 
+ +require_once('__init__.php'); +require_once('logging.php'); +require_once('Page.php'); + +function loadBackendConfiguration() { + $conf = parse_ini_file(Conf::CONF_DIR.'/main.ini', true); + if ($conf === false) { + throw new Exception('Could not read configuration file main.ini'); + } + if ($conf['main']['enable_ec2']=="yes") { + $conf['main']['default_backend']="ec2"; + } elseif ($conf['main']['enable_opennebula']=="yes") { + $conf['main']['default_backend']="opennebula"; + } else { + $conf['main']['default_backend']=""; + } + return $conf['main']; +} + +$backend_conf = loadBackendConfiguration(); +$ec2conf = ""; +$opennebulaconf = ""; + +if ($backend_conf['enable_ec2'] != "yes") { + $ec2conf = " disabled=\"disabled\""; +} else if ($backend_conf['default_backend'] == "ec2") { + $ec2conf = " selected"; +} +if ($backend_conf['enable_opennebula'] != "yes") { + $opennebulaconf = " disabled=\"disabled\""; +} else if ($backend_conf['default_backend'] == "opennebula") { + $opennebulaconf = " selected"; +} + +$page = new Page(); + +?> + + + + + + ConPaaS - create new service + + + + + + + renderHeader(); ?> + +
    + + + + + + + + + + + + + + +
    type of service + + + for now only selectable services available +
    software version + +
    cloud provider +