-rw-r--r--.gitignore10
-rw-r--r--LICENSE28
-rw-r--r--README.md32
-rw-r--r--deployment/access_classes/manifests/admin.pp8
-rw-r--r--deployment/access_classes/manifests/committers.pp14
-rw-r--r--deployment/access_classes/manifests/init.pp5
-rw-r--r--deployment/access_classes/manifests/iso_makers.pp5
-rw-r--r--deployment/access_classes/manifests/web.pp5
-rw-r--r--deployment/backups/manifests/init.pp23
-rw-r--r--deployment/common/manifests/base_packages.pp30
-rw-r--r--deployment/common/manifests/default_ssh_root_key.pp91
-rw-r--r--deployment/common/manifests/export_ssh_keys.pp7
-rw-r--r--deployment/common/manifests/i18n.pp12
-rw-r--r--deployment/common/manifests/import_ssh_keys.pp3
-rw-r--r--deployment/common/manifests/init.pp33
-rw-r--r--deployment/common/manifests/sudo_sysadmin.pp7
-rw-r--r--deployment/common/manifests/urpmi_update.pp8
-rw-r--r--deployment/common/templates/i18n20
-rw-r--r--deployment/common/templates/locale.conf20
-rw-r--r--deployment/common/templates/sudoers.sysadmin1
-rw-r--r--deployment/dns/manifests/init.pp1
-rw-r--r--deployment/dns/manifests/reverse_zone.pp5
-rw-r--r--deployment/dns/manifests/server.pp7
-rw-r--r--deployment/dns/manifests/zone.pp5
-rw-r--r--deployment/dns/templates/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone12
-rw-r--r--deployment/dns/templates/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone19
-rw-r--r--deployment/dns/templates/mageia.org.zone174
-rw-r--r--deployment/forums/manifests/init.pp22
-rwxr-xr-xdeployment/lists/manifests/init.pp420
-rw-r--r--deployment/main_mirror/files/README24
-rw-r--r--deployment/main_mirror/files/mirror/mirror.readme79
-rw-r--r--deployment/main_mirror/files/mirror/paths.readme34
-rw-r--r--deployment/main_mirror/manifests/init.pp21
-rw-r--r--deployment/main_mirror/templates/rsyncd.conf31
-rw-r--r--deployment/mga_buildsystem/manifests/buildnode.pp4
-rw-r--r--deployment/mga_buildsystem/manifests/config.pp668
-rw-r--r--deployment/mga_buildsystem/manifests/init.pp2
-rw-r--r--deployment/mga_buildsystem/manifests/mainnode.pp14
-rw-r--r--deployment/mgagit/files/git_multimail.py4383
-rw-r--r--deployment/mgagit/manifests/init.pp170
-rw-r--r--deployment/mgagit/manifests/tmpl.pp9
-rwxr-xr-xdeployment/mgagit/templates/git-post-receive-hook314
-rw-r--r--deployment/mgagit/templates/git-post-update-hook12
-rw-r--r--deployment/mgagit/templates/gitolite.rc161
-rw-r--r--deployment/mgagit/templates/group_owned_repo.gl36
-rw-r--r--deployment/mgagit/templates/mgagit.conf57
-rw-r--r--deployment/mgagit/templates/repodef_repo.gl8
-rw-r--r--deployment/releasekey/manifests/init.pp27
-rw-r--r--deployment/releasekey/templates/sign_checksums11
-rw-r--r--deployment/reports/manifests/ii.pp15
-rw-r--r--deployment/reports/templates/socket.yaml2
-rw-r--r--deployment/repositories/manifests/git_mirror.pp16
-rw-r--r--deployment/repositories/manifests/subversion.pp73
-rw-r--r--deployment/repositories/manifests/svn_mirror.pp17
-rw-r--r--deployment/repositories/templates/puppet_update.sh2
-rw-r--r--deployment/repositories/templates/puppet_update.sudoers1
-rw-r--r--deployment/shadow/manifests/init.pp23
-rw-r--r--deployment/softwarekey/manifests/init.pp24
-rw-r--r--deployment/tld_redirections/manifests/init.pp26
-rw-r--r--deployment/websites/manifests/archives.pp20
-rw-r--r--deployment/websites/manifests/base.pp9
-rw-r--r--deployment/websites/manifests/doc.pp20
-rw-r--r--deployment/websites/manifests/forum_proxy.pp13
-rw-r--r--deployment/websites/manifests/git.pp10
-rw-r--r--deployment/websites/manifests/hugs.pp16
-rw-r--r--deployment/websites/manifests/init.pp1
-rw-r--r--deployment/websites/manifests/meetbot.pp14
-rw-r--r--deployment/websites/manifests/nav.pp27
-rw-r--r--deployment/websites/manifests/perl.pp54
-rw-r--r--deployment/websites/manifests/releases.pp22
-rw-r--r--deployment/websites/manifests/start.pp11
-rw-r--r--deployment/websites/manifests/static.pp16
-rw-r--r--deployment/websites/manifests/svn.pp10
-rw-r--r--deployment/websites/manifests/www.pp64
-rw-r--r--deployment/websites/templates/vhost_meetbot.conf36
-rw-r--r--deployment/websites/templates/vhost_static.conf83
-rw-r--r--deployment/websites/templates/vhost_www.conf13
-rw-r--r--deployment/websites/templates/vhost_www_rewrite.conf22
-rw-r--r--deployment/wikis/manifests/init.pp30
-rw-r--r--deployment/wikis/templates/wiki_settings46
-rw-r--r--deployment/wikis/templates/wiki_vhost.conf19
-rw-r--r--external/.gitignore1
-rw-r--r--external/concat/CHANGELOG (renamed from modules/concat/CHANGELOG)0
-rw-r--r--external/concat/README.markdown (renamed from modules/concat/README.markdown)0
-rwxr-xr-xexternal/concat/files/concatfragments.sh (renamed from modules/concat/files/concatfragments.sh)0
-rw-r--r--external/concat/files/null/.gitignore (renamed from modules/concat/files/null/.gitignore)0
-rw-r--r--external/concat/manifests/fragment.pp (renamed from modules/concat/manifests/fragment.pp)6
-rw-r--r--external/concat/manifests/init.pp (renamed from modules/concat/manifests/init.pp)36
-rw-r--r--external/concat/manifests/setup.pp (renamed from modules/concat/manifests/setup.pp)12
-rw-r--r--external/sshkeys/COPYING674
-rw-r--r--external/sshkeys/README.rst73
-rw-r--r--external/sshkeys/manifests/create_key.pp29
-rw-r--r--external/sshkeys/manifests/init.pp2
-rw-r--r--external/sshkeys/manifests/keymaster.pp13
-rw-r--r--external/sshkeys/manifests/namecheck.pp12
-rw-r--r--external/sshkeys/manifests/set_authorized_keys.pp58
-rw-r--r--external/sshkeys/manifests/set_client_key_pair.pp39
-rw-r--r--external/sshkeys/manifests/setup_key_master.pp87
-rw-r--r--external/sshkeys/manifests/var.pp4
-rw-r--r--manifests/common.pp98
-rw-r--r--manifests/defaults.pp35
-rw-r--r--manifests/extlookup.pp6
-rw-r--r--manifests/nodes.pp120
-rw-r--r--manifests/nodes/armlet1.pp7
-rw-r--r--manifests/nodes/armlet2.pp7
-rw-r--r--manifests/nodes/duvel.pp56
-rw-r--r--manifests/nodes/ec2aa1.pp7
-rw-r--r--manifests/nodes/ec2aa2.pp7
-rw-r--r--manifests/nodes/ec2aa3.pp7
-rw-r--r--manifests/nodes/ec2x1.pp7
-rw-r--r--manifests/nodes/ec2x2.pp7
-rw-r--r--manifests/nodes/ecosse.pp7
-rw-r--r--manifests/nodes/fiona.pp10
-rw-r--r--manifests/nodes/friteuse.pp7
-rw-r--r--manifests/nodes/ncaa1.pp7
-rw-r--r--manifests/nodes/neru.pp45
-rw-r--r--manifests/nodes/ociaa1.pp7
-rw-r--r--manifests/nodes/pktaa1.pp7
-rw-r--r--manifests/nodes/rabbit.pp32
-rw-r--r--manifests/nodes/sucuk.pp131
-rw-r--r--manifests/nodes_ip.pp70
-rw-r--r--manifests/site.pp7
-rw-r--r--modules/amavis/manifests/init.pp13
-rw-r--r--modules/amavis/templates/amavisd.conf782
-rw-r--r--modules/apache/manifests/base.pp37
-rw-r--r--modules/apache/manifests/config.pp6
-rw-r--r--modules/apache/manifests/cve-2011-3192.pp9
-rw-r--r--modules/apache/manifests/init.pp163
-rw-r--r--modules/apache/manifests/mod/fastcgi.pp5
-rw-r--r--modules/apache/manifests/mod/fcgid.pp11
-rw-r--r--modules/apache/manifests/mod/geoip.pp4
-rw-r--r--modules/apache/manifests/mod/perl.pp4
-rw-r--r--modules/apache/manifests/mod/php.pp10
-rw-r--r--modules/apache/manifests/mod/proxy.pp4
-rw-r--r--modules/apache/manifests/mod/public_html.pp4
-rw-r--r--modules/apache/manifests/mod/ssl.pp20
-rw-r--r--modules/apache/manifests/mod/wsgi.pp12
-rw-r--r--modules/apache/manifests/var.pp12
-rw-r--r--modules/apache/manifests/vhost/base.pp50
-rw-r--r--modules/apache/manifests/vhost/catalyst_app.pp24
-rw-r--r--modules/apache/manifests/vhost/django_app.pp22
-rw-r--r--modules/apache/manifests/vhost/other_app.pp6
-rw-r--r--modules/apache/manifests/vhost/redirect_ssl.pp6
-rw-r--r--modules/apache/manifests/vhost/reverse_proxy.pp11
-rw-r--r--modules/apache/manifests/vhost/wsgi.pp10
-rw-r--r--modules/apache/manifests/webapp_other.pp7
-rw-r--r--modules/apache/templates/00_default_vhosts.conf10
-rw-r--r--modules/apache/templates/01_default_ssl_vhost.conf169
-rw-r--r--modules/apache/templates/50_mod_deflate.conf36
-rw-r--r--modules/apache/templates/CVE-2011-3192.conf12
-rw-r--r--modules/apache/templates/customization.conf1
-rw-r--r--modules/apache/templates/django.wsgi13
-rw-r--r--modules/apache/templates/logrotate23
-rw-r--r--modules/apache/templates/mod/php.conf5
-rw-r--r--modules/apache/templates/mod/ssl_vhost.conf1
-rw-r--r--modules/apache/templates/mod/wsgi.conf12
-rw-r--r--modules/apache/templates/no_hidden_file_dir.conf4
-rw-r--r--modules/apache/templates/urlescape9
-rw-r--r--modules/apache/templates/vhost_base.conf53
-rw-r--r--modules/apache/templates/vhost_catalyst_app.conf30
-rw-r--r--modules/apache/templates/vhost_django_app.conf13
-rw-r--r--modules/apache/templates/vhost_fcgid.conf6
-rw-r--r--modules/apache/templates/vhost_fcgid_norobot.conf45
-rw-r--r--modules/apache/templates/vhost_redirect.conf2
-rw-r--r--modules/apache/templates/vhost_reverse_proxy.conf15
-rw-r--r--modules/apache/templates/vhost_simple.conf14
-rw-r--r--modules/apache/templates/vhost_ssl.conf13
-rw-r--r--modules/apache/templates/vhost_ssl_redirect.conf5
-rw-r--r--modules/apache/templates/vhost_wsgi.conf3
-rw-r--r--modules/auto_installation/manifests/download.rb21
-rw-r--r--modules/auto_installation/manifests/init.pp140
-rw-r--r--modules/auto_installation/templates/default15
-rw-r--r--modules/auto_installation/templates/menu5
-rw-r--r--modules/bcd/manifests/base.pp29
-rw-r--r--modules/bcd/manifests/init.pp5
-rw-r--r--modules/bcd/manifests/rsync.pp7
-rw-r--r--modules/bcd/manifests/web.pp9
-rw-r--r--modules/bcd/templates/rsyncd.conf12
-rw-r--r--modules/bcd/templates/sudoers.bcd10
-rw-r--r--modules/bcd/templates/vhost_bcd.conf12
-rw-r--r--modules/bind/manifests/init.pp61
-rw-r--r--modules/bind/manifests/master.pp17
-rw-r--r--modules/bind/manifests/slave.pp6
-rw-r--r--modules/bind/manifests/zone.pp13
-rw-r--r--modules/bind/manifests/zone/master.pp6
-rw-r--r--modules/bind/manifests/zone/reverse.pp6
-rw-r--r--modules/bind/templates/named_base.conf25
-rw-r--r--modules/bind/templates/named_master.conf19
-rw-r--r--modules/bind/templates/named_slave.conf25
-rw-r--r--modules/bind/templates/zones/mageia.fr.zone27
-rw-r--r--modules/bind/templates/zones/mageia.org.zone87
-rw-r--r--modules/blog/manifests/init.pp114
-rw-r--r--modules/blog/templates/.htaccess10
-rwxr-xr-xmodules/blog/templates/backup_blog-db.sh23
-rwxr-xr-xmodules/blog/templates/backup_blog-files.sh24
-rw-r--r--modules/blog/templates/blogs_vhosts.conf16
-rwxr-xr-xmodules/blog/templates/check_new-blog-post.sh43
-rwxr-xr-xmodules/bugzilla-dev/manifests/init.pp81
-rwxr-xr-xmodules/bugzilla-dev/templates/localconfig121
-rw-r--r--modules/bugzilla-dev/templates/params.json104
-rwxr-xr-xmodules/bugzilla-dev/templates/robots.txt10
-rwxr-xr-xmodules/bugzilla-dev/templates/vhost.conf2
-rwxr-xr-xmodules/bugzilla-dev/templates/webapp_bugzilla.conf73
-rwxr-xr-x[-rw-r--r--]modules/bugzilla/manifests/init.pp212
-rw-r--r--modules/bugzilla/templates/localconfig123
-rw-r--r--modules/bugzilla/templates/params58
-rw-r--r--modules/bugzilla/templates/params.json104
-rw-r--r--modules/bugzilla/templates/vhost.conf14
-rw-r--r--modules/bugzilla/templates/vhost_bugs.conf13
-rw-r--r--modules/bugzilla/templates/webapp_bugzilla.conf36
-rw-r--r--modules/buildsystem/files/Mageia.pm509
-rwxr-xr-xmodules/buildsystem/files/signbot/mga-signpackage31
-rw-r--r--modules/buildsystem/files/signbot/sign-check-package37
-rw-r--r--modules/buildsystem/files/signbot/signbot-rpmmacros3
-rw-r--r--modules/buildsystem/manifests/binrepo.pp48
-rw-r--r--modules/buildsystem/manifests/buildnode.pp12
-rw-r--r--modules/buildsystem/manifests/create_upload_dir.rb28
-rw-r--r--modules/buildsystem/manifests/distros.rb97
-rw-r--r--modules/buildsystem/manifests/gatherer.pp5
-rw-r--r--modules/buildsystem/manifests/init.pp81
-rw-r--r--modules/buildsystem/manifests/iurt.pp26
-rw-r--r--modules/buildsystem/manifests/iurt/config.pp50
-rw-r--r--modules/buildsystem/manifests/iurt/packages.pp3
-rw-r--r--modules/buildsystem/manifests/iurt/upload.pp16
-rw-r--r--modules/buildsystem/manifests/iurt/user.pp11
-rw-r--r--modules/buildsystem/manifests/mainnode.pp23
-rw-r--r--modules/buildsystem/manifests/maintdb.pp58
-rw-r--r--modules/buildsystem/manifests/media_cfg.pp11
-rw-r--r--modules/buildsystem/manifests/mgarepo.pp36
-rw-r--r--modules/buildsystem/manifests/release.pp5
-rw-r--r--modules/buildsystem/manifests/repoctl.pp11
-rw-r--r--modules/buildsystem/manifests/repository.pp11
-rw-r--r--modules/buildsystem/manifests/rpmlint.pp3
-rw-r--r--modules/buildsystem/manifests/scheduler.pp57
-rw-r--r--modules/buildsystem/manifests/signbot.pp31
-rw-r--r--modules/buildsystem/manifests/sshkeys.pp5
-rw-r--r--modules/buildsystem/manifests/sshuser.pp36
-rw-r--r--modules/buildsystem/manifests/var/binrepo.pp15
-rw-r--r--modules/buildsystem/manifests/var/distros.pp126
-rw-r--r--modules/buildsystem/manifests/var/groups.pp9
-rw-r--r--modules/buildsystem/manifests/var/iurt.pp5
-rw-r--r--modules/buildsystem/manifests/var/maintdb.pp11
-rw-r--r--modules/buildsystem/manifests/var/mgarepo.pp22
-rw-r--r--modules/buildsystem/manifests/var/repository.pp9
-rw-r--r--modules/buildsystem/manifests/var/scheduler.pp31
-rw-r--r--modules/buildsystem/manifests/var/signbot.pp15
-rw-r--r--modules/buildsystem/manifests/var/webstatus.pp25
-rw-r--r--modules/buildsystem/manifests/var/youri.pp401
-rw-r--r--modules/buildsystem/manifests/webstatus.pp44
-rw-r--r--modules/buildsystem/manifests/youri_submit.pp83
-rw-r--r--modules/buildsystem/manifests/youri_submit_conf.pp6
-rw-r--r--modules/buildsystem/templates/binrepo/sudoers.binrepo1
-rwxr-xr-xmodules/buildsystem/templates/binrepo/upload-bin32
-rw-r--r--modules/buildsystem/templates/binrepo/vhost_binrepo.conf3
-rw-r--r--modules/buildsystem/templates/binrepo/wrapper.upload-bin26
-rw-r--r--modules/buildsystem/templates/bs-webstatus.conf32
-rwxr-xr-xmodules/buildsystem/templates/cleaner.rb235
-rw-r--r--modules/buildsystem/templates/cleaner_test.rb83
-rw-r--r--modules/buildsystem/templates/iurt.cauldron.conf30
-rw-r--r--modules/buildsystem/templates/iurt.conf37
-rwxr-xr-xmodules/buildsystem/templates/maintdb/maintdb.bin98
-rw-r--r--modules/buildsystem/templates/maintdb/sudoers.maintdb4
-rw-r--r--modules/buildsystem/templates/maintdb/vhost_maintdb.conf3
-rw-r--r--modules/buildsystem/templates/maintdb/wrapper.maintdb25
-rw-r--r--modules/buildsystem/templates/media.cfg142
-rwxr-xr-xmodules/buildsystem/templates/mga-youri-submit2
-rwxr-xr-xmodules/buildsystem/templates/mga-youri-submit.wrapper36
-rw-r--r--modules/buildsystem/templates/mgarepo.conf88
-rw-r--r--modules/buildsystem/templates/repoctl.conf40
-rw-r--r--modules/buildsystem/templates/rpmlint.conf7
-rw-r--r--modules/buildsystem/templates/signbot/sudoers.signpackage2
-rwxr-xr-xmodules/buildsystem/templates/submit_package.pl18
-rw-r--r--modules/buildsystem/templates/sudoers.iurt2
-rw-r--r--modules/buildsystem/templates/sudoers.youri6
-rw-r--r--modules/buildsystem/templates/upload.conf131
-rw-r--r--modules/buildsystem/templates/vhost_repository.conf73
-rw-r--r--modules/buildsystem/templates/vhost_webstatus.conf13
-rw-r--r--modules/buildsystem/templates/youri/acl.conf1
-rw-r--r--modules/buildsystem/templates/youri/host.conf23
-rw-r--r--modules/buildsystem/templates/youri/submit.conf140
-rw-r--r--modules/catdap/manifests/init.pp67
-rw-r--r--modules/catdap/manifests/snapshot.pp21
-rw-r--r--modules/catdap/templates/catdap_local.yml111
-rw-r--r--modules/cgit/manifests/init.pp27
-rw-r--r--modules/cgit/templates/cgitrc137
-rwxr-xr-xmodules/cgit/templates/filter.commit-links.sh44
-rw-r--r--modules/cgit/templates/vhost.conf8
-rw-r--r--modules/cgit/templates/webapp.conf3
-rw-r--r--modules/cron/manifests/init.pp7
-rw-r--r--modules/dashboard/manifests/init.pp44
-rw-r--r--modules/dashboard/templates/make_report8
-rw-r--r--modules/django_application/files/custom_backend.py7
-rw-r--r--modules/django_application/files/django_add_permission_to_group.py27
-rw-r--r--modules/django_application/files/django_create_group.py10
-rw-r--r--modules/django_application/manifests/add_permission_to_group.pp11
-rw-r--r--modules/django_application/manifests/create_group.pp10
-rw-r--r--modules/django_application/manifests/init.pp18
-rw-r--r--modules/django_application/manifests/script.pp9
-rwxr-xr-xmodules/draklive/files/clean-live.sh15
-rw-r--r--modules/draklive/manifests/init.pp58
-rw-r--r--modules/draklive/templates/sudoers.draklive3
-rw-r--r--modules/epoll/manifests/create_db.pp7
-rw-r--r--modules/epoll/manifests/init.pp31
-rw-r--r--modules/epoll/manifests/var.pp35
-rw-r--r--modules/epoll/templates/epoll.yml4
-rw-r--r--modules/facter/lib/facter/dc_suffix.rb4
-rw-r--r--modules/facter/lib/facter/lib_dir.rb6
-rw-r--r--modules/facter/lib/facter/wildcard_sslcert.rb16
-rw-r--r--modules/facter/spec/spec_helper.rb34
-rw-r--r--modules/facter/spec/unit/dc_suffix.rb15
-rw-r--r--modules/facter/spec/unit/lib_dir.rb23
-rw-r--r--modules/git/files/apply_git_puppet_config.sh7
-rw-r--r--modules/git/files/create_git_repo.sh10
-rw-r--r--modules/git/files/update_git_svn.sh13
-rw-r--r--modules/git/manifests/client.pp3
-rw-r--r--modules/git/manifests/common.pp3
-rw-r--r--modules/git/manifests/init.pp1
-rw-r--r--modules/git/manifests/mirror.pp20
-rw-r--r--modules/git/manifests/server.pp37
-rw-r--r--modules/git/manifests/snapshot.pp24
-rw-r--r--modules/git/manifests/svn.pp4
-rw-r--r--modules/git/manifests/svn_repository.pp35
-rw-r--r--modules/git/templates/config.puppet0
-rw-r--r--modules/git/templates/post-receive6
-rw-r--r--modules/git/templates/pre-receive5
-rw-r--r--modules/git/templates/xinetd14
-rwxr-xr-xmodules/gitmirror/files/on-the-pull365
-rwxr-xr-xmodules/gitmirror/files/on-the-pull.init67
-rwxr-xr-xmodules/gitmirror/files/rsync-metadata.sh27
-rw-r--r--modules/gitmirror/manifests/init.pp48
-rw-r--r--modules/gitweb/manifests/init.pp32
-rw-r--r--modules/gitweb/templates/gitweb.conf123
-rw-r--r--modules/gitweb/templates/vhost.conf3
-rw-r--r--modules/gitweb/templates/webapp.conf8
-rw-r--r--modules/gitweb/templates/wrapper.sh4
-rw-r--r--modules/gnupg/manifests/client.pp17
-rw-r--r--modules/gnupg/manifests/init.pp1
-rw-r--r--modules/gnupg/manifests/keys.pp38
-rw-r--r--modules/gnupg/templates/batch8
-rw-r--r--modules/gnupg/templates/create_gnupg_keys.sh26
-rw-r--r--modules/icecream/manifests/client.pp6
-rw-r--r--modules/icecream/manifests/client_common.pp7
-rw-r--r--modules/icecream/manifests/init.pp1
-rw-r--r--modules/icecream/manifests/scheduler.pp7
-rw-r--r--modules/icecream/templates/sysconfig89
-rw-r--r--modules/ii/manifests/init.pp38
-rw-r--r--modules/ii/templates/ii_wrapper.pl15
-rw-r--r--modules/irkerd/manifests/init.pp9
-rw-r--r--modules/libvirtd/files/network_add.py61
-rw-r--r--modules/libvirtd/files/storage_add.py27
-rw-r--r--modules/libvirtd/manifests/init.pp109
-rw-r--r--modules/libvirtd/templates/50-template-libvirt-remote-access.pkla6
-rw-r--r--modules/mediawiki/files/init_wiki.php31
-rw-r--r--modules/mediawiki/files/robots.txt4
-rw-r--r--modules/mediawiki/manifests/base.pp46
-rw-r--r--modules/mediawiki/manifests/config.pp9
-rw-r--r--modules/mediawiki/manifests/init.pp1
-rw-r--r--modules/mediawiki/manifests/instance.pp100
-rw-r--r--modules/mediawiki/templates/LocalSettings.php208
-rw-r--r--modules/mediawiki/templates/wiki_vhost.conf17
-rw-r--r--modules/memcached/files/memcached.sysconfig23
-rw-r--r--modules/memcached/manifests/init.pp13
-rw-r--r--modules/mga-advisories/manifests/init.pp98
-rw-r--r--modules/mga-advisories/templates/adv-move-pkg8
-rw-r--r--modules/mga-advisories/templates/mga-advisories.conf14
-rw-r--r--modules/mga-advisories/templates/sudoers.adv-move-pkg1
-rw-r--r--modules/mga-advisories/templates/update_script16
-rwxr-xr-xmodules/mga-mirrors/files/check_mirrors_status271
-rw-r--r--modules/mga-mirrors/manifests/init.pp61
-rw-r--r--modules/mga-mirrors/templates/cron-mga_mirrors2
-rw-r--r--modules/mga-mirrors/templates/mga-mirrors.ini4
-rw-r--r--modules/mga-treasurer/manifests/init.pp91
-rw-r--r--modules/mga-treasurer/templates/mga-treasurer.conf2
-rw-r--r--modules/mga-treasurer/templates/update_script6
-rw-r--r--modules/mga-treasurer/templates/vhost_mga-treasurer.conf3
-rw-r--r--modules/mga_common/lib/puppet/parser/functions/group_members.rb14
-rw-r--r--modules/mga_common/lib/puppet/parser/functions/hash_keys.rb10
-rw-r--r--modules/mga_common/lib/puppet/parser/functions/hash_merge.rb11
-rw-r--r--modules/mga_common/lib/puppet/parser/functions/str_join.rb11
-rw-r--r--modules/mga_common/manifests/local_script.pp22
-rw-r--r--modules/mga_common/manifests/var/perl.pp3
-rw-r--r--modules/mgapeople/manifests/init.pp77
-rw-r--r--modules/mgapeople/templates/mgapeople.conf17
-rw-r--r--modules/mgasoft/manifests/init.pp36
-rw-r--r--modules/mgasoft/templates/mgasoft.conf5
-rw-r--r--modules/mirror/manifests/base.pp15
-rw-r--r--modules/mirror/manifests/init.pp41
-rw-r--r--modules/mirror/manifests/mageia.pp7
-rw-r--r--modules/mirror/manifests/main.pp14
-rw-r--r--modules/mirror/manifests/mdv2010spring.pp7
-rw-r--r--modules/mirror/manifests/mirrordir.pp23
-rw-r--r--modules/mirror/templates/mirrordir15
-rw-r--r--modules/mirror/templates/update_timestamp2
-rwxr-xr-xmodules/mirror_cleaner/files/orphans_cleaner.pl76
-rw-r--r--modules/mirror_cleaner/manifests/base.pp6
-rw-r--r--modules/mirror_cleaner/manifests/init.pp1
-rw-r--r--modules/mirror_cleaner/manifests/orphans.pp27
-rw-r--r--modules/mirrorbrain/manifests/init.pp154
-rw-r--r--modules/mirrorbrain/templates/geoip.conf5
-rw-r--r--modules/mirrorbrain/templates/mirrorbrain.conf14
-rw-r--r--modules/mirrorbrain/templates/mod_mirrorbrain.conf3
-rw-r--r--modules/mirrorbrain/templates/webapp.conf16
-rw-r--r--modules/mysql/manifests/init.pp26
-rw-r--r--modules/ntp/manifests/init.pp27
-rw-r--r--modules/ntp/templates/ntp.conf6
-rw-r--r--modules/opendkim/Gemfile19
-rw-r--r--modules/opendkim/LICENSE202
-rw-r--r--modules/opendkim/Modulefile8
-rw-r--r--modules/opendkim/Puppetfile7
-rw-r--r--modules/opendkim/README.md98
-rw-r--r--modules/opendkim/Rakefile12
-rw-r--r--modules/opendkim/manifests/domain.pp46
-rw-r--r--modules/opendkim/manifests/init.pp105
-rw-r--r--modules/opendkim/manifests/trusted.pp13
-rw-r--r--modules/opendkim/metadata.json60
-rw-r--r--modules/opendkim/spec/classes/init_spec.rb7
-rw-r--r--modules/opendkim/spec/classes/opendkim_spec.rb13
-rw-r--r--modules/opendkim/spec/spec_helper.rb1
-rw-r--r--modules/opendkim/templates/opendkim.conf52
-rw-r--r--modules/opendkim/tests/init.pp15
-rw-r--r--modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb13
-rw-r--r--modules/openldap/manifests/config.pp7
-rw-r--r--modules/openldap/manifests/exported_slave.pp3
-rw-r--r--modules/openldap/manifests/init.pp81
-rw-r--r--modules/openldap/manifests/master.pp50
-rw-r--r--modules/openldap/manifests/slave.pp23
-rw-r--r--modules/openldap/manifests/slave_instance.pp8
-rw-r--r--modules/openldap/manifests/var.pp3
-rw-r--r--modules/openldap/templates/init_ldap.sh40
-rw-r--r--modules/openldap/templates/mandriva-dit-access.conf137
-rw-r--r--modules/openldap/templates/slapd-slave.sysconfig38
-rw-r--r--modules/openldap/templates/slapd.conf56
-rw-r--r--modules/openldap/templates/slapd.syncrepl.conf11
-rw-r--r--modules/openldap/templates/slapd.sysconfig37
-rw-r--r--modules/openldap/templates/slapd.test.conf9
-rw-r--r--modules/openssh/manifests/init.pp26
-rw-r--r--modules/openssh/manifests/server.pp17
-rw-r--r--modules/openssh/manifests/ssh_keys_from_ldap.pp20
-rwxr-xr-xmodules/openssh/templates/ldap-sshkey2file.py194
-rw-r--r--modules/openssh/templates/sshd_config22
-rw-r--r--modules/openssl/manifests/init.pp44
-rw-r--r--modules/pam/manifests/base.pp32
-rw-r--r--modules/pam/manifests/init.pp43
-rw-r--r--modules/pam/manifests/multiple_ldap_access.pp15
-rw-r--r--modules/pam/templates/ldap.conf13
-rw-r--r--modules/pam/templates/nsswitch.conf3
-rw-r--r--modules/pam/templates/openldap.ldap.conf25
-rw-r--r--modules/pam/templates/system-auth20
-rw-r--r--modules/phpbb/files/phpbb_apply_config.pl28
-rw-r--r--modules/phpbb/files/robots.txt7
-rw-r--r--modules/phpbb/manifests/base.pp57
-rw-r--r--modules/phpbb/manifests/config.pp12
-rw-r--r--modules/phpbb/manifests/databases.pp3
-rw-r--r--modules/phpbb/manifests/init.pp1
-rw-r--r--modules/phpbb/manifests/instance.pp80
-rw-r--r--modules/phpbb/manifests/locale_db.pp12
-rw-r--r--modules/phpbb/manifests/redirection_instance.pp7
-rw-r--r--modules/phpbb/templates/config.php17
-rw-r--r--modules/phpbb/templates/forums_redirect.conf2
-rw-r--r--modules/phpbb/templates/forums_vhost.conf62
-rw-r--r--modules/planet/manifests/init.pp57
-rwxr-xr-xmodules/planet/templates/backup_planet-files.sh23
-rwxr-xr-xmodules/planet/templates/deploy_new-planet.sh41
-rw-r--r--modules/planet/templates/index.php23
-rw-r--r--modules/planet/templates/planet_vhosts.conf11
-rw-r--r--modules/postfix/manifests/init.pp67
-rw-r--r--modules/postfix/manifests/server.pp13
-rw-r--r--modules/postfix/manifests/server/primary.pp43
-rw-r--r--modules/postfix/manifests/server/secondary.pp1
-rw-r--r--modules/postfix/manifests/simple_relay.pp9
-rw-r--r--modules/postfix/templates/group_aliases.conf15
-rw-r--r--modules/postfix/templates/ldap_aliases.conf20
-rw-r--r--modules/postfix/templates/main.cf118
-rw-r--r--modules/postfix/templates/primary_master.cf3
-rw-r--r--modules/postfix/templates/simple_relay_main.cf23
-rw-r--r--modules/postfix/templates/sympa_aliases8
-rw-r--r--modules/postfix/templates/transport_regexp6
-rw-r--r--modules/postfix/templates/virtual_aliases33
-rw-r--r--modules/postgresql/manifests/config.pp10
-rw-r--r--modules/postgresql/manifests/database.pp20
-rw-r--r--modules/postgresql/manifests/database_callback.pp9
-rw-r--r--modules/postgresql/manifests/db_and_user.pp15
-rw-r--r--modules/postgresql/manifests/hba_entry.pp40
-rw-r--r--modules/postgresql/manifests/init.pp61
-rw-r--r--modules/postgresql/manifests/pg_hba.pp13
-rw-r--r--modules/postgresql/manifests/remote_database.pp15
-rw-r--r--modules/postgresql/manifests/remote_db_and_user.pp18
-rw-r--r--modules/postgresql/manifests/remote_user.pp10
-rw-r--r--modules/postgresql/manifests/server.pp53
-rw-r--r--modules/postgresql/manifests/tagged.pp8
-rw-r--r--modules/postgresql/manifests/user.pp13
-rw-r--r--modules/postgresql/manifests/var.pp7
-rw-r--r--modules/postgresql/templates/pg_hba.conf70
-rw-r--r--modules/postgresql/templates/postgresql.conf12
-rw-r--r--modules/postgrey/manifests/init.pp36
-rw-r--r--modules/postgrey/templates/postgrey.sysconfig10
-rw-r--r--modules/postgrey/templates/whitelist_clients.local7
-rw-r--r--modules/puppet/manifests/client.pp15
-rw-r--r--modules/puppet/manifests/hiera.pp14
-rw-r--r--modules/puppet/manifests/init.pp53
-rw-r--r--modules/puppet/manifests/master.pp54
-rw-r--r--modules/puppet/manifests/queue.pp13
-rw-r--r--modules/puppet/manifests/stored_config.pp26
-rw-r--r--modules/puppet/manifests/thin.pp35
-rw-r--r--modules/puppet/templates/apache_proxy_vhost.conf42
-rw-r--r--modules/puppet/templates/config.ru16
-rw-r--r--modules/puppet/templates/db_config.erb10
-rw-r--r--modules/puppet/templates/hiera.yaml9
-rw-r--r--modules/puppet/templates/puppet.agent.conf27
-rw-r--r--modules/puppet/templates/puppet.conf29
-rw-r--r--modules/puppet/templates/puppet.master.conf14
-rw-r--r--modules/puppet/templates/tagmail.conf2
-rw-r--r--modules/puppet/templates/thin.yml18
-rw-r--r--modules/report-socket/lib/puppet/reports/socket.rb33
-rw-r--r--modules/restrictshell/manifests/allow.pp7
-rw-r--r--modules/restrictshell/manifests/allow_git.pp3
-rw-r--r--modules/restrictshell/manifests/allow_maintdb.pp3
-rw-r--r--modules/restrictshell/manifests/allow_pkgsubmit.pp3
-rw-r--r--modules/restrictshell/manifests/allow_rsync.pp3
-rw-r--r--modules/restrictshell/manifests/allow_scp.pp3
-rw-r--r--modules/restrictshell/manifests/allow_sftp.pp3
-rw-r--r--modules/restrictshell/manifests/allow_svn.pp3
-rw-r--r--modules/restrictshell/manifests/allow_upload_bin.pp3
-rw-r--r--modules/restrictshell/manifests/init.pp56
-rw-r--r--modules/restrictshell/manifests/shell.pp14
-rwxr-xr-xmodules/restrictshell/templates/ldap-sshkey2file.py92
-rwxr-xr-xmodules/restrictshell/templates/membersh-conf.pl14
-rw-r--r--modules/restrictshell/templates/sv_membersh.pl39
-rw-r--r--modules/rsnapshot/manifests/init.pp74
-rw-r--r--modules/rsnapshot/templates/cron_file5
-rw-r--r--modules/rsnapshot/templates/rsnapshot.conf209
-rw-r--r--modules/rsyncd/manifests/init.pp34
-rw-r--r--modules/rsyncd/templates/rsyncd.conf12
-rw-r--r--modules/rsyncd/templates/xinetd5
-rw-r--r--modules/serial_console/manifests/init.pp1
-rw-r--r--modules/serial_console/manifests/serial_console.pp8
-rw-r--r--modules/shorewall/manifests/init.pp128
-rw-r--r--modules/spamassassin/manifests/init.pp18
-rw-r--r--modules/spamassassin/templates/local.cf95
-rw-r--r--modules/spec-tree-reports/manifests/init.pp50
-rw-r--r--modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report10
-rw-r--r--modules/ssh/manifests/init.pp336
-rw-r--r--modules/ssmtp/manifests/init.pp7
-rw-r--r--modules/ssmtp/templates/ssmtp.conf9
-rw-r--r--modules/stompserver/manifests/init.pp7
-rw-r--r--modules/stored_config/lib/puppet/parser/functions/get_fact.rb19
-rw-r--r--modules/stored_config/lib/puppet/parser/functions/get_param_values.rb25
-rw-r--r--modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb17
-rw-r--r--modules/subversion/manifests/client.pp13
-rw-r--r--modules/subversion/manifests/hook.pp9
-rw-r--r--modules/subversion/manifests/hook/post_commit.pp6
-rw-r--r--modules/subversion/manifests/hook/pre_commit.pp6
-rw-r--r--modules/subversion/manifests/init.pp247
-rw-r--r--modules/subversion/manifests/mirror.pp6
-rw-r--r--modules/subversion/manifests/mirror_repository.pp15
-rw-r--r--modules/subversion/manifests/pre_commit_link.pp8
-rw-r--r--modules/subversion/manifests/repository.pp132
-rw-r--r--modules/subversion/manifests/snapshot.pp21
-rw-r--r--modules/subversion/manifests/tools.pp3
-rw-r--r--modules/subversion/templates/converted_to_git16
-rw-r--r--modules/subversion/templates/create_svn_mirror.sh13
-rw-r--r--modules/subversion/templates/hook_commit.sh18
-rw-r--r--modules/subversion/templates/hook_irker4
-rw-r--r--modules/subversion/templates/hook_sendmail.pl21
-rw-r--r--modules/subversion/templates/irker.conf7
-rw-r--r--modules/subversion/templates/no_binary14
-rw-r--r--modules/subversion/templates/pre-revprop-change15
-rw-r--r--modules/subversion/templates/restricted_to_user12
-rw-r--r--modules/subversion/templates/single_word_commit12
-rw-r--r--modules/subversion/templates/syntax_check.sh3
-rw-r--r--modules/subversion/templates/xinetd14
-rw-r--r--modules/sudo/manifests/init.pp19
-rw-r--r--modules/sudo/manifests/sudoers_config.pp6
-rw-r--r--modules/sudo/templates/sudoers13
-rw-r--r--modules/sympa/files/scenari/forbidden2
-rw-r--r--modules/sympa/files/scenari/open_web_only_notify5
-rw-r--r--modules/sympa/files/topics.conf32
-rw-r--r--modules/sympa/manifests/datasource/ldap_group.pp5
-rw-r--r--modules/sympa/manifests/init.pp41
-rw-r--r--modules/sympa/manifests/list.pp57
-rw-r--r--modules/sympa/manifests/list/announce.pp21
-rw-r--r--modules/sympa/manifests/list/private.pp16
-rw-r--r--modules/sympa/manifests/list/public.pp16
-rw-r--r--modules/sympa/manifests/list/public_restricted.pp17
-rw-r--r--modules/sympa/manifests/scenario/sender_restricted.pp9
-rw-r--r--modules/sympa/manifests/search_filter/ldap.pp5
-rw-r--r--modules/sympa/manifests/server.pp103
-rw-r--r--modules/sympa/manifests/variable.pp3
-rw-r--r--modules/sympa/templates/auth.conf14
-rw-r--r--modules/sympa/templates/config103
-rw-r--r--modules/sympa/templates/data_sources/ldap_group.incl17
-rw-r--r--modules/sympa/templates/list.xml16
-rw-r--r--modules/sympa/templates/scenari/sender.restricted17
-rw-r--r--modules/sympa/templates/search_filters/group.ldap9
-rw-r--r--modules/sympa/templates/sympa.conf830
-rw-r--r--modules/sympa/templates/vhost_ml.conf30
-rw-r--r--modules/sympa/templates/webapp_sympa.conf21
-rw-r--r--modules/testvm/manifests/init.pp51
-rw-r--r--modules/timezone/manifests/init.pp9
-rw-r--r--modules/timezone/manifests/timezone.pp6
-rw-r--r--modules/transifex/manifests/init.pp113
-rw-r--r--modules/transifex/templates/20-engines.conf6
-rw-r--r--modules/transifex/templates/30-site.conf2
-rw-r--r--modules/transifex/templates/40-apps.conf58
-rw-r--r--modules/transifex/templates/45-ldap.conf48
-rw-r--r--modules/transifex/templates/50-project.conf85
-rw-r--r--modules/viewvc/files/robots.txt29
-rw-r--r--modules/viewvc/files/setcookieredirect.html28
-rw-r--r--modules/viewvc/manifests/init.pp74
-rw-r--r--modules/viewvc/manifests/var.pp9
-rwxr-xr-xmodules/viewvc/templates/kill_viewvc.sh12
-rw-r--r--modules/viewvc/templates/viewvc.conf1002
-rw-r--r--modules/viewvc/templates/webapp.conf2
-rw-r--r--modules/xinetd/manifests/init.pp7
-rw-r--r--modules/xinetd/manifests/port_forward.pp8
-rw-r--r--modules/xinetd/manifests/service.pp9
-rw-r--r--modules/xinetd/templates/port_forward15
-rw-r--r--modules/xymon/manifests/client.pp19
-rw-r--r--modules/xymon/manifests/init.pp2
-rw-r--r--modules/xymon/manifests/server.pp45
-rw-r--r--modules/xymon/templates/bb-hosts52
-rw-r--r--modules/xymon/templates/client-local.cfg131
-rw-r--r--modules/xymon/templates/hobbit-alerts.cfg128
-rw-r--r--modules/xymon/templates/hobbit-clients.cfg380
-rw-r--r--modules/xymon/templates/hobbitserver.cfg230
-rw-r--r--modules/xymon/templates/xymon-client21
-rw-r--r--modules/youri-check/manifests/init.pp133
-rw-r--r--modules/youri-check/templates/9.conf241
-rw-r--r--modules/youri-check/templates/cauldron.conf504
-rw-r--r--modules/youri-check/templates/vhost_check.conf2
630 files changed, 27658 insertions, 2741 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..77371c93
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+ssl
+puppet.conf
+extdata
+tagmail.conf
+*~
+.*.swp
+auth.conf
+socket.yaml
+autosign.conf
+hiera.yaml
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..8a8f7d73
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,28 @@
+The content of this repository is licensed under a standard BSD
+license, unless noted otherwise (i.e., the external directory)
+
+----------------------------------------------------------------
+Copyright (c) 2010-2017 Mageia.Org
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..78279f7f
--- /dev/null
+++ b/README.md
@@ -0,0 +1,32 @@
+Puppet layout
+=============
+
+deployment
+----------
+
+Contains code specific to our deployment (static website, mailing list
+definitions); most of it is not meant to be reusable
+
+external
+--------
+
+Modules that were not written by us and that should be kept in sync with
+upstream (by hand, git submodules or whatever else is appropriate). We should
+make sure that proper credit is given, as well as the upstream location
+
+modules
+-------
+
+Our own modules
+
+manifests
+---------
+
+Main puppet manifests
+
+extdata
+-------
+
+Puppet extlookup data files; should not be in git (mainly used for passwords)
+
+
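A minimal sketch of how a manifest reads such a password through Puppet's
extlookup() function; the class name, key name, default value and CSV row
below are hypothetical examples, not taken from this repository:

    # hypothetical class: reading a secret from the extdata CSV files
    class example_secrets {
      # extlookup(key, default) falls back to 'changeme' if the key is absent
      $db_password = extlookup('example_db_password', 'changeme')
    }
    # the CSV row itself lives outside git, e.g. in extdata/common.csv:
    #   example_db_password,s3cr3t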
diff --git a/deployment/access_classes/manifests/admin.pp b/deployment/access_classes/manifests/admin.pp
new file mode 100644
index 00000000..186c9c87
--- /dev/null
+++ b/deployment/access_classes/manifests/admin.pp
@@ -0,0 +1,8 @@
+# for servers where only admins can connect (admins are allowed by default)
+class access_classes::admin {
+ class { 'pam::multiple_ldap_access':
+ access_classes => []
+ }
+}
+
+
diff --git a/deployment/access_classes/manifests/committers.pp b/deployment/access_classes/manifests/committers.pp
new file mode 100644
index 00000000..37c0e266
--- /dev/null
+++ b/deployment/access_classes/manifests/committers.pp
@@ -0,0 +1,14 @@
+# for servers where people can connect over ssh (git, svn)
+class access_classes::committers {
+ # this is required, as we force the shell to be the restricted one:
+ # openssh will detect if the shell file does not exist and will then
+ # refuse to log the user in and erase the password (see pam_auth.c in
+ # the openssh code, look for badpw),
+ # so the file must exist
+ # permission to use svn, git, etc. must be added separately
+
+ class { 'pam::multiple_ldap_access':
+ access_classes => ['mga-shell_access'],
+ restricted_shell => true,
+ }
+}
diff --git a/deployment/access_classes/manifests/init.pp b/deployment/access_classes/manifests/init.pp
new file mode 100644
index 00000000..a414f3e0
--- /dev/null
+++ b/deployment/access_classes/manifests/init.pp
@@ -0,0 +1,5 @@
+class access_classes {
+ # beware, these classes are mutually exclusive;
+ # if you need access for multiple groups, you need to define your own
+ # access class
+}
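A minimal sketch of what such a dedicated access class could look like,
combining two of the LDAP groups used elsewhere in this directory; the class
name and the particular group combination are hypothetical:

    # hypothetical: grants access to both iso makers and web people
    class access_classes::example_combined {
      class { 'pam::multiple_ldap_access':
        access_classes => ['mga-iso_makers', 'mga-web'],
      }
    }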
diff --git a/deployment/access_classes/manifests/iso_makers.pp b/deployment/access_classes/manifests/iso_makers.pp
new file mode 100644
index 00000000..c645205e
--- /dev/null
+++ b/deployment/access_classes/manifests/iso_makers.pp
@@ -0,0 +1,5 @@
+class access_classes::iso_makers {
+ class { 'pam::multiple_ldap_access':
+ access_classes => ['mga-iso_makers']
+ }
+}
diff --git a/deployment/access_classes/manifests/web.pp b/deployment/access_classes/manifests/web.pp
new file mode 100644
index 00000000..fa2c7df5
--- /dev/null
+++ b/deployment/access_classes/manifests/web.pp
@@ -0,0 +1,5 @@
+class access_classes::web {
+ class { 'pam::multiple_ldap_access':
+ access_classes => ['mga-web']
+ }
+}
diff --git a/deployment/backups/manifests/init.pp b/deployment/backups/manifests/init.pp
new file mode 100644
index 00000000..ba2d16d5
--- /dev/null
+++ b/deployment/backups/manifests/init.pp
@@ -0,0 +1,23 @@
+class backups {
+ class server {
+
+ $backups_dir = '/data/backups'
+ $confdir = "${backups_dir}/conf"
+
+ class { 'rsnapshot::base':
+ confdir => $confdir,
+ }
+
+ file { $backups_dir:
+ ensure => directory,
+ owner => root,
+ group => root,
+ mode => '0700',
+ }
+
+ rsnapshot::backup { 'neru':
+ snapshot_root => "${backups_dir}/neru",
+ backup => [ "root@neru.${::domain}:/home/irc_bots/meetings meetbot" ],
+ }
+ }
+}
diff --git a/deployment/common/manifests/base_packages.pp b/deployment/common/manifests/base_packages.pp
new file mode 100644
index 00000000..091e7c3e
--- /dev/null
+++ b/deployment/common/manifests/base_packages.pp
@@ -0,0 +1,30 @@
+class common::base_packages {
+ # packages installed everywhere
+ # asked by misc: screen, vim-enhanced, htop, lsof, tcpdump, less,
+ # lvm2, lshw, iotop
+ # asked by nanar: rsync
+ # asked by dams: watchdog, wget
+ $package_list = ['screen',
+ 'vim-enhanced',
+ 'htop',
+ 'lsof',
+ 'tcpdump',
+ 'rsync',
+ 'less',
+ 'lshw',
+ 'lvm2',
+ 'iotop',
+ 'wget']
+
+ if $::architecture == 'x86_64' {
+ package { ['mcelog']: }
+ }
+
+ package { $package_list: }
+
+ # removed as it messes with our password policy
+ # and is not really used
+ package { 'msec':
+ ensure => 'absent',
+ }
+}
diff --git a/deployment/common/manifests/default_ssh_root_key.pp b/deployment/common/manifests/default_ssh_root_key.pp
new file mode 100644
index 00000000..ab17466d
--- /dev/null
+++ b/deployment/common/manifests/default_ssh_root_key.pp
@@ -0,0 +1,91 @@
+class common::default_ssh_root_key {
+ Ssh_authorized_key {
+ user => 'root'
+ }
+
+ ssh_authorized_key { 'ssh_key_misc':
+ # initially removed on 2012-10-17
+ ensure => 'absent',
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAABIwAAAgEA4fpjTvcL09Yzv7iV40TPjiXGHOOS5MldSh5ezSk7AMLVjAAloiidl8O3xwlxwUnjUx5zv1+RlbV76sdiSD32lBht72OZPg0UqQIB8nHeVJBdJ8YpnQ3LynNPPYJ65dvdr0uE2KRlN/1emi2N+O+f2apwc1YiL8nySEK/zLvCKO5xj16bIVuGFilDdp75X/t3C/PDsZU+CUyWL5Ly3T2+ljGc+nEAK9P0PNnvl9bRK9dqu457xjca8nXwWVI1fd6Jnt1jISFdQXy6/+9326Z6aAxvWKCrCvmdg+tAUN3fEj0WXZEPZQ1Ot0tBxKYl+xhV1Jv/ILLbInT0JZkSEKNBnJn4G7O4v+syoMqA7myHre73oGn/ocRWGJskIM33aXrJkZkJ4LkF1GLJPFI4y7bzj024sPAVvBwDrV7inwsOy0DSQ5tCbfX25TTXbK+WMXzz0pBbSi6mPgjtzlSYsLZrTa7ARYhggDG2miuOAFrup8vP7/aH2yZ+hZuF70FsMh4lf/eXDGwfypyfYrjfVSkFfY0ZU294ouTBn3HtHmgFu82vOMvLNtI9UyERluCBpLBXOT8xgM97aWFeUJEKrVxkGIiNwVNylMEYp8r16njv810NdTLA3jZu9CLueVvME6GLsGve5idtGmaYuGYNRnSRx3PQuJZl1Nj7uQHsgAaWdiM=',
+ }
+
+ ssh_authorized_key { 'ssh_key_dams':
+ ensure => 'absent',
+ type => 'ssh-dss',
+ key => 'AAAAB3NzaC1kc3MAAACBAP7Dz4U90iWjb8SXVpMsC/snU0Albjsi5rSVFdK5IqG0jcQ3K5F/X8ufUN+yOHxpUKeE6FAPlvDxIQD8hHv53yAoObM4J4h2SVd7xXpIDTXhdQ9kMYbgIQzyI/2jptF77dxlwiH9TirmmpUSb680z55IkVutwSVJUOZCOWFXfa35AAAAFQDqYF7tWvnzM/zeSFsFZ9hqKCj47QAAAIBdmaPCyf4iU9xGUSWi1p7Y6OlUcfu2KgpETy8WdOmZ4lB3MXdGIoK5/LLeLeeGomAVwJMw3twOOzj4e1Hz16WM+fWsMVFnZftLFo9L2LvSQElIEznjIxqfmIsc2Id2c0gI+kEnigOWbBJ7h0O09uT7/eNBysqgseCMErzedy5ZnAAAAIAGrjfWjVtJQa888Sl8KjKM9MXXvgnyCDCBP9i4pncsFOWEWGMWPY0Z9CD0OZYDdvWLnFkrnoMaIvWQU7pb4/u/Tz9Dsm65eQzUaLSGFzROAX6OB47L7spMS4xd6SF+ASawy/aYiHf241zumJLvPkUpXceBv2s7QOp2g6S6qCtypA==',
+ }
+
+ ssh_authorized_key { 'ssh_key_blino':
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDOyX/M3w0UdN5xwDLpTKj2e7pgXNZIPvWicNkp8BKqEu/ZALJ17QFZro1qrg/fLTsfs15YRMuwQH7MJ9uQsAqyE/aCYt18E/MLdtJSniqdQczSjjCTtB7+KtMh8pXFE5m9KEt0vhutdoB+VoGfbhVaBmjjnDPheM5Qive62askFT0pGlyMdY4PP9q8u10Tiqbb6w0yD7sbtF9GN1HpZBb97YaYyOGu9RpkA+Hb+Ma/faWkbOoP8OIJrGUjhVbSglzKBsEIo/i7+uQ86eMWJHB/o4tN7bU/6QQiGF4pN4E5jKPQHUZsQWI5SAKfkgOEGppYxiMF6pmCdI4Lx9VhttXN',
+ }
+
+ ssh_authorized_key { 'ssh_key_nanar':
+ ensure => 'absent',
+ type => 'ssh-dss',
+ key => 'AAAAB3NzaC1kc3MAAACBAMLWdzwlo5b9yr1IR5XbbYpESJQpTiZH4gTzVaUIdtbU6S2R41Enn/xZSLgbWcCX79WEcQlfKDS3BcrjWybpwCQD+i1yIA4wmYaQ3KwYBaIsTe5UtPF41hs8Jb8MhTPe9z9hNi5E1R6QQ2wPu3vDAi4zTZ4415ctr6xtW+IDYNOLAAAAFQC9ku78wdBEZKurZj5hJmhU0GSOjwAAAIAeGorkIHQ0Q8iAzKmFQA5PcuuD6X7vaflerTM3srnJOdfMa/Ac7oLV+n5oWj0BhuV09w8dB678rRxl/yVLOgHR9absSicKDkYMZlLU7K1oNFwM4taCdZZ1iyEpJVzzUOVCo8LqK6OZJhbFI0zbarq4YM/1Sr+MIiGv5FK7SCpheAAAAIEAwP95amGY7BgPzyDDFeOkeBPJQA/l7w0dEfG8A+2xui679mGJibhlXiUWqE0NqeDkD17Oc+eOV/ou5DA62tMDSus119JjqYhDEOs0l5dvA6aTzObZDhiUDQbNoS9AIPxgsqdc2vBRxonHUm/7maV8jvWVSy1429CNhnyWKuTe2qU=',
+ }
+
+ ssh_authorized_key { 'ssh_key_dmorgan':
+ ensure => 'absent',
+ type => 'ssh-dss',
+ key => 'AAAAB3NzaC1kc3MAAACBAOsCjs1EionxMBkyCOXqhDlGUvT/ZORSjqrEhZrro2oPdnMvj3A7IHf1R8+CVVrJlnOHFEwfdC3SB5LYhmUi/XaBq1eqUiVFQLFURrYlrWFh1xSqGUFvvUfMFXOZCn4f9eJYDVaRtWBL7IZCijwZS6bbE0FLW0f6pPzhHtMkSRW/AAAAFQCyg7km5gCZ6W4iRKqr87Wy+LajMwAAAIBZ3+oM/hQ9MS2QkMa8wZk9taEO9PJQHXO3IHyo3wMUj7DYnwgyHQIIeTgPwrE+z0TkM3K3pQlf8xQmsQo7T2kQHCLFZnueEoNB+y+LySLtLDoptYlkqJ9Db0kJti+W8EFc8I+s87HuVdkXpqid222zmRfzYufjbosb8abtGUODXAAAAIBWlhkUEZsbQXkimAnfelHb7EYFnwUgHPSzrzB4xhybma9ofOfM3alZubx9acv94OrAnlvSTfgETKyT0Q+JYvtxZr9srcueSogFq8D8tQoCFJIqpEvjTxjSlg1Fws0zHBH7uO7Kp8zhnuTalhQC1XorFPJD3z40fe62fO6a02EUCQ==',
+ }
+
+ ssh_authorized_key { 'ssh_key_coling':
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAABIwAAAIEAr04pPIWNWxihA2UxlN+I6jubWofbRMlIhvqsADJjEWSr5YBDpEpWEsdtCjBrzbrrYfpGWwpeSL1mbKhmO8+pxygyzWBVcNHEcyp8DzfwT0b2tGiCox+owkyjtyOoogTu8tLvPSvMOhDgfP4WCcMuBZwRVhMR1NKJyk73T9W8qtM=',
+ }
+
+ ssh_authorized_key { 'ssh_key_boklm':
+ # requested by boklm on 2014-03-23
+ ensure => 'absent',
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAADAQABAAACAQD4dKlLuPipueSWeX70QbrQ88tSVcfW4efoHS2vYmqbe5VE73yOCLFemDb3yk8PNxsCV5KIIesdKq6uRYiUKfCKJsF+UnFRdTniETsI3gmr895mvJMgdxuYCCRqdhjyKfWSds11cptBJpw2eXpsY6O5GPqVYG23/DWH4sNCFMawek9dWZy21qcGSjyXayek0nqXBLRv1SVNdipMeLVa99haOHViIV+gmqe3FKqT1UsNBSUQ9BdwtN9rvM2Qo1WYgptFffWRmYicSSbJSf8ru0sRcsD2hknlA0Eu5CO7dKD0Nc2cXsuwkiQXW+QKMduB5SGznyRpRBMIPKr1mnwLKEWXlMppgfzoI06xfqmasJZfvtR/dNQ5ugl+8J+AlPo/tegNNrHzzmplx7118kMgqBnYg9zjkju1iv9RblAcWitKjEN+zL795nmkbIPD+bo1Ql/YkMe9bGTHB3isHAAtzvjWHatyKzyq3D/k8+tJxSy91a9JIX5sHvnBev4VrzV+JE7QAT//9ELaG93bIljn8pDKKxjiNexjwiKpDRSB2AAWAMf7Qm3FBXrwfoIsREfGrPAjKsU1YUKC27xBDKSLE2ThqhkSYRKeNoECB5Ab7MhbNx8/sXI/y6YboDnFlFkXSht9WklCiGXpf36F0ei0oiA5O+v4TU6ix/WWrpnTazicKw==',
+ }
+
+ ssh_authorized_key { 'ssh_key_buchan':
+ ensure => 'absent',
+ type => 'ssh-dss',
+ key => 'AAAAB3NzaC1kc3MAAACBALpYDQtkZcfXdOILynCGa7IAbW4+etmzpIMjw6BfvZOfLT6UPfDwajhDBMBNSbgigxkxxEdsa0/UMIE3Yrpr8YivhbL79sFw2N/FeWCs3Vk8JXNjBGA6itAIz9nwfh6qCDUj2t8LTdOQdYrSFOO7x2dFgeCwi21V27Ga2vqsvkUnAAAAFQD708pfON6Itq/5S+4kkNdNNDKWCwAAAIEAkRQeugul6KmOC0C2EmgVJvKK1qImlwHir08W1LTESnujmRIWLRst8sDoKjJpNevFuHGybPQ3palvM9qTQ84k3NMsJYJZSjSexsKydHJbD4ErKk8W6k+Xo7GAtH4nUcNskbnLHUpfvzm0jWs2yeHS0TCrljuTQwX1UsvGKJanzEoAAACBAIurf3TAfN2FKKIpKt5vyNv2ENBVcxAHN36VH8JP4uDUERg/T0OyLrIxW8px9naI6AQ1o+fPLquJ3Byn9A1RZsvWAQJI/J0oUit1KQM5FKBtXNBuFhIMSLPwbtp5pZ+m0DAFo6IcY1pl1TimGa20ajrToUhDh1NpE2ZK//8fw2i7',
+ }
+
+ ssh_authorized_key { 'ssh_key_tmb':
+ ensure => 'absent',
+ type => 'ssh-dss',
+ key => 'AAAAB3NzaC1kc3MAAACBAMFaCUsen6ZYH8hsjGK0tlaguduw4YT2KD3TaDEK24ltKzvQ+NDiPRms1zPhTpRL0p0U5QVdIMxm/asAtuiMLMxdmU+Crry6s110mKKY2930ZEk6N4YJ4DbqSiYe2JBmpJVIEJ6Betgn7yZRR2mRM7j134PddAl8BGG+RUvzib7JAAAAFQDzu/G2R+6oe3vjIbbFpOTyR3PAbwAAAIEAmqXAGybY9CVgGChSztPEdvaZ1xOVGJtmxmlWvitWGpu8m5JBf57VhzdpT4Fsf4fiVZ7NWiwPm1DzqNX7xCH7IPLPK0jQSd937xG9Un584CguNB76aEQXv0Yl5VjOrC3DggIEfZ1KLV7GcpOukw0RerxKz99rYAThp6+qzBIrv38AAACBAKhXi7uNlajescWFjiCZ3fpnxdyGAgtKzvlz60mGKwwNyaQCVmPSmYeBI2tg1qk+0I5K6LZUxWkdhuE1UfvAbIrEdwyD8p53dPg1J9DpdQ1KqApeKqLxO02KJtfomuy3cRQXmdfOTovYN7zAu1NCp51uUNTzhIpDHx0MZ6bsWSFv',
+ }
+
+ ssh_authorized_key { 'ssh_key_tmb_rsa':
+ ensure => 'absent',
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAADAQABAAACAQC+PaVLUaIvBg/lDK0esX2xvVe9IspiXq2ES4Ti/KvmTqbsAhgUuW+IR4fxY2vffCdbTo2B247bEh4kTB8P+ZKmrJPv/c18CtidJHKXpV4EwEAD3abeKKYcUlDXyX0zTER64d86yT0PN2eA9BJNfpZeVCMpn32C7tYBzQztp1FVJB+i2oslayKE4Q9FRxpJFzhvepOsUC4ZnePSz7ymeg4Y6vgWjoH9Eo33FSJ0fEm8+/bk8kir9X1wu+BbfQodnkS6wTehXqb0hj1uNIkngy+nA+T+ckhhddlRELKYt44VMp/X8wtCE7Y8nLOi15sQiSIgtrXDIdwoIzfyLKZM1/fH/pYAKL/tjhomHgzSWeyHNA+gFT6B868tnBkSaBMfdAJmNu7RfAobFWmTHp2dF2Q5AsbzqFBWcofO3qvrP1xvz5Ckp+GzmflXdqg2M7XXNP5G1NAAq5dsMGRUBN99xYF25EpmDjbPaSXUDbvtzGY4B2Doc3OhR6Ask4undDvj+oOJw4Ldi9q4xexl1L9P0DbbfGhl/d/0T5snnagIAgXOZvG8Nwywzhv4oIoM2o6C1HNiUea3ODHMMl8f7w0ofDoX7JH4gQvRxgPRniZgBKSTDl7fD0Fh5FAZ6KbcZ2VCit8pLZN0OVHOr7kNBvsRClLe8O8R4S0wACe44U2Jq2U+6Q==',
+ }
+
+ ssh_authorized_key { 'ssh_key_pterjan':
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAABIwAAAQEAspyZMl5zAkk5SL45zFvtJF7UhXTRb0bEaZ3nuCC1Ql5wM3GWuftqd5zLH88dCu7ZO/BVh213LZTq/UHb6lI7kWalygk53qtdEx2cywjWFOW23Rg6xybatCEZ2/ZrpGZoBGnu63otAp4h2Nnj/VkOio3pGwD8vavmZ4xPrcECPAwtMPJsYf44Ptu2JdXizi4iY8I0/HKitQ113I4NbDcAiMKbTXSbOfqC+ldcgW3+9xShx/kuMFTKeJOy4LI4GR6gykzkV6+vfnalp24x/SIEjuohBarCRQKo4megHqZOzdMYAHqq0QuNubXURNb0Mvz1sE7Y8AFIxwSfXdQGi5hcQQ==',
+ }
+
+ ssh_authorized_key { 'ssh_key_neoclust':
+ ensure => 'absent',
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDGyI8oIP8SgahPbMZ04Msr/vnI2gf4yx//QhEnZv8b++WIH0oibfK8g5Lz4HXReJRHzNXN1EhKQXoClgAKwv7zqkTLxV44tVcz8cwvfldkKNB+QxfL74JgsxCyNW8mpJdrJ71kbkT4Jt6AxeEd10ltQyqT7QDQMM7fxH8dbOCMcc7jtgOqwPXLy7hEumqkYxNuMxjrAbiDk2Nx0ddP2Ta4NJjSsGzUzSPsGhLVCO3+Wv6Ymss9Vacbe684ERwqz6odi5ZX0utfXXNphqqAckKCxurrI+LoWzt9MgWtR9iJC1joVDqRbggNm6bNNPZIdhmi5/yJrk3x7qwXb7uQNiE7',
+ }
+
+ ssh_authorized_key { 'ssh_key_maat':
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAADAQABAAACAQDNQqKZkAhPiMFoeE3uqioaTFLDAP6cTwBsRA7angVMDpAThbYxGK439oe3xaj/cgGrlEApAnCmEPL81in6hubQaTRMR/RigA3FEkoO3H/J2Xng1rD+aVQFFK0EerjzxDa+bxUu7eRcHv1ar49LY7spNjk0LzNtI/n32L+t3WifCE/ithHi80Qh2kMq36kTm10wW4Gxpz9tYzSQz/f7dfrzhX+yOVUbmnevuS1BDeF21hmxmltBFQZvBh3jiUiWeTMHePHaod8jI8voJkxXJ+TxsniJ3AfxgCaMmIoU0a0rBxeTpzQVkUHJUsmsmji8WdeW9J9gNwXYYv2PuSli8iz5mAWG0eo6y2W+tOHNy6RhvcIkPi4pViycxBjQvoxki5nCZDXo1KwWCYoJ0wg5YIrqdqBb70ibAqMOS1wXSO5KwWbUoVYrP+tSvz+i1EQtgEHtamgCzFAkJXQrjXhvJ/L1GVJLsvqTpee+kN/9NnbH8GnHKvnenvE7ecITcMoy8hODulYaqZrx+0jWivTv5UpHO7gX9RwsDB2nviT4rluYWjIugIjCnBJIroD1QP6UWFFwG6MM44QPaByXjz8AC7vw1fNefhWFmS+CT7dQ8Vd2zglP4gQwlWePTC8sASHDLSoe0nIXGXcfY2Af8bThV1fMEteI850thUvIKfrx34z4rw==',
+ }
+
+ ssh_authorized_key { 'ssh_key_wally':
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAABIwAAAQEAsB/PAEQJE/M5c3keyef6rKQvCtTk5cdw6ujXl6n8G7D7Q6h4IgIccd5mYcBU7ij2S5N3lfOQmKJqf2Pa5pByLfXlQnhCLzsgL9X45WJmpsoVK1MzjDY8iY+aL/74tj3wiMzuzAAwwpE3EftyfscxhSwf2e11B3qDzVRmNnxPVKlm85nTygnrZ0ag4nOC6O4yC3Hh1ULhKGtNAsGNF2yRGs7IcN9ytcVhGF3WGJfRI2c2kIuKW/lXxeE04sWWb+k019ys4ah0iQoLja6xVSHgxbVlm3oDz+mGGsPtoSvtoWpvF3q9FKqGclJpboWRMo3jyP6yDRVcTMXUSONmq3N8uw==',
+ }
+
+ ssh_authorized_key { 'ssh_key_danf':
+ type => 'ssh-rsa',
+ key => 'AAAAB3NzaC1yc2EAAAADAQABAAABAQCgWFg4EsUkZ5uh34ScVbfwhVdP7kTLRwsojeF+DgmwXSPbM9NUxiCmyFrHuh3m6bxG3BPMwrDskqUrQ3z/5WX6dB/CzSP/j03EkslzaE7eTzIpGt/vKIuZHR+4Z9FZcY1pyoI44rdgW5MVC+yBoJkvBerOkvNzfiRSfQ9R4eopPNTif3vb4MP/cFzFfa3o8NMqHxhgGFhF945NlzCUmnec13sNggx1wGNFHMpWttSaQ0izgvSdb61WSswNnCjBF5t3oyh7DgI80TN/XfXfDWZPjkQUzLrh9inuPollAWfreeInoCmF8ou268efaRoSfRMZ3qdRkJLDDy2Os8eL/d3d',
+ }
+}
diff --git a/deployment/common/manifests/export_ssh_keys.pp b/deployment/common/manifests/export_ssh_keys.pp
new file mode 100644
index 00000000..55b8f33b
--- /dev/null
+++ b/deployment/common/manifests/export_ssh_keys.pp
@@ -0,0 +1,7 @@
+class common::export_ssh_keys {
+ @@sshkey { $::fqdn:
+ type => 'rsa',
+ key => $::sshrsakey,
+ host_aliases => [$::ipaddress,$::hostname],
+ }
+}
diff --git a/deployment/common/manifests/i18n.pp b/deployment/common/manifests/i18n.pp
new file mode 100644
index 00000000..43b1fc3a
--- /dev/null
+++ b/deployment/common/manifests/i18n.pp
@@ -0,0 +1,12 @@
+class common::i18n {
+ package { 'locales-en': }
+
+ # push the locale everywhere, as it affects facter
+ file { '/etc/sysconfig/i18n':
+ content => template('common/i18n'),
+ }
+
+ file { '/etc/locale.conf':
+ content => template('common/locale.conf'),
+ }
+}
diff --git a/deployment/common/manifests/import_ssh_keys.pp b/deployment/common/manifests/import_ssh_keys.pp
new file mode 100644
index 00000000..da67f8b1
--- /dev/null
+++ b/deployment/common/manifests/import_ssh_keys.pp
@@ -0,0 +1,3 @@
+class common::import_ssh_keys {
+ Sshkey <<| |>>
+}
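The two classes above are the two halves of Puppet's exported-resources idiom:
the @@ prefix stores the host's own key in the master's stored configs, and the
<<| |>> collector realizes every exported sshkey on every node, which the
sshkey type writes into /etc/ssh/ssh_known_hosts. A commented sketch of the
round trip, assuming stored configs are enabled on the master (the commit
carries modules/puppet/manifests/stored_config.pp for that):

    # export: evaluated on every node, stored centrally rather than applied
    @@sshkey { $::fqdn:
      type         => 'rsa',
      key          => $::sshrsakey,
      host_aliases => [$::ipaddress, $::hostname],
    }

    # collect: pulls every exported sshkey (including this node's own) out of
    # stored configs and manages it in /etc/ssh/ssh_known_hosts
    Sshkey <<| |>>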
diff --git a/deployment/common/manifests/init.pp b/deployment/common/manifests/init.pp
new file mode 100644
index 00000000..c7b7486d
--- /dev/null
+++ b/deployment/common/manifests/init.pp
@@ -0,0 +1,33 @@
+class common {
+ class default_mageia_server_no_smtp {
+
+ include shadow
+ include openssh::server
+ include common::default_ssh_root_key
+ include common::base_packages
+ include common::export_ssh_keys
+ include common::import_ssh_keys
+ include common::i18n
+ include common::sudo_sysadmin
+ include ntp
+ include common::urpmi_update
+ include puppet::client
+ include xymon::client
+ include cron
+
+ # /srv is provided by lsb-core, but that package also pulls in
+ # various unneeded stuff for our servers
+ file { '/srv/':
+ ensure => directory
+ }
+
+ host { "${::hostname}.${::domain}":
+ ip => '127.0.0.1',
+ host_aliases => [ "${::hostname}", 'localhost' ],
+ }
+ }
+
+ class default_mageia_server inherits default_mageia_server_no_smtp {
+ include postfix::simple_relay
+ }
+}
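A node then opts into all of this with a single include. A minimal sketch of a
node definition in the style of manifests/nodes/*.pp; the hostname is made up:

    # hypothetical node definition; the real ones live in manifests/nodes/
    node 'example.mageia.org' {
      # pulls in base packages, root ssh keys, ntp, xymon client, cron,
      # urpmi updates and the postfix relay
      include common::default_mageia_server
    }

    # hosts that do not need the local mail relay would instead use:
    #   include common::default_mageia_server_no_smtp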
diff --git a/deployment/common/manifests/sudo_sysadmin.pp b/deployment/common/manifests/sudo_sysadmin.pp
new file mode 100644
index 00000000..1247c02c
--- /dev/null
+++ b/deployment/common/manifests/sudo_sysadmin.pp
@@ -0,0 +1,7 @@
+class common::sudo_sysadmin {
+ include sudo
+
+ sudo::sudoers_config { '00-sysadmin':
+ content => template('common/sudoers.sysadmin')
+ }
+}
diff --git a/deployment/common/manifests/urpmi_update.pp b/deployment/common/manifests/urpmi_update.pp
new file mode 100644
index 00000000..2eb39fdc
--- /dev/null
+++ b/deployment/common/manifests/urpmi_update.pp
@@ -0,0 +1,8 @@
+class common::urpmi_update {
+ cron { 'urpmi_update':
+ user => 'root',
+ hour => '*/4',
+ minute => 0,
+ command => '/usr/sbin/urpmi.update -a -q',
+ }
+}
diff --git a/deployment/common/templates/i18n b/deployment/common/templates/i18n
new file mode 100644
index 00000000..b85efe78
--- /dev/null
+++ b/deployment/common/templates/i18n
@@ -0,0 +1,20 @@
+<%-
+# should not be changed
+locale = 'en_US.UTF-8'
+
+-%>
+LC_TELEPHONE=<%= locale %>
+LC_CTYPE=<%= locale %>
+LANGUAGE=<%= locale %>:<%= locale.split('.')[0] %>:<%= locale.split('.')[0].split('_')[0] %>
+LC_MONETARY=<%= locale %>
+LC_ADDRESS=<%= locale %>
+LC_COLLATE=<%= locale %>
+LC_PAPER=<%= locale %>
+LC_NAME=<%= locale %>
+LC_NUMERIC=<%= locale %>
+SYSFONT=lat0-16
+LC_MEASUREMENT=<%= locale %>
+LC_TIME=<%= locale %>
+LANG=<%= locale %>
+LC_IDENTIFICATION=<%= locale %>
+LC_MESSAGES=<%= locale %>
diff --git a/deployment/common/templates/locale.conf b/deployment/common/templates/locale.conf
new file mode 100644
index 00000000..e9fc2e06
--- /dev/null
+++ b/deployment/common/templates/locale.conf
@@ -0,0 +1,20 @@
+<%-
+# should not be changed
+locale = 'en_US.UTF-8'
+
+-%>
+LC_TELEPHONE=<%= locale %>
+LC_CTYPE=<%= locale %>
+LANGUAGE=<%= locale %>:<%= locale.split('.')[0] %>:<%= locale.split('.')[0].split('_')[0] %>
+LC_MONETARY=<%= locale %>
+LC_ADDRESS=<%= locale %>
+LC_COLLATE=<%= locale %>
+LC_PAPER=<%= locale %>
+LC_NAME=<%= locale %>
+LC_NUMERIC=<%= locale %>
+LC_MEASUREMENT=<%= locale %>
+LC_TIME=<%= locale %>
+LANG=<%= locale %>
+LC_IDENTIFICATION=<%= locale %>
+LC_MESSAGES=<%= locale %>
+
diff --git a/deployment/common/templates/sudoers.sysadmin b/deployment/common/templates/sudoers.sysadmin
new file mode 100644
index 00000000..874b1858
--- /dev/null
+++ b/deployment/common/templates/sudoers.sysadmin
@@ -0,0 +1 @@
+%mga-sysadmin ALL=(ALL) ALL
diff --git a/deployment/dns/manifests/init.pp b/deployment/dns/manifests/init.pp
new file mode 100644
index 00000000..a84c8db7
--- /dev/null
+++ b/deployment/dns/manifests/init.pp
@@ -0,0 +1 @@
+class dns { }
diff --git a/deployment/dns/manifests/reverse_zone.pp b/deployment/dns/manifests/reverse_zone.pp
new file mode 100644
index 00000000..9095251d
--- /dev/null
+++ b/deployment/dns/manifests/reverse_zone.pp
@@ -0,0 +1,5 @@
+define dns::reverse_zone {
+ bind::zone::reverse { $name:
+ content => template("dns/${name}.zone")
+ }
+}
diff --git a/deployment/dns/manifests/server.pp b/deployment/dns/manifests/server.pp
new file mode 100644
index 00000000..c9467de8
--- /dev/null
+++ b/deployment/dns/manifests/server.pp
@@ -0,0 +1,7 @@
+class dns::server {
+ include bind::master
+ dns::zone { 'mageia.org': }
+
+ dns::reverse_zone { '7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa': }
+ dns::reverse_zone { '2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa': }
+}
diff --git a/deployment/dns/manifests/zone.pp b/deployment/dns/manifests/zone.pp
new file mode 100644
index 00000000..7d4da311
--- /dev/null
+++ b/deployment/dns/manifests/zone.pp
@@ -0,0 +1,5 @@
+define dns::zone {
+ bind::zone::master { $name:
+ content => template("dns/${name}.zone")
+ }
+}
diff --git a/deployment/dns/templates/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone b/deployment/dns/templates/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone
new file mode 100644
index 00000000..8ab67138
--- /dev/null
+++ b/deployment/dns/templates/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone
@@ -0,0 +1,12 @@
+$TTL 3D
+@ IN SOA ns0.mageia.org. root.mageia.org. (
+ 2024090202 ; Serial
+ 3600 ; Refresh
+ 3600 ; Retry
+ 3600000 ; Expire
+ 3600 ; Minimum TTL
+)
+
+; nameservers
+@ IN NS ns0.mageia.org.
+@ IN NS ns1.mageia.org.
diff --git a/deployment/dns/templates/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone b/deployment/dns/templates/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone
new file mode 100644
index 00000000..fdb83e63
--- /dev/null
+++ b/deployment/dns/templates/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone
@@ -0,0 +1,19 @@
+$TTL 3D
+@ IN SOA ns0.mageia.org. root.mageia.org. (
+ 2024090202 ; Serial
+ 3600 ; Refresh
+ 3600 ; Retry
+ 3600000 ; Expire
+ 3600 ; Minimum TTL
+)
+
+; nameservers
+@ IN NS ns0.mageia.org.
+@ IN NS ns1.mageia.org.
+
+1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR gw-ipv6.mageia.org.
+4.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR ecosse.mageia.org.
+6.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR fiona.mageia.org.
+7.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR sucuk.mageia.org.
+8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR rabbit.mageia.org.
+9.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 IN PTR duvel.mageia.org.
diff --git a/deployment/dns/templates/mageia.org.zone b/deployment/dns/templates/mageia.org.zone
new file mode 100644
index 00000000..a04ca19b
--- /dev/null
+++ b/deployment/dns/templates/mageia.org.zone
@@ -0,0 +1,174 @@
+<%-
+ # nodes list defined in puppet/manifests/nodes_ip.pp
+ nodes = scope.lookupvar('::nodes_ipaddr')
+-%>
+; puppet-distributed file
+; local modifications will be lost
+; $Id$
+$TTL 30m
+@ IN SOA ns0.mageia.org. root.mageia.org. (
+ 2025100701 ; Serial
+ 7200 ; Refresh
+ 3600 ; Retry
+ 3600000 ; Expire
+ 300 ; Minimum TTL
+ )
+
+; nameservers
+@ IN NS ns0.mageia.org.
+@ IN NS ns1.mageia.org.
+
+@ IN MX 10 sucuk.mageia.org.
+@ IN MX 20 neru.mageia.org.
+
+; DKIM for mageia.org
+sucuk._domainkey IN TXT "v=DKIM1; k=rsa; t=s; s=email; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGH25Jb2Al84XlTfSWuqZL8f6K6b+QhJjvV3zbF1/t31WmLwEt0So+p3FbFeKmaq/e0nJ+wKteTSVZsl3xwux+MaARKJDpEXslEgy+ojCedWqqpP6xLUjPuYPimGPljwkLwDoJxwvjiLa2POebec7C+R/nzaGm2nnTFwYQomqlvQIDAQAB"
+sucuk._domainkey.group IN TXT "v=DKIM1; k=rsa; t=s; s=email; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDBRrdmAaYpDBHCtzkephaLX9LrMFJvgq84dS0ogTIb0xD32qxQF69FU/gEUlfTjzJooTJQC3PK7R3oLnfoWttMlbHCGg/llSfoSI0gD/4UolZokzWZY3qdqMz+zKi9+bfjz0y4Fwx5EPyda1ihHhVB6c+wq6cekhDNOH8PHhO74QIDAQAB"
+sucuk._domainkey.duvel IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDHYgFMZTHMYlymX0WJ17ZvgchABE+5O/c6np1gj5sBV2BPIJGs+h/i+Iq6jLYVhSOWEI+6wQKza/8r3Vr4ddi3/UPDzllfqMnKsbPHC/LscyIkQmpNiO2n0nIUhKbuVU1SsRC1B8svO9iNmEjg33/lrLiaV3DtDbGr0ozmBmeFVwIDAQAB"
+sucuk._domainkey.fiona IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDeFoY9MTeZD4Z3OnxUJvp6Nr5UF6+rBwCg0TwVWwe/17uCQ4M6ptDxPSGgVIMYJowg/VUcbqNLlt56kluC4mO/gVVUyPQe6EjYib+NV5PkvgHx2TOJfb27ANPiZ4f57eEFqmE3eD7SxqUqF9j2Vobt0J+XgFuyFUBzHZsRTNUpzQIDAQAB"
+sucuk._domainkey.forums IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDEVhhONroS/ayEPs+9fmom34EWsny7asKVxIuyJh8EzvPJmx6ODYtX/tN1ul++3xoFNHeAe5YSSGyK+7EgJ5E5wlhw6FwnHPnYp/eMsShDI2dyfYsQnS2Yc1VXkI9s83ZWaVTL9uPRDETMKDIF+QjljFQZAN+eaH55q9u3EZRrWwIDAQAB"
+sucuk._domainkey.identity IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDBv4aqFb9cQQkPB30zRfCtcquWKsP5G2Nhh3HSEdN0fFvOegQnGykuGq6lDED9iJuiNSVGO2cjtWtFTwX3+1/W1AW7pmaUD7U9HzPoZgxGPWtvFcJ/tZ1mjKNoGaPa5vLaVpXwxNKjPUCI+w2t5cM8JPnemW1Vm/LeEJ0XLE0InwIDAQAB"
+sucuk._domainkey.madb IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDI9WOO3aRQLLnXc08q9HP15VY79TQZR5GqdBcYu0H+jAiuR+OKz6NUSNoYdeNQ4FSvrz27elW6thNcKQg4wYNT4tsJ8d4OU5ScFcrPJszPucVyMpkl/ybCgVq0CmXgOh1yXYwl2YY4AfzUQ6skpTE5G2abIWBvPOvs8Q92vYJ1nwIDAQAB"
+sucuk._domainkey.rabbit IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDZYdG5dEd0CHAYGPRG+OXm2gJTDVpjmsKkn5+4BISToAOXXyogRcJN/P6oPySlG+CyUl5PW/2nBIiiUfHNKxVSa9gPO3vS0nlEppSHulkhth4deNu8YXRgJQp31IgaD0/Cbu7CKcDJbxTKGdnMV7XPKoIxB/Mjn0TxUS+WC2WY6QIDAQAB"
+sucuk._domainkey.sucuk IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdzn4W4Tl4sJ0pfhktNVlWRYFXnIwaMENqmi2vgc/P8M/zVxysVuWPcEwhy+IiVT8tMleXMt9dreErzJS+8ZmMd8oTqRXM55ZzRuBtqiecKnbIrXpecYUhh+2o0BMouTRHZvrPK5PV6Y2PrXkXwLF8qOS/eslZDk7hLRk2XBVDWwIDAQAB"
+sucuk._domainkey.ml IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4uPUsb1kvNCXT1AsEBldhU/9akmeRrRHOQtI8g60K+y2fRRur5l+TJDZ/+bnyVS69AMhyfeWEaWGhQytvmkKZBQyHZ6JzS2him+HT/x7xCYOHlQ5vixy0t4jYqbYZ04pdokJ4jcJ3pU7CFisgzk2Ln7HA4JDD1Dc+kCYbOvivtQIDAQAB"
+sucuk._domainkey.neru IN TXT "v=DKIM1; k=rsa; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC4uPUsb1kvNCXT1AsEBldhU/9akmeRrRHOQtI8g60K+y2fRRur5l+TJDZ/+bnyVS69AMhyfeWEaWGhQytvmkKZBQyHZ6JzS2him+HT/x7xCYOHlQ5vixy0t4jYqbYZ04pdokJ4jcJ3pU7CFisgzk2Ln7HA4JDD1Dc+kCYbOvivtQIDAQAB"
+
+; TODO use a loop here
+ml IN MX 10 sucuk.mageia.org.
+ml IN MX 20 neru.mageia.org.
+
+; Sender Policy Framework for mailing lists & some automated mails
+@ IN TXT "v=spf1 include:smtp.dnamail.fi mx ~all"
+ml IN TXT "v=spf1 mx ~all"
+group IN TXT "v=spf1 mx ~all"
+
+group IN MX 10 sucuk.mageia.org.
+group IN MX 20 neru.mageia.org.
+
+
+; machines
+<%-
+ nodes_txt = ''
+ nodes.keys.sort.each{|nodename|
+ spf = ''
+ if nodes[nodename].has_key?('ipv4')
+ nodes_txt += nodename + ' IN A ' + nodes[nodename]['ipv4'] + "\n"
+ spf += ' ip4:' + nodes[nodename]['ipv4']
+ end
+ if nodes[nodename].has_key?('ipv6')
+ nodes_txt += nodename + ' IN AAAA ' + nodes[nodename]['ipv6'] + "\n"
+ spf += ' ip6:' + nodes[nodename]['ipv6']
+ end
+ nodes_txt += nodename + ' IN TXT "v=spf1 ' + spf + ' mx:mageia.org ~all" ' + "\n"
+ }
+-%>
+<%= nodes_txt %>
+
+;SSHFP
+
+; sucuk
+ns0 IN A <%= nodes['sucuk']['ipv4'] %>
+ns0 IN AAAA <%= nodes['sucuk']['ipv6'] %>
+; neru
+ns1 IN A <%= nodes['neru']['ipv4'] %>
+ns1 IN AAAA <%= nodes['neru']['ipv6'] %>
+
+; mageia.org set to IP of neru
+mageia.org. IN A <%= nodes['neru']['ipv4'] %>
+mageia.org. IN AAAA <%= nodes['neru']['ipv6'] %>
+
+; madb on mageia.madb.org
+;madb IN A 163.172.201.211
+; temporarily for hosting a redirect while the real madb is down
+madb IN CNAME neru
+
+; ml already carries MX and TXT records (and has subdomains), so it cannot be a CNAME
+ml IN A <%= nodes['sucuk']['ipv4'] %>
+ml IN AAAA <%= nodes['sucuk']['ipv6'] %>
+
+; aliases
+ldap-slave-1 IN CNAME neru
+
+archives IN CNAME neru
+blog IN CNAME neru
+dashboard IN CNAME neru
+doc IN CNAME neru
+hugs IN CNAME neru
+meetbot IN CNAME neru
+planet IN CNAME neru
+releases IN CNAME neru
+start IN CNAME neru
+static IN CNAME neru
+www-test IN CNAME neru
+
+rsync IN CNAME duvel
+svn IN CNAME duvel
+git IN CNAME duvel
+puppetmaster IN CNAME duvel
+puppet IN CNAME duvel
+pkgsubmit IN CNAME duvel
+binrepo IN CNAME duvel
+repository IN CNAME duvel
+maintdb IN CNAME duvel
+ldap IN CNAME duvel
+ldap-master IN CNAME duvel
+advisories IN CNAME duvel
+projects IN CNAME duvel
+bcd IN CNAME rabbit
+
+epoll IN CNAME sucuk
+forums IN CNAME sucuk
+forum IN CNAME sucuk
+
+send IN CNAME sucuk
+bugs IN CNAME sucuk
+check IN CNAME sucuk
+gitweb IN CNAME sucuk
+identity IN A <%= nodes['sucuk']['ipv4'] %>
+identity-trunk IN CNAME sucuk
+mirrors IN CNAME sucuk
+nav IN CNAME sucuk
+people IN CNAME sucuk
+perl IN CNAME sucuk
+pg IN CNAME sucuk
+pkgcpan IN CNAME sucuk
+svnweb IN CNAME sucuk
+treasurer IN CNAME sucuk
+wiki IN CNAME sucuk
+www IN CNAME sucuk
+xymon IN CNAME sucuk
+
+; build nodes aliases
+ecosse0 IN CNAME ecosse
+ecosse1 IN CNAME ecosse
+rabbit0 IN CNAME rabbit
+rabbit1 IN CNAME rabbit
+rabbit2 IN CNAME rabbit
+ec2aa1-a IN CNAME ec2aa1
+ec2aa1-b IN CNAME ec2aa1
+ec2aa2-a IN CNAME ec2aa2
+ec2aa2-b IN CNAME ec2aa2
+ec2aa3-a IN CNAME ec2aa3
+ec2aa3-b IN CNAME ec2aa3
+ec2x1-a IN CNAME ec2x1
+ec2x1-b IN CNAME ec2x1
+ec2x2-a IN CNAME ec2x2
+ec2x2-b IN CNAME ec2x2
+pktaa1-a IN CNAME pktaa1
+pktaa1-b IN CNAME pktaa1
+pktaa1-c IN CNAME pktaa1
+pktaa1-d IN CNAME pktaa1
+pktaa1-e IN CNAME pktaa1
+pktaa1-f IN CNAME pktaa1
+ociaa1-a IN CNAME ociaa1
+ociaa1-b IN CNAME ociaa1
+ociaa1-c IN CNAME ociaa1
+ociaa2-a IN CNAME ociaa2
+ociaa2-b IN CNAME ociaa2
+ociaa2-c IN CNAME ociaa2
+ncaa1-a IN CNAME ncaa1
+ncaa1-b IN CNAME ncaa1
+ncaa1-c IN CNAME ncaa1
+
+<%# vim: set filetype=bindzone : -%>
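
The ERB loop over ::nodes_ipaddr above emits one A record, one AAAA record and one per-host SPF TXT record for each node. A minimal sketch of that expansion, assuming a hypothetical node entry with documentation addresses (the real hash is defined in puppet/manifests/nodes_ip.pp):

  # Hypothetical top-scope entry; the key names match the lookups in the loop.
  $nodes_ipaddr = {
    'example' => { 'ipv4' => '192.0.2.10', 'ipv6' => '2001:db8::10' },
  }
  # The template would then render roughly:
  #   example IN A 192.0.2.10
  #   example IN AAAA 2001:db8::10
  #   example IN TXT "v=spf1 ip4:192.0.2.10 ip6:2001:db8::10 mx:mageia.org ~all"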
diff --git a/deployment/forums/manifests/init.pp b/deployment/forums/manifests/init.pp
new file mode 100644
index 00000000..0ff256cd
--- /dev/null
+++ b/deployment/forums/manifests/init.pp
@@ -0,0 +1,22 @@
+class forums {
+
+ phpbb::instance { 'en': }
+
+ phpbb::instance { 'de': }
+
+ phpbb::redirection_instance{ 'fr':
+ url => "https://forums.${::domain}/en/viewforum.php?f=19"
+ }
+
+ phpbb::redirection_instance{ 'es':
+ url => "https://forums.${::domain}/en/viewforum.php?f=22"
+ }
+
+ phpbb::redirection_instance{ 'zh-cn':
+ url => "https://forums.${::domain}/en/viewforum.php?f=27"
+ }
+
+ phpbb::redirection_instance{ 'pt-br':
+ url => "https://forums.${::domain}/en/viewforum.php?f=28"
+ }
+}
diff --git a/deployment/lists/manifests/init.pp b/deployment/lists/manifests/init.pp
new file mode 100755
index 00000000..3f06aa1f
--- /dev/null
+++ b/deployment/lists/manifests/init.pp
@@ -0,0 +1,420 @@
+class lists {
+ # When adding a new list, also add it to the wiki page:
+ # https://wiki.mageia.org/en/Mailing_lists
+
+ # Note: an address "FOO@group.${::domain}" corresponds to the LDAP group
+ # mga-FOO, but only as a sender address for users of that group; the group
+ # members are not expanded. By contrast, subscriber_ldap_group and
+ # sender_ldap_group are expanded to their group members.
+
+ sympa::list::announce { 'announce':
+ subject => 'Mageia announces',
+ reply_to => "discuss@ml.${::domain}",
+ sender_email => [ "sysadmin@group.${::domain}"],
+ }
+
+ sympa::list::announce {'atelier-bugs':
+ subject => 'Atelier bug reports from bugzilla',
+ reply_to => "atelier-discuss@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'atelier',
+ }
+
+ sympa::list::announce {'atelier-commits':
+ subject => 'Commits on atelier repositories (Artwork, Web, etc ...)',
+ reply_to => "atelier-discuss@ml.${::domain}",
+ sender_email => [ "root@${::domain}", "subversion_noreply@ml.${::domain}" ],
+ topics => 'atelier',
+ }
+
+ sympa::list::public {'atelier-discuss':
+ subject => 'Discussions about artwork, web, marketing, communication',
+ topics => 'atelier',
+ }
+
+ sympa::list::public {'basesystem':
+ subject => 'Development discussion list about mageia basesystem',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::private { 'blog-moderation':
+ subject => 'Blog comments moderation',
+ subscriber_ldap_group => 'mga-blog-moderators',
+ sender_email => [ "wordpress@blog.${::domain}" ],
+ topics => 'atelier',
+ }
+
+ sympa::list::public {'bugsquad-discuss':
+ subject => 'Bugsquad team discussions',
+ topics => 'bugsquad',
+ }
+
+ sympa::list::public {'dev':
+ subject => 'Development discussion list',
+ topics => 'developers',
+ }
+
+ sympa::list::public {'discuss':
+ subject => 'General discussion list',
+ topics => 'users',
+ }
+
+ sympa::list::public {'gnome':
+ subject => 'Development discussion list about mageia Gnome integration',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'i18n-discuss':
+ subject => 'Translation team discussions',
+ topics => 'i18n',
+ }
+
+ sympa::list::announce { 'i18n-bugs':
+ subject => 'Translation bug reports from bugzilla',
+ reply_to => "i18n-discuss@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'i18n',
+ }
+
+ sympa::list {'i18n-reports':
+ subject => 'Automated reports for translations',
+ reply_to => "i18n-discuss@ml.${::domain}",
+ sender_subscriber => true,
+ sender_email => [
+ # 'r2d2@vargas.calenco.com',
+ # "blog@${::domain}",
+ "root@${::domain}",
+ "subversion_noreply@ml.${::domain}",
+ ],
+ topics => 'i18n',
+ }
+
+ # please check that the list uses the proper language code
+ # (not to be confused with a TLD or country code)
+ sympa::list::public {'i18n-af':
+ subject => 'Translation to Afrikaans',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-ar':
+ subject => 'Translation to Arabic',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-de':
+ subject => 'Translation to German',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-el':
+ subject => 'Translation to Greek',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-en':
+ subject => 'Translation to English',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-et':
+ subject => 'Translation to Estonian',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-fr':
+ subject => 'Translation to French',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-hu':
+ subject => 'Translation to Hungarian',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-it':
+ subject => 'Translation to Italian',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-nl':
+ subject => 'Translation to Dutch',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-pl':
+ subject => 'Translation to Polish',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-pt_br':
+ subject => 'Translation to Brazilian Portuguese',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-pt_pt':
+ subject => 'Translation to Portuguese',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-ro':
+ subject => 'Translation to Romanian',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-ru':
+ subject => 'Translation to Russian',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-tr':
+ subject => 'Translation to Turkish',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'i18n-zh_tw':
+ subject => 'Translation to Taiwanese',
+ topics => 'i18n',
+ }
+
+ sympa::list::public {'isobuild':
+ subject => 'Development discussion list about Mageia isos',
+ topics => 'developers',
+ }
+
+ sympa::list::public {'java':
+ subject => 'Development discussion list about Java',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'kde':
+ subject => 'Development discussion list about KDE',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'kernel':
+ subject => 'Development discussion list about Kernel',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'mageiatools':
+ subject => 'Development discussion list about Mageiatools',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'perl':
+ subject => 'Development discussion list about Perl',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'php':
+ subject => 'Development discussion list about Php',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'python':
+ subject => 'Development discussion list about Python',
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::public {'qa-discuss':
+ subject => 'Discussions about QA tasks and requests',
+ topics => 'qa',
+ }
+
+ sympa::list::public {'rpmstack':
+ subject => 'Development discussion list about Mageia rpm stack',
+ topics => 'developers',
+ }
+
+ sympa::list::announce {'qa-bugs':
+ subject => 'QA bug reports from bugzilla',
+ reply_to => "qa-discuss@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'qa',
+ }
+
+ sympa::list::announce {'qa-reports':
+ subject => 'Automated reports from QA tools',
+ reply_to => "qa-discuss@ml.${::domain}",
+ sender_email => [ "buildsystem-daemon@${::domain}" ],
+ topics => 'qa',
+ }
+
+ sympa::list::announce {'qa-commits':
+ subject => 'Update advisories commits',
+ reply_to => "qa-discuss@ml.${::domain}",
+ sender_email => [ "root@${::domain}", "subversion_noreply@ml.${::domain}" ],
+ topics => 'qa',
+ }
+
+ sympa::list::public {'forums-discuss':
+ subject => 'Discuss forums matters, policies and processes, and publish summaries of notable events/feedback',
+ topics => 'forums',
+ }
+
+ sympa::list::announce {'forums-bugs':
+ subject => 'Forums bug reports from bugzilla',
+ reply_to => "forums-discuss@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'forums',
+ }
+
+ sympa::list::public {'doc-discuss':
+ subject => 'Discussions about Mageia documentation',
+ topics => 'doc',
+ }
+
+ sympa::list::announce { 'doc-bugs':
+ subject => 'Documentation bug reports from bugzilla',
+ reply_to => "doc-discuss@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'doc',
+ }
+
+ sympa::list::announce { 'packages-commits':
+ subject => 'Commits on packages repository',
+ reply_to => "dev@ml.${::domain}",
+ sender_email => [ "subversion_noreply@ml.${::domain}", "binrepo_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'mirrors-announce':
+ subject => 'Important announces about mirrors updates',
+ reply_to => "sysadmin-discuss@ml.${::domain}",
+ sender_email => [ "root@${::domain}" ],
+ topics => 'sysadmin',
+ }
+
+ sympa::list::announce {'sysadmin-commits':
+ subject => 'Commits on sysadmin repository',
+ reply_to => "sysadmin-discuss@ml.${::domain}",
+ sender_email => [ "root@${::domain}", "subversion_noreply@ml.${::domain}" ],
+ topics => 'sysadmin',
+ }
+
+ sympa::list::public {'sysadmin-discuss':
+ subject => 'Sysadmin team discussions',
+ topics => 'sysadmin',
+ }
+
+ sympa::list::announce {'sysadmin-reports':
+ subject => 'Automated reports from various pieces of infrastructure',
+ reply_to => "sysadmin-discuss@ml.${::domain}",
+ sender_email => [ "root@${::domain}" ],
+ topics => 'sysadmin',
+ }
+
+ sympa::list::announce { 'sysadmin-bugs':
+ subject => 'Sysadmin bug reports from bugzilla',
+ reply_to => "sysadmin-discuss@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'sysadmin',
+ critical => true,
+ }
+
+ sympa::list::announce { 'soft-commits':
+ subject => 'Commits on soft repository',
+ reply_to => "dev@ml.${::domain}",
+ sender_email => [ "root@${::domain}", "subversion_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'bugs':
+ subject => 'Bug reports from bugzilla',
+ reply_to => "dev@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'pkg-bugs':
+ subject => 'Packaging bug reports from bugzilla',
+ reply_to => "dev@ml.${::domain}",
+ sender_email => [ "bugzilla_noreply@ml.${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'updates-announce':
+ subject => 'Packages update for stable release',
+ reply_to => "dev@ml.${::domain}",
+ sender_email => [ "buildsystem-daemon@${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'backports-announce':
+ subject => 'Package backports for stable release',
+ reply_to => "dev@ml.${::domain}",
+ sender_ldap_group => "mga-qa-committers",
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'changelog':
+ subject => 'Announces for new packages uploaded',
+ reply_to => "dev@ml.${::domain}",
+ sender_email => [ "buildsystem-daemon@${::domain}" ],
+ topics => 'developers',
+ }
+
+ sympa::list::announce { 'board-commits':
+ subject => 'Commits on Mageia.Org status and organisation documents',
+ reply_to => "board-public@ml.${::domain}",
+ sender_email => [ "root@${::domain}", "subversion_noreply@ml.${::domain}" ],
+ topics => 'governance',
+ subscriber_ldap_group => 'mga-board',
+ }
+
+ sympa::list::public_restricted { 'board-public':
+ subject => 'Public board discussion',
+ subscriber_ldap_group => 'mga-board',
+ topics => 'governance',
+ }
+
+ sympa::list::private { 'board-private':
+ subject => 'Private board discussion',
+ subscriber_ldap_group => 'mga-board',
+ topics => 'governance',
+ }
+
+ sympa::list::announce {'treasurer-commits':
+ subject => 'Commits on Mageia.Org association treasurer repository',
+ reply_to => "treasurer@${::domain}",
+ sender_email => [ "root@${::domain}", "subversion_noreply@ml.${::domain}" ],
+ topics => 'governance',
+ }
+
+ sympa::list::public_restricted { 'council':
+ subject => 'Council discussions',
+ subscriber_ldap_group => 'mga-council',
+ topics => 'governance',
+ }
+
+ sympa::list::public {'local-discuss':
+ subject => 'Discussions about Local Community Team',
+ topics => 'local',
+ }
+
+ sympa::list::public {'discuss-fr':
+ subject => 'French discussions about Mageia',
+ topics => 'users',
+ }
+
+ sympa::list::public {'discuss-pt-br':
+ subject => 'Discussions about Mageia in Brazilian Portuguese',
+ topics => 'users',
+ }
+
+ sympa::list::private { 'mageia-association-members':
+ subject => 'Discussions between association members',
+ subscriber_ldap_group => 'mga-association-members',
+ topics => 'Mageia Association Members',
+ }
+}
+
diff --git a/deployment/main_mirror/files/README b/deployment/main_mirror/files/README
new file mode 100644
index 00000000..10ffe4c7
--- /dev/null
+++ b/deployment/main_mirror/files/README
@@ -0,0 +1,24 @@
+Description of the directories
+
+bootstrap
+---------
+
+Used only for the initial release and bootstrapping.
+
+mageiatools
+-----------
+
+Mandriva repository holding our own tools; it should be merged
+with cooker (and other distributions too).
+
+mandriva
+--------
+
+Local Mandriva 2010.1 mirror used for initial bootstrapping; it should be
+removed once bootstrapping is finished.
+
+mirror
+------
+
+Main Mageia mirror, shared over rsync for everybody.
+
diff --git a/deployment/main_mirror/files/mirror/mirror.readme b/deployment/main_mirror/files/mirror/mirror.readme
new file mode 100644
index 00000000..5846d12e
--- /dev/null
+++ b/deployment/main_mirror/files/mirror/mirror.readme
@@ -0,0 +1,79 @@
+ __ __ _
+| \/ | __ _ __ _ ___(_) __ _
+| |\/| |/ _` |/ _` |/ _ \ |/ _` |
+| | | | (_| | (_| | __/ | (_| |
+|_| |_|\__,_|\__, |\___|_|\__,_|
+ |___/
+
+This document describes how to set up a Mageia mirror.
+
+1) Prerequisite
+---------------
+
+The expected size of the mirror is around 1.8-2TB.
+The mirror only contains the last 2 stable releases plus the development
+branch, called cauldron.
+
+Older, unsupported releases can be found in the archive provided by
+the distrib-coffee mirror:
+https://distrib-coffee.ipsl.jussieu.fr/pub/linux/Mageia-archive/
+
+Look here to see the (planned) end of support date for each version:
+https://www.mageia.org/en/support/#lifecycle
+
+You need rsync to synchronise the tree.
+
+2) Official source
+------------------
+
+For a public mirror, we encourage you to use one of our Tier 1 mirrors.
+
+The servers below synchronise the tree directly from the Mageia rsync server.
+
+ Check https://mirrors.mageia.org/ for their bandwidths.
+ Check https://mirrors.mageia.org/status for their current statuses.
+
+ o rsync://mageia.c3sl.ufpr.br/mageia/
+ located in Curitiba (Brazil)
+ o rsync://mirrors.kernel.org/mirrors/mageia/
+ located in USA and Europe
+ o rsync://mirror.accum.se/mirror/mageia/
+ located in Umea (Sweden)
+ o rsync://mirror.math.princeton.edu/pub/mageia/
+ located in Princeton (USA)
+
+3) Rsync options
+----------------
+
+Ensure you're using at least these options:
+
+ -a -H
+
+We would appreciate it if, in addition, you also added the options:
+
+ --delete-after -S
+
+Don't use the compression or checksum options, as they create excessive
+load on the remote server.
+
+4) Automated update procedure
+-----------------------------
+
+The tree must be synchronized at least every 2 hours.
+Tier 1 mirrors should preferably sync at least every hour.
+
+Please ensure that another rsync process is not started while a previous one
+is still running. Use a lock file (an example cron entry is shown below).
+
+5) Registering your mirror
+--------------------------
+
+Go to https://mirrors.mageia.org/new and enter all the protocols your mirror supports.
+
+6) Subscribe to the mirrors-announce mailing list
+-------------------------------------------------
+
+To get information about new releases uploaded to the mirrors and other
+mirror updates, you can subscribe to the mirrors-announce mailing list:
+https://ml.mageia.org/wwsympa-wrapper.fcgi/info/mirrors-announce
+
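
As mentioned in section 4 of mirror.readme above, the sync should be driven by cron with a lock so that two rsync processes never overlap. An illustrative crontab entry, assuming flock(1) is available; the schedule, lock path, destination directory and choice of Tier 1 mirror are examples only:

  0 * * * *  flock -n /var/lock/mageia-mirror.lock rsync -aH --delete-after -S rsync://mirrors.kernel.org/mirrors/mageia/ /srv/mirror/mageia/

The -n flag makes flock exit immediately if a previous sync still holds the lock, so overlapping runs are simply skipped.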
diff --git a/deployment/main_mirror/files/mirror/paths.readme b/deployment/main_mirror/files/mirror/paths.readme
new file mode 100644
index 00000000..f8066850
--- /dev/null
+++ b/deployment/main_mirror/files/mirror/paths.readme
@@ -0,0 +1,34 @@
+Description of the paths on the mirror
+
+distrib
+-------
+
+Contains the supported and development releases of
+the distribution.
+
+Cauldron is the name of the development release.
+
+Directories named X (a number) are the stable releases.
+
+iso
+---
+
+Contains the various ISOs used for burning and installation.
+
+mageia_timestamp
+----------------
+
+Contains the timestamp, updated by cron every 5 minutes, used to check mirror
+freshness.
+
+people
+------
+
+Contains various files pushed by Mageia members. The goal and usage are not
+yet decided.
+
+software
+--------
+
+Contains the various tarballs of software produced by Mageia, to be reused
+on other systems and distributions.
diff --git a/deployment/main_mirror/manifests/init.pp b/deployment/main_mirror/manifests/init.pp
new file mode 100644
index 00000000..9b26a64d
--- /dev/null
+++ b/deployment/main_mirror/manifests/init.pp
@@ -0,0 +1,21 @@
+class main_mirror {
+ # FIXME shouldn't the various code be in this module?
+ include mirror::main
+
+ class { 'rsyncd':
+ rsyncd_conf => 'main_mirror/rsyncd.conf'
+ }
+
+ $mirror = '/distrib'
+ file { [$mirror,
+ "${mirror}/mirror",
+ "${mirror}/archive"]:
+ ensure => directory,
+ }
+
+ file {
+ "${mirror}/README": source => 'puppet:///modules/main_mirror/README';
+ "${mirror}/mirror/mirror.readme": source => 'puppet:///modules/main_mirror/mirror/mirror.readme';
+ "${mirror}/mirror/paths.readme": source => 'puppet:///modules/main_mirror/mirror/paths.readme';
+ }
+}
diff --git a/deployment/main_mirror/templates/rsyncd.conf b/deployment/main_mirror/templates/rsyncd.conf
new file mode 100644
index 00000000..9fc93860
--- /dev/null
+++ b/deployment/main_mirror/templates/rsyncd.conf
@@ -0,0 +1,31 @@
+# $Id$
+
+uid = nobody
+gid = nogroup
+
+[mageia]
+ path = /distrib/mirror/
+ comment = Mageia Mirror Tree
+ hosts allow = \
+ 10.42.0.0/24 \
+ 2a02:2178:2:7::/64 \
+ rabbit.<%= @domain %> \
+ sucuk.<%= @domain %> \
+ distrib-coffee.ipsl.jussieu.fr \
+ ftp.proxad.net \
+ jobbot0.ibiblio.org \
+ jobbot1.ibiblio.org \
+ mirror.math.princeton.edu \
+ poincare.accum.se \
+ poincare.acc.umu.se \
+ sagres.c3sl.ufpr.br \
+ sv.mirrors.kernel.org \
+ ny.mirrors.kernel.org \
+ 147.75.69.246 \
+ 2001:14ba:a417:eb00::1 \
+ 2001:14ba:a417:eb00::2
+
+[git]
+ path = /git
+ comment = Mageia Git repos
+ hosts allow = sucuk.<%= @domain %>
diff --git a/deployment/mga_buildsystem/manifests/buildnode.pp b/deployment/mga_buildsystem/manifests/buildnode.pp
new file mode 100644
index 00000000..f6bf70ea
--- /dev/null
+++ b/deployment/mga_buildsystem/manifests/buildnode.pp
@@ -0,0 +1,4 @@
+class mga_buildsystem::buildnode {
+ include mga_buildsystem::config
+ include buildsystem::buildnode
+}
diff --git a/deployment/mga_buildsystem/manifests/config.pp b/deployment/mga_buildsystem/manifests/config.pp
new file mode 100644
index 00000000..c0c62cc4
--- /dev/null
+++ b/deployment/mga_buildsystem/manifests/config.pp
@@ -0,0 +1,668 @@
+class mga_buildsystem::config {
+ class { 'buildsystem::var::signbot':
+ keyid => '80420F66',
+ keyemail => "packages@${::domain}",
+ keyname => 'Mageia Packages',
+ }
+
+ class { 'buildsystem::var::groups':
+ packagers => 'mga-packagers',
+ packagers_committers => 'mga-packagers-committers',
+ }
+
+ class { 'buildsystem::var::webstatus' :
+ package_commit_url => "https://svnweb.${::domain}/packages?view=revision&revision=%d",
+ theme_name => 'mageia',
+ }
+
+ class { 'buildsystem::var::iurt':
+ timeout_multiplier => $::architecture ? {
+ /arm/ => 4,
+ 'aarch64' => 2,
+ default => 1,
+ }
+ }
+
+ class { 'buildsystem::var::scheduler' :
+ admin_mail => "sysadmin@group.${::domain}",
+ build_nodes => {
+ 'i586' => [ 'ecosse0', 'rabbit0', 'ecosse1', 'rabbit1', 'rabbit2' ],
+ 'i686' => [ 'ecosse0', 'rabbit0', 'ecosse1', 'rabbit1', 'rabbit2' ],
+ 'x86_64' => [ 'rabbit0', 'ecosse0', 'rabbit1', 'ecosse1', 'rabbit2' ],
+ 'armv7hl' => [ 'ncaa1-a', 'ncaa1-b', 'ncaa1-c', 'ociaa1-a', 'ociaa1-b'],
+ 'aarch64' => [ 'ncaa1-a', 'ncaa1-b', 'ncaa1-c', 'ociaa1-a', 'ociaa1-b'],
+ },
+ build_nodes_aliases => {
+ 'ecosse0' => "ecosse.${::domain}",
+ 'ecosse1' => "ecosse.${::domain}",
+ 'rabbit0' => "rabbit.${::domain}",
+ 'rabbit1' => "rabbit.${::domain}",
+ 'rabbit2' => "rabbit.${::domain}",
+ 'ociaa1-a' => "ociaa1.${::domain}",
+ 'ociaa1-b' => "ociaa1.${::domain}",
+ 'ociaa1-c' => "ociaa1.${::domain}",
+ },
+ build_src_node => 'duvel',
+ }
+ include buildsystem::var::repository
+ class { 'buildsystem::var::binrepo':
+ uploadmail_from => "binrepo_noreply@ml.${::domain}",
+ uploadmail_to => "packages-commits@ml.${::domain}",
+ }
+
+ $svn_hostname = "svn.${::domain}"
+ $svn_root_packages = "svn://${svn_hostname}/svn/packages"
+ $svn_root_packages_ssh = "svn+ssh://${svn_hostname}/svn/packages"
+ class { 'buildsystem::var::mgarepo':
+ submit_host => "pkgsubmit.${::domain}",
+ svn_hostname => $svn_hostname,
+ svn_root_packages => $svn_root_packages,
+ svn_root_packages_ssh => $svn_root_packages_ssh,
+ oldurl => "${svn_root_packages_ssh}/misc",
+ conf => {
+ 'global' => {
+ 'ldap-server' => "ldap.${::domain}",
+ 'ldap-base' => "ou=People,${::dc_suffix}",
+ 'ldap-filterformat' => '(&(objectClass=inetOrgPerson)(uid=$username))',
+ 'ldap-resultformat' => '$cn <$mail>',
+ }
+ }
+ }
+
+ include stdlib
+
+ $std_arch = ['x86_64', 'i586']
+ $x86_arch = ['x86_64', 'i686']
+ $arm32_arch = ['armv5tl', 'armv7hl']
+ $std_repos = {
+ 'release' => {
+ 'media_type' => [ 'release' ],
+ 'requires' => [],
+ 'order' => 0,
+ },
+ 'updates' => {
+ 'media_type' => [ 'updates' ],
+ 'updates_for' => 'release',
+ 'requires' => [ 'release' ],
+ 'order' => 1,
+ },
+ 'updates_testing' => {
+ 'media_type' => [ 'testing' ],
+ 'noauto' => '1',
+ 'requires' => [ 'updates' ],
+ 'order' => 2,
+ },
+ 'backports' => {
+ 'media_type' => [ 'backports' ],
+ 'noauto' => '1',
+ 'requires' => [ 'updates' ],
+ 'order' => 3,
+ },
+ 'backports_testing' => {
+ 'media_type' => [ 'testing' ],
+ 'noauto' => '1',
+ 'requires' => [ 'backports' ],
+ 'order' => 4,
+ },
+ }
+ $std_medias = {
+ 'core' => {
+ 'repos' => $std_repos,
+ 'media_type' => [ 'official', 'free' ],
+ 'order' => 0,
+ },
+ 'nonfree' => {
+ 'repos' => $std_repos,
+ 'media_type' => [ 'official' ],
+ 'noauto' => '1',
+ 'requires' => [ 'core' ],
+ 'order' => 1,
+ },
+ 'tainted' => {
+ 'repos' => $std_repos,
+ 'media_type' => [ 'official' ],
+ 'noauto' => '1',
+ 'requires' => [ 'core' ],
+ 'order' => 2,
+ },
+ }
+ $std_base_media = [ 'core/release', 'core/updates' ]
+ $infra_medias = {
+ 'infra' => {
+ 'repos' => {
+ 'updates' => {
+ 'media_type' => [ 'updates' ],
+ 'requires' => [ 'release' ],
+ 'order' => 0,
+ },
+ },
+ 'media_type' => [ 'infra' ],
+ 'requires' => [ 'core' ],
+ 'order' => 0,
+ },
+ }
+ $std_macros = {
+ 'distsuffix' => '.mga',
+ 'distribution' => 'Mageia',
+ 'vendor' => 'Mageia.Org',
+ '_real_vendor' => 'mageia',
+ }
+ $repo_allow_from_ips = [
+ $::nodes_ipaddr[duvel][ipv6],
+ $::nodes_ipaddr[duvel][ipv4],
+ $::nodes_ipaddr[ecosse][ipv6],
+ $::nodes_ipaddr[ecosse][ipv4],
+ $::nodes_ipaddr[fiona][ipv6],
+ $::nodes_ipaddr[fiona][ipv4],
+ '10.42.0',
+ $::nodes_ipaddr[rabbit][ipv4],
+ $::nodes_ipaddr[rabbit][ipv6],
+ $::nodes_ipaddr[sucuk][ipv4],
+ $::nodes_ipaddr[sucuk][ipv6],
+ '85.134.55.73',
+ $::nodes_ipaddr[neru][ipv4],
+ $::nodes_ipaddr[neru][ipv6],
+ '2001:bc8:4400:2700::2729',
+ '147.75.83.250',
+ '2604:1380:2000:f100::1',
+ '2a05:d014:e9:2c00::/56',
+ '147.75.69.244/30',
+ '2604:1380:1001:4900::/127',
+ # Will be new neru
+ '51.15.220.93',
+ '2001:bc8:628:1f00::1',
+ # Oracle cloud VMs
+ '2603:c026:c101:f00::/64',
+ $::nodes_ipaddr[ncaa1][ipv4],
+ $::nodes_ipaddr[ncaa1][ipv6],
+ ]
+ $repo_allow_from_domains = [
+ ".${::domain}",
+ ]
+
+ # the list of checks, actions, posts for cauldron in youri-upload
+ $cauldron_youri_upload_targets = {
+ 'checks' => [
+ 'version',
+ 'tag',
+ 'acl',
+ 'rpmlint',
+ 'recency',
+ ],
+ 'actions' => [
+ 'markrelease',
+ 'sign',
+ 'install',
+ 'link',
+ 'unpack_release_notes',
+ 'unpack_gfxboot_theme',
+ 'unpack_meta_task',
+ 'unpack_installer_images',
+ 'unpack_installer_images_nonfree',
+ 'unpack_installer_stage2',
+ 'unpack_installer_advertising',
+ 'unpack_installer_rescue',
+ 'unpack_syslinux',
+ 'unpack_pci_usb_ids',
+ 'archive',
+ 'mail',
+ 'maintdb',
+ ],
+ 'posts' => [
+ 'genhdlist2_zstd',
+ 'createrepo_cauldron',
+ 'appstream_cauldron',
+ 'clean_rpmsrate',
+ 'mirror',
+ ],
+ }
+
+ # TODO: mga >= 6 should use std config + createrepo, not a different one
+ $mga6_youri_upload_targets = {
+ 'checks' => [
+ 'version',
+ 'tag',
+ 'acl',
+ 'rpmlint',
+ 'recency',
+ ],
+ 'actions' => [
+ 'sign',
+ 'install',
+ 'link',
+ 'archive',
+ 'mail',
+ ],
+ 'posts' => [
+ 'genhdlist2',
+ 'createrepo_mga6',
+ 'appstream_mga6',
+ 'mirror',
+ ],
+ }
+
+ $mga7_youri_upload_targets = {
+ 'checks' => [
+ 'version',
+ 'tag',
+ 'acl',
+ 'rpmlint',
+ 'recency',
+ ],
+ 'actions' => [
+ 'sign',
+ 'install',
+ 'link',
+ 'archive',
+ 'mail',
+ ],
+ 'posts' => [
+ 'genhdlist2',
+ 'createrepo_mga7',
+ 'appstream_mga7',
+ 'mirror',
+ ],
+ }
+
+ $mga8_youri_upload_targets = {
+ 'checks' => [
+ 'version',
+ 'tag',
+ 'acl',
+ 'rpmlint',
+ 'recency',
+ ],
+ 'actions' => [
+ 'sign',
+ 'install',
+ 'link',
+ 'archive',
+ 'mail',
+ ],
+ 'posts' => [
+ 'genhdlist2',
+ 'createrepo_mga8',
+ 'appstream_mga8',
+ 'mirror',
+ ],
+ }
+
+ $mga9_youri_upload_targets = {
+ 'checks' => [
+ 'version',
+ 'tag',
+ 'acl',
+ 'rpmlint',
+ 'recency',
+ ],
+ 'actions' => [
+ 'sign',
+ 'install',
+ 'link',
+ 'archive',
+ 'mail',
+ ],
+ 'posts' => [
+ 'genhdlist2',
+ 'createrepo_mga9',
+ 'appstream_mga9',
+ 'mirror',
+ ],
+ }
+
+ # the list of checks, actions, posts for infra distros in youri-upload
+ $infra_youri_upload_targets = {
+ 'checks' => [
+ 'version',
+ 'tag',
+ 'acl',
+ 'rpmlint',
+ 'recency',
+ ],
+ 'actions' => [
+ 'sign',
+ 'install',
+ 'link',
+ 'archive',
+ ],
+ 'posts' => [
+ 'genhdlist2',
+ ],
+ }
+
+ # the list of checks, actions, posts for cauldron in youri-todo
+ $cauldron_youri_todo_targets = {
+ 'checks' => [
+ 'source',
+ 'deps',
+ 'version',
+ 'tag',
+ 'acl',
+ 'host',
+ 'rpmlint',
+ 'recency',
+ 'queue_recency',
+ ],
+ 'actions' => [
+ 'send',
+ 'dependencies',
+ 'rpminfo',
+ 'ulri',
+ ],
+ }
+
+ # the list of checks, actions, posts for stable and infra distros in youri-todo
+ $std_youri_todo_targets = {
+ 'checks' => [
+ 'source',
+ 'version',
+ 'tag',
+ 'acl',
+ 'host',
+ 'rpmlint',
+ 'recency',
+ 'queue_recency',
+ ],
+ 'actions' => [
+ 'send',
+ 'dependencies',
+ 'rpminfo',
+ 'ulri',
+ ],
+ }
+
+ # rpmlint check options for stable and cauldron
+ $mga_rpmlint = {
+ 'config' => '/usr/share/rpmlint/config',
+ 'path' => '/usr/bin/rpmlint',
+ 'results' => [
+ 'buildprereq-use',
+ 'no-description-tag',
+ 'no-summary-tag',
+ 'non-standard-group',
+ 'non-xdg-migrated-menu',
+ 'percent-in-conflicts',
+ 'percent-in-dependency',
+ 'percent-in-obsoletes',
+ 'percent-in-provides',
+ 'summary-ended-with-dot',
+ 'unexpanded-macro',
+ 'unknown-lsb-keyword',
+ 'malformed-line-in-lsb-comment-block',
+ 'empty-%postun',
+ 'empty-%post',
+ 'invalid-desktopfile',
+ 'standard-dir-owned-by-package',
+ 'use-tmp-in-%postun',
+ 'bogus-variable-use-in-%posttrans',
+ 'dir-or-file-in-usr-local',
+ 'dir-or-file-in-tmp',
+ 'dir-or-file-in-mnt',
+ 'dir-or-file-in-opt',
+ 'dir-or-file-in-home',
+ 'dir-or-file-in-var-local',
+ 'tmpfiles-conf-in-etc',
+ 'non-ghost-in-run',
+ 'non-ghost-in-var-run',
+ 'non-ghost-in-var-lock',
+ 'systemd-unit-in-etc',
+ 'udev-rule-in-etc',
+ ],
+ }
+
+ # list of users allowed to submit packages when cauldron is frozen
+ $cauldron_authorized_users = str_join(group_members('mga-release_managers'), '|')
+ $cauldron_version_check = {
+ 'authorized_sections' => '^[a-z]+/updates_testing$',
+ #'authorized_sections' => 'none_section_authorized',
+ #'authorized_packages' => 'none_package_authorized',
+ 'authorized_packages' => 'drak|^(urpmi|perl-(MDK-Common|Gtk3|Glib(-Object-Introspection)?|URPM)|mgaonline|net_monitor|perl_checker|mandi|indexhtml|ldetect(-lst)?|msec|manatools|rpm-(mageia-setup|helper)|(mga-|mageia).*|iurt)$',
+ 'authorized_arches' => 'none',
+ 'authorized_users' => "^${cauldron_authorized_users}\$",
+ 'mode' => 'normal',
+ #'mode' => 'version_freeze',
+ #'mode' => 'freeze',
+ }
+
+ # for EOL distributions
+ $frozen_version_check = {
+ 'authorized_packages' => 'none_package_authorized',
+ 'authorized_sections' => 'none_section_authorized',
+ 'authorized_arches' => 'none',
+ 'mode' => 'freeze',
+ }
+
+ # for supported stable distributions
+ $std_version_check = {
+ 'authorized_packages' => 'none_package_authorized',
+ 'authorized_sections' => '^(core|nonfree|tainted)/(updates_testing|backports_testing)$',
+ 'authorized_arches' => 'none',
+ 'mode' => 'freeze',
+ }
+
+ $infra_authorized_users = str_join(group_members('mga-sysadmin'), '|')
+ $infra_version_check = {
+ 'authorized_users' => "^${infra_authorized_users}\$",
+ 'mode' => 'freeze',
+ }
+
+ class { 'buildsystem::var::distros':
+ default_distro => 'cauldron',
+ repo_allow_from_ips => $repo_allow_from_ips,
+ repo_allow_from_domains => $repo_allow_from_domains,
+ distros => {
+ 'cauldron' => {
+ 'arch' => concat($x86_arch, ['armv7hl', 'aarch64']),
+ 'mandatory_arch' => concat($x86_arch, ['aarch64']),
+ 'no_media_cfg_update' => true,
+ 'medias' => $std_medias,
+ 'base_media' => $std_base_media,
+ 'branch' => 'Devel',
+ 'version' => '10',
+ 'submit_allowed' => "${svn_root_packages}/cauldron",
+ 'macros' => $std_macros,
+ 'youri' => {
+ 'upload' => {
+ 'targets' => $cauldron_youri_upload_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ },
+ },
+ 'todo' => {
+ 'targets' => $cauldron_youri_todo_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ 'version' => $cauldron_version_check,
+ },
+ },
+ },
+ },
+
+
+ '8' => {
+ 'arch' => concat($std_arch, ['armv7hl', 'aarch64']),
+ 'mandatory_arch' => concat($std_arch, ['aarch64']),
+ 'no_media_cfg_update' => true,
+ 'medias' => $std_medias,
+ 'base_media' => $std_base_media,
+ 'branch' => 'Official',
+ 'version' => '8',
+ 'submit_allowed' => "${svn_root_packages}/updates/8",
+ 'backports_allowed' => "${svn_root_packages}/backports/8",
+ 'macros' => $std_macros,
+ 'youri' => {
+ 'upload' => {
+ 'targets' => $mga8_youri_upload_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ },
+ },
+ 'todo' => {
+ 'targets' => $std_youri_todo_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ 'version' => $std_version_check,
+ },
+ },
+ },
+ },
+
+ '9' => {
+ 'arch' => concat($std_arch, ['armv7hl', 'aarch64']),
+ 'mandatory_arch' => concat($std_arch, ['aarch64']),
+ 'no_media_cfg_update' => true,
+ 'medias' => $std_medias,
+ 'base_media' => $std_base_media,
+ 'branch' => 'Official',
+ 'version' => '9',
+ 'submit_allowed' => "${svn_root_packages}/updates/9",
+ 'backports_allowed' => "${svn_root_packages}/backports/9",
+ 'macros' => $std_macros,
+ 'youri' => {
+ 'upload' => {
+ 'targets' => $mga9_youri_upload_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ },
+ },
+ 'todo' => {
+ 'targets' => $std_youri_todo_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ 'version' => $std_version_check,
+ },
+ },
+ },
+ },
+
+ 'infra_8' => {
+ 'arch' => concat($std_arch, ['armv7hl', 'aarch64']),
+ 'medias' => $infra_medias,
+ 'base_media' => [ '8/core/release', '8/core/updates', 'infra/updates' ],
+ 'branch' => 'Official',
+ 'version' => '8',
+ 'submit_allowed' => "${svn_root_packages}/updates/infra_8",
+ 'macros' => $std_macros,
+ 'based_on' => {
+ '8' => {
+ 'core' => [ 'release', 'updates' ],
+ },
+ },
+ 'youri' => {
+ 'upload' => {
+ 'targets' => $infra_youri_upload_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ },
+ },
+ 'todo' => {
+ 'targets' => $std_youri_todo_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ 'version' => $infra_version_check,
+ },
+ },
+ },
+ 'no_mirror' => true,
+ },
+
+ 'infra_9' => {
+ 'arch' => concat($std_arch, ['armv7hl', 'aarch64']),
+ 'medias' => $infra_medias,
+ 'base_media' => [ '9/core/release', '9/core/updates', 'infra/updates' ],
+ 'branch' => 'Official',
+ 'version' => '9',
+ 'submit_allowed' => "${svn_root_packages}/updates/infra_9",
+ 'macros' => $std_macros,
+ 'based_on' => {
+ '9' => {
+ 'core' => [ 'release', 'updates' ],
+ },
+ },
+ 'youri' => {
+ 'upload' => {
+ 'targets' => $infra_youri_upload_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ },
+ },
+ 'todo' => {
+ 'targets' => $std_youri_todo_targets,
+ 'checks' => {
+ 'rpmlint' => $mga_rpmlint,
+ 'version' => $infra_version_check,
+ },
+ },
+ },
+ 'no_mirror' => true,
+ },
+ }
+ }
+ $checks_tag_options = {
+ 'tags' => {
+ 'release' => inline_template("^[^~]*<%= std_macros['distsuffix'] %>\\d+"),
+ 'distribution' => inline_template("^<%= std_macros['distribution'] %>"),
+ 'vendor' => inline_template("^<%= std_macros['vendor'] %>$"),
+ },
+ }
+ class { 'buildsystem::var::youri':
+ packages_archivedir => "${buildsystem::var::scheduler::homedir}/old",
+ youri_conf => {
+ 'upload' => {
+ 'checks' => {
+ 'tag' => {
+ 'options' => $checks_tag_options,
+ },
+ 'rpmlint' => {
+ 'options' => {
+ 'results' => [
+ 'buildprereq-use',
+ 'no-description-tag',
+ 'no-summary-tag',
+ 'non-standard-group',
+ 'non-xdg-migrated-menu',
+ 'percent-in-conflicts',
+ 'percent-in-dependency',
+ 'percent-in-obsoletes',
+ 'percent-in-provides',
+ 'summary-ended-with-dot',
+ 'unexpanded-macro',
+ 'unknown-lsb-keyword',
+ 'malformed-line-in-lsb-comment-block',
+ 'empty-%postun',
+ 'empty-%post',
+ 'invalid-desktopfile',
+ 'standard-dir-owned-by-package',
+ 'use-tmp-in-%postun',
+ 'bogus-variable-use-in-%posttrans',
+ 'dir-or-file-in-usr-local',
+ 'dir-or-file-in-tmp',
+ 'dir-or-file-in-mnt',
+ 'dir-or-file-in-opt',
+ 'dir-or-file-in-home',
+ 'dir-or-file-in-var-local',
+ ],
+ },
+ },
+ },
+ 'actions' => {
+ 'mail' => {
+ 'options' => {
+ 'to' => "changelog@ml.${::domain}",
+ 'reply_to' => "dev@ml.${::domain}",
+ 'from' => "buildsystem-daemon@${::domain}",
+ 'prefix' => 'RPM',
+ },
+ },
+ },
+ },
+ 'todo' => {
+ 'checks' => {
+ 'tag' => {
+ 'options' => $checks_tag_options,
+ },
+ },
+ },
+ }
+ }
+}
diff --git a/deployment/mga_buildsystem/manifests/init.pp b/deployment/mga_buildsystem/manifests/init.pp
new file mode 100644
index 00000000..a111c3da
--- /dev/null
+++ b/deployment/mga_buildsystem/manifests/init.pp
@@ -0,0 +1,2 @@
+class mga_buildsystem {
+}
diff --git a/deployment/mga_buildsystem/manifests/mainnode.pp b/deployment/mga_buildsystem/manifests/mainnode.pp
new file mode 100644
index 00000000..b614cdbd
--- /dev/null
+++ b/deployment/mga_buildsystem/manifests/mainnode.pp
@@ -0,0 +1,14 @@
+class mga_buildsystem::mainnode {
+ include mga_buildsystem::config
+ include buildsystem::mainnode
+ include buildsystem::release
+ include buildsystem::maintdb
+ include buildsystem::binrepo
+ include buildsystem::repoctl
+ include buildsystem::webstatus
+
+ $rpmlint_packages = [ 'rpmlint-mageia-policy']
+ package { $rpmlint_packages:
+ ensure => installed
+ }
+}
diff --git a/deployment/mgagit/files/git_multimail.py b/deployment/mgagit/files/git_multimail.py
new file mode 100644
index 00000000..0c5c8d7b
--- /dev/null
+++ b/deployment/mgagit/files/git_multimail.py
@@ -0,0 +1,4383 @@
+#! /usr/bin/env python3
+
+__version__ = '1.7.dev'
+
+# Copyright (c) 2015-2022 Matthieu Moy and others
+# Copyright (c) 2012-2014 Michael Haggerty and others
+# Derived from contrib/hooks/post-receive-email, which is
+# Copyright (c) 2007 Andy Parkins
+# and also includes contributions by other authors.
+#
+# This file is part of git-multimail.
+#
+# git-multimail is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License version
+# 2 as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see
+# <http://www.gnu.org/licenses/>.
+
+"""Generate notification emails for pushes to a git repository.
+
+This hook sends emails describing changes introduced by pushes to a
+git repository. For each reference that was changed, it emits one
+ReferenceChange email summarizing how the reference was changed,
+followed by one Revision email for each new commit that was introduced
+by the reference change.
+
+Each commit is announced in exactly one Revision email. If the same
+commit is merged into another branch in the same or a later push, then
+the ReferenceChange email will list the commit's SHA1 and its one-line
+summary, but no new Revision email will be generated.
+
+This script is designed to be used as a "post-receive" hook in a git
+repository (see githooks(5)). It can also be used as an "update"
+script, but this usage is not completely reliable and is deprecated.
+
+To help with debugging, this script accepts a --stdout option, which
+causes the emails to be written to standard output rather than sent
+using sendmail.
+
+See the accompanying README file for the complete documentation.
+
+"""
+
+import sys
+import os
+import re
+import bisect
+import socket
+import subprocess
+import shlex
+import optparse
+import logging
+import smtplib
+try:
+ import ssl
+except ImportError:
+ # Python < 2.6 does not have ssl, but that's OK if we don't use it.
+ pass
+import time
+
+import uuid
+import base64
+
+PYTHON3 = sys.version_info >= (3, 0)
+
+if sys.version_info <= (2, 5):
+ def all(iterable):
+ for element in iterable:
+ if not element:
+ return False
+ return True
+
+
+def is_ascii(s):
+ return all(ord(c) < 128 and ord(c) > 0 for c in s)
+
+
+if PYTHON3:
+ def is_string(s):
+ return isinstance(s, str)
+
+ def str_to_bytes(s):
+ return s.encode(ENCODING)
+
+ def bytes_to_str(s, errors='strict'):
+ return s.decode(ENCODING, errors)
+
+ unicode = str
+
+ def write_str(f, msg):
+ # Try outputting with the default encoding. If it fails,
+ # try UTF-8.
+ try:
+ f.buffer.write(msg.encode(sys.getdefaultencoding()))
+ except UnicodeEncodeError:
+ f.buffer.write(msg.encode(ENCODING))
+
+ def read_line(f):
+ # Try reading with the default encoding. If it fails,
+ # try UTF-8.
+ out = f.buffer.readline()
+ try:
+ return out.decode(sys.getdefaultencoding())
+ except UnicodeEncodeError:
+ return out.decode(ENCODING)
+
+ import html
+
+ def html_escape(s):
+ return html.escape(s)
+
+else:
+ def is_string(s):
+ try:
+ return isinstance(s, basestring)
+ except NameError: # Silence Pyflakes warning
+ raise
+
+ def str_to_bytes(s):
+ return s
+
+ def bytes_to_str(s, errors='strict'):
+ return s
+
+ def write_str(f, msg):
+ f.write(msg)
+
+ def read_line(f):
+ return f.readline()
+
+ def next(it):
+ return it.next()
+
+ import cgi
+
+ def html_escape(s):
+ return cgi.escape(s, True)
+
+try:
+ from email.charset import Charset
+ from email.utils import make_msgid
+ from email.utils import getaddresses
+ from email.utils import formataddr
+ from email.utils import formatdate
+ from email.header import Header
+except ImportError:
+ # Prior to Python 2.5, the email module used different names:
+ from email.Charset import Charset
+ from email.Utils import make_msgid
+ from email.Utils import getaddresses
+ from email.Utils import formataddr
+ from email.Utils import formatdate
+ from email.Header import Header
+
+
+DEBUG = False
+
+ZEROS = '0' * 40
+LOGBEGIN = '- Log -----------------------------------------------------------------\n'
+LOGEND = '-----------------------------------------------------------------------\n'
+
+ADDR_HEADERS = set(['from', 'to', 'cc', 'bcc', 'reply-to', 'sender'])
+
+# It is assumed in many places that the encoding is uniformly UTF-8,
+# so changing these constants is unsupported. But define them here
+# anyway, to make it easier to find (at least most of) the places
+# where the encoding is important.
+(ENCODING, CHARSET) = ('UTF-8', 'utf-8')
+
+
+REF_CREATED_SUBJECT_TEMPLATE = (
+ '%(emailprefix)s%(refname_type)s %(short_refname)s created'
+ ' (now %(newrev_short)s)'
+ )
+REF_UPDATED_SUBJECT_TEMPLATE = (
+ '%(emailprefix)s%(refname_type)s %(short_refname)s updated'
+ ' (%(oldrev_short)s -> %(newrev_short)s)'
+ )
+REF_DELETED_SUBJECT_TEMPLATE = (
+ '%(emailprefix)s%(refname_type)s %(short_refname)s deleted'
+ ' (was %(oldrev_short)s)'
+ )
+
+COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE = (
+ '%(emailprefix)s%(refname_type)s %(short_refname)s updated: %(oneline)s'
+ )
+
+REFCHANGE_HEADER_TEMPLATE = """\
+Date: %(send_date)s
+To: %(recipients)s
+Subject: %(subject)s
+MIME-Version: 1.0
+Content-Type: text/%(contenttype)s; charset=%(charset)s
+Content-Transfer-Encoding: 8bit
+Message-ID: %(msgid)s
+From: %(fromaddr)s
+Reply-To: %(reply_to)s
+Thread-Index: %(thread_index)s
+X-Git-Host: %(fqdn)s
+X-Git-Repo: %(repo_shortname)s
+X-Git-Refname: %(refname)s
+X-Git-Reftype: %(refname_type)s
+X-Git-Oldrev: %(oldrev)s
+X-Git-Newrev: %(newrev)s
+X-Git-NotificationType: ref_changed
+X-Git-Multimail-Version: %(multimail_version)s
+Auto-Submitted: auto-generated
+"""
+
+REFCHANGE_INTRO_TEMPLATE = """\
+This is an automated email from the git hooks/post-receive script.
+
+%(pusher)s pushed a change to %(refname_type)s %(short_refname)s
+in repository %(repo_shortname)s.
+
+"""
+
+
+FOOTER_TEMPLATE = """\
+
+-- \n\
+To stop receiving notification emails like this one, please contact
+%(administrator)s.
+"""
+
+
+REWIND_ONLY_TEMPLATE = """\
+This update removed existing revisions from the reference, leaving the
+reference pointing at a previous point in the repository history.
+
+ * -- * -- N %(refname)s (%(newrev_short)s)
+ \\
+ O -- O -- O (%(oldrev_short)s)
+
+Any revisions marked "omit" are not gone; other references still
+refer to them. Any revisions marked "discard" are gone forever.
+"""
+
+
+NON_FF_TEMPLATE = """\
+This update added new revisions after undoing existing revisions.
+That is to say, some revisions that were in the old version of the
+%(refname_type)s are not in the new version. This situation occurs
+when a user --force pushes a change and generates a repository
+containing something like this:
+
+ * -- * -- B -- O -- O -- O (%(oldrev_short)s)
+ \\
+ N -- N -- N %(refname)s (%(newrev_short)s)
+
+You should already have received notification emails for all of the O
+revisions, and so the following emails describe only the N revisions
+from the common base, B.
+
+Any revisions marked "omit" are not gone; other references still
+refer to them. Any revisions marked "discard" are gone forever.
+"""
+
+
+NO_NEW_REVISIONS_TEMPLATE = """\
+No new revisions were added by this update.
+"""
+
+
+DISCARDED_REVISIONS_TEMPLATE = """\
+This change permanently discards the following revisions:
+"""
+
+
+NO_DISCARDED_REVISIONS_TEMPLATE = """\
+The revisions that were on this %(refname_type)s are still contained in
+other references; therefore, this change does not discard any commits
+from the repository.
+"""
+
+
+NEW_REVISIONS_TEMPLATE = """\
+The %(tot)s revisions listed above as "new" are entirely new to this
+repository and will be described in separate emails. The revisions
+listed as "add" were already present in the repository and have only
+been added to this reference.
+
+"""
+
+
+TAG_CREATED_TEMPLATE = """\
+ at %(newrev_short)-8s (%(newrev_type)s)
+"""
+
+
+TAG_UPDATED_TEMPLATE = """\
+*** WARNING: tag %(short_refname)s was modified! ***
+
+ from %(oldrev_short)-8s (%(oldrev_type)s)
+ to %(newrev_short)-8s (%(newrev_type)s)
+"""
+
+
+TAG_DELETED_TEMPLATE = """\
+*** WARNING: tag %(short_refname)s was deleted! ***
+
+"""
+
+
+# The template used in summary tables. It looks best if this uses the
+# same alignment as TAG_CREATED_TEMPLATE and TAG_UPDATED_TEMPLATE.
+BRIEF_SUMMARY_TEMPLATE = """\
+%(action)8s %(rev_short)-8s %(text)s
+"""
+
+
+NON_COMMIT_UPDATE_TEMPLATE = """\
+This is an unusual reference change because the reference did not
+refer to a commit either before or after the change. We do not know
+how to provide full information about this reference change.
+"""
+
+
+REVISION_HEADER_TEMPLATE = """\
+Date: %(send_date)s
+To: %(recipients)s
+Cc: %(cc_recipients)s
+Subject: %(emailprefix)s%(num)02d/%(tot)02d: %(oneline)s
+MIME-Version: 1.0
+Content-Type: text/%(contenttype)s; charset=%(charset)s
+Content-Transfer-Encoding: 8bit
+From: %(fromaddr)s
+Reply-To: %(reply_to)s
+In-Reply-To: %(reply_to_msgid)s
+References: %(reply_to_msgid)s
+Thread-Index: %(thread_index)s
+X-Git-Host: %(fqdn)s
+X-Git-Repo: %(repo_shortname)s
+X-Git-Refname: %(refname)s
+X-Git-Reftype: %(refname_type)s
+X-Git-Rev: %(rev)s
+X-Git-NotificationType: diff
+X-Git-Multimail-Version: %(multimail_version)s
+Auto-Submitted: auto-generated
+"""
+
+REVISION_INTRO_TEMPLATE = """\
+This is an automated email from the git hooks/post-receive script.
+
+%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
+in repository %(repo_shortname)s.
+
+"""
+
+LINK_TEXT_TEMPLATE = """\
+View the commit online:
+%(browse_url)s
+
+"""
+
+LINK_HTML_TEMPLATE = """\
+<p><a href="%(browse_url)s">View the commit online</a>.</p>
+"""
+
+
+REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE
+
+
+# Combined, meaning refchange+revision email (for single-commit additions)
+COMBINED_HEADER_TEMPLATE = """\
+Date: %(send_date)s
+To: %(recipients)s
+Subject: %(subject)s
+MIME-Version: 1.0
+Content-Type: text/%(contenttype)s; charset=%(charset)s
+Content-Transfer-Encoding: 8bit
+Message-ID: %(msgid)s
+From: %(fromaddr)s
+Reply-To: %(reply_to)s
+X-Git-Host: %(fqdn)s
+X-Git-Repo: %(repo_shortname)s
+X-Git-Refname: %(refname)s
+X-Git-Reftype: %(refname_type)s
+X-Git-Oldrev: %(oldrev)s
+X-Git-Newrev: %(newrev)s
+X-Git-Rev: %(rev)s
+X-Git-NotificationType: ref_changed_plus_diff
+X-Git-Multimail-Version: %(multimail_version)s
+Auto-Submitted: auto-generated
+"""
+
+COMBINED_INTRO_TEMPLATE = """\
+This is an automated email from the git hooks/post-receive script.
+
+%(pusher)s pushed a commit to %(refname_type)s %(short_refname)s
+in repository %(repo_shortname)s.
+
+"""
+
+COMBINED_FOOTER_TEMPLATE = FOOTER_TEMPLATE
+
+
+class CommandError(Exception):
+ def __init__(self, cmd, retcode):
+ self.cmd = cmd
+ self.retcode = retcode
+ Exception.__init__(
+ self,
+ 'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,)
+ )
+
+
+class ConfigurationException(Exception):
+ pass
+
+
+# The "git" program (this could be changed to include a full path):
+GIT_EXECUTABLE = 'git'
+
+
+# How "git" should be invoked (including global arguments), as a list
+# of words. This variable is usually initialized automatically by
+# read_git_output() via choose_git_command(), but if a value is set
+# here then it will be used unconditionally.
+GIT_CMD = None
+
+
+def choose_git_command():
+ """Decide how to invoke git, and record the choice in GIT_CMD."""
+
+ global GIT_CMD
+
+ if GIT_CMD is None:
+ try:
+ # Check to see whether the "-c" option is accepted (it was
+ # only added in Git 1.7.2). We don't actually use the
+ # output of "git --version", though if we needed more
+ # specific version information this would be the place to
+ # do it.
+ cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version']
+ read_output(cmd)
+ GIT_CMD = [GIT_EXECUTABLE, '-c', 'i18n.logoutputencoding=%s' % (ENCODING,)]
+ except CommandError:
+ GIT_CMD = [GIT_EXECUTABLE]
+
+
+def read_git_output(args, input=None, keepends=False, **kw):
+ """Read the output of a Git command."""
+
+ if GIT_CMD is None:
+ choose_git_command()
+
+ return read_output(GIT_CMD + args, input=input, keepends=keepends, **kw)
+
+
+def read_output(cmd, input=None, keepends=False, **kw):
+ if input:
+ stdin = subprocess.PIPE
+ input = str_to_bytes(input)
+ else:
+ stdin = None
+ errors = 'strict'
+ if 'errors' in kw:
+ errors = kw['errors']
+ del kw['errors']
+ p = subprocess.Popen(
+ tuple(str_to_bytes(w) for w in cmd),
+ stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw
+ )
+ (out, err) = p.communicate(input)
+ out = bytes_to_str(out, errors=errors)
+ retcode = p.wait()
+ if retcode:
+ raise CommandError(cmd, retcode)
+ if not keepends:
+ out = out.rstrip('\n\r')
+ return out
+
+
+def read_git_lines(args, keepends=False, **kw):
+ """Return the lines output by Git command.
+
+ Return as single lines, with newlines stripped off."""
+
+ return read_git_output(args, keepends=True, **kw).splitlines(keepends)
+
+
+def git_rev_list_ish(cmd, spec, args=None, **kw):
+ """Common functionality for invoking a 'git rev-list'-like command.
+
+ Parameters:
+ * cmd is the Git command to run, e.g., 'rev-list' or 'log'.
+ * spec is a list of revision arguments to pass to the named
+ command. If None, this function returns an empty list.
+ * args is a list of extra arguments passed to the named command.
+ * All other keyword arguments (if any) are passed to the
+ underlying read_git_lines() function.
+
+ Return the output of the Git command in the form of a list, one
+ entry per output line.
+ """
+ if spec is None:
+ return []
+ if args is None:
+ args = []
+ args = [cmd, '--stdin'] + args
+ spec_stdin = ''.join(s + '\n' for s in spec)
+ return read_git_lines(args, input=spec_stdin, **kw)
+
+
+def git_rev_list(spec, **kw):
+ """Run 'git rev-list' with the given list of revision arguments.
+
+ See git_rev_list_ish() for parameter and return value
+ documentation.
+ """
+ return git_rev_list_ish('rev-list', spec, **kw)
+
+
+def git_log(spec, **kw):
+ """Run 'git log' with the given list of revision arguments.
+
+ See git_rev_list_ish() for parameter and return value
+ documentation.
+ """
+ return git_rev_list_ish('log', spec, **kw)
+
+
+def header_encode(text, header_name=None):
+ """Encode and line-wrap the value of an email header field."""
+
+ # Convert to unicode, if required.
+ if not isinstance(text, unicode):
+ text = unicode(text, 'utf-8')
+
+ if is_ascii(text):
+ charset = 'ascii'
+ else:
+ charset = 'utf-8'
+
+ return Header(text, header_name=header_name, charset=Charset(charset)).encode()
+
+
+def addr_header_encode(text, header_name=None):
+ """Encode and line-wrap the value of an email header field containing
+ email addresses."""
+
+ # Convert to unicode, if required.
+ if not isinstance(text, unicode):
+ text = unicode(text, 'utf-8')
+
+ text = ', '.join(
+ formataddr((header_encode(name), emailaddr))
+ for name, emailaddr in getaddresses([text])
+ )
+
+ if is_ascii(text):
+ charset = 'ascii'
+ else:
+ charset = 'utf-8'
+
+ return Header(text, header_name=header_name, charset=Charset(charset)).encode()
+
+
+class Config(object):
+ def __init__(self, section, git_config=None):
+ """Represent a section of the git configuration.
+
+ If git_config is specified, it is passed to "git config" in
+ the GIT_CONFIG environment variable, meaning that "git config"
+ will read the specified path rather than the Git default
+ config paths."""
+
+ self.section = section
+ if git_config:
+ self.env = os.environ.copy()
+ self.env['GIT_CONFIG'] = git_config
+ else:
+ self.env = None
+
+ @staticmethod
+ def _split(s):
+ """Split NUL-terminated values."""
+
+ words = s.split('\0')
+ assert words[-1] == ''
+ return words[:-1]
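+ # Illustrative example (not part of upstream git_multimail): for the
+ # NUL-terminated output of "git config --null", e.g. 'alice\0bob\0',
+ # _split() returns ['alice', 'bob'] (the trailing empty entry is dropped).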
+
+ @staticmethod
+ def add_config_parameters(c):
+ """Add configuration parameters to Git.
+
+ c is either an str or a list of str, each element being of the
+ form 'var=val' or 'var', with the same syntax and meaning as
+ the argument of 'git -c var=val'.
+ """
+ if isinstance(c, str):
+ c = (c,)
+ parameters = os.environ.get('GIT_CONFIG_PARAMETERS', '')
+ if parameters:
+ parameters += ' '
+ # git expects GIT_CONFIG_PARAMETERS to be of the form
+ # "'name1=value1' 'name2=value2' 'name3=value3'"
+ # including everything inside the double quotes (but not the double
+ # quotes themselves). Spacing is critical. Also, if a value contains
+ # a literal single quote that quote must be represented using the
+ # four character sequence: '\''
+ parameters += ' '.join("'" + x.replace("'", "'\\''") + "'" for x in c)
+ os.environ['GIT_CONFIG_PARAMETERS'] = parameters
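+ # Illustrative example (not part of upstream git_multimail): a value
+ # containing a single quote, e.g. add_config_parameters("user.name=O'Brien"),
+ # appends 'user.name=O'\''Brien' (with the quote escaped as described above)
+ # to GIT_CONFIG_PARAMETERS.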
+
+ def get(self, name, default=None):
+ try:
+ values = self._split(read_git_output(
+ ['config', '--get', '--null', '%s.%s' % (self.section, name)],
+ env=self.env, keepends=True,
+ ))
+ assert len(values) == 1
+ return values[0]
+ except CommandError:
+ return default
+
+ def get_bool(self, name, default=None):
+ try:
+ value = read_git_output(
+ ['config', '--get', '--bool', '%s.%s' % (self.section, name)],
+ env=self.env,
+ )
+ except CommandError:
+ return default
+ return value == 'true'
+
+ def get_all(self, name, default=None):
+ """Read a (possibly multivalued) setting from the configuration.
+
+ Return the result as a list of values, or default if the name
+ is unset."""
+
+ try:
+ return self._split(read_git_output(
+ ['config', '--get-all', '--null', '%s.%s' % (self.section, name)],
+ env=self.env, keepends=True,
+ ))
+ except CommandError:
+ t, e, traceback = sys.exc_info()
+ if e.retcode == 1:
+ # "the section or key is invalid"; i.e., there is no
+ # value for the specified key.
+ return default
+ else:
+ raise
+
+ def set(self, name, value):
+ read_git_output(
+ ['config', '%s.%s' % (self.section, name), value],
+ env=self.env,
+ )
+
+ def add(self, name, value):
+ read_git_output(
+ ['config', '--add', '%s.%s' % (self.section, name), value],
+ env=self.env,
+ )
+
+ def __contains__(self, name):
+ return self.get_all(name, default=None) is not None
+
+ # We don't use this method anymore internally, but keep it here in
+ # case somebody is calling it from their own code:
+ def has_key(self, name):
+ return name in self
+
+ def unset_all(self, name):
+ try:
+ read_git_output(
+ ['config', '--unset-all', '%s.%s' % (self.section, name)],
+ env=self.env,
+ )
+ except CommandError:
+ t, e, traceback = sys.exc_info()
+ if e.retcode == 5:
+ # The name doesn't exist, which is what we wanted anyway...
+ pass
+ else:
+ raise
+
+ def set_recipients(self, name, value):
+ self.unset_all(name)
+ for pair in getaddresses([value]):
+ self.add(name, formataddr(pair))
+
+
+def generate_summaries(*log_args):
+ """Generate a brief summary for each revision requested.
+
+ log_args are strings that will be passed directly to "git log" as
+ revision selectors. Iterate over (sha1_short, subject) for each
+ commit specified by log_args (subject is the first line of the
+ commit message as a string without EOLs)."""
+
+ cmd = [
+ 'log', '--abbrev', '--format=%h %s',
+ ] + list(log_args) + ['--']
+ for line in read_git_lines(cmd):
+ yield tuple(line.split(' ', 1))
+
+
+def limit_lines(lines, max_lines):
+ for (index, line) in enumerate(lines):
+ if index < max_lines:
+ yield line
+
+ if index >= max_lines:
+ yield '... %d lines suppressed ...\n' % (index + 1 - max_lines,)
+
+
+def limit_linelength(lines, max_linelength):
+ for line in lines:
+ # Don't forget that lines always include a trailing newline.
+ if len(line) > max_linelength + 1:
+ line = line[:max_linelength - 7] + ' [...]\n'
+ yield line
+
+
+class CommitSet(object):
+ """A (constant) set of object names.
+
+ The set should be initialized with full SHA1 object names. The
+ __contains__() method returns True iff its argument is an
+ abbreviation of any of the names in the set."""
+
+ def __init__(self, names):
+ self._names = sorted(names)
+
+ def __len__(self):
+ return len(self._names)
+
+ def __contains__(self, sha1_abbrev):
+ """Return True iff this set contains sha1_abbrev (which might be abbreviated)."""
+
+ i = bisect.bisect_left(self._names, sha1_abbrev)
+ return i < len(self) and self._names[i].startswith(sha1_abbrev)
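+ # Illustrative note (not part of upstream git_multimail): because the names
+ # are kept sorted, a lookup such as "'2c26' in commit_set" bisects to the
+ # first name >= '2c26' and succeeds iff that name starts with the
+ # abbreviation, giving O(log n) membership tests for abbreviated SHA1s.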
+
+
+class GitObject(object):
+ def __init__(self, sha1, type=None):
+ if sha1 == ZEROS:
+ self.sha1 = self.type = self.commit_sha1 = None
+ else:
+ self.sha1 = sha1
+ self.type = type or read_git_output(['cat-file', '-t', self.sha1])
+
+ if self.type == 'commit':
+ self.commit_sha1 = self.sha1
+ elif self.type == 'tag':
+ try:
+ self.commit_sha1 = read_git_output(
+ ['rev-parse', '--verify', '%s^0' % (self.sha1,)]
+ )
+ except CommandError:
+ # Cannot deref tag to determine commit_sha1
+ self.commit_sha1 = None
+ else:
+ self.commit_sha1 = None
+
+ self.short = read_git_output(['rev-parse', '--short', sha1])
+
+ def get_summary(self):
+ """Return (sha1_short, subject) for this commit."""
+
+ if not self.sha1:
+ raise ValueError('Empty commit has no summary')
+
+ return next(iter(generate_summaries('--no-walk', self.sha1)))
+
+ def __eq__(self, other):
+ return isinstance(other, GitObject) and self.sha1 == other.sha1
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self.sha1)
+
+ def __nonzero__(self):
+ return bool(self.sha1)
+
+ def __bool__(self):
+ """Python 2 backward compatibility"""
+ return self.__nonzero__()
+
+ def __str__(self):
+ return self.sha1 or ZEROS
+
+
+class Change(object):
+ """A Change that has been made to the Git repository.
+
+ Abstract class from which both Revisions and ReferenceChanges are
+ derived. A Change knows how to generate a notification email
+ describing itself."""
+
+ def __init__(self, environment):
+ self.environment = environment
+ self._values = None
+ self._contains_html_diff = False
+
+ def _contains_diff(self):
+ # We do contain a diff; should it be rendered in HTML?
+ if self.environment.commit_email_format == "html":
+ self._contains_html_diff = True
+
+ def _compute_values(self):
+ """Return a dictionary {keyword: expansion} for this Change.
+
+ Derived classes overload this method to add more entries to
+ the return value. This method is used internally by
+ get_values(). The return value should always be a new
+ dictionary."""
+
+ values = self.environment.get_values()
+ fromaddr = self.environment.get_fromaddr(change=self)
+ if fromaddr is not None:
+ values['fromaddr'] = fromaddr
+ values['multimail_version'] = get_version()
+ return values
+
+ # Aliases usable in template strings. Tuple of pairs (destination,
+ # source).
+ VALUES_ALIAS = (
+ ("id", "newrev"),
+ )
+
+ def get_values(self, **extra_values):
+ """Return a dictionary {keyword: expansion} for this Change.
+
+ Return a dictionary mapping keywords to the values that they
+ should be expanded to for this Change (used when interpolating
+ template strings). If any keyword arguments are supplied, add
+ those to the return value as well. The return value is always
+ a new dictionary."""
+
+ if self._values is None:
+ self._values = self._compute_values()
+
+ values = self._values.copy()
+ if extra_values:
+ values.update(extra_values)
+
+ for alias, val in self.VALUES_ALIAS:
+ values[alias] = values[val]
+ return values
+
+ def expand(self, template, **extra_values):
+ """Expand template.
+
+ Expand the template (which should be a string) using string
+ interpolation of the values for this Change. If any keyword
+ arguments are provided, also include those in the keywords
+ available for interpolation."""
+
+ return template % self.get_values(**extra_values)
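+ # Illustrative example (not part of upstream git_multimail): a hypothetical
+ # template such as
+ #     'The %(refname_type)s %(short_refname)s was %(change_type)sd.\n'
+ # expanded for a branch update of refs/heads/master would yield
+ #     'The branch master was updated.\n'
+ # since expand() simply applies Python %-interpolation over get_values().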
+
+ def expand_lines(self, template, html_escape_val=False, **extra_values):
+ """Break template into lines and expand each line."""
+
+ values = self.get_values(**extra_values)
+ if html_escape_val:
+ for k in values:
+ if is_string(values[k]):
+ values[k] = html_escape(values[k])
+ for line in template.splitlines(True):
+ yield line % values
+
+ def expand_header_lines(self, template, **extra_values):
+ """Break template into lines and expand each line as an RFC 2822 header.
+
+ Encode values and split up lines that are too long. Silently
+ skip lines that contain references to unknown variables."""
+
+ values = self.get_values(**extra_values)
+ if self._contains_html_diff:
+ self._content_type = 'html'
+ else:
+ self._content_type = 'plain'
+ values['contenttype'] = self._content_type
+
+ for line in template.splitlines():
+ (name, value) = line.split(': ', 1)
+
+ try:
+ value = value % values
+ except KeyError:
+ t, e, traceback = sys.exc_info()
+ if DEBUG:
+ self.environment.log_warning(
+ 'Warning: unknown variable %r in the following line; line skipped:\n'
+ ' %s\n'
+ % (e.args[0], line,)
+ )
+ else:
+ if name.lower() in ADDR_HEADERS:
+ value = addr_header_encode(value, name)
+ else:
+ value = header_encode(value, name)
+ for splitline in ('%s: %s\n' % (name, value)).splitlines(True):
+ yield splitline
+
+ def generate_email_header(self):
+ """Generate the RFC 2822 email headers for this Change, a line at a time.
+
+ The output should not include the trailing blank line."""
+
+ raise NotImplementedError()
+
+ def generate_browse_link(self, base_url):
+ """Generate a link to an online repository browser."""
+ return iter(())
+
+ def generate_email_intro(self, html_escape_val=False):
+ """Generate the email intro for this Change, a line at a time.
+
+ The output will be used as the standard boilerplate at the top
+ of the email body."""
+
+ raise NotImplementedError()
+
+ def generate_email_body(self, push):
+ """Generate the main part of the email body, a line at a time.
+
+ The text in the body might be truncated after a specified
+ number of lines (see multimailhook.emailmaxlines)."""
+
+ raise NotImplementedError()
+
+ def generate_email_footer(self, html_escape_val):
+ """Generate the footer of the email, a line at a time.
+
+ The footer is always included, irrespective of
+ multimailhook.emailmaxlines."""
+
+ raise NotImplementedError()
+
+ def _wrap_for_html(self, lines):
+ """Wrap the lines in HTML <pre> tag when using HTML format.
+
+ Escape special HTML characters and add <pre> and </pre> tags around
+ the given lines if we should be generating HTML as indicated by
+ self._contains_html_diff being set to true.
+ """
+ if self._contains_html_diff:
+ yield "<pre style='margin:0'>\n"
+
+ for line in lines:
+ yield html_escape(line)
+
+ yield '</pre>\n'
+ else:
+ for line in lines:
+ yield line
+
+ def generate_email(self, push, body_filter=None, extra_header_values={}):
+ """Generate an email describing this change.
+
+ Iterate over the lines (including the header lines) of an
+ email describing this change. If body_filter is not None,
+ then use it to filter the lines that are intended for the
+ email body.
+
+ The extra_header_values field is received as a dict and not as
+ **kwargs, to allow passing other keyword arguments in the
+ future (e.g. passing extra values to generate_email_intro())."""
+
+ for line in self.generate_email_header(**extra_header_values):
+ yield line
+ yield '\n'
+ html_escape_val = (self.environment.html_in_intro and
+ self._contains_html_diff)
+ intro = self.generate_email_intro(html_escape_val)
+ if not self.environment.html_in_intro:
+ intro = self._wrap_for_html(intro)
+ for line in intro:
+ yield line
+
+ if self.environment.commitBrowseURL:
+ for line in self.generate_browse_link(self.environment.commitBrowseURL):
+ yield line
+
+ body = self.generate_email_body(push)
+ if body_filter is not None:
+ body = body_filter(body)
+
+ diff_started = False
+ if self._contains_html_diff:
+ # "white-space: pre" is the default, but we need to
+ # specify it again in case the message is viewed in a
+ # webmail which wraps it in an element setting white-space
+ # to something else (Zimbra does this and sets
+ # white-space: pre-line).
+ yield '<pre style="white-space: pre; background: #F8F8F8">'
+ for line in body:
+ if self._contains_html_diff:
+ # This is very, very naive. It would be much better to really
+ # parse the diff, i.e. look at how many lines the hunk headers
+ # say we have, instead of blindly highlighting everything
+ # that looks like it might be part of a diff.
+ bgcolor = ''
+ fgcolor = ''
+ if line.startswith('--- a/'):
+ diff_started = True
+ bgcolor = 'e0e0ff'
+ elif line.startswith('diff ') or line.startswith('index '):
+ diff_started = True
+ fgcolor = '808080'
+ elif diff_started:
+ if line.startswith('+++ '):
+ bgcolor = 'e0e0ff'
+ elif line.startswith('@@'):
+ bgcolor = 'e0e0e0'
+ elif line.startswith('+'):
+ bgcolor = 'e0ffe0'
+ elif line.startswith('-'):
+ bgcolor = 'ffe0e0'
+ elif line.startswith('commit '):
+ fgcolor = '808000'
+ elif line.startswith(' '):
+ fgcolor = '404040'
+
+ # Chop the trailing LF, we don't want it inside <pre>.
+ line = html_escape(line[:-1])
+
+ if bgcolor or fgcolor:
+ style = 'display:block; white-space:pre;'
+ if bgcolor:
+ style += 'background:#' + bgcolor + ';'
+ if fgcolor:
+ style += 'color:#' + fgcolor + ';'
+ # Use a <span style='display:block> to color the
+ # whole line. The newline must be inside the span
+ # to display properly both in Firefox and in
+ # text-based browsers.
+ line = "<span style='%s'>%s\n</span>" % (style, line)
+ else:
+ line = line + '\n'
+
+ yield line
+ if self._contains_html_diff:
+ yield '</pre>'
+ html_escape_val = (self.environment.html_in_footer and
+ self._contains_html_diff)
+ footer = self.generate_email_footer(html_escape_val)
+ if not self.environment.html_in_footer:
+ footer = self._wrap_for_html(footer)
+ for line in footer:
+ yield line
+
+ def get_specific_fromaddr(self):
+ """For kinds of Changes which specify it, return the kind-specific
+ From address to use."""
+ return None
+
+
+class Revision(Change):
+ """A Change consisting of a single git commit."""
+
+ CC_RE = re.compile(r'^\s*C[Cc]:\s*(?P<to>[^#]+@[^\s#]*)\s*(#.*)?$')
+
+ def __init__(self, reference_change, rev, num, tot):
+ Change.__init__(self, reference_change.environment)
+ self.reference_change = reference_change
+ self.rev = rev
+ self.change_type = self.reference_change.change_type
+ self.refname = self.reference_change.refname
+ self.num = num
+ self.tot = tot
+ self.author = read_git_output(['log', '--no-walk', '--format=%aN <%aE>', self.rev.sha1])
+ self.recipients = self.environment.get_revision_recipients(self)
+
+ # -s is short for --no-patch, but -s works on older Git versions (e.g. 1.7)
+ self.parents = read_git_lines(['show', '-s', '--format=%P',
+ self.rev.sha1])[0].split()
+
+ self.cc_recipients = ''
+ if self.environment.get_scancommitforcc():
+ self.cc_recipients = ', '.join(to.strip() for to in self._cc_recipients())
+ if self.cc_recipients:
+ self.environment.log_msg(
+ 'Add %s to CC for %s' % (self.cc_recipients, self.rev.sha1))
+
+ def _cc_recipients(self):
+ cc_recipients = []
+ message = read_git_output(['log', '--no-walk', '--format=%b', self.rev.sha1])
+ lines = message.strip().split('\n')
+ for line in lines:
+ m = re.match(self.CC_RE, line)
+ if m:
+ cc_recipients.append(m.group('to'))
+
+ return cc_recipients
+
+ def _compute_values(self):
+ values = Change._compute_values(self)
+
+ oneline = read_git_output(
+ ['log', '--format=%s', '--no-walk', self.rev.sha1]
+ )
+
+ max_subject_length = self.environment.get_max_subject_length()
+ if max_subject_length > 0 and len(oneline) > max_subject_length:
+ oneline = oneline[:max_subject_length - 6] + ' [...]'
+
+ values['rev'] = self.rev.sha1
+ values['parents'] = ' '.join(self.parents)
+ values['rev_short'] = self.rev.short
+ values['change_type'] = self.change_type
+ values['refname'] = self.refname
+ values['newrev'] = self.rev.sha1
+ values['short_refname'] = self.reference_change.short_refname
+ values['refname_type'] = self.reference_change.refname_type
+ values['reply_to_msgid'] = self.reference_change.msgid
+ values['thread_index'] = self.reference_change.thread_index
+ values['num'] = self.num
+ values['tot'] = self.tot
+ values['recipients'] = self.recipients
+ if self.cc_recipients:
+ values['cc_recipients'] = self.cc_recipients
+ values['oneline'] = oneline
+ values['author'] = self.author
+
+ reply_to = self.environment.get_reply_to_commit(self)
+ if reply_to:
+ values['reply_to'] = reply_to
+
+ return values
+
+ def generate_email_header(self, **extra_values):
+ for line in self.expand_header_lines(
+ REVISION_HEADER_TEMPLATE, **extra_values
+ ):
+ yield line
+
+ def generate_browse_link(self, base_url):
+ if '%(' not in base_url:
+ base_url += '%(id)s'
+ url = "".join(self.expand_lines(base_url))
+ if self._content_type == 'html':
+ for line in self.expand_lines(LINK_HTML_TEMPLATE,
+ html_escape_val=True,
+ browse_url=url):
+ yield line
+ elif self._content_type == 'plain':
+ for line in self.expand_lines(LINK_TEXT_TEMPLATE,
+ html_escape_val=False,
+ browse_url=url):
+ yield line
+ else:
+ raise NotImplementedError("Content-type %s unsupported. Please report it as a bug." % (self._content_type,))
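+ # Illustrative note (not part of upstream git_multimail): with a hypothetical
+ # multimailhook.commitBrowseURL such as
+ #     https://cgit.example.org/repo.git/commit/?id=
+ # the URL contains no '%(' placeholder, so '%(id)s' is appended and expands
+ # to the commit's SHA1 ('id' is an alias for 'newrev'; see VALUES_ALIAS).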
+
+ def generate_email_intro(self, html_escape_val=False):
+ for line in self.expand_lines(REVISION_INTRO_TEMPLATE,
+ html_escape_val=html_escape_val):
+ yield line
+
+ def generate_email_body(self, push):
+ """Show this revision."""
+
+ for line in read_git_lines(
+ ['log'] + self.environment.commitlogopts + ['-1', self.rev.sha1],
+ keepends=True,
+ errors='replace'):
+ if line.startswith('Date: ') and self.environment.date_substitute:
+ yield self.environment.date_substitute + line[len('Date: '):]
+ else:
+ yield line
+
+ def generate_email_footer(self, html_escape_val):
+ return self.expand_lines(REVISION_FOOTER_TEMPLATE,
+ html_escape_val=html_escape_val)
+
+ def generate_email(self, push, body_filter=None, extra_header_values={}):
+ self._contains_diff()
+ return Change.generate_email(self, push, body_filter, extra_header_values)
+
+ def get_specific_fromaddr(self):
+ return self.environment.from_commit
+
+
+class ReferenceChange(Change):
+ """A Change to a Git reference.
+
+ An abstract class representing a create, update, or delete of a
+ Git reference. Derived classes handle specific types of reference
+ (e.g., tags vs. branches). These classes generate the main
+ reference change email summarizing the reference change and
+ whether it caused any commits to be added or removed.
+
+ ReferenceChange objects are usually created using the static
+ create() method, which has the logic to decide which derived class
+ to instantiate."""
+
+ REF_RE = re.compile(r'^refs\/(?P<area>[^\/]+)\/(?P<shortname>.*)$')
+
+ @staticmethod
+ def create(environment, oldrev, newrev, refname):
+ """Return a ReferenceChange object representing the change.
+
+ Return an object that represents the type of change that is being
+ made. oldrev and newrev should be SHA1s or ZEROS."""
+
+ old = GitObject(oldrev)
+ new = GitObject(newrev)
+ rev = new or old
+
+ # The revision type tells us what type the object is; combined with
+ # the location of the ref we can decide between:
+ # - working branch
+ # - tracking branch
+ # - unannotated tag
+ # - annotated tag
+ m = ReferenceChange.REF_RE.match(refname)
+ if m:
+ area = m.group('area')
+ short_refname = m.group('shortname')
+ else:
+ area = ''
+ short_refname = refname
+
+ if rev.type == 'tag':
+ # Annotated tag:
+ klass = AnnotatedTagChange
+ elif rev.type == 'commit':
+ if area == 'tags':
+ # Non-annotated tag:
+ klass = NonAnnotatedTagChange
+ elif area == 'heads':
+ # Branch:
+ klass = BranchChange
+ elif area == 'remotes':
+ # Tracking branch:
+ environment.log_warning(
+ '*** Push-update of tracking branch %r\n'
+ '*** - incomplete email generated.'
+ % (refname,)
+ )
+ klass = OtherReferenceChange
+ else:
+ # Some other reference namespace:
+ environment.log_warning(
+ '*** Push-update of strange reference %r\n'
+ '*** - incomplete email generated.'
+ % (refname,)
+ )
+ klass = OtherReferenceChange
+ else:
+ # Anything else (is there anything else?)
+ environment.log_warning(
+ '*** Unknown type of update to %r (%s)\n'
+ '*** - incomplete email generated.'
+ % (refname, rev.type,)
+ )
+ klass = OtherReferenceChange
+
+ return klass(
+ environment,
+ refname=refname, short_refname=short_refname,
+ old=old, new=new, rev=rev,
+ )
+
+ @staticmethod
+ def make_thread_index():
+ """Return a string appropriate for the Thread-Index header,
+ needed by MS Outlook to get threading right.
+
+ The format is (base64-encoded):
+ - 1 byte must be 1
+ - 5 bytes encode a date (hardcoded here)
+ - 16 bytes for a globally unique identifier
+
+ FIXME: Unfortunately, even with the Thread-Index field, MS
+ Outlook doesn't seem to do the threading reliably (see
+ https://github.com/git-multimail/git-multimail/pull/194).
+ """
+ thread_index = b'\x01\x00\x00\x12\x34\x56' + uuid.uuid4().bytes
+ return base64.standard_b64encode(thread_index).decode('ascii')
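+ # Illustrative note (not part of upstream git_multimail): the encoded value
+ # is 22 bytes (6 fixed bytes plus a 16-byte UUID), so the resulting
+ # Thread-Index header is a 32-character base64 string ending in '=='.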
+
+ def __init__(self, environment, refname, short_refname, old, new, rev):
+ Change.__init__(self, environment)
+ self.change_type = {
+ (False, True): 'create',
+ (True, True): 'update',
+ (True, False): 'delete',
+ }[bool(old), bool(new)]
+ self.refname = refname
+ self.short_refname = short_refname
+ self.old = old
+ self.new = new
+ self.rev = rev
+ self.msgid = make_msgid()
+ self.thread_index = self.make_thread_index()
+ self.diffopts = environment.diffopts
+ self.graphopts = environment.graphopts
+ self.logopts = environment.logopts
+ self.commitlogopts = environment.commitlogopts
+ self.showgraph = environment.refchange_showgraph
+ self.showlog = environment.refchange_showlog
+
+ self.header_template = REFCHANGE_HEADER_TEMPLATE
+ self.intro_template = REFCHANGE_INTRO_TEMPLATE
+ self.footer_template = FOOTER_TEMPLATE
+
+ def _compute_values(self):
+ values = Change._compute_values(self)
+
+ values['change_type'] = self.change_type
+ values['refname_type'] = self.refname_type
+ values['refname'] = self.refname
+ values['short_refname'] = self.short_refname
+ values['msgid'] = self.msgid
+ values['thread_index'] = self.thread_index
+ values['recipients'] = self.recipients
+ values['oldrev'] = str(self.old)
+ values['oldrev_short'] = self.old.short
+ values['newrev'] = str(self.new)
+ values['newrev_short'] = self.new.short
+
+ if self.old:
+ values['oldrev_type'] = self.old.type
+ if self.new:
+ values['newrev_type'] = self.new.type
+
+ reply_to = self.environment.get_reply_to_refchange(self)
+ if reply_to:
+ values['reply_to'] = reply_to
+
+ return values
+
+ def send_single_combined_email(self, known_added_sha1s):
+ """Determine if a combined refchange/revision email should be sent
+
+ If there is only a single new (non-merge) commit added by a
+ change, it is useful to combine the ReferenceChange and
+ Revision emails into one. In such a case, return the single
+ revision; otherwise, return None.
+
+ This method is overridden in BranchChange."""
+
+ return None
+
+ def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
+ """Generate an email describing this change AND specified revision.
+
+ Iterate over the lines (including the header lines) of an
+ email describing this change. If body_filter is not None,
+ then use it to filter the lines that are intended for the
+ email body.
+
+ The extra_header_values field is received as a dict and not as
+ **kwargs, to allow passing other keyword arguments in the
+ future (e.g. passing extra values to generate_email_intro()).
+
+ This method is overridden in BranchChange."""
+
+ raise NotImplementedError
+
+ def get_subject(self):
+ template = {
+ 'create': REF_CREATED_SUBJECT_TEMPLATE,
+ 'update': REF_UPDATED_SUBJECT_TEMPLATE,
+ 'delete': REF_DELETED_SUBJECT_TEMPLATE,
+ }[self.change_type]
+ return self.expand(template)
+
+ def generate_email_header(self, **extra_values):
+ if 'subject' not in extra_values:
+ extra_values['subject'] = self.get_subject()
+
+ for line in self.expand_header_lines(
+ self.header_template, **extra_values
+ ):
+ yield line
+
+ def generate_email_intro(self, html_escape_val=False):
+ for line in self.expand_lines(self.intro_template,
+ html_escape_val=html_escape_val):
+ yield line
+
+ def generate_email_body(self, push):
+ """Call the appropriate body-generation routine.
+
+ Call one of generate_create_summary() /
+ generate_update_summary() / generate_delete_summary()."""
+
+ change_summary = {
+ 'create': self.generate_create_summary,
+ 'delete': self.generate_delete_summary,
+ 'update': self.generate_update_summary,
+ }[self.change_type](push)
+ for line in change_summary:
+ yield line
+
+ for line in self.generate_revision_change_summary(push):
+ yield line
+
+ def generate_email_footer(self, html_escape_val):
+ return self.expand_lines(self.footer_template,
+ html_escape_val=html_escape_val)
+
+ def generate_revision_change_graph(self, push):
+ if self.showgraph:
+ args = ['--graph'] + self.graphopts
+ for newold in ('new', 'old'):
+ has_newold = False
+ spec = push.get_commits_spec(newold, self)
+ for line in git_log(spec, args=args, keepends=True):
+ if not has_newold:
+ has_newold = True
+ yield '\n'
+ yield 'Graph of %s commits:\n\n' % (
+ {'new': 'new', 'old': 'discarded'}[newold],)
+ yield ' ' + line
+ if has_newold:
+ yield '\n'
+
+ def generate_revision_change_log(self, new_commits_list):
+ if self.showlog:
+ yield '\n'
+ yield 'Detailed log of new commits:\n\n'
+ for line in read_git_lines(
+ ['log', '--no-walk'] +
+ self.logopts +
+ new_commits_list +
+ ['--'],
+ keepends=True,
+ ):
+ yield line
+
+ def generate_new_revision_summary(self, tot, new_commits_list, push):
+ for line in self.expand_lines(NEW_REVISIONS_TEMPLATE, tot=tot):
+ yield line
+ for line in self.generate_revision_change_graph(push):
+ yield line
+ for line in self.generate_revision_change_log(new_commits_list):
+ yield line
+
+ def generate_revision_change_summary(self, push):
+ """Generate a summary of the revisions added/removed by this change."""
+
+ if self.new.commit_sha1 and not self.old.commit_sha1:
+ # A new reference was created. List the new revisions
+ # brought by the new reference (i.e., those revisions that
+ # were not in the repository before this reference
+ # change).
+ sha1s = list(push.get_new_commits(self))
+ sha1s.reverse()
+ tot = len(sha1s)
+ new_revisions = [
+ Revision(self, GitObject(sha1), num=i + 1, tot=tot)
+ for (i, sha1) in enumerate(sha1s)
+ ]
+
+ if new_revisions:
+ yield self.expand('This %(refname_type)s includes the following new commits:\n')
+ yield '\n'
+ for r in new_revisions:
+ (sha1, subject) = r.rev.get_summary()
+ yield r.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='new', text=subject,
+ )
+ yield '\n'
+ for line in self.generate_new_revision_summary(
+ tot, [r.rev.sha1 for r in new_revisions], push):
+ yield line
+ else:
+ for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
+ yield line
+
+ elif self.new.commit_sha1 and self.old.commit_sha1:
+ # A reference was changed to point at a different commit.
+ # List the revisions that were removed and/or added *from
+ # that reference* by this reference change, along with a
+ # diff between the trees for its old and new values.
+
+ # List of the revisions that were added to the branch by
+ # this update. Note this list can include revisions that
+ # have already had notification emails; we want such
+ # revisions in the summary even though we will not send
+ # new notification emails for them.
+ adds = list(generate_summaries(
+ '--topo-order', '--reverse', '%s..%s'
+ % (self.old.commit_sha1, self.new.commit_sha1,)
+ ))
+
+ # List of the revisions that were removed from the branch
+ # by this update. This will be empty except for
+ # non-fast-forward updates.
+ discards = list(generate_summaries(
+ '%s..%s' % (self.new.commit_sha1, self.old.commit_sha1,)
+ ))
+
+ if adds:
+ new_commits_list = push.get_new_commits(self)
+ else:
+ new_commits_list = []
+ new_commits = CommitSet(new_commits_list)
+
+ if discards:
+ discarded_commits = CommitSet(push.get_discarded_commits(self))
+ else:
+ discarded_commits = CommitSet([])
+
+ if discards and adds:
+ for (sha1, subject) in discards:
+ if sha1 in discarded_commits:
+ action = 'discard'
+ else:
+ action = 'omit'
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action=action,
+ rev_short=sha1, text=subject,
+ )
+ for (sha1, subject) in adds:
+ if sha1 in new_commits:
+ action = 'new'
+ else:
+ action = 'add'
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action=action,
+ rev_short=sha1, text=subject,
+ )
+ yield '\n'
+ for line in self.expand_lines(NON_FF_TEMPLATE):
+ yield line
+
+ elif discards:
+ for (sha1, subject) in discards:
+ if sha1 in discarded_commits:
+ action = 'discard'
+ else:
+ action = 'omit'
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action=action,
+ rev_short=sha1, text=subject,
+ )
+ yield '\n'
+ for line in self.expand_lines(REWIND_ONLY_TEMPLATE):
+ yield line
+
+ elif adds:
+ (sha1, subject) = self.old.get_summary()
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='from',
+ rev_short=sha1, text=subject,
+ )
+ for (sha1, subject) in adds:
+ if sha1 in new_commits:
+ action = 'new'
+ else:
+ action = 'add'
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action=action,
+ rev_short=sha1, text=subject,
+ )
+
+ yield '\n'
+
+ if new_commits:
+ for line in self.generate_new_revision_summary(
+ len(new_commits), new_commits_list, push):
+ yield line
+ else:
+ for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
+ yield line
+ for line in self.generate_revision_change_graph(push):
+ yield line
+
+ # The diffstat is shown from the old revision to the new
+ # revision. This is to show the truth of what happened in
+ # this change. There's no point showing the stat from the
+ # base to the new revision because the base is effectively a
+ # random revision at this point - the user will be interested
+ # in what this revision changed - including the undoing of
+ # previous revisions in the case of non-fast-forward updates.
+ yield '\n'
+ yield 'Summary of changes:\n'
+ for line in read_git_lines(
+ ['diff-tree'] +
+ self.diffopts +
+ ['%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,)],
+ keepends=True,
+ ):
+ yield line
+
+ elif self.old.commit_sha1 and not self.new.commit_sha1:
+ # A reference was deleted. List the revisions that were
+ # removed from the repository by this reference change.
+
+ sha1s = list(push.get_discarded_commits(self))
+ tot = len(sha1s)
+ discarded_revisions = [
+ Revision(self, GitObject(sha1), num=i + 1, tot=tot)
+ for (i, sha1) in enumerate(sha1s)
+ ]
+
+ if discarded_revisions:
+ for line in self.expand_lines(DISCARDED_REVISIONS_TEMPLATE):
+ yield line
+ yield '\n'
+ for r in discarded_revisions:
+ (sha1, subject) = r.rev.get_summary()
+ yield r.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='discard', text=subject,
+ )
+ for line in self.generate_revision_change_graph(push):
+ yield line
+ else:
+ for line in self.expand_lines(NO_DISCARDED_REVISIONS_TEMPLATE):
+ yield line
+
+ elif not self.old.commit_sha1 and not self.new.commit_sha1:
+ for line in self.expand_lines(NON_COMMIT_UPDATE_TEMPLATE):
+ yield line
+
+ def generate_create_summary(self, push):
+ """Called for the creation of a reference."""
+
+ # This is a new reference and so oldrev is not valid
+ (sha1, subject) = self.new.get_summary()
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='at',
+ rev_short=sha1, text=subject,
+ )
+ yield '\n'
+
+ def generate_update_summary(self, push):
+ """Called for the change of a pre-existing branch."""
+
+ return iter([])
+
+ def generate_delete_summary(self, push):
+ """Called for the deletion of any type of reference."""
+
+ (sha1, subject) = self.old.get_summary()
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='was',
+ rev_short=sha1, text=subject,
+ )
+ yield '\n'
+
+ def get_specific_fromaddr(self):
+ return self.environment.from_refchange
+
+
+class BranchChange(ReferenceChange):
+ refname_type = 'branch'
+
+ def __init__(self, environment, refname, short_refname, old, new, rev):
+ ReferenceChange.__init__(
+ self, environment,
+ refname=refname, short_refname=short_refname,
+ old=old, new=new, rev=rev,
+ )
+ self.recipients = environment.get_refchange_recipients(self)
+ self._single_revision = None
+
+ def send_single_combined_email(self, known_added_sha1s):
+ if not self.environment.combine_when_single_commit:
+ return None
+
+ # In the sadly-all-too-frequent use case of people pushing only
+ # one of their commits at a time to a repository, users feel
+ # the reference change summary emails are noise rather than
+ # important signal. This is because, in this particular
+ # use case, there is a reference change summary email for each
+ # new commit, and all these summaries do is point out that
+ # there is one new commit (which can readily be inferred by
+ # the existence of the individual revision email that is also
+ # sent). In such cases, our users prefer there to be a combined
+ # reference change summary/new revision email.
+ #
+ # So, if the change is an update and it doesn't discard any
+ # commits, and it adds exactly one non-merge commit (gerrit
+ # forces a workflow where every commit is individually merged
+ # and the git-multimail hook fired off for just this one
+ # change), then we send a combined refchange/revision email.
+ try:
+ # If this change is a reference update that doesn't discard
+ # any commits...
+ if self.change_type != 'update':
+ return None
+
+ if read_git_lines(
+ ['merge-base', self.old.sha1, self.new.sha1]
+ ) != [self.old.sha1]:
+ return None
+
+ # Check if this update introduced exactly one non-merge
+ # commit:
+
+ def split_line(line):
+ """Split line into (sha1, [parent,...])."""
+
+ words = line.split()
+ return (words[0], words[1:])
+
+ # Get the new commits introduced by the push as a list of
+ # (sha1, [parent,...])
+ new_commits = [
+ split_line(line)
+ for line in read_git_lines(
+ [
+ 'log', '-3', '--format=%H %P',
+ '%s..%s' % (self.old.sha1, self.new.sha1),
+ ]
+ )
+ ]
+
+ if not new_commits:
+ return None
+
+ # If the newest commit is a merge, save it for a later check
+ # but otherwise ignore it
+ merge = None
+ tot = len(new_commits)
+ if len(new_commits[0][1]) > 1:
+ merge = new_commits[0][0]
+ del new_commits[0]
+
+ # Our primary check: we can't combine if more than one commit
+ # is introduced. We also currently only combine if the new
+ # commit is a non-merge commit, though it may make sense to
+ # combine if it is a merge as well.
+ if not (
+ len(new_commits) == 1 and
+ len(new_commits[0][1]) == 1 and
+ new_commits[0][0] in known_added_sha1s
+ ):
+ return None
+
+ # We do not want to combine revision and refchange emails if
+ # those go to separate locations.
+ rev = Revision(self, GitObject(new_commits[0][0]), 1, tot)
+ if rev.recipients != self.recipients:
+ return None
+
+ # We ignored the newest commit if it was just a merge of the one
+ # commit being introduced. But we don't want to ignore that
+ # merge commit if it involved conflict resolutions. Check that.
+ if merge and merge != read_git_output(['diff-tree', '--cc', merge]):
+ return None
+
+ # We can combine the refchange and one new revision emails
+ # into one. Return the Revision that a combined email should
+ # be sent about.
+ return rev
+ except CommandError:
+ # Cannot determine number of commits in old..new or new..old;
+ # don't combine reference/revision emails:
+ return None
+
+ def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
+ values = revision.get_values()
+ if extra_header_values:
+ values.update(extra_header_values)
+ if 'subject' not in extra_header_values:
+ values['subject'] = self.expand(COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE, **values)
+
+ self._single_revision = revision
+ self._contains_diff()
+ self.header_template = COMBINED_HEADER_TEMPLATE
+ self.intro_template = COMBINED_INTRO_TEMPLATE
+ self.footer_template = COMBINED_FOOTER_TEMPLATE
+
+ def revision_gen_link(base_url):
+ # revision is used only to generate the body, and
+ # _content_type is set while generating headers. Get it
+ # from the BranchChange object.
+ revision._content_type = self._content_type
+ return revision.generate_browse_link(base_url)
+ self.generate_browse_link = revision_gen_link
+ for line in self.generate_email(push, body_filter, values):
+ yield line
+
+ def generate_email_body(self, push):
+ '''Call the appropriate body generation routine.
+
+ If this is a combined refchange/revision email, the special logic
+ for handling this combined email comes from this function. For
+ other cases, we just use the normal handling.'''
+
+ # If self._single_revision isn't set; don't override
+ if not self._single_revision:
+ for line in super(BranchChange, self).generate_email_body(push):
+ yield line
+ return
+
+ # This is a combined refchange/revision email; we first provide
+ # some info from the refchange portion, and then call the revision
+ # generate_email_body function to handle the revision portion.
+ adds = list(generate_summaries(
+ '--topo-order', '--reverse', '%s..%s'
+ % (self.old.commit_sha1, self.new.commit_sha1,)
+ ))
+
+ yield self.expand("The following commit(s) were added to %(refname)s by this push:\n")
+ for (sha1, subject) in adds:
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='new',
+ rev_short=sha1, text=subject,
+ )
+
+ yield self._single_revision.rev.short + " is described below\n"
+ yield '\n'
+
+ for line in self._single_revision.generate_email_body(push):
+ yield line
+
+
+class AnnotatedTagChange(ReferenceChange):
+ refname_type = 'annotated tag'
+
+ def __init__(self, environment, refname, short_refname, old, new, rev):
+ ReferenceChange.__init__(
+ self, environment,
+ refname=refname, short_refname=short_refname,
+ old=old, new=new, rev=rev,
+ )
+ self.recipients = environment.get_announce_recipients(self)
+ self.show_shortlog = environment.announce_show_shortlog
+
+ ANNOTATED_TAG_FORMAT = (
+ '%(*objectname)\n'
+ '%(*objecttype)\n'
+ '%(taggername)\n'
+ '%(taggerdate)'
+ )
+
+ def describe_tag(self, push):
+ """Describe the new value of an annotated tag."""
+
+ # Use git for-each-ref to pull out the individual fields from
+ # the tag
+ [tagobject, tagtype, tagger, tagged] = read_git_lines(
+ ['for-each-ref', '--format=%s' % (self.ANNOTATED_TAG_FORMAT,), self.refname],
+ )
+
+ yield self.expand(
+ BRIEF_SUMMARY_TEMPLATE, action='tagging',
+ rev_short=tagobject, text='(%s)' % (tagtype,),
+ )
+ if tagtype == 'commit':
+ # If the tagged object is a commit, then we assume this is a
+ # release, and so we calculate which tag this tag is
+ # replacing
+ try:
+ prevtag = read_git_output(['describe', '--abbrev=0', '%s^' % (self.new,)])
+ except CommandError:
+ prevtag = None
+ if prevtag:
+ yield ' replaces %s\n' % (prevtag,)
+ else:
+ prevtag = None
+ yield ' length %s bytes\n' % (read_git_output(['cat-file', '-s', tagobject]),)
+
+ yield ' by %s\n' % (tagger,)
+ yield ' on %s\n' % (tagged,)
+ yield '\n'
+
+ # Show the content of the tag message; this might contain a
+ # change log or release notes so is worth displaying.
+ yield LOGBEGIN
+ contents = list(read_git_lines(['cat-file', 'tag', self.new.sha1], keepends=True))
+ contents = contents[contents.index('\n') + 1:]
+ if contents and contents[-1][-1:] != '\n':
+ contents.append('\n')
+ for line in contents:
+ yield line
+
+ if self.show_shortlog and tagtype == 'commit':
+ # Only commit tags make sense to have rev-list operations
+ # performed on them
+ yield '\n'
+ if prevtag:
+ # Show changes since the previous release
+ revlist = read_git_output(
+ ['rev-list', '--pretty=short', '%s..%s' % (prevtag, self.new,)],
+ keepends=True,
+ )
+ else:
+ # No previous tag, show all the changes since time
+ # began
+ revlist = read_git_output(
+ ['rev-list', '--pretty=short', '%s' % (self.new,)],
+ keepends=True,
+ )
+ for line in read_git_lines(['shortlog'], input=revlist, keepends=True):
+ yield line
+
+ yield LOGEND
+ yield '\n'
+
+ def generate_create_summary(self, push):
+ """Called for the creation of an annotated tag."""
+
+ for line in self.expand_lines(TAG_CREATED_TEMPLATE):
+ yield line
+
+ for line in self.describe_tag(push):
+ yield line
+
+ def generate_update_summary(self, push):
+ """Called for the update of an annotated tag.
+
+ This is probably a rare event and may not even be allowed."""
+
+ for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
+ yield line
+
+ for line in self.describe_tag(push):
+ yield line
+
+ def generate_delete_summary(self, push):
+ """Called when a non-annotated reference is updated."""
+
+ for line in self.expand_lines(TAG_DELETED_TEMPLATE):
+ yield line
+
+ yield self.expand(' tag was %(oldrev_short)s\n')
+ yield '\n'
+
+
+class NonAnnotatedTagChange(ReferenceChange):
+ refname_type = 'tag'
+
+ def __init__(self, environment, refname, short_refname, old, new, rev):
+ ReferenceChange.__init__(
+ self, environment,
+ refname=refname, short_refname=short_refname,
+ old=old, new=new, rev=rev,
+ )
+ self.recipients = environment.get_refchange_recipients(self)
+
+ def generate_create_summary(self, push):
+ """Called for the creation of an annotated tag."""
+
+ for line in self.expand_lines(TAG_CREATED_TEMPLATE):
+ yield line
+
+ def generate_update_summary(self, push):
+ """Called when a non-annotated reference is updated."""
+
+ for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
+ yield line
+
+ def generate_delete_summary(self, push):
+ """Called when a non-annotated reference is updated."""
+
+ for line in self.expand_lines(TAG_DELETED_TEMPLATE):
+ yield line
+
+ for line in ReferenceChange.generate_delete_summary(self, push):
+ yield line
+
+
+class OtherReferenceChange(ReferenceChange):
+ refname_type = 'reference'
+
+ def __init__(self, environment, refname, short_refname, old, new, rev):
+ # We use the full refname as short_refname, because otherwise
+ # the full name of the reference would not be obvious from the
+ # text of the email.
+ ReferenceChange.__init__(
+ self, environment,
+ refname=refname, short_refname=refname,
+ old=old, new=new, rev=rev,
+ )
+ self.recipients = environment.get_refchange_recipients(self)
+
+
+class Mailer(object):
+ """An object that can send emails."""
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def close(self):
+ pass
+
+ def send(self, lines, to_addrs):
+ """Send an email consisting of lines.
+
+ lines must be an iterable over the lines constituting the
+ header and body of the email. to_addrs is a list of recipient
+ addresses (can be needed even if lines already contains a
+ "To:" field). It can be either a string (comma-separated list
+ of email addresses) or a Python list of individual email
+ addresses.
+
+ """
+
+ raise NotImplementedError()
+
+
+class SendMailer(Mailer):
+ """Send emails using 'sendmail -oi -t'."""
+
+ SENDMAIL_CANDIDATES = [
+ '/usr/sbin/sendmail',
+ '/usr/lib/sendmail',
+ ]
+
+ @staticmethod
+ def find_sendmail():
+ for path in SendMailer.SENDMAIL_CANDIDATES:
+ if os.access(path, os.X_OK):
+ return path
+ else:
+ raise ConfigurationException(
+ 'No sendmail executable found. '
+ 'Try setting multimailhook.sendmailCommand.'
+ )
+
+ def __init__(self, environment, command=None, envelopesender=None):
+ """Construct a SendMailer instance.
+
+ command should be the command and arguments used to invoke
+ sendmail, as a list of strings. If an envelopesender is
+ provided, it will also be passed to the command, via '-f
+ envelopesender'."""
+ super(SendMailer, self).__init__(environment)
+ if command:
+ self.command = command[:]
+ else:
+ self.command = [self.find_sendmail(), '-oi', '-t']
+
+ if envelopesender:
+ self.command.extend(['-f', envelopesender])
+
+ def send(self, lines, to_addrs):
+ try:
+ p = subprocess.Popen(self.command, stdin=subprocess.PIPE)
+ except OSError:
+ self.environment.get_logger().error(
+ '*** Cannot execute command: %s\n' % ' '.join(self.command) +
+ '*** %s\n' % sys.exc_info()[1] +
+ '*** Try setting multimailhook.mailer to "smtp"\n' +
+ '*** to send emails without using the sendmail command.\n'
+ )
+ sys.exit(1)
+ try:
+ lines = (str_to_bytes(line) for line in lines)
+ p.stdin.writelines(lines)
+ except Exception:
+ self.environment.get_logger().error(
+ '*** Error while generating commit email\n'
+ '*** - mail sending aborted.\n'
+ )
+ if hasattr(p, 'terminate'):
+ # subprocess.terminate() is not available in Python 2.4
+ p.terminate()
+ else:
+ import signal
+ os.kill(p.pid, signal.SIGTERM)
+ raise
+ else:
+ p.stdin.close()
+ retcode = p.wait()
+ if retcode:
+ raise CommandError(self.command, retcode)
+
+
+class SMTPMailer(Mailer):
+ """Send emails using Python's smtplib."""
+
+ def __init__(self, environment,
+ envelopesender, smtpserver,
+ smtpservertimeout=10.0, smtpserverdebuglevel=0,
+ smtpencryption='none',
+ smtpuser='', smtppass='',
+ smtpcacerts=''
+ ):
+ super(SMTPMailer, self).__init__(environment)
+ if not envelopesender:
+ self.environment.get_logger().error(
+ 'fatal: git_multimail: cannot use SMTPMailer without a sender address.\n'
+ 'please set either multimailhook.envelopeSender or user.email\n'
+ )
+ sys.exit(1)
+ if smtpencryption == 'ssl' and not (smtpuser and smtppass):
+ raise ConfigurationException(
+ 'Cannot use SMTPMailer with security option ssl '
+ 'without options username and password.'
+ )
+ self.envelopesender = envelopesender
+ self.smtpserver = smtpserver
+ self.smtpservertimeout = smtpservertimeout
+ self.smtpserverdebuglevel = smtpserverdebuglevel
+ self.security = smtpencryption
+ self.username = smtpuser
+ self.password = smtppass
+ self.smtpcacerts = smtpcacerts
+ self.loggedin = False
+ try:
+ def call(klass, server, timeout):
+ try:
+ return klass(server, timeout=timeout)
+ except TypeError:
+ # Old Python versions do not have timeout= argument.
+ return klass(server)
+ if self.security == 'none':
+ self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
+ elif self.security == 'ssl':
+ if self.smtpcacerts:
+ raise smtplib.SMTPException(
+ "Checking certificate is not supported for ssl, prefer starttls"
+ )
+ self.smtp = call(smtplib.SMTP_SSL, self.smtpserver, timeout=self.smtpservertimeout)
+ elif self.security == 'tls':
+ if 'ssl' not in sys.modules:
+ self.environment.get_logger().error(
+ '*** Your Python version does not have the ssl library installed\n'
+ '*** smtpEncryption=tls is not available.\n'
+ '*** Either upgrade Python to 2.6 or later\n'
+ ' or use git_multimail.py version 1.2.\n')
+ if ':' not in self.smtpserver:
+ self.smtpserver += ':587' # default port for TLS
+ self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
+ # start: ehlo + starttls
+ # equivalent to
+ # self.smtp.ehlo()
+ # self.smtp.starttls()
+ # with access to the ssl layer
+ self.smtp.ehlo()
+ if not self.smtp.has_extn("starttls"):
+ raise smtplib.SMTPException("STARTTLS extension not supported by server")
+ resp, reply = self.smtp.docmd("STARTTLS")
+ if resp != 220:
+ raise smtplib.SMTPException("Wrong answer to the STARTTLS command")
+ if self.smtpcacerts:
+ self.smtp.sock = ssl.wrap_socket(
+ self.smtp.sock,
+ ca_certs=self.smtpcacerts,
+ cert_reqs=ssl.CERT_REQUIRED
+ )
+ else:
+ self.smtp.sock = ssl.wrap_socket(
+ self.smtp.sock,
+ cert_reqs=ssl.CERT_NONE
+ )
+ self.environment.get_logger().error(
+ '*** Warning, the server certificate is not verified (smtp) ***\n'
+ '*** set the option smtpCACerts ***\n'
+ )
+ if not hasattr(self.smtp.sock, "read"):
+ # using httplib.FakeSocket with Python 2.5.x or earlier
+ self.smtp.sock.read = self.smtp.sock.recv
+ self.smtp.file = self.smtp.sock.makefile('rb')
+ self.smtp.helo_resp = None
+ self.smtp.ehlo_resp = None
+ self.smtp.esmtp_features = {}
+ self.smtp.does_esmtp = 0
+ # end: ehlo + starttls
+ self.smtp.ehlo()
+ else:
+ sys.stdout.write('*** Error: Control reached an invalid option. ***')
+ sys.exit(1)
+ if self.smtpserverdebuglevel > 0:
+ sys.stdout.write(
+ "*** Setting debug on for SMTP server connection (%s) ***\n"
+ % self.smtpserverdebuglevel)
+ self.smtp.set_debuglevel(self.smtpserverdebuglevel)
+ except Exception:
+ self.environment.get_logger().error(
+ '*** Error establishing SMTP connection to %s ***\n'
+ '*** %s\n'
+ % (self.smtpserver, sys.exc_info()[1]))
+ sys.exit(1)
+
+ def close(self):
+ if hasattr(self, 'smtp'):
+ self.smtp.quit()
+ del self.smtp
+
+ def __del__(self):
+ self.close()
+
+ def send(self, lines, to_addrs):
+ try:
+ if self.username or self.password:
+ if not self.loggedin:
+ self.smtp.login(self.username, self.password)
+ self.loggedin = True
+ msg = ''.join(lines)
+ # turn comma-separated list into Python list if needed.
+ if is_string(to_addrs):
+ to_addrs = [email for (name, email) in getaddresses([to_addrs])]
+ self.smtp.sendmail(self.envelopesender, to_addrs, msg.encode('utf8'))
+ except socket.timeout:
+ self.environment.get_logger().error(
+ '*** Error sending email ***\n'
+ '*** SMTP server timed out (timeout is %s)\n'
+ % self.smtpservertimeout)
+ except smtplib.SMTPResponseException:
+ err = sys.exc_info()[1]
+ self.environment.get_logger().error(
+ '*** Error sending email ***\n'
+ '*** Error %d: %s\n'
+ % (err.smtp_code, bytes_to_str(err.smtp_error)))
+ try:
+ smtp = self.smtp
+ # delete the field before quit() so that in case of
+ # error, self.smtp is deleted anyway.
+ del self.smtp
+ smtp.quit()
+ except:
+ self.environment.get_logger().error(
+ '*** Error closing the SMTP connection ***\n'
+ '*** Exiting anyway ... ***\n'
+ '*** %s\n' % sys.exc_info()[1])
+ sys.exit(1)
+
+
+class OutputMailer(Mailer):
+ """Write emails to an output stream, bracketed by lines of '=' characters.
+
+ This is intended for debugging purposes."""
+
+ SEPARATOR = '=' * 75 + '\n'
+
+ def __init__(self, f, environment=None):
+ super(OutputMailer, self).__init__(environment=environment)
+ self.f = f
+
+ def send(self, lines, to_addrs):
+ write_str(self.f, self.SEPARATOR)
+ for line in lines:
+ write_str(self.f, line)
+ write_str(self.f, self.SEPARATOR)
+
+
+def get_git_dir():
+ """Determine GIT_DIR.
+
+ Determine GIT_DIR either from the GIT_DIR environment variable or
+ from the working directory, using Git's usual rules."""
+
+ try:
+ return read_git_output(['rev-parse', '--git-dir'])
+ except CommandError:
+ sys.stderr.write('fatal: git_multimail: not in a git directory\n')
+ sys.exit(1)
+
+
+class Environment(object):
+ """Describes the environment in which the push is occurring.
+
+ An Environment object encapsulates information about the local
+ environment. For example, it knows how to determine:
+
+ * the name of the repository to which the push occurred
+
+ * what user did the push
+
+ * what users want to be informed about various types of changes.
+
+ An Environment object is expected to have the following methods:
+
+ get_repo_shortname()
+
+ Return a short name for the repository, for display
+ purposes.
+
+ get_repo_path()
+
+ Return the absolute path to the Git repository.
+
+ get_emailprefix()
+
+ Return a string that will be prefixed to every email's
+ subject.
+
+ get_pusher()
+
+ Return the username of the person who pushed the changes.
+ This value is used in the email body to indicate who
+ pushed the change.
+
+ get_pusher_email() (may return None)
+
+ Return the email address of the person who pushed the
+ changes. The value should be a single RFC 2822 email
+ address as a string; e.g., "Joe User <user@example.com>"
+ if available, otherwise "user@example.com". If set, the
+ value is used as the Reply-To address for refchange
+ emails. If it is impossible to determine the pusher's
+ email, this attribute should be set to None (in which case
+ no Reply-To header will be output).
+
+ get_sender()
+
+ Return the address to be used as the 'From' email address
+ in the email envelope.
+
+ get_fromaddr(change=None)
+
+ Return the 'From' email address used in the email 'From:'
+ headers. If the change is known when this function is
+ called, it is passed in as the 'change' parameter. (May
+ be a full RFC 2822 email address like 'Joe User
+ <user@example.com>'.)
+
+ get_administrator()
+
+ Return the name and/or email of the repository
+ administrator. This value is used in the footer as the
+ person to whom requests to be removed from the
+ notification list should be sent. Ideally, it should
+ include a valid email address.
+
+ get_reply_to_refchange()
+ get_reply_to_commit()
+
+ Return the address to use in the email "Reply-To" header,
+ as a string. These can be an RFC 2822 email address, or
+ None to omit the "Reply-To" header.
+ get_reply_to_refchange() is used for refchange emails;
+ get_reply_to_commit() is used for individual commit
+ emails.
+
+ get_ref_filter_regex()
+
+ Return a tuple -- a compiled regex, and a boolean indicating
+ whether the regex picks refs to include (if False, the regex
+ matches on refs to exclude).
+
+ get_default_ref_ignore_regex()
+
+ Return a regex matching refs that should be ignored, both when
+ deciding which emails to send and when computing which commits
+ are considered new to the repository. Default is "^refs/notes/".
+
+ get_max_subject_length()
+
+ Return an int giving the maximal length for the subject
+ (git log --oneline).
+
+ They should also define the following attributes:
+
+ announce_show_shortlog (bool)
+
+ True iff announce emails should include a shortlog.
+
+ commit_email_format (string)
+
+ If "html", generate commit emails in HTML instead of plain text
+ used by default.
+
+ html_in_intro (bool)
+ html_in_footer (bool)
+
+ When generating HTML emails, the introduction (respectively,
+ the footer) will be HTML-escaped iff html_in_intro (respectively,
+ html_in_footer) is true. When false, only the values used to expand
+ the template are escaped.
+
+ refchange_showgraph (bool)
+
+ True iff refchange emails should include a detailed graph.
+
+ refchange_showlog (bool)
+
+ True iff refchange emails should include a detailed log.
+
+ diffopts (list of strings)
+
+ The options that should be passed to 'git diff' for the
+ summary email. The value should be a list of strings
+ representing words to be passed to the command.
+
+ graphopts (list of strings)
+
+ Analogous to diffopts, but contains options passed to
+ 'git log --graph' when generating the detailed graph for
+ a set of commits (see refchange_showgraph)
+
+ logopts (list of strings)
+
+ Analogous to diffopts, but contains options passed to
+ 'git log' when generating the detailed log for a set of
+ commits (see refchange_showlog)
+
+ commitlogopts (list of strings)
+
+ The options that should be passed to 'git log' for each
+ commit mail. The value should be a list of strings
+ representing words to be passed to the command.
+
+ date_substitute (string)
+
+ String to be used in substitution for 'Date:' at start of
+ line in the output of 'git log'.
+
+ quiet (bool)
+ On success, do not write to stderr.
+
+ stdout (bool)
+ Write email to stdout rather than emailing. Useful for debugging.
+
+ combine_when_single_commit (bool)
+
+ True if a combined email should be produced when a single
+ new commit is pushed to a branch, False otherwise.
+
+ from_refchange, from_commit (strings)
+
+ Addresses to use for the From: field for refchange emails
+ and commit emails respectively. Set from
+ multimailhook.fromRefchange and multimailhook.fromCommit
+ by ConfigEnvironmentMixin.
+
+ log_file, error_log_file, debug_log_file (string)
+
+ Name of a file to which logs should be sent.
+
+ verbose (int)
+
+ How verbose the system should be.
+ - 0 (default): show info, errors, ...
+ - 1 : show basic debug info
+ """
+
+ REPO_NAME_RE = re.compile(r'^(?P<name>.+?)(?:\.git)$')
+
+ def __init__(self, osenv=None):
+ self.osenv = osenv or os.environ
+ self.announce_show_shortlog = False
+ self.commit_email_format = "text"
+ self.html_in_intro = False
+ self.html_in_footer = False
+ self.commitBrowseURL = None
+ self.maxcommitemails = 500
+ self.excludemergerevisions = False
+ self.diffopts = ['--stat', '--summary', '--find-copies-harder']
+ self.graphopts = ['--oneline', '--decorate']
+ self.logopts = []
+ self.refchange_showgraph = False
+ self.refchange_showlog = False
+ self.commitlogopts = ['-C', '--stat', '-p', '--cc']
+ self.date_substitute = 'AuthorDate: '
+ self.quiet = False
+ self.stdout = False
+ self.combine_when_single_commit = True
+ self.logger = None
+
+ self.COMPUTED_KEYS = [
+ 'administrator',
+ 'charset',
+ 'emailprefix',
+ 'pusher',
+ 'pusher_email',
+ 'repo_path',
+ 'repo_shortname',
+ 'sender',
+ ]
+
+ self._values = None
+
+ def get_logger(self):
+ """Get (possibly creates) the logger associated to this environment."""
+ if self.logger is None:
+ self.logger = Logger(self)
+ return self.logger
+
+ def get_repo_shortname(self):
+ """Use the last part of the repo path, with ".git" stripped off if present."""
+
+ basename = os.path.basename(os.path.abspath(self.get_repo_path()))
+ m = self.REPO_NAME_RE.match(basename)
+ if m:
+ return m.group('name')
+ else:
+ return basename
+
+ def get_pusher(self):
+ raise NotImplementedError()
+
+ def get_pusher_email(self):
+ return None
+
+ def get_fromaddr(self, change=None):
+ config = Config('user')
+ fromname = config.get('name', default='')
+ fromemail = config.get('email', default='')
+ if fromemail:
+ return formataddr([fromname, fromemail])
+ return self.get_sender()
+
+ def get_administrator(self):
+ return 'the administrator of this repository'
+
+ def get_emailprefix(self):
+ return ''
+
+ def get_repo_path(self):
+ if read_git_output(['rev-parse', '--is-bare-repository']) == 'true':
+ path = get_git_dir()
+ else:
+ path = read_git_output(['rev-parse', '--show-toplevel'])
+ return os.path.abspath(path)
+
+ def get_charset(self):
+ return CHARSET
+
+ def get_values(self):
+ """Return a dictionary {keyword: expansion} for this Environment.
+
+ This method is called by Change._compute_values(). The keys
+ in the returned dictionary are available to be used in any of
+ the templates. The dictionary is created by calling
+ self.get_NAME() for each of the attributes named in
+ COMPUTED_KEYS and recording those that do not return None.
+ The return value is always a new dictionary."""
+
+ if self._values is None:
+ values = {'': ''} # %()s expands to the empty string.
+
+ for key in self.COMPUTED_KEYS:
+ value = getattr(self, 'get_%s' % (key,))()
+ if value is not None:
+ values[key] = value
+
+ self._values = values
+
+ return self._values.copy()
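+ # Illustrative sketch (hypothetical values, not part of the hook logic): the
+ # dictionary returned above feeds %-style template expansion. Assuming a
+ # bare repository "example.git" and pusher "alice", a template such as
+ #     '[%(repo_shortname)s] %(pusher)s pushed'
+ # would expand to roughly
+ #     '[example] alice pushed'
+ # via ordinary Python interpolation, e.g. template % environment.get_values().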
+
+ def get_refchange_recipients(self, refchange):
+ """Return the recipients for notifications about refchange.
+
+ Return the list of email addresses to which notifications
+ about the specified ReferenceChange should be sent."""
+
+ raise NotImplementedError()
+
+ def get_announce_recipients(self, annotated_tag_change):
+ """Return the recipients for notifications about annotated_tag_change.
+
+ Return the list of email addresses to which notifications
+ about the specified AnnotatedTagChange should be sent."""
+
+ raise NotImplementedError()
+
+ def get_reply_to_refchange(self, refchange):
+ return self.get_pusher_email()
+
+ def get_revision_recipients(self, revision):
+ """Return the recipients for messages about revision.
+
+ Return the list of email addresses to which notifications
+ about the specified Revision should be sent. This method
+ could be overridden, for example, to take into account the
+ contents of the revision when deciding whom to notify about
+ it. For example, there could be a scheme for users to express
+ interest in particular files or subdirectories, and only
+ receive notification emails for revisions that affect those
+ files."""
+
+ raise NotImplementedError()
+
+ def get_reply_to_commit(self, revision):
+ return revision.author
+
+ def get_default_ref_ignore_regex(self):
+ # The commit messages of git notes are essentially meaningless
+ # and "filenames" in git notes commits are an implementation
+ # detail that might surprise users at first. As such, we
+ # would need a completely different method for handling emails
+ # of git notes in order for them to be of benefit for users,
+ # which we simply do not have right now.
+ return "^refs/notes/"
+
+ def get_max_subject_length(self):
+ """Return the maximal subject line (git log --oneline) length.
+ Longer subject lines will be truncated."""
+ raise NotImplementedError()
+
+ def filter_body(self, lines):
+ """Filter the lines intended for an email body.
+
+ lines is an iterable over the lines that would go into the
+ email body. Filter it (e.g., limit the number of lines, the
+ line length, character set, etc.), returning another iterable.
+ See FilterLinesEnvironmentMixin and MaxlinesEnvironmentMixin
+ for classes implementing this functionality."""
+
+ return lines
+
+ def log_msg(self, msg):
+ """Write the string msg on a log file or on stderr.
+
+ Sends the text to stderr by default, override to change the behavior."""
+ self.get_logger().info(msg)
+
+ def log_warning(self, msg):
+ """Write the string msg on a log file or on stderr.
+
+ Sends the text to stderr by default, override to change the behavior."""
+ self.get_logger().warning(msg)
+
+ def log_error(self, msg):
+ """Write the string msg on a log file or on stderr.
+
+ Sends the text to stderr by default, override to change the behavior."""
+ self.get_logger().error(msg)
+
+ def check(self):
+ pass
+
+
+class ConfigEnvironmentMixin(Environment):
+ """A mixin that sets self.config to its constructor's config argument.
+
+ This class's constructor consumes the "config" argument.
+
+ Mixins that need to inspect the config should inherit from this
+ class (1) to make sure that "config" is still in the constructor
+ arguments when its own constructor runs and/or (2) to be sure that
+ self.config is set after construction."""
+
+ def __init__(self, config, **kw):
+ super(ConfigEnvironmentMixin, self).__init__(**kw)
+ self.config = config
+
+
+class ConfigOptionsEnvironmentMixin(ConfigEnvironmentMixin):
+ """An Environment that reads most of its information from "git config"."""
+
+ @staticmethod
+ def forbid_field_values(name, value, forbidden):
+ for forbidden_val in forbidden:
+ if value is not None and value.lower() == forbidden_val:
+ raise ConfigurationException(
+ '"%s" is not an allowed setting for %s' % (value, name)
+ )
+
+ def __init__(self, config, **kw):
+ super(ConfigOptionsEnvironmentMixin, self).__init__(
+ config=config, **kw
+ )
+
+ for var, cfg in (
+ ('announce_show_shortlog', 'announceshortlog'),
+ ('refchange_showgraph', 'refchangeShowGraph'),
+ ('refchange_showlog', 'refchangeshowlog'),
+ ('quiet', 'quiet'),
+ ('stdout', 'stdout'),
+ ):
+ val = config.get_bool(cfg)
+ if val is not None:
+ setattr(self, var, val)
+
+ commit_email_format = config.get('commitEmailFormat')
+ if commit_email_format is not None:
+ if commit_email_format != "html" and commit_email_format != "text":
+ self.log_warning(
+ '*** Unknown value for multimailhook.commitEmailFormat: %s\n' %
+ commit_email_format +
+ '*** Expected either "text" or "html". Ignoring.\n'
+ )
+ else:
+ self.commit_email_format = commit_email_format
+
+ html_in_intro = config.get_bool('htmlInIntro')
+ if html_in_intro is not None:
+ self.html_in_intro = html_in_intro
+
+ html_in_footer = config.get_bool('htmlInFooter')
+ if html_in_footer is not None:
+ self.html_in_footer = html_in_footer
+
+ self.commitBrowseURL = config.get('commitBrowseURL')
+
+ self.excludemergerevisions = config.get('excludeMergeRevisions')
+
+ maxcommitemails = config.get('maxcommitemails')
+ if maxcommitemails is not None:
+ try:
+ self.maxcommitemails = int(maxcommitemails)
+ except ValueError:
+ self.log_warning(
+ '*** Malformed value for multimailhook.maxCommitEmails: %s\n'
+ % maxcommitemails +
+ '*** Expected a number. Ignoring.\n'
+ )
+
+ diffopts = config.get('diffopts')
+ if diffopts is not None:
+ self.diffopts = shlex.split(diffopts)
+
+ graphopts = config.get('graphOpts')
+ if graphopts is not None:
+ self.graphopts = shlex.split(graphopts)
+
+ logopts = config.get('logopts')
+ if logopts is not None:
+ self.logopts = shlex.split(logopts)
+
+ commitlogopts = config.get('commitlogopts')
+ if commitlogopts is not None:
+ self.commitlogopts = shlex.split(commitlogopts)
+
+ date_substitute = config.get('dateSubstitute')
+ if date_substitute == 'none':
+ self.date_substitute = None
+ elif date_substitute is not None:
+ self.date_substitute = date_substitute
+
+ reply_to = config.get('replyTo')
+ self.__reply_to_refchange = config.get('replyToRefchange', default=reply_to)
+ self.forbid_field_values('replyToRefchange',
+ self.__reply_to_refchange,
+ ['author'])
+ self.__reply_to_commit = config.get('replyToCommit', default=reply_to)
+
+ self.from_refchange = config.get('fromRefchange')
+ self.forbid_field_values('fromRefchange',
+ self.from_refchange,
+ ['author', 'none'])
+ self.from_commit = config.get('fromCommit')
+ self.forbid_field_values('fromCommit',
+ self.from_commit,
+ ['none'])
+
+ combine = config.get_bool('combineWhenSingleCommit')
+ if combine is not None:
+ self.combine_when_single_commit = combine
+
+ self.log_file = config.get('logFile', default=None)
+ self.error_log_file = config.get('errorLogFile', default=None)
+ self.debug_log_file = config.get('debugLogFile', default=None)
+ if config.get_bool('Verbose', default=False):
+ self.verbose = 1
+ else:
+ self.verbose = 0
+
+ def get_administrator(self):
+ return (
+ self.config.get('administrator') or
+ self.get_sender() or
+ super(ConfigOptionsEnvironmentMixin, self).get_administrator()
+ )
+
+ def get_repo_shortname(self):
+ return (
+ self.config.get('reponame') or
+ super(ConfigOptionsEnvironmentMixin, self).get_repo_shortname()
+ )
+
+ def get_emailprefix(self):
+ emailprefix = self.config.get('emailprefix')
+ if emailprefix is not None:
+ emailprefix = emailprefix.strip()
+ if emailprefix:
+ emailprefix += ' '
+ else:
+ emailprefix = '[%(repo_shortname)s] '
+ short_name = self.get_repo_shortname()
+ try:
+ return emailprefix % {'repo_shortname': short_name}
+ except:
+ self.get_logger().error(
+ '*** Invalid multimailhook.emailPrefix: %s\n' % emailprefix +
+ '*** %s\n' % sys.exc_info()[1] +
+ "*** Only the '%(repo_shortname)s' placeholder is allowed\n"
+ )
+ raise ConfigurationException(
+ '"%s" is not an allowed setting for emailPrefix' % emailprefix
+ )
+
+ def get_sender(self):
+ return self.config.get('envelopesender')
+
+ def process_addr(self, addr, change):
+ if addr.lower() == 'author':
+ if hasattr(change, 'author'):
+ return change.author
+ else:
+ return None
+ elif addr.lower() == 'pusher':
+ return self.get_pusher_email()
+ elif addr.lower() == 'none':
+ return None
+ else:
+ return addr
+
+ def get_fromaddr(self, change=None):
+ fromaddr = self.config.get('from')
+ if change:
+ specific_fromaddr = change.get_specific_fromaddr()
+ if specific_fromaddr:
+ fromaddr = specific_fromaddr
+ if fromaddr:
+ fromaddr = self.process_addr(fromaddr, change)
+ if fromaddr:
+ return fromaddr
+ return super(ConfigOptionsEnvironmentMixin, self).get_fromaddr(change)
+
+ def get_reply_to_refchange(self, refchange):
+ if self.__reply_to_refchange is None:
+ return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_refchange(refchange)
+ else:
+ return self.process_addr(self.__reply_to_refchange, refchange)
+
+ def get_reply_to_commit(self, revision):
+ if self.__reply_to_commit is None:
+ return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_commit(revision)
+ else:
+ return self.process_addr(self.__reply_to_commit, revision)
+
+ def get_scancommitforcc(self):
+ return self.config.get('scancommitforcc')
+
+
+class FilterLinesEnvironmentMixin(Environment):
+ """Handle encoding and maximum line length of body lines.
+
+ email_max_line_length (int or None)
+
+ The maximum length of any single line in the email body.
+ Longer lines are truncated at that length with ' [...]'
+ appended.
+
+ strict_utf8 (bool)
+
+ If this field is set to True, then the email body text is
+ expected to be UTF-8. Any invalid characters are
+ converted to U+FFFD, the Unicode replacement character
+ (encoded as UTF-8, of course).
+
+ """
+
+ def __init__(self, strict_utf8=True,
+ email_max_line_length=500, max_subject_length=500,
+ **kw):
+ super(FilterLinesEnvironmentMixin, self).__init__(**kw)
+ self.__strict_utf8 = strict_utf8
+ self.__email_max_line_length = email_max_line_length
+ self.__max_subject_length = max_subject_length
+
+ def filter_body(self, lines):
+ lines = super(FilterLinesEnvironmentMixin, self).filter_body(lines)
+ if self.__strict_utf8:
+ if not PYTHON3:
+ lines = (line.decode(ENCODING, 'replace') for line in lines)
+ # Limit the line length in Unicode-space to avoid
+ # splitting characters:
+ if self.__email_max_line_length > 0:
+ lines = limit_linelength(lines, self.__email_max_line_length)
+ if not PYTHON3:
+ lines = (line.encode(ENCODING, 'replace') for line in lines)
+ elif self.__email_max_line_length:
+ lines = limit_linelength(lines, self.__email_max_line_length)
+
+ return lines
+
+ def get_max_subject_length(self):
+ return self.__max_subject_length
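+ # Illustrative behaviour sketch (hypothetical input): with the default
+ # email_max_line_length of 500, an 800-character body line would be
+ # truncated to the configured length with ' [...]' appended, and with
+ # strict_utf8 enabled any invalid UTF-8 bytes are first replaced by U+FFFD.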
+
+
+class ConfigFilterLinesEnvironmentMixin(
+ ConfigEnvironmentMixin,
+ FilterLinesEnvironmentMixin,
+ ):
+ """Handle encoding and maximum line length based on config."""
+
+ def __init__(self, config, **kw):
+ strict_utf8 = config.get_bool('emailstrictutf8', default=None)
+ if strict_utf8 is not None:
+ kw['strict_utf8'] = strict_utf8
+
+ email_max_line_length = config.get('emailmaxlinelength')
+ if email_max_line_length is not None:
+ kw['email_max_line_length'] = int(email_max_line_length)
+
+ max_subject_length = config.get('subjectMaxLength', default=email_max_line_length)
+ if max_subject_length is not None:
+ kw['max_subject_length'] = int(max_subject_length)
+
+ super(ConfigFilterLinesEnvironmentMixin, self).__init__(
+ config=config, **kw
+ )
+
+
+class MaxlinesEnvironmentMixin(Environment):
+ """Limit the email body to a specified number of lines."""
+
+ def __init__(self, emailmaxlines, **kw):
+ super(MaxlinesEnvironmentMixin, self).__init__(**kw)
+ self.__emailmaxlines = emailmaxlines
+
+ def filter_body(self, lines):
+ lines = super(MaxlinesEnvironmentMixin, self).filter_body(lines)
+ if self.__emailmaxlines > 0:
+ lines = limit_lines(lines, self.__emailmaxlines)
+ return lines
+
+
+class ConfigMaxlinesEnvironmentMixin(
+ ConfigEnvironmentMixin,
+ MaxlinesEnvironmentMixin,
+ ):
+ """Limit the email body to the number of lines specified in config."""
+
+ def __init__(self, config, **kw):
+ emailmaxlines = int(config.get('emailmaxlines', default='0'))
+ super(ConfigMaxlinesEnvironmentMixin, self).__init__(
+ config=config,
+ emailmaxlines=emailmaxlines,
+ **kw
+ )
+
+
+class FQDNEnvironmentMixin(Environment):
+ """A mixin that sets the host's FQDN to its constructor argument."""
+
+ def __init__(self, fqdn, **kw):
+ super(FQDNEnvironmentMixin, self).__init__(**kw)
+ self.COMPUTED_KEYS += ['fqdn']
+ self.__fqdn = fqdn
+
+ def get_fqdn(self):
+ """Return the fully-qualified domain name for this host.
+
+ Return None if it is unavailable or unwanted."""
+
+ return self.__fqdn
+
+
+class ConfigFQDNEnvironmentMixin(
+ ConfigEnvironmentMixin,
+ FQDNEnvironmentMixin,
+ ):
+ """Read the FQDN from the config."""
+
+ def __init__(self, config, **kw):
+ fqdn = config.get('fqdn')
+ super(ConfigFQDNEnvironmentMixin, self).__init__(
+ config=config,
+ fqdn=fqdn,
+ **kw
+ )
+
+
+class ComputeFQDNEnvironmentMixin(FQDNEnvironmentMixin):
+ """Get the FQDN by calling socket.getfqdn()."""
+
+ def __init__(self, **kw):
+ super(ComputeFQDNEnvironmentMixin, self).__init__(
+ fqdn=self.get_fqdn(),
+ **kw
+ )
+
+ def get_fqdn(self):
+ fqdn = socket.getfqdn()
+ # Sometimes, socket.getfqdn() returns localhost or
+ # localhost.localdomain, which isn't very helpful. In this case,
+ # fall back to socket.gethostname(), which may return an actual
+ # hostname.
+ if fqdn == 'localhost' or fqdn == 'localhost.localdomain':
+ fqdn = socket.gethostname()
+ return fqdn
+
+
+class PusherDomainEnvironmentMixin(ConfigEnvironmentMixin):
+ """Deduce pusher_email from pusher by appending an emaildomain."""
+
+ def __init__(self, **kw):
+ super(PusherDomainEnvironmentMixin, self).__init__(**kw)
+ self.__emaildomain = self.config.get('emaildomain')
+
+ def get_pusher_email(self):
+ if self.__emaildomain:
+ # Derive the pusher's full email address in the default way:
+ return '%s@%s' % (self.get_pusher(), self.__emaildomain)
+ else:
+ return super(PusherDomainEnvironmentMixin, self).get_pusher_email()
+
+
+class StaticRecipientsEnvironmentMixin(Environment):
+ """Set recipients statically based on constructor parameters."""
+
+ def __init__(
+ self,
+ refchange_recipients, announce_recipients, revision_recipients, scancommitforcc,
+ **kw
+ ):
+ super(StaticRecipientsEnvironmentMixin, self).__init__(**kw)
+
+ # The recipients for various types of notification emails, as
+ # RFC 2822 email addresses separated by commas (or the empty
+ # string if no recipients are configured). Although there is
+ # a mechanism to choose the recipient lists based on the
+ # actual *contents* of the change being reported, we only
+ # choose based on the *type* of the change. Therefore we can
+ # compute them once and for all:
+ self.__refchange_recipients = refchange_recipients
+ self.__announce_recipients = announce_recipients
+ self.__revision_recipients = revision_recipients
+
+ def check(self):
+ if not (self.get_refchange_recipients(None) or
+ self.get_announce_recipients(None) or
+ self.get_revision_recipients(None) or
+ self.get_scancommitforcc()):
+ raise ConfigurationException('No email recipients configured!')
+ super(StaticRecipientsEnvironmentMixin, self).check()
+
+ def get_refchange_recipients(self, refchange):
+ if self.__refchange_recipients is None:
+ return super(StaticRecipientsEnvironmentMixin,
+ self).get_refchange_recipients(refchange)
+ return self.__refchange_recipients
+
+ def get_announce_recipients(self, annotated_tag_change):
+ if self.__announce_recipients is None:
+ return super(StaticRecipientsEnvironmentMixin,
+ self).get_announce_recipients(annotated_tag_change)
+ return self.__announce_recipients
+
+ def get_revision_recipients(self, revision):
+ if self.__revision_recipients is None:
+ return super(StaticRecipientsEnvironmentMixin,
+ self).get_revision_recipients(revision)
+ return self.__revision_recipients
+
+
+class CLIRecipientsEnvironmentMixin(Environment):
+ """Mixin storing recipients information coming from the
+ command-line."""
+
+ def __init__(self, cli_recipients=None, **kw):
+ super(CLIRecipientsEnvironmentMixin, self).__init__(**kw)
+ self.__cli_recipients = cli_recipients
+
+ def get_refchange_recipients(self, refchange):
+ if self.__cli_recipients is None:
+ return super(CLIRecipientsEnvironmentMixin,
+ self).get_refchange_recipients(refchange)
+ return self.__cli_recipients
+
+ def get_announce_recipients(self, annotated_tag_change):
+ if self.__cli_recipients is None:
+ return super(CLIRecipientsEnvironmentMixin,
+ self).get_announce_recipients(annotated_tag_change)
+ return self.__cli_recipients
+
+ def get_revision_recipients(self, revision):
+ if self.__cli_recipients is None:
+ return super(CLIRecipientsEnvironmentMixin,
+ self).get_revision_recipients(revision)
+ return self.__cli_recipients
+
+
+class ConfigRecipientsEnvironmentMixin(
+ ConfigEnvironmentMixin,
+ StaticRecipientsEnvironmentMixin
+ ):
+ """Determine recipients statically based on config."""
+
+ def __init__(self, config, **kw):
+ super(ConfigRecipientsEnvironmentMixin, self).__init__(
+ config=config,
+ refchange_recipients=self._get_recipients(
+ config, 'refchangelist', 'mailinglist',
+ ),
+ announce_recipients=self._get_recipients(
+ config, 'announcelist', 'refchangelist', 'mailinglist',
+ ),
+ revision_recipients=self._get_recipients(
+ config, 'commitlist', 'mailinglist',
+ ),
+ scancommitforcc=config.get('scancommitforcc'),
+ **kw
+ )
+
+ def _get_recipients(self, config, *names):
+ """Return the recipients for a particular type of message.
+
+ Return the list of email addresses to which a particular type
+ of notification email should be sent, by looking at the config
+ value for "multimailhook.$name" for each of names. Use the
+ value from the first name that is configured. The return
+ value is a (possibly empty) string containing RFC 2822 email
+ addresses separated by commas. If no configuration could be
+ found, raise a ConfigurationException."""
+
+ for name in names:
+ lines = config.get_all(name)
+ if lines is not None:
+ lines = [line.strip() for line in lines]
+ # Single "none" is a special value equivalence to empty string.
+ if lines == ['none']:
+ lines = ['']
+ return ', '.join(lines)
+ else:
+ return ''
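+ # Illustrative sketch (hypothetical values): with
+ #     git config multimailhook.refchangeList admins@example.org
+ #     git config multimailhook.mailingList commits@example.org
+ # refchange and announce emails would go to admins@example.org (the first
+ # configured name in their lookup order wins), while commit emails fall back
+ # to commits@example.org. A single value of "none" yields an empty
+ # recipient list for that type of email.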
+
+
+class StaticRefFilterEnvironmentMixin(Environment):
+ """Set branch filter statically based on constructor parameters."""
+
+ def __init__(self, ref_filter_incl_regex, ref_filter_excl_regex,
+ ref_filter_do_send_regex, ref_filter_dont_send_regex,
+ **kw):
+ super(StaticRefFilterEnvironmentMixin, self).__init__(**kw)
+
+ if ref_filter_incl_regex and ref_filter_excl_regex:
+ raise ConfigurationException(
+ "Cannot specify both a ref inclusion and exclusion regex.")
+ self.__is_inclusion_filter = bool(ref_filter_incl_regex)
+ default_exclude = self.get_default_ref_ignore_regex()
+ if ref_filter_incl_regex:
+ ref_filter_regex = ref_filter_incl_regex
+ elif ref_filter_excl_regex:
+ ref_filter_regex = ref_filter_excl_regex + '|' + default_exclude
+ else:
+ ref_filter_regex = default_exclude
+ try:
+ self.__compiled_regex = re.compile(ref_filter_regex)
+ except Exception:
+ raise ConfigurationException(
+ 'Invalid Ref Filter Regex "%s": %s' % (ref_filter_regex, sys.exc_info()[1]))
+
+ if ref_filter_do_send_regex and ref_filter_dont_send_regex:
+ raise ConfigurationException(
+ "Cannot specify both a ref doSend and dontSend regex.")
+ self.__is_do_send_filter = bool(ref_filter_do_send_regex)
+ if ref_filter_do_send_regex:
+ ref_filter_send_regex = ref_filter_do_send_regex
+ elif ref_filter_dont_send_regex:
+ ref_filter_send_regex = ref_filter_dont_send_regex
+ else:
+ ref_filter_send_regex = '.*'
+ self.__is_do_send_filter = True
+ try:
+ self.__send_compiled_regex = re.compile(ref_filter_send_regex)
+ except Exception:
+ raise ConfigurationException(
+ 'Invalid Ref Filter Regex "%s": %s' %
+ (ref_filter_send_regex, sys.exc_info()[1]))
+
+ def get_ref_filter_regex(self, send_filter=False):
+ if send_filter:
+ return self.__send_compiled_regex, self.__is_do_send_filter
+ else:
+ return self.__compiled_regex, self.__is_inclusion_filter
+
+
+class ConfigRefFilterEnvironmentMixin(
+ ConfigEnvironmentMixin,
+ StaticRefFilterEnvironmentMixin
+ ):
+ """Determine branch filtering statically based on config."""
+
+ def _get_regex(self, config, key):
+ """Get a list of whitespace-separated regex. The refFilter* config
+ variables are multivalued (hence the use of get_all), and we
+ allow each entry to be a whitespace-separated list (hence the
+ split on each line). The whole thing is glued into a single regex."""
+ values = config.get_all(key)
+ if values is None:
+ return values
+ items = []
+ for line in values:
+ for i in line.split():
+ items.append(i)
+ if items == []:
+ return None
+ return '|'.join(items)
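+ # Illustrative sketch (hypothetical value): with
+ #     git config multimailhook.refFilterExclusionRegex '^refs/tags/ ^refs/wip/'
+ # the two whitespace-separated entries are glued into the single regex
+ # '^refs/tags/|^refs/wip/' before being handed to
+ # StaticRefFilterEnvironmentMixin above.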
+
+ def __init__(self, config, **kw):
+ super(ConfigRefFilterEnvironmentMixin, self).__init__(
+ config=config,
+ ref_filter_incl_regex=self._get_regex(config, 'refFilterInclusionRegex'),
+ ref_filter_excl_regex=self._get_regex(config, 'refFilterExclusionRegex'),
+ ref_filter_do_send_regex=self._get_regex(config, 'refFilterDoSendRegex'),
+ ref_filter_dont_send_regex=self._get_regex(config, 'refFilterDontSendRegex'),
+ **kw
+ )
+
+
+class ProjectdescEnvironmentMixin(Environment):
+ """Make a "projectdesc" value available for templates.
+
+ By default, it is set to the first line of $GIT_DIR/description
+ (if that file is present and appears to be set meaningfully)."""
+
+ def __init__(self, **kw):
+ super(ProjectdescEnvironmentMixin, self).__init__(**kw)
+ self.COMPUTED_KEYS += ['projectdesc']
+
+ def get_projectdesc(self):
+ """Return a one-line description of the project."""
+
+ git_dir = get_git_dir()
+ try:
+ projectdesc = open(os.path.join(git_dir, 'description')).readline().strip()
+ if projectdesc and not projectdesc.startswith('Unnamed repository'):
+ return projectdesc
+ except IOError:
+ pass
+
+ return 'UNNAMED PROJECT'
+
+
+class GenericEnvironmentMixin(Environment):
+ def get_pusher(self):
+ return self.osenv.get('USER', self.osenv.get('USERNAME', 'unknown user'))
+
+
+class GitoliteEnvironmentHighPrecMixin(Environment):
+ def get_pusher(self):
+ return self.osenv.get('GL_USER', 'unknown user')
+
+
+class GitoliteEnvironmentLowPrecMixin(
+ ConfigEnvironmentMixin,
+ Environment):
+
+ def get_repo_shortname(self):
+ # The gitolite environment variable $GL_REPO is a pretty good
+ # repo_shortname (though it's probably not as good as a value
+ # the user might have explicitly put in his config).
+ return (
+ self.osenv.get('GL_REPO', None) or
+ super(GitoliteEnvironmentLowPrecMixin, self).get_repo_shortname()
+ )
+
+ @staticmethod
+ def _compile_regex(re_template):
+ return (
+ re.compile(re_template % x)
+ for x in (
+ r'BEGIN\s+USER\s+EMAILS',
+ r'([^\s]+)\s+(.*)',
+ r'END\s+USER\s+EMAILS',
+ ))
+
+ def get_fromaddr(self, change=None):
+ GL_USER = self.osenv.get('GL_USER')
+ if GL_USER is not None:
+ # Find the path to gitolite.conf. Note that gitolite v3
+ # did away with the GL_ADMINDIR and GL_CONF environment
+ # variables (they are now hard-coded).
+ GL_ADMINDIR = self.osenv.get(
+ 'GL_ADMINDIR',
+ os.path.expanduser(os.path.join('~', '.gitolite')))
+ GL_CONF = self.osenv.get(
+ 'GL_CONF',
+ os.path.join(GL_ADMINDIR, 'conf', 'gitolite.conf'))
+
+ mailaddress_map = self.config.get('MailaddressMap')
+ # If relative, consider relative to GL_CONF:
+ if mailaddress_map:
+ mailaddress_map = os.path.join(os.path.dirname(GL_CONF),
+ mailaddress_map)
+ if os.path.isfile(mailaddress_map):
+ f = open(mailaddress_map, 'rU')
+ try:
+ # Leading '#' is optional
+ re_begin, re_user, re_end = self._compile_regex(
+ r'^(?:\s*#)?\s*%s\s*$')
+ for l in f:
+ l = l.rstrip('\n')
+ if re_begin.match(l) or re_end.match(l):
+ continue # Ignore these lines
+ m = re_user.match(l)
+ if m:
+ if m.group(1) == GL_USER:
+ return m.group(2)
+ else:
+ continue # Not this user, but not an error
+ raise ConfigurationException(
+ "Syntax error in mail address map.\n"
+ "Check file {}.\n"
+ "Line: {}".format(mailaddress_map, l))
+
+ finally:
+ f.close()
+
+ if os.path.isfile(GL_CONF):
+ f = open(GL_CONF, 'rU')
+ try:
+ in_user_emails_section = False
+ re_begin, re_user, re_end = self._compile_regex(
+ r'^\s*#\s*%s\s*$')
+ for l in f:
+ l = l.rstrip('\n')
+ if not in_user_emails_section:
+ if re_begin.match(l):
+ in_user_emails_section = True
+ continue
+ if re_end.match(l):
+ break
+ m = re_user.match(l)
+ if m and m.group(1) == GL_USER:
+ return m.group(2)
+ finally:
+ f.close()
+ return super(GitoliteEnvironmentLowPrecMixin, self).get_fromaddr(change)
+
+
+class IncrementalDateTime(object):
+ """Simple wrapper to give incremental date/times.
+
+ Each call will result in a date/time a second later than the
+ previous call. This can be used to falsify email headers, to
+ increase the likelihood that email clients sort the emails
+ correctly."""
+
+ def __init__(self):
+ self.time = time.time()
+ self.next = self.__next__ # Python 2 backward compatibility
+
+ def __next__(self):
+ formatted = formatdate(self.time, True)
+ self.time += 1
+ return formatted
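+ # Illustrative usage sketch (timestamps hypothetical): each call yields an
+ # RFC 2822 date one second later than the previous one, e.g.
+ #     send_date = IncrementalDateTime()
+ #     first = next(send_date)   # 'Mon, 01 Jan 2024 12:00:00 +0100'
+ #     second = next(send_date)  # one second later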
+
+
+class StashEnvironmentHighPrecMixin(Environment):
+ def __init__(self, user=None, repo=None, **kw):
+ super(StashEnvironmentHighPrecMixin,
+ self).__init__(user=user, repo=repo, **kw)
+ self.__user = user
+ self.__repo = repo
+
+ def get_pusher(self):
+ return re.match(r'(.*?)\s*<', self.__user).group(1)
+
+ def get_pusher_email(self):
+ return self.__user
+
+
+class StashEnvironmentLowPrecMixin(Environment):
+ def __init__(self, user=None, repo=None, **kw):
+ super(StashEnvironmentLowPrecMixin, self).__init__(**kw)
+ self.__repo = repo
+ self.__user = user
+
+ def get_repo_shortname(self):
+ return self.__repo
+
+ def get_fromaddr(self, change=None):
+ return self.__user
+
+
+class GerritEnvironmentHighPrecMixin(Environment):
+ def __init__(self, project=None, submitter=None, update_method=None, **kw):
+ super(GerritEnvironmentHighPrecMixin,
+ self).__init__(submitter=submitter, project=project, **kw)
+ self.__project = project
+ self.__submitter = submitter
+ self.__update_method = update_method
+ "Make an 'update_method' value available for templates."
+ self.COMPUTED_KEYS += ['update_method']
+
+ def get_pusher(self):
+ if self.__submitter:
+ if self.__submitter.find('<') != -1:
+ # Submitter has a configured email, we transformed
+ # __submitter into an RFC 2822 string already.
+ return re.match(r'(.*?)\s*<', self.__submitter).group(1)
+ else:
+ # Submitter has no configured email; it's just their name.
+ return self.__submitter
+ else:
+ # If we arrive here, this means someone pushed "Submit" from
+ # the gerrit web UI for the CR (or used one of the programmatic
+ # APIs to do the same, such as gerrit review) and the
+ # merge/push was done by the Gerrit user. It was technically
+ # triggered by someone else, but sadly we have no way of
+ # determining who that someone else is at this point.
+ return 'Gerrit' # 'unknown user'?
+
+ def get_pusher_email(self):
+ if self.__submitter:
+ return self.__submitter
+ else:
+ return super(GerritEnvironmentHighPrecMixin, self).get_pusher_email()
+
+ def get_default_ref_ignore_regex(self):
+ default = super(GerritEnvironmentHighPrecMixin, self).get_default_ref_ignore_regex()
+ return default + '|^refs/changes/|^refs/cache-automerge/|^refs/meta/'
+
+ def get_revision_recipients(self, revision):
+ # Merge commits created by Gerrit when users hit "Submit this patchset"
+ # in the Web UI (or do equivalently with REST APIs or the gerrit review
+ # command) are not something users want to see an individual email for.
+ # Filter them out.
+ committer = read_git_output(['log', '--no-walk', '--format=%cN',
+ revision.rev.sha1])
+ if committer == 'Gerrit Code Review':
+ return []
+ else:
+ return super(GerritEnvironmentHighPrecMixin, self).get_revision_recipients(revision)
+
+ def get_update_method(self):
+ return self.__update_method
+
+
+class GerritEnvironmentLowPrecMixin(Environment):
+ def __init__(self, project=None, submitter=None, **kw):
+ super(GerritEnvironmentLowPrecMixin, self).__init__(**kw)
+ self.__project = project
+ self.__submitter = submitter
+
+ def get_repo_shortname(self):
+ return self.__project
+
+ def get_fromaddr(self, change=None):
+ if self.__submitter and self.__submitter.find('<') != -1:
+ return self.__submitter
+ else:
+ return super(GerritEnvironmentLowPrecMixin, self).get_fromaddr(change)
+
+
+class GiteaEnvironmentHighPrecMixin(Environment):
+ def get_pusher(self):
+ return self.osenv.get('GITEA_PUSHER_NAME', 'unknown user')
+
+ def get_pusher_email(self):
+ return self.osenv.get('GITEA_PUSHER_EMAIL')
+
+
+class GiteaEnvironmentLowPrecMixin(Environment):
+ def get_repo_shortname(self):
+ return self.osenv.get('GITEA_REPO_NAME', 'unknown repository')
+
+ def get_fromaddr(self, change=None):
+ # GITEA_PUSHER_NAME doesn't include the full name, just the user name
+ # at Gitea level, so it doesn't seem useful to use it and there
+ # doesn't seem to be any simple way to get it from Gitea either.
+ return self.osenv.get('GITEA_PUSHER_EMAIL')
+
+
+class Push(object):
+ """Represent an entire push (i.e., a group of ReferenceChanges).
+
+ It is easy to figure out what commits were added to a *branch* by
+ a Reference change:
+
+ git rev-list change.old..change.new
+
+ or removed from a *branch*:
+
+ git rev-list change.new..change.old
+
+ But it is not quite so trivial to determine which entirely new
+ commits were added to the *repository* by a push and which old
+ commits were discarded by a push. A big part of the job of this
+ class is to figure out these things, and to make sure that new
+ commits are only detailed once even if they were added to multiple
+ references.
+
+ The first step is to determine the "other" references--those
+ unaffected by the current push. They are computed by listing all
+ references then removing any affected by this push. The results
+ are stored in Push._other_ref_sha1s.
+
+ The commits contained in the repository before this push were
+
+ git rev-list other1 other2 other3 ... change1.old change2.old ...
+
+ Where "changeN.old" is the old value of one of the references
+ affected by this push.
+
+ The commits contained in the repository after this push are
+
+ git rev-list other1 other2 other3 ... change1.new change2.new ...
+
+ The commits added by this push are the difference between these
+ two sets, which can be written
+
+ git rev-list \
+ ^other1 ^other2 ... \
+ ^change1.old ^change2.old ... \
+ change1.new change2.new ...
+
+ The commits removed by this push can be computed by
+
+ git rev-list \
+ ^other1 ^other2 ... \
+ ^change1.new ^change2.new ... \
+ change1.old change2.old ...
+
+ The last point is that it is possible that other pushes are
+ occurring simultaneously to this one, so reference values can
+ change at any time. It is impossible to eliminate all race
+ conditions, but we reduce the window of time during which problems
+ can occur by translating reference names to SHA1s as soon as
+ possible and working with SHA1s thereafter (because SHA1s are
+ immutable)."""
+
+ # A map {(changeclass, changetype): integer} specifying the order
+ # that reference changes will be processed if multiple reference
+ # changes are included in a single push. The order is significant
+ # mostly because new commit notifications are threaded together
+ # with the first reference change that includes the commit. The
+ # following order thus causes commits to be grouped with branch
+ # changes (as opposed to tag changes) if possible.
+ SORT_ORDER = dict(
+ (value, i) for (i, value) in enumerate([
+ (BranchChange, 'update'),
+ (BranchChange, 'create'),
+ (AnnotatedTagChange, 'update'),
+ (AnnotatedTagChange, 'create'),
+ (NonAnnotatedTagChange, 'update'),
+ (NonAnnotatedTagChange, 'create'),
+ (BranchChange, 'delete'),
+ (AnnotatedTagChange, 'delete'),
+ (NonAnnotatedTagChange, 'delete'),
+ (OtherReferenceChange, 'update'),
+ (OtherReferenceChange, 'create'),
+ (OtherReferenceChange, 'delete'),
+ ])
+ )
+
+ def __init__(self, environment, changes, ignore_other_refs=False):
+ self.changes = sorted(changes, key=self._sort_key)
+ self.__other_ref_sha1s = None
+ self.__cached_commits_spec = {}
+ self.environment = environment
+
+ if ignore_other_refs:
+ self.__other_ref_sha1s = set()
+
+ @classmethod
+ def _sort_key(klass, change):
+ return (klass.SORT_ORDER[change.__class__, change.change_type], change.refname,)
+
+ @property
+ def _other_ref_sha1s(self):
+ """The GitObjects referred to by references unaffected by this push.
+ """
+ if self.__other_ref_sha1s is None:
+ # The refnames being changed by this push:
+ updated_refs = set(
+ change.refname
+ for change in self.changes
+ )
+
+ # The SHA-1s of commits referred to by all references in this
+ # repository *except* updated_refs:
+ sha1s = set()
+ fmt = (
+ '%(objectname) %(objecttype) %(refname)\n'
+ '%(*objectname) %(*objecttype) %(refname)'
+ )
+ ref_filter_regex, is_inclusion_filter = \
+ self.environment.get_ref_filter_regex()
+ for line in read_git_lines(
+ ['for-each-ref', '--format=%s' % (fmt,)]):
+ (sha1, type, name) = line.split(' ', 2)
+ if (sha1 and type == 'commit' and
+ name not in updated_refs and
+ include_ref(name, ref_filter_regex, is_inclusion_filter)):
+ sha1s.add(sha1)
+
+ self.__other_ref_sha1s = sha1s
+
+ return self.__other_ref_sha1s
+
+ def _get_commits_spec_incl(self, new_or_old, reference_change=None):
+ """Get new or old SHA-1 from one or each of the changed refs.
+
+ Return a list of SHA-1 commit identifier strings suitable as
+ arguments to 'git rev-list' (or 'git log' or ...). The
+ returned identifiers are either the old or new values from one
+ or all of the changed references, depending on the values of
+ new_or_old and reference_change.
+
+ new_or_old is either the string 'new' or the string 'old'. If
+ 'new', the returned SHA-1 identifiers are the new values from
+ each changed reference. If 'old', the SHA-1 identifiers are
+ the old values from each changed reference.
+
+ If reference_change is specified and not None, only the new or
+ old reference from the specified reference is included in the
+ return value.
+
+ This function returns None if there are no matching revisions
+ (e.g., because a branch was deleted and new_or_old is 'new').
+ """
+
+ if not reference_change:
+ incl_spec = sorted(
+ getattr(change, new_or_old).sha1
+ for change in self.changes
+ if getattr(change, new_or_old)
+ )
+ if not incl_spec:
+ incl_spec = None
+ elif not getattr(reference_change, new_or_old).commit_sha1:
+ incl_spec = None
+ else:
+ incl_spec = [getattr(reference_change, new_or_old).commit_sha1]
+ return incl_spec
+
+ def _get_commits_spec_excl(self, new_or_old):
+ """Get exclusion revisions for determining new or discarded commits.
+
+ Return a list of strings suitable as arguments to 'git
+ rev-list' (or 'git log' or ...) that will exclude all
+ commits that, depending on the value of new_or_old, were
+ either previously in the repository (useful for determining
+ which commits are new to the repository) or currently in the
+ repository (useful for determining which commits were
+ discarded from the repository).
+
+ new_or_old is either the string 'new' or the string 'old'. If
+ 'new', the commits to be excluded are those that were in the
+ repository before the push. If 'old', the commits to be
+ excluded are those that are currently in the repository. """
+
+ old_or_new = {'old': 'new', 'new': 'old'}[new_or_old]
+ excl_revs = self._other_ref_sha1s.union(
+ getattr(change, old_or_new).sha1
+ for change in self.changes
+ if getattr(change, old_or_new).type in ['commit', 'tag']
+ )
+ return ['^' + sha1 for sha1 in sorted(excl_revs)]
+
+ def get_commits_spec(self, new_or_old, reference_change=None):
+ """Get rev-list arguments for added or discarded commits.
+
+ Return a list of strings suitable as arguments to 'git
+ rev-list' (or 'git log' or ...) that select those commits
+ that, depending on the value of new_or_old, are either new to
+ the repository or were discarded from the repository.
+
+ new_or_old is either the string 'new' or the string 'old'. If
+ 'new', the returned list is used to select commits that are
+ new to the repository. If 'old', the returned value is used
+ to select the commits that have been discarded from the
+ repository.
+
+ If reference_change is specified and not None, the new or
+ discarded commits are limited to those that are reachable from
+ the new or old value of the specified reference.
+
+ This function returns None if there are no added (or discarded)
+ revisions.
+ """
+ key = (new_or_old, reference_change)
+ if key not in self.__cached_commits_spec:
+ ret = self._get_commits_spec_incl(new_or_old, reference_change)
+ if ret is not None:
+ ret.extend(self._get_commits_spec_excl(new_or_old))
+ self.__cached_commits_spec[key] = ret
+ return self.__cached_commits_spec[key]
+
+ def get_new_commits(self, reference_change=None):
+ """Return a list of commits added by this push.
+
+ Return a list of the object names of commits that were added
+ by the part of this push represented by reference_change. If
+ reference_change is None, then return a list of *all* commits
+ added by this push."""
+
+ spec = self.get_commits_spec('new', reference_change)
+ return git_rev_list(spec)
+
+ def get_discarded_commits(self, reference_change):
+ """Return a list of commits discarded by this push.
+
+ Return a list of the object names of commits that were
+ entirely discarded from the repository by the part of this
+ push represented by reference_change."""
+
+ spec = self.get_commits_spec('old', reference_change)
+ return git_rev_list(spec)
+
+ def send_emails(self, mailer, body_filter=None):
+ """Use send all of the notification emails needed for this push.
+
+ Use send all of the notification emails (including reference
+ change emails and commit emails) needed for this push. Send
+ the emails using mailer. If body_filter is not None, then use
+ it to filter the lines that are intended for the email
+ body."""
+
+ # The sha1s of commits that were introduced by this push.
+ # They will be removed from this set as they are processed, to
+ # guarantee that one (and only one) email is generated for
+ # each new commit.
+ unhandled_sha1s = set(self.get_new_commits())
+ send_date = IncrementalDateTime()
+ for change in self.changes:
+ sha1s = []
+ for sha1 in reversed(list(self.get_new_commits(change))):
+ if sha1 in unhandled_sha1s:
+ sha1s.append(sha1)
+ unhandled_sha1s.remove(sha1)
+
+ # Check if we've got anyone to send to
+ if not change.recipients:
+ # mga: avoid unnecessary error messages when the summary
+ # email address is not configured (used for i18n mails).
+# change.environment.log_warning(
+# '*** no recipients configured so no email will be sent\n'
+# '*** for %r update %s->%s'
+# % (change.refname, change.old.sha1, change.new.sha1,)
+# )
+ pass
+ else:
+ if not change.environment.quiet:
+ change.environment.log_msg(
+ 'Sending notification emails to: %s' % (change.recipients,))
+ extra_values = {'send_date': next(send_date)}
+
+ rev = change.send_single_combined_email(sha1s)
+ if rev:
+ mailer.send(
+ change.generate_combined_email(self, rev, body_filter, extra_values),
+ rev.recipients,
+ )
+ # This change is now fully handled; no need to handle
+ # individual revisions any further.
+ continue
+ else:
+ mailer.send(
+ change.generate_email(self, body_filter, extra_values),
+ change.recipients,
+ )
+
+ max_emails = change.environment.maxcommitemails
+ if max_emails and len(sha1s) > max_emails:
+ change.environment.log_warning(
+ '*** Too many new commits (%d), not sending commit emails.\n' % len(sha1s) +
+ '*** Try setting multimailhook.maxCommitEmails to a greater value\n' +
+ '*** Currently, multimailhook.maxCommitEmails=%d' % max_emails
+ )
+ return
+
+ for (num, sha1) in enumerate(sha1s):
+ rev = Revision(change, GitObject(sha1), num=num + 1, tot=len(sha1s))
+ if len(rev.parents) > 1 and change.environment.excludemergerevisions:
+ # skipping a merge commit
+ continue
+ if not rev.recipients and rev.cc_recipients:
+ change.environment.log_msg('*** Replacing Cc: with To:')
+ rev.recipients = rev.cc_recipients
+ rev.cc_recipients = None
+ if rev.recipients:
+ extra_values = {'send_date': next(send_date)}
+ mailer.send(
+ rev.generate_email(self, body_filter, extra_values),
+ rev.recipients,
+ )
+
+ # Consistency check:
+ if unhandled_sha1s:
+ change.environment.log_error(
+ 'ERROR: No emails were sent for the following new commits:\n'
+ ' %s'
+ % ('\n '.join(sorted(unhandled_sha1s)),)
+ )
+
+
+def include_ref(refname, ref_filter_regex, is_inclusion_filter):
+ does_match = bool(ref_filter_regex.search(refname))
+ if is_inclusion_filter:
+ return does_match
+ else: # exclusion filter -- we include the ref if the regex doesn't match
+ return not does_match
+
+
+def run_as_post_receive_hook(environment, mailer):
+ environment.check()
+ send_filter_regex, send_is_inclusion_filter = environment.get_ref_filter_regex(True)
+ ref_filter_regex, is_inclusion_filter = environment.get_ref_filter_regex(False)
+ changes = []
+ while True:
+ line = read_line(sys.stdin)
+ if line == '':
+ break
+ (oldrev, newrev, refname) = line.strip().split(' ', 2)
+ environment.get_logger().debug(
+ "run_as_post_receive_hook: oldrev=%s, newrev=%s, refname=%s" %
+ (oldrev, newrev, refname))
+
+ if not include_ref(refname, ref_filter_regex, is_inclusion_filter):
+ continue
+ if not include_ref(refname, send_filter_regex, send_is_inclusion_filter):
+ continue
+ changes.append(
+ ReferenceChange.create(environment, oldrev, newrev, refname)
+ )
+ if not changes:
+ mailer.close()
+ return
+ push = Push(environment, changes)
+ try:
+ push.send_emails(mailer, body_filter=environment.filter_body)
+ finally:
+ mailer.close()
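+# Illustrative sketch of the input the post-receive hook consumes (values are
+# hypothetical): git writes one line per updated ref to stdin, in the form
+#     <oldrev> <newrev> <refname>
+# e.g. "0000000000000000000000000000000000000000 1a2b3c4d... refs/heads/master"
+# for a newly created branch; the loop above turns each line into a
+# ReferenceChange.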
+
+
+def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False):
+ environment.check()
+ send_filter_regex, send_is_inclusion_filter = environment.get_ref_filter_regex(True)
+ ref_filter_regex, is_inclusion_filter = environment.get_ref_filter_regex(False)
+ if not include_ref(refname, ref_filter_regex, is_inclusion_filter):
+ return
+ if not include_ref(refname, send_filter_regex, send_is_inclusion_filter):
+ return
+ changes = [
+ ReferenceChange.create(
+ environment,
+ read_git_output(['rev-parse', '--verify', oldrev]),
+ read_git_output(['rev-parse', '--verify', newrev]),
+ refname,
+ ),
+ ]
+ if not changes:
+ mailer.close()
+ return
+ push = Push(environment, changes, force_send)
+ try:
+ push.send_emails(mailer, body_filter=environment.filter_body)
+ finally:
+ mailer.close()
+
+
+def check_ref_filter(environment):
+ send_filter_regex, send_is_inclusion = environment.get_ref_filter_regex(True)
+ ref_filter_regex, ref_is_inclusion = environment.get_ref_filter_regex(False)
+
+ def inc_exc_lusion(b):
+ if b:
+ return 'inclusion'
+ else:
+ return 'exclusion'
+
+ if send_filter_regex:
+ sys.stdout.write("DoSend/DontSend filter regex (" +
+ (inc_exc_lusion(send_is_inclusion)) +
+ '): ' + send_filter_regex.pattern +
+ '\n')
+ if ref_filter_regex:
+ sys.stdout.write("Include/Exclude filter regex (" +
+ (inc_exc_lusion(ref_is_inclusion)) +
+ '): ' + ref_filter_regex.pattern +
+ '\n')
+ sys.stdout.write(os.linesep)
+
+ sys.stdout.write(
+ "Refs marked as EXCLUDE are excluded by either refFilterInclusionRegex\n"
+ "or refFilterExclusionRegex. No emails will be sent for commits included\n"
+ "in these refs.\n"
+ "Refs marked as DONT-SEND are excluded by either refFilterDoSendRegex or\n"
+ "refFilterDontSendRegex, but not by either refFilterInclusionRegex or\n"
+ "refFilterExclusionRegex. Emails will be sent for commits included in these\n"
+ "refs only when the commit reaches a ref which isn't excluded.\n"
+ "Refs marked as DO-SEND are not excluded by any filter. Emails will\n"
+ "be sent normally for commits included in these refs.\n")
+
+ sys.stdout.write(os.linesep)
+
+ for refname in read_git_lines(['for-each-ref', '--format', '%(refname)']):
+ sys.stdout.write(refname)
+ if not include_ref(refname, ref_filter_regex, ref_is_inclusion):
+ sys.stdout.write(' EXCLUDE')
+ elif not include_ref(refname, send_filter_regex, send_is_inclusion):
+ sys.stdout.write(' DONT-SEND')
+ else:
+ sys.stdout.write(' DO-SEND')
+
+ sys.stdout.write(os.linesep)
+
+
+def show_env(environment, out):
+ out.write('Environment values:\n')
+ for (k, v) in sorted(environment.get_values().items()):
+ if k: # Don't show the {'' : ''} pair.
+ out.write(' %s : %r\n' % (k, v))
+ out.write('\n')
+ # Flush to avoid interleaving with further log output
+ out.flush()
+
+
+def check_setup(environment):
+ environment.check()
+ show_env(environment, sys.stdout)
+ sys.stdout.write("Now, checking that git-multimail's standard input "
+ "is properly set ..." + os.linesep)
+ sys.stdout.write("Please type some text and then press Return" + os.linesep)
+ stdin = sys.stdin.readline()
+ sys.stdout.write("You have just entered:" + os.linesep)
+ sys.stdout.write(stdin)
+ sys.stdout.write("git-multimail seems properly set up." + os.linesep)
+
+
+def choose_mailer(config, environment):
+ mailer = config.get('mailer', default='sendmail')
+
+ if mailer == 'smtp':
+ smtpserver = config.get('smtpserver', default='localhost')
+ smtpservertimeout = float(config.get('smtpservertimeout', default=10.0))
+ smtpserverdebuglevel = int(config.get('smtpserverdebuglevel', default=0))
+ smtpencryption = config.get('smtpencryption', default='none')
+ smtpuser = config.get('smtpuser', default='')
+ smtppass = config.get('smtppass', default='')
+ smtpcacerts = config.get('smtpcacerts', default='')
+ mailer = SMTPMailer(
+ environment,
+ envelopesender=(environment.get_sender() or environment.get_fromaddr()),
+ smtpserver=smtpserver, smtpservertimeout=smtpservertimeout,
+ smtpserverdebuglevel=smtpserverdebuglevel,
+ smtpencryption=smtpencryption,
+ smtpuser=smtpuser,
+ smtppass=smtppass,
+ smtpcacerts=smtpcacerts
+ )
+ elif mailer == 'sendmail':
+ command = config.get('sendmailcommand')
+ if command:
+ command = shlex.split(command)
+ mailer = SendMailer(environment,
+ command=command, envelopesender=environment.get_sender())
+ else:
+ environment.log_error(
+ 'fatal: multimailhook.mailer is set to an incorrect value: "%s"\n' % mailer +
+ 'please use one of "smtp" or "sendmail".'
+ )
+ sys.exit(1)
+ return mailer
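+# Illustrative configuration sketch (hypothetical values) for selecting a
+# mailer:
+#     git config multimailhook.mailer smtp
+#     git config multimailhook.smtpServer smtp.example.org
+# or, for the default sendmail path:
+#     git config multimailhook.sendmailCommand '/usr/sbin/sendmail -oi -t'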
+
+
+KNOWN_ENVIRONMENTS = {
+ 'generic': {'highprec': GenericEnvironmentMixin},
+ 'gitolite': {'highprec': GitoliteEnvironmentHighPrecMixin,
+ 'lowprec': GitoliteEnvironmentLowPrecMixin},
+ 'stash': {'highprec': StashEnvironmentHighPrecMixin,
+ 'lowprec': StashEnvironmentLowPrecMixin},
+ 'gerrit': {'highprec': GerritEnvironmentHighPrecMixin,
+ 'lowprec': GerritEnvironmentLowPrecMixin},
+ 'gitea': {'highprec': GiteaEnvironmentHighPrecMixin,
+ 'lowprec': GiteaEnvironmentLowPrecMixin},
+ }
+
+
+def choose_environment(config, osenv=None, env=None, recipients=None,
+ hook_info=None):
+ env_name = choose_environment_name(config, env, osenv)
+ environment_klass = build_environment_klass(env_name)
+ env = build_environment(environment_klass, env_name, config,
+ osenv, recipients, hook_info)
+ return env
+
+
+def choose_environment_name(config, env, osenv):
+ if not osenv:
+ osenv = os.environ
+
+ if not env:
+ env = config.get('environment')
+
+ if not env:
+ if 'GL_USER' in osenv and 'GL_REPO' in osenv:
+ env = 'gitolite'
+ elif 'GITEA_PUSHER_NAME' in osenv and 'GITEA_REPO_NAME' in osenv:
+ env = 'gitea'
+ else:
+ env = 'generic'
+ return env
+
+
+COMMON_ENVIRONMENT_MIXINS = [
+ ConfigRecipientsEnvironmentMixin,
+ CLIRecipientsEnvironmentMixin,
+ ConfigRefFilterEnvironmentMixin,
+ ProjectdescEnvironmentMixin,
+ ConfigMaxlinesEnvironmentMixin,
+ ComputeFQDNEnvironmentMixin,
+ ConfigFilterLinesEnvironmentMixin,
+ PusherDomainEnvironmentMixin,
+ ConfigOptionsEnvironmentMixin,
+ ]
+
+
+def build_environment_klass(env_name):
+ if 'class' in KNOWN_ENVIRONMENTS[env_name]:
+ return KNOWN_ENVIRONMENTS[env_name]['class']
+
+ environment_mixins = []
+ known_env = KNOWN_ENVIRONMENTS[env_name]
+ if 'highprec' in known_env:
+ high_prec_mixin = known_env['highprec']
+ environment_mixins.append(high_prec_mixin)
+ environment_mixins = environment_mixins + COMMON_ENVIRONMENT_MIXINS
+ if 'lowprec' in known_env:
+ low_prec_mixin = known_env['lowprec']
+ environment_mixins.append(low_prec_mixin)
+ environment_mixins.append(Environment)
+ klass_name = env_name.capitalize() + 'Environment'
+ environment_klass = type(
+ klass_name,
+ tuple(environment_mixins),
+ {},
+ )
+ KNOWN_ENVIRONMENTS[env_name]['class'] = environment_klass
+ return environment_klass
+
+
+GiteaEnvironment = build_environment_klass('gitea')
+GerritEnvironment = build_environment_klass('gerrit')
+StashEnvironment = build_environment_klass('stash')
+GitoliteEnvironment = build_environment_klass('gitolite')
+GenericEnvironment = build_environment_klass('generic')
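+# Illustrative note: the classes above are assembled dynamically, so e.g.
+# GitoliteEnvironment behaves roughly like
+#     class GitoliteEnvironment(GitoliteEnvironmentHighPrecMixin,
+#                               ConfigRecipientsEnvironmentMixin,
+#                               ...,  # the rest of COMMON_ENVIRONMENT_MIXINS
+#                               GitoliteEnvironmentLowPrecMixin,
+#                               Environment): pass
+# with high-precedence mixins overriding common ones, which in turn override
+# the low-precedence ones.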
+
+
+def build_environment(environment_klass, env, config,
+ osenv, recipients, hook_info):
+ environment_kw = {
+ 'osenv': osenv,
+ 'config': config,
+ }
+
+ if env == 'stash':
+ environment_kw['user'] = hook_info['stash_user']
+ environment_kw['repo'] = hook_info['stash_repo']
+ elif env == 'gerrit':
+ environment_kw['project'] = hook_info['project']
+ environment_kw['submitter'] = hook_info['submitter']
+ environment_kw['update_method'] = hook_info['update_method']
+
+ environment_kw['cli_recipients'] = recipients
+
+ return environment_klass(**environment_kw)
+
+
+def get_version():
+ oldcwd = os.getcwd()
+ try:
+ try:
+ os.chdir(os.path.dirname(os.path.realpath(__file__)))
+ git_version = read_git_output(['describe', '--tags', 'HEAD'])
+ if git_version == __version__:
+ return git_version
+ else:
+ return '%s (%s)' % (__version__, git_version)
+ except:
+ pass
+ finally:
+ os.chdir(oldcwd)
+ return __version__
+
+
+def compute_gerrit_options(options, args, required_gerrit_options,
+ raw_refname):
+ if None in required_gerrit_options:
+ raise SystemExit("Error: Specify all of --oldrev, --newrev, --refname, "
+ "and --project; or none of them.")
+
+ if options.environment not in (None, 'gerrit'):
+ raise SystemExit("Non-gerrit environments incompatible with --oldrev, "
+ "--newrev, --refname, and --project")
+ options.environment = 'gerrit'
+
+ if args:
+ raise SystemExit("Error: Positional parameters not allowed with "
+ "--oldrev, --newrev, and --refname.")
+
+ # Gerrit oddly omits 'refs/heads/' in the refname when calling
+ # ref-updated hook; put it back.
+ git_dir = get_git_dir()
+ if (not os.path.exists(os.path.join(git_dir, raw_refname)) and
+ os.path.exists(os.path.join(git_dir, 'refs', 'heads',
+ raw_refname))):
+ options.refname = 'refs/heads/' + options.refname
+
+ # New revisions can appear in a gerrit repository either due to someone
+ # pushing directly (in which case options.submitter will be set), or they
+ # can press "Submit this patchset" in the web UI for some CR (in which
+ # case options.submitter will not be set and gerrit will not have provided
+ # us the information about who pressed the button).
+ #
+ # Note for the nit-picky: I'm lumping in REST API calls and the ssh
+ # gerrit review command in with "Submit this patchset" button, since they
+ # have the same effect.
+ if options.submitter:
+ update_method = 'pushed'
+ # The submitter argument is almost an RFC 2822 email address; change it
+ # from 'User Name (email@domain)' to 'User Name <email@domain>' so that it is one.
+ options.submitter = options.submitter.replace('(', '<').replace(')', '>')
+ else:
+ update_method = 'submitted'
+ # Gerrit knew who submitted this patchset, but threw that information
+ # away when it invoked this hook. However, *IF* Gerrit created a
+ # merge to bring the patchset in (project 'Submit Type' is either
+ # "Always Merge", or is "Merge if Necessary" and happens to be
+ # necessary for this particular CR), then it will have the committer
+ # of that merge be 'Gerrit Code Review' and the author will be the
+ # person who requested the submission of the CR. Since this is fairly
+ # likely for most gerrit installations (of a reasonable size), it's
+ # worth the extra effort to try to determine the actual submitter.
+ rev_info = read_git_lines(['log', '--no-walk', '--merges',
+ '--format=%cN%n%aN <%aE>', options.newrev])
+ if rev_info and rev_info[0] == 'Gerrit Code Review':
+ options.submitter = rev_info[1]
+
+ # We pass back refname, oldrev, newrev as args because then the
+ # gerrit ref-updated hook is much like the git update hook
+ return (options,
+ [options.refname, options.oldrev, options.newrev],
+ {'project': options.project, 'submitter': options.submitter,
+ 'update_method': update_method})
+
+
+def check_hook_specific_args(options, args):
+ raw_refname = options.refname
+ # Convert each string option to unicode for Python 3.
+ if PYTHON3:
+ opts = ['environment', 'recipients', 'oldrev', 'newrev', 'refname',
+ 'project', 'submitter', 'stash_user', 'stash_repo']
+ for opt in opts:
+ if not hasattr(options, opt):
+ continue
+ obj = getattr(options, opt)
+ if obj:
+ enc = obj.encode('utf-8', 'surrogateescape')
+ dec = enc.decode('utf-8', 'replace')
+ setattr(options, opt, dec)
+
+ # First check for stash arguments
+ if (options.stash_user is None) != (options.stash_repo is None):
+ raise SystemExit("Error: Specify both of --stash-user and "
+ "--stash-repo or neither.")
+ if options.stash_user:
+ options.environment = 'stash'
+ return options, args, {'stash_user': options.stash_user,
+ 'stash_repo': options.stash_repo}
+
+ # Finally, check for gerrit specific arguments
+ required_gerrit_options = (options.oldrev, options.newrev, options.refname,
+ options.project)
+ if required_gerrit_options != (None,) * 4:
+ return compute_gerrit_options(options, args, required_gerrit_options,
+ raw_refname)
+
+ # No special options in use, just return what we started with
+ return options, args, {}
+
+
+class Logger(object):
+ def parse_verbose(self, verbose):
+ if verbose > 0:
+ return logging.DEBUG
+ else:
+ return logging.INFO
+
+ def create_log_file(self, environment, name, path, verbosity):
+ log_file = logging.getLogger(name)
+ file_handler = logging.FileHandler(path)
+ log_fmt = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
+ file_handler.setFormatter(log_fmt)
+ log_file.addHandler(file_handler)
+ log_file.setLevel(verbosity)
+ return log_file
+
+ def __init__(self, environment):
+ self.environment = environment
+ self.loggers = []
+ stderr_log = logging.getLogger('git_multimail.stderr')
+
+ class EncodedStderr(object):
+ def write(self, x):
+ write_str(sys.stderr, x)
+
+ def flush(self):
+ sys.stderr.flush()
+
+ stderr_handler = logging.StreamHandler(EncodedStderr())
+ stderr_log.addHandler(stderr_handler)
+ stderr_log.setLevel(self.parse_verbose(environment.verbose))
+ self.loggers.append(stderr_log)
+
+ if environment.debug_log_file is not None:
+ debug_log_file = self.create_log_file(
+ environment, 'git_multimail.debug', environment.debug_log_file, logging.DEBUG)
+ self.loggers.append(debug_log_file)
+
+ if environment.log_file is not None:
+ log_file = self.create_log_file(
+ environment, 'git_multimail.file', environment.log_file, logging.INFO)
+ self.loggers.append(log_file)
+
+ if environment.error_log_file is not None:
+ error_log_file = self.create_log_file(
+ environment, 'git_multimail.error', environment.error_log_file, logging.ERROR)
+ self.loggers.append(error_log_file)
+
+ def info(self, msg, *args, **kwargs):
+ for l in self.loggers:
+ l.info(msg, *args, **kwargs)
+
+ def debug(self, msg, *args, **kwargs):
+ for l in self.loggers:
+ l.debug(msg, *args, **kwargs)
+
+ def warning(self, msg, *args, **kwargs):
+ for l in self.loggers:
+ l.warning(msg, *args, **kwargs)
+
+ def error(self, msg, *args, **kwargs):
+ for l in self.loggers:
+ l.error(msg, *args, **kwargs)
+
+
+def main(args):
+ parser = optparse.OptionParser(
+ description=__doc__,
+ usage='%prog [OPTIONS]\n or: %prog [OPTIONS] REFNAME OLDREV NEWREV',
+ )
+
+ parser.add_option(
+ '--environment', '--env', action='store', type='choice',
+ choices=list(KNOWN_ENVIRONMENTS.keys()), default=None,
+ help=(
+ 'Choose the type of environment in use. Default is taken from '
+ 'multimailhook.environment if set; otherwise "generic".'
+ ),
+ )
+ parser.add_option(
+ '--stdout', action='store_true', default=False,
+ help='Output emails to stdout rather than sending them.',
+ )
+ parser.add_option(
+ '--recipients', action='store', default=None,
+ help='Set list of email recipients for all types of emails.',
+ )
+ parser.add_option(
+ '--show-env', action='store_true', default=False,
+ help=(
+ 'Write to stderr the values determined for the environment '
+ '(intended for debugging purposes), then proceed normally.'
+ ),
+ )
+ parser.add_option(
+ '--force-send', action='store_true', default=False,
+ help=(
+ 'Force sending refchange email when using as an update hook. '
+ 'This is useful to work around the unreliable new commits '
+ 'detection in this mode.'
+ ),
+ )
+ parser.add_option(
+ '-c', metavar="<name>=<value>", action='append',
+ help=(
+ 'Pass a configuration parameter through to git. The value given '
+ 'will override values from configuration files. See the -c option '
+ 'of git(1) for more details. (Only works with git >= 1.7.3)'
+ ),
+ )
+ parser.add_option(
+ '--version', '-v', action='store_true', default=False,
+ help=(
+ "Display git-multimail's version"
+ ),
+ )
+
+ parser.add_option(
+ '--python-version', action='store_true', default=False,
+ help=(
+ "Display the version of Python used by git-multimail"
+ ),
+ )
+
+ parser.add_option(
+ '--check-ref-filter', action='store_true', default=False,
+ help=(
+ 'List refs and show information on how git-multimail '
+ 'will process them.'
+ )
+ )
+
+ # The following options permit this script to be run as a gerrit
+ # ref-updated hook. See e.g.
+ # code.google.com/p/gerrit/source/browse/Documentation/config-hooks.txt
+ # We suppress help for these items, since these are specific to gerrit,
+ # and we don't want users directly using them any way other than how the
+ # gerrit ref-updated hook is called.
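+ # As an illustration only (argument names taken from the suppressed
+ # options below), gerrit is expected to invoke this hook roughly as:
+ #   hooks/ref-updated --oldrev <sha1> --newrev <sha1> \
+ #     --refname <ref> --project <project> \
+ #     [--submitter 'User Name (email@domain)']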
+ parser.add_option('--oldrev', action='store', help=optparse.SUPPRESS_HELP)
+ parser.add_option('--newrev', action='store', help=optparse.SUPPRESS_HELP)
+ parser.add_option('--refname', action='store', help=optparse.SUPPRESS_HELP)
+ parser.add_option('--project', action='store', help=optparse.SUPPRESS_HELP)
+ parser.add_option('--submitter', action='store', help=optparse.SUPPRESS_HELP)
+
+ # The following allow this to be run as a stash asynchronous post-receive
+ # hook (almost identical to a git post-receive hook but triggered also for
+ # merges of pull requests from the UI). We suppress help for these items,
+ # since these are specific to stash.
+ parser.add_option('--stash-user', action='store', help=optparse.SUPPRESS_HELP)
+ parser.add_option('--stash-repo', action='store', help=optparse.SUPPRESS_HELP)
+
+ (options, args) = parser.parse_args(args)
+ (options, args, hook_info) = check_hook_specific_args(options, args)
+
+ if options.version:
+ sys.stdout.write('git-multimail version ' + get_version() + '\n')
+ return
+
+ if options.python_version:
+ sys.stdout.write('Python version ' + sys.version + '\n')
+ return
+
+ if options.c:
+ Config.add_config_parameters(options.c)
+
+ config = Config('multimailhook')
+
+ environment = None
+ try:
+ environment = choose_environment(
+ config, osenv=os.environ,
+ env=options.environment,
+ recipients=options.recipients,
+ hook_info=hook_info,
+ )
+
+ if options.show_env:
+ show_env(environment, sys.stderr)
+
+ if options.stdout or environment.stdout:
+ mailer = OutputMailer(sys.stdout, environment)
+ else:
+ mailer = choose_mailer(config, environment)
+
+ must_check_setup = os.environ.get('GIT_MULTIMAIL_CHECK_SETUP')
+ if must_check_setup == '':
+ must_check_setup = False
+ if options.check_ref_filter:
+ check_ref_filter(environment)
+ elif must_check_setup:
+ check_setup(environment)
+ # Dual mode: if arguments were specified on the command line, run
+ # like an update hook; otherwise, run as a post-receive hook.
+ elif args:
+ if len(args) != 3:
+ parser.error('Need zero or three non-option arguments')
+ (refname, oldrev, newrev) = args
+ environment.get_logger().debug(
+ "run_as_update_hook: refname=%s, oldrev=%s, newrev=%s, force_send=%s" %
+ (refname, oldrev, newrev, options.force_send))
+ run_as_update_hook(environment, mailer, refname, oldrev, newrev, options.force_send)
+ else:
+ run_as_post_receive_hook(environment, mailer)
+ except ConfigurationException:
+ sys.exit(sys.exc_info()[1])
+ except SystemExit:
+ raise
+ except Exception:
+ t, e, tb = sys.exc_info()
+ import traceback
+ sys.stderr.write('\n') # Avoid mixing message with previous output
+ msg = (
+ 'Exception \'' + t.__name__ +
+ '\' raised. Please report this as a bug to\n'
+ 'https://github.com/git-multimail/git-multimail/issues\n'
+ 'with the information below:\n\n'
+ 'git-multimail version ' + get_version() + '\n'
+ 'Python version ' + sys.version + '\n' +
+ traceback.format_exc())
+ try:
+ environment.get_logger().error(msg)
+ except:
+ sys.stderr.write(msg)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/deployment/mgagit/manifests/init.pp b/deployment/mgagit/manifests/init.pp
new file mode 100644
index 00000000..42753b03
--- /dev/null
+++ b/deployment/mgagit/manifests/init.pp
@@ -0,0 +1,170 @@
+class mgagit(
+ $git_dir = '/git',
+ $ldap_server = "ldap.${::domain}",
+ $binddn = 'uid=mgagit,ou=People,dc=mageia,dc=org',
+ $vhost = "projects.${::domain}",
+ $bindpw
+){
+ $git_login = 'git'
+ $git_homedir = "/var/lib/${git_login}"
+ $gitolite_dir = "${git_homedir}/.gitolite"
+ $gitolite_keydir = "${gitolite_dir}/keydir"
+ $gitolite_tmpldir = '/etc/mgagit/tmpl'
+ $gitolite_confdir = "${gitolite_dir}/conf"
+ $gitolite_hooksdir = "${gitolite_dir}/hooks"
+ $gitolite_commonhooksdir = "${gitolite_hooksdir}/common"
+ $gitolite_conf = "${gitolite_confdir}/gitolite.conf"
+ $gitoliterc = "${git_homedir}/.gitolite.rc"
+ $bindpwfile = '/etc/mgagit.secret'
+ $reposconf_dir = "${git_homedir}/repos-config"
+ $vhostdir = "${git_homedir}/www"
+
+ package { ['mgagit', 'gitolite', 'python3-bugz']:
+ ensure => installed,
+ }
+
+ group { $git_login:
+ ensure => present,
+ }
+
+ user { $git_login:
+ ensure => present,
+ home => $git_homedir,
+ managehome => true,
+ gid => $git_login,
+ }
+
+ file { '/etc/mgagit.conf':
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('mgagit/mgagit.conf'),
+ require => Package['mgagit'],
+ }
+
+ file { "${gitolite_commonhooksdir}/git_multimail.py":
+ ensure => present,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0644',
+ source => 'puppet:///modules/mgagit/git_multimail.py',
+ require => File[$gitolite_commonhooksdir],
+ }
+
+ file { "${gitolite_commonhooksdir}/post-receive":
+ ensure => present,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0755',
+ content => template('mgagit/git-post-receive-hook'),
+ require => File[$gitolite_commonhooksdir],
+ }
+
+ file { "${gitolite_commonhooksdir}/post-update":
+ ensure => present,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0755',
+ content => template('mgagit/git-post-update-hook'),
+ require => File[$gitolite_commonhooksdir],
+ }
+
+ file { $gitolite_tmpldir:
+ ensure => directory,
+ owner => root,
+ group => root,
+ mode => '0755',
+ }
+
+ file { "${gitolite_tmpldir}/group.gl":
+ ensure => 'link',
+ target => '/usr/share/mgagit/tmpl/group.gl',
+ }
+
+ file { "${gitolite_tmpldir}/repodef_repo.gl":
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('mgagit/repodef_repo.gl'),
+ }
+
+ mgagit::tmpl { 'artwork':
+ tmpldir => $gitolite_tmpldir,
+ ml => 'atelier',
+ }
+
+ mgagit::tmpl { 'doc':
+ tmpldir => $gitolite_tmpldir,
+ ml => 'atelier', # NB This is wrong, we should have a doc-commits@ ML (and thus remove this line)
+ }
+
+ mgagit::tmpl { 'infrastructure':
+ tmpldir => $gitolite_tmpldir,
+ group => 'sysadmin',
+ ml => 'sysadmin',
+ }
+
+ mgagit::tmpl { 'org':
+ tmpldir => $gitolite_tmpldir,
+ group => 'board',
+ ml => 'board',
+ }
+
+ mgagit::tmpl { 'qa':
+ tmpldir => $gitolite_tmpldir,
+ }
+
+ mgagit::tmpl { 'soft':
+ tmpldir => $gitolite_tmpldir,
+ group => 'packagers-committers',
+ }
+
+ mgagit::tmpl { 'web':
+ tmpldir => $gitolite_tmpldir,
+ ml => 'atelier',
+ }
+
+ file { [$gitolite_dir, $gitolite_keydir, $gitolite_confdir,
+ $gitolite_hooksdir, $gitolite_commonhooksdir,
+ $reposconf_dir, $vhostdir]:
+ ensure => directory,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0755',
+ }
+
+ file { $gitoliterc:
+ ensure => present,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0644',
+ content => template('mgagit/gitolite.rc'),
+ }
+
+ file { $bindpwfile:
+ ensure => present,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0600',
+ content => inline_template('<%= @bindpw %>'),
+ }
+
+ file { $git_dir:
+ ensure => directory,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0755',
+ }
+
+ file { "${git_homedir}/repositories":
+ ensure => 'link',
+ target => $git_dir,
+ }
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ }
+}
+# vim: sw=2
diff --git a/deployment/mgagit/manifests/tmpl.pp b/deployment/mgagit/manifests/tmpl.pp
new file mode 100644
index 00000000..ef188ed2
--- /dev/null
+++ b/deployment/mgagit/manifests/tmpl.pp
@@ -0,0 +1,9 @@
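+# Renders ${tmpldir}/${title}_repo.gl from the group_owned_repo.gl ERB
+# template; $group and $ml default to the resource title. See the
+# mgagit::tmpl declarations in mgagit/manifests/init.pp for usage.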
+define mgagit::tmpl($tmpldir, $group = $title, $ml = $title) {
+ file { "${tmpldir}/${title}_repo.gl":
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('mgagit/group_owned_repo.gl'),
+ }
+}
diff --git a/deployment/mgagit/templates/git-post-receive-hook b/deployment/mgagit/templates/git-post-receive-hook
new file mode 100755
index 00000000..68da3200
--- /dev/null
+++ b/deployment/mgagit/templates/git-post-receive-hook
@@ -0,0 +1,314 @@
+#!/usr/bin/python3
+
+import configparser
+import os
+import re
+import sys
+import urllib.error
+import urllib.parse
+import urllib.request
+import xmlrpc.client
+from dataclasses import dataclass
+
+LIBDIR = '<%= @gitolite_commonhooksdir %>'
+sys.path.insert(0, LIBDIR)
+
+import bugz.settings
+
+import git_multimail
+
+# When editing this list, remember to edit the same list in
+# modules/cgit/templates/filter.commit-links.sh
+BUG_REFS = {
+ 'Mageia': { 're': re.compile('mga#([0-9]+)'), 'replace': 'https://bugs.mageia.org/%s' },
+ 'Red Hat': { 're': re.compile('rhbz#([0-9]+)'), 'replace': 'https://bugzilla.redhat.com/show_bug.cgi?id=%s' },
+ 'Free Desktop': { 're': re.compile('fdo#([0-9]+)'), 'replace': 'https://bugs.freedesktop.org/show_bug.cgi?id=%s' },
+ 'KDE': { 're': re.compile('(?:bko|kde)#([0-9]+)'), 'replace': 'https://bugs.kde.org/show_bug.cgi?id=%s' },
+ 'GNOME': { 're': re.compile('(?:bgo|gnome)#([0-9]+)'), 'replace': 'https://bugzilla.gnome.org/show_bug.cgi?id=%s' },
+ 'Launchpad': { 're': re.compile('lp#([0-9]+)'), 'replace': 'https://launchpad.net/bugs/%s' },
+}
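+
+# For example, a commit message containing 'mga#12345' (a made-up bug number,
+# for illustration) yields the link https://bugs.mageia.org/12345 in the
+# notification mail and a comment on that Mageia bug.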
+
+COMMIT_RE = re.compile('^commit ([a-f0-9]{40})')
+COMMIT_REPLACE = 'https://gitweb.mageia.org/%s/commit/?id=%s'
+
+MAGEIA_BUGZILLA_URL = 'https://bugs.mageia.org/xmlrpc.cgi'
+MAGEIA_BUGZILLA_PASSWORD_FILE = '.gitzilla-password'
+MAGEIA_BUGZILLA_APIKEY_FILE = '.gitzilla-apikey' # this holds a Bugzilla API key
+GITWEB_UPDATE_URL = 'http://gitweb.mageia.org:8000'
+
+# Bugzilla user
+USER_LOGIN = 'bot'
+
+# Recipient of i18n notifications
+I18N_MAIL = 'i18n-reports@ml.mageia.org'
+
+# Set to 1..3 for debug logging (WARNING: this will show passwords to git committers!)
+DEBUG = 0
+
+git_multimail.FOOTER_TEMPLATE = """\
+
+-- \n\
+Mageia Git Monkeys.
+"""
+git_multimail.REVISION_FOOTER_TEMPLATE = git_multimail.FOOTER_TEMPLATE
+
+I18N_REVISION_HEADER_TEMPLATE = """\
+Date: %(send_date)s
+To: %(recipients)s
+Subject: %(emailprefix)s%(oneline)s
+MIME-Version: 1.0
+Content-Type: text/plain; charset=%(charset)s
+Content-Transfer-Encoding: 8bit
+From: %(fromaddr)s
+Reply-To: %(reply_to)s
+In-Reply-To: %(reply_to_msgid)s
+References: %(reply_to_msgid)s
+X-Git-Host: %(fqdn)s
+X-Git-Repo: %(repo_shortname)s
+X-Git-Refname: %(refname)s
+X-Git-Reftype: %(refname_type)s
+X-Git-Rev: %(rev)s
+Auto-Submitted: auto-generated
+"""
+
+
+REPO_NAME_RE = re.compile(r'^/git/(?P<name>.+?)(?:\.git)?$')
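+# e.g. REPO_NAME_RE turns '/git/infrastructure/repositories/artwork.git'
+# into the short name 'infrastructure/repositories/artwork'.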
+
+
+# Log a debug message when logging is enabled
+def debug(s, *a):
+ if DEBUG > 0:
+ print(s % a)
+
+
+def repo_shortname():
+ basename = os.path.abspath(git_multimail.get_git_dir())
+ m = REPO_NAME_RE.match(basename)
+ if m:
+ return m.group('name')
+ else:
+ return basename
+
+
+# Override the Environment class to generate an appropriate short name which is
+# used in git links and as an email prefix
+class MageiaEnvironment(git_multimail.Environment):
+ def get_repo_shortname(self):
+ return repo_shortname()
+
+
+git_multimail.Environment = MageiaEnvironment
+
+
+# Override the Revision class to inject gitweb/cgit links and any referenced
+# bug URLs
+class MageiaLinksRevision(git_multimail.Revision):
+ def __init__(self, reference_change, rev, num, tot):
+ super().__init__(reference_change, rev, num, tot)
+ self.bz = None
+
+ def bugzilla_init(self):
+ if not self.bz:
+ tokenfile = os.path.join(os.environ['HOME'], MAGEIA_BUGZILLA_APIKEY_FILE)
+ token = None
+ try:
+ token = open(tokenfile, 'r').readline().rstrip()
+ except IOError:
+ # Assume username/password will be used instead
+ pass
+
+ passwordfile = os.path.join(os.environ['HOME'], MAGEIA_BUGZILLA_PASSWORD_FILE)
+ pword = None
+ try:
+ pword = open(passwordfile, 'r').readline().rstrip()
+ except IOError:
+ print('Error: no Bugzilla credentials available; trying anyway')
+ # There's no real point in continuing, but why not
+
+ class ConfigSettings:
+ pass
+
+ cfg = ConfigSettings()
+ cfg.connection = 'Mageia'
+ if token:
+ # If an API key is found, that will be used instead of user/password
+ cfg.key = token
+ cfg.user = USER_LOGIN
+ cfg.password = pword
+ cfg.base = MAGEIA_BUGZILLA_URL
+ cfg.debug = DEBUG
+
+ cfile = configparser.ConfigParser()
+ cfile.add_section(cfg.connection)
+ self.bz = bugz.settings.Settings(cfg, cfile)
+
+ def generate_email_body(self, push):
+ """Show this revision."""
+ output = git_multimail.read_git_lines(
+ ['log'] + self.environment.commitlogopts + ['-1', self.rev.sha1],
+ keepends=True,
+ )
+ bugs = {}
+ commit = None
+ idx = 0
+ # Modify the mail output on-the-fly to add links; this is sensitive to
+ # the mail format produced by git_multimail. Also, update Mageia
+ # Bugzilla if a bug reference is found.
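+ # Illustration (indentation approximate): after the '---' separator
+ # the mail body gains a block such as
+ #   Commit Link:
+ #    https://gitweb.mageia.org/<repo>/commit/?id=<sha1>
+ #   Bug links:
+ #    Mageia
+ #     https://bugs.mageia.org/12345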
+ for line in output:
+ idx += 1
+ if line == "---\n":
+ if commit and COMMIT_REPLACE:
+ output.insert(idx, "\n")
+ output.insert(idx, " %s\n" % (COMMIT_REPLACE % (self.environment.get_repo_shortname(), commit)))
+ output.insert(idx, " Commit Link:\n")
+ idx += 3
+ if bugs:
+ output.insert(idx, " Bug links:\n")
+ idx += 1
+ for tracker, bugnos in bugs.items():
+ output.insert(idx, " %s\n" % tracker)
+ idx += 1
+ for bugno in bugnos:
+ output.insert(idx, " %s\n" % (BUG_REFS[tracker]['replace'] % bugno))
+ idx += 1
+ output.insert(idx, "\n")
+ idx += 1
+
+ # Attempt to modify Bugzilla
+ if "Mageia" in bugs:
+ try:
+ self.bugzilla_init()
+
+ # Mask email address
+ comment = None
+ # Suppress the "Bug links:" section if only one bug
+ # is referenced
+ if len(bugs) == 1 and len(bugs['Mageia']) == 1:
+ comment = output[0:idx-4]
+ else:
+ comment = output[0:idx]
+ comment[1] = re.sub(r'^(Author: [^@]*)@.*(>)?', r'\1@...>', comment[1])
+ comment = "".join(comment)
+
+ params = {}
+ params['ids'] = bugs['Mageia']
+ params['comment'] = { 'body': comment }
+ self.bz.call_bz(self.bz.bz.Bug.update, params)
+ print("Updated bugzilla bugs: %s" % ", ".join(bugs['Mageia']))
+ except:
+ print("Unable to post to bugzilla bugs: %s :(" % ", ".join(bugs['Mageia']))
+ print(sys.exc_info()[1])
+
+ break
+
+ m = COMMIT_RE.search(line)
+ if m:
+ commit = m.group(1)
+ for tracker in BUG_REFS.keys():
+ foundbugs = BUG_REFS[tracker]['re'].findall(line)
+ if foundbugs:
+ if tracker not in bugs:
+ bugs[tracker] = foundbugs
+ else:
+ bugs[tracker] = list(set(bugs[tracker] + foundbugs))
+
+ return output
+
+
+# Override the Revision class to send separate i18n notifications when a
+# commit touches translation (i18n) folders
+class MageiaI18NRevision(git_multimail.Revision):
+ """A Change consisting of a single git commit."""
+
+ def __init__(self, reference_change, rev, num, tot):
+ super().__init__(reference_change, rev, num, tot)
+
+ # Don't send to any of the normal recipients
+ self.recipients = False
+ self.cc_recipients = ''
+
+ i18n_folders = []
+ # Check files and find i18n folders
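+ # A tree entry ending in '/.tx' (presumably a Transifex client
+ # configuration directory) marks its parent directory as an i18n folder.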
+ for line in git_multimail.read_git_lines(['ls-tree', '-rd', self.rev.sha1]):
+ (modetypesha1, name) = line.split("\t", 1)
+ if name.endswith("/.tx"):
+ i18n_folders.append(os.path.dirname(name))
+
+ if i18n_folders:
+ self.output = git_multimail.read_git_lines(
+ ['log', '-C', '--stat', '-p', '--no-walk', self.rev.sha1, '--'] + i18n_folders,
+ keepends=True,
+ )
+ if self.output:
+ # We have some output so let's send the mail...
+ self.recipients = I18N_MAIL
+ print(f'Sending i18n notification to {self.recipients}')
+
+
+ def generate_email_body(self, push):
+ """Show this revision."""
+
+ return self.output
+
+
+def main():
+ # Attempt to write a last-updated file for cgit cosmetics
+ git_dir = git_multimail.get_git_dir()
+ infowebdir = os.path.join(git_dir, 'info', 'web')
+ lastupdated = git_multimail.read_git_output(
+ ['for-each-ref', '--sort=-committerdate', "--format=%(committerdate:iso8601)", '--count=1', 'refs/heads'],
+ )
+ try:
+ if not os.path.exists(infowebdir):
+ os.makedirs(infowebdir)
+ with open(os.path.join(infowebdir, 'last-modified'), 'w') as modfile:
+ modfile.write(lastupdated)
+ except Exception:
+ debug('Warning: could not update git last-modified date: %s', sys.exc_info()[1])
+
+ # Contact the on-the-pull service on the gitweb server with the updated repo
+ try:
+ req = urllib.request.Request(GITWEB_UPDATE_URL, (repo_shortname() + '.git').encode('utf-8'))
+ req.add_header('Content-Type', 'x-git/repo')
+ fp = urllib.request.urlopen(req, timeout=5)
+ fp.close()
+ except Exception:
+ debug('Warning: could not contact gitweb server: %s', sys.exc_info()[1])
+
+ config = git_multimail.Config('multimailhook')
+
+ try:
+ environment = git_multimail.choose_environment(
+ config, osenv=os.environ,
+ )
+
+ mailer = git_multimail.choose_mailer(config, environment)
+ # For testing...send mail to stdout only
+ #mailer = git_multimail.OutputMailer(sys.stdout)
+
+ changes = []
+ for line in sys.stdin:
+ (oldrev, newrev, refname) = line.strip().split(' ', 2)
+ changes.append(
+ git_multimail.ReferenceChange.create(environment, oldrev, newrev, refname)
+ )
+ push = git_multimail.Push(environment, changes)
+
+ # First pass - regular commit mails
+ git_multimail.Revision = MageiaLinksRevision
+ push.send_emails(mailer, body_filter=environment.filter_body)
+
+ # Second pass - i18n commit mails
+ git_multimail.REVISION_HEADER_TEMPLATE = I18N_REVISION_HEADER_TEMPLATE
+ git_multimail.Revision = MageiaI18NRevision
+ # Don't send the summary email, so nuke the change recipients
+ for change in push.changes:
+ change.recipients = False
+ push.send_emails(mailer, body_filter=environment.filter_body)
+
+ except git_multimail.ConfigurationException as e:
+ sys.exit(str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/deployment/mgagit/templates/git-post-update-hook b/deployment/mgagit/templates/git-post-update-hook
new file mode 100644
index 00000000..a2550ad3
--- /dev/null
+++ b/deployment/mgagit/templates/git-post-update-hook
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+if [ "${GL_REPO:0:28}" == "infrastructure/repositories/" ]; then
+ OK=no
+ (unset GIT_DIR && mgagit glconf && mgagit glrun) >/dev/null 2>&1 && OK=yes
+ if [ "$OK" == "yes" ]; then
+ echo
+ echo " *** Repository definitions updated"
+ echo
+ fi
+fi
+exit 0
\ No newline at end of file
diff --git a/deployment/mgagit/templates/gitolite.rc b/deployment/mgagit/templates/gitolite.rc
new file mode 100644
index 00000000..c4c925e6
--- /dev/null
+++ b/deployment/mgagit/templates/gitolite.rc
@@ -0,0 +1,161 @@
+# configuration variables for gitolite
+
+# This file is in perl syntax. But you do NOT need to know perl to edit it --
+# just mind the commas, use single quotes unless you know what you're doing,
+# and make sure the brackets and braces stay matched up!
+
+# (Tip: perl allows a comma after the last item in a list also!)
+
+# HELP for commands can be had by running the command with "-h".
+
+# HELP for all the other FEATURES can be found in the documentation (look for
+# "list of non-core programs shipped with gitolite" in the master index) or
+# directly in the corresponding source file.
+
+%RC = (
+
+ # ------------------------------------------------------------------
+
+ # default umask gives you perms of '0700'; see the rc file docs for
+ # how/why you might change this
+ UMASK => 0022,
+
+ # look for "git-config" in the documentation
+ GIT_CONFIG_KEYS => 'gitweb\.description gitweb\.owner multimailhook\.mailingList multimailhook\.emailDomain multimailhook\.envelopeSender',
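+ # (these correspond to the config entries set from the *.gl templates
+ # in this module)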
+
+ # comment out if you don't need all the extra detail in the logfile
+ LOG_EXTRA => 1,
+
+ # roles. add more roles (like MANAGER, TESTER, ...) here.
+ # WARNING: if you make changes to this hash, you MUST run 'gitolite
+ # compile' afterward, and possibly also 'gitolite trigger POST_COMPILE'
+ ROLES => {
+ READERS => 1,
+ WRITERS => 1,
+ },
+
+ # ------------------------------------------------------------------
+
+ # rc variables used by various features
+
+ # the 'info' command prints this as additional info, if it is set
+ # SITE_INFO => 'Please see http://blahblah/gitolite for more help',
+
+ # the 'desc' command uses this
+ # WRITER_CAN_UPDATE_DESC => 1,
+
+ # the CpuTime feature uses these
+ # display user, system, and elapsed times to user after each git operation
+ # DISPLAY_CPU_TIME => 1,
+ # display a warning if total CPU times (u, s, cu, cs) crosses this limit
+ # CPU_TIME_WARN_LIMIT => 0.1,
+
+ # the Mirroring feature needs this
+ # HOSTNAME => "foo",
+
+ # if you enabled 'Shell', you need this
+ # SHELL_USERS_LIST => "$ENV{HOME}/.gitolite.shell-users",
+
+ # ------------------------------------------------------------------
+
+ # List of commands and features to enable
+
+ ENABLE => [
+
+ # COMMANDS
+
+ # These are the commands enabled by default
+ 'help',
+ 'desc',
+ 'info',
+ 'perms',
+ 'writable',
+
+ # Uncomment or add new commands here.
+ # 'create',
+ # 'fork',
+ # 'mirror',
+ # 'sskm',
+ # 'D',
+
+ # These FEATURES are enabled by default.
+
+ # essential (unless you're using smart-http mode)
+ 'ssh-authkeys',
+
+ # creates git-config entities from gitolite.conf file entries like 'config foo.bar = baz'
+ 'git-config',
+
+ # creates git-daemon-export-ok files; if you don't use git-daemon, comment this out
+ 'daemon',
+
+ # creates projects.list file; if you don't use gitweb, comment this out
+ 'gitweb',
+
+ # These FEATURES are disabled by default; uncomment to enable. If you
+ # need to add new ones, ask on the mailing list :-)
+
+ # user-visible behaviour
+
+ # prevent wild repos auto-create on fetch/clone
+ # 'no-create-on-read',
+ # no auto-create at all (don't forget to enable the 'create' command!)
+ # 'no-auto-create',
+
+ # access a repo by another (possibly legacy) name
+ # 'Alias',
+
+ # give some users direct shell access
+ # 'Shell',
+
+ # set default roles from lines like 'option default.roles-1 = ...', etc.
+ # 'set-default-roles',
+
+ # system admin stuff
+
+ # enable mirroring (don't forget to set the HOSTNAME too!)
+ # 'Mirroring',
+
+ # allow people to submit pub files with more than one key in them
+ # 'ssh-authkeys-split',
+
+ # selective read control hack
+ # 'partial-copy',
+
+ # manage local, gitolite-controlled, copies of read-only upstream repos
+ # 'upstream',
+
+ # updates 'description' file instead of 'gitweb.description' config item
+ 'cgit',
+
+ # performance, logging, monitoring...
+
+ # be nice
+ # 'renice 10',
+
+ # log CPU times (user, system, cumulative user, cumulative system)
+ # 'CpuTime',
+
+ # syntactic_sugar for gitolite.conf and included files
+
+ # allow backslash-escaped continuation lines in gitolite.conf
+ # 'continuation-lines',
+
+ # create implicit user groups from directory names in keydir/
+ # 'keysubdirs-as-groups',
+
+ # allow simple line-oriented macros
+ # 'macros',
+
+ ],
+
+);
+
+# ------------------------------------------------------------------------------
+# per perl rules, this should be the last line in such a file:
+1;
+
+# Local variables:
+# mode: perl
+# End:
+# vim: set syn=perl:
diff --git a/deployment/mgagit/templates/group_owned_repo.gl b/deployment/mgagit/templates/group_owned_repo.gl
new file mode 100644
index 00000000..14431d6c
--- /dev/null
+++ b/deployment/mgagit/templates/group_owned_repo.gl
@@ -0,0 +1,36 @@
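+# Note: two templating passes apply to this file. Puppet expands the ERB tags
+# (<%= @group %>, <%= @ml %>) when installing it as <group>_repo.gl; mgagit
+# later expands the [% ... %] directives (Template Toolkit syntax, judging by
+# the SET/IF blocks) once per repository.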
+[% SET maintainer = r.repos.$repo.maintainer %]
+repo [% repo %]
+ RW+ master$ = [% maintainer %]
+ RW+ distro/ = [% maintainer %]
+ RW+ topic/ = [% maintainer %]
+ RW+ refs/tags/ = [% maintainer %]
+[% IF r.repos.$repo.lockdown != 'yes' -%]
+ RW master$ = @mga-<%= @group %>
+ RW distro/ = @mga-<%= @group %>
+ RW+ topic/ = @mga-<%= @group %>
+ RW refs/tags/ = @mga-<%= @group %>
+ RW master$ = @mga-i18n-committers
+ RW distro/ = @mga-i18n-committers
+ RW topic/ = @mga-i18n-committers
+[% END -%]
+ RW+ master$ = @mga-sysadmin
+ RW+ distro/ = @mga-sysadmin
+ RW+ topic/ = @mga-sysadmin
+ RW+ refs/tags/ = @mga-sysadmin
+ RW+ user/USER/ = @all
+ R = @all
+[% IF r.repos.$repo.noemail -%]
+ config multimailhook.mailingList = ""
+[% ELSE -%]
+[% IF r.repos.$repo.mailingList -%]
+ config multimailhook.mailingList = "[% r.repos.$repo.mailingList %]"
+[% ELSE -%]
+ config multimailhook.mailingList = "<%= @ml %>-commits@ml.mageia.org"
+[% END -%]
+[% END -%]
+ config multimailhook.emailDomain = "mageia.org"
+ config multimailhook.envelopeSender = "root@mageia.org"
+ config gitweb.description = "[% r.repos.$repo.description %]"
+[% IF r.users.$maintainer -%]
+ config gitweb.owner = "[% r.users.$maintainer.cn.0 %] [[% maintainer %]]"
+[% END -%]
diff --git a/deployment/mgagit/templates/mgagit.conf b/deployment/mgagit/templates/mgagit.conf
new file mode 100644
index 00000000..82b1d66a
--- /dev/null
+++ b/deployment/mgagit/templates/mgagit.conf
@@ -0,0 +1,57 @@
+---
+use_ldap: yes
+ldapserver: <%= @ldap_server %>
+binddn: <%= @binddn %>
+bindpwfile: <%= @bindpwfile %>
+pubkey_dir: <%= @gitolite_keydir %>
+tmpl_dir: <%= @gitolite_tmpldir %>
+gitolite_config: <%= @gitolite_conf %>
+run_gitolite: yes
+repodef_dir: <%= @reposconf_dir %>
+repos_config:
+ - prefix: infrastructure/repositories
+ gl_template: repodef_repo
+ repos:
+ - name: artwork
+ maintainer: '@mga-sysadmin'
+ description: Artwork repository definitions
+ - name: doc
+ maintainer: '@mga-sysadmin'
+ description: Documentation repository definitions
+ - name: infrastructure
+ maintainer: '@mga-sysadmin'
+ description: Infrastructure repository definitions
+ - name: org
+ maintainer: '@mga-sysadmin'
+ description: Organization repository definitions
+ - name: qa
+ maintainer: '@mga-sysadmin'
+ description: QA repository definitions
+ - name: software
+ maintainer: '@mga-sysadmin'
+ description: Software repository definitions
+ - name: web
+ maintainer: '@mga-sysadmin'
+ description: Website repository definitions
+ - prefix: artwork
+ gl_template: artwork_repo
+ git_url: file:///git/infrastructure/repositories/artwork.git
+ - prefix: doc
+ gl_template: doc_repo
+ git_url: file:///git/infrastructure/repositories/doc.git
+ - prefix: infrastructure
+ gl_template: infrastructure_repo
+ git_url: file:///git/infrastructure/repositories/infrastructure.git
+ - prefix: org
+ gl_template: org_repo
+ git_url: file:///git/infrastructure/repositories/org.git
+ - prefix: qa
+ gl_template: qa_repo
+ git_url: file:///git/infrastructure/repositories/qa.git
+ - prefix: software
+ gl_template: soft_repo
+ git_url: file:///git/infrastructure/repositories/software.git
+ - prefix: web
+ gl_template: web_repo
+ git_url: file:///git/infrastructure/repositories/web.git
+www_dir: <%= @vhostdir %>
diff --git a/deployment/mgagit/templates/repodef_repo.gl b/deployment/mgagit/templates/repodef_repo.gl
new file mode 100644
index 00000000..12c946f5
--- /dev/null
+++ b/deployment/mgagit/templates/repodef_repo.gl
@@ -0,0 +1,8 @@
+repo [% repo %]
+ RW master$ = [% r.repos.$repo.maintainer %]
+ RW+ user/USER/ = @all
+ R = @all
+ config multimailhook.mailingList = "sysadmin-commits@ml.mageia.org"
+ config multimailhook.emailDomain = "mageia.org"
+ config multimailhook.envelopeSender = "root@mageia.org"
+ config gitweb.description = "[% r.repos.$repo.description %]"
diff --git a/deployment/releasekey/manifests/init.pp b/deployment/releasekey/manifests/init.pp
new file mode 100644
index 00000000..a3c99526
--- /dev/null
+++ b/deployment/releasekey/manifests/init.pp
@@ -0,0 +1,27 @@
+class releasekey {
+ $sign_login = 'releasekey'
+ $sign_home_dir = "/var/lib/${sign_login}"
+ $sign_keydir = "${sign_home_dir}/keys"
+ group { $sign_login: }
+
+ user { $sign_login:
+ comment => 'System user to sign Mageia Releases',
+ home => $sign_home_dir,
+ gid => $sign_login,
+ require => Group[$sign_login],
+ }
+
+ gnupg::keys{ 'release':
+ email => "release@${::domain}",
+ #FIXME there should be a variable somewhere to change the name of the distribution
+ key_name => 'Mageia Release',
+ login => $sign_login,
+ batchdir => "${sign_home_dir}/batches",
+ keydir => $sign_keydir,
+ require => User[$sign_login],
+ }
+
+ mga_common::local_script { 'sign_checksums':
+ content => template('releasekey/sign_checksums'),
+ }
+}
diff --git a/deployment/releasekey/templates/sign_checksums b/deployment/releasekey/templates/sign_checksums
new file mode 100644
index 00000000..5edf7e57
--- /dev/null
+++ b/deployment/releasekey/templates/sign_checksums
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+if [ $# -lt 1 ]; then
+ echo "Usage: $0 <directory>" >&2
+ exit 1
+fi
+
+directory=$1
+cd "$directory" || exit 1
+for chksum in *.md5 *.sha3 *.sha512; do
+ gpg --homedir "<%= @sign_keydir %>" --yes --sign "$chksum"
+done
diff --git a/deployment/reports/manifests/ii.pp b/deployment/reports/manifests/ii.pp
new file mode 100644
index 00000000..1d8ee123
--- /dev/null
+++ b/deployment/reports/manifests/ii.pp
@@ -0,0 +1,15 @@
+class reports::ii {
+ $channel = '#mageia-sysadm'
+ $server = 'irc.freenode.net'
+ # tribute to Masamune Shirow
+ $nick = 'project_2501'
+
+ ii::bot { $nick:
+ channel => $channel,
+ server => $server,
+ }
+
+ file { '/etc/puppet/socket.yaml':
+ content => template('reports/socket.yaml'),
+ }
+}
diff --git a/deployment/reports/templates/socket.yaml b/deployment/reports/templates/socket.yaml
new file mode 100644
index 00000000..6b0a8b33
--- /dev/null
+++ b/deployment/reports/templates/socket.yaml
@@ -0,0 +1,2 @@
+---
+socket_path: /var/lib/ii/<%= @nick %>/<%= @server %>/<%= @channel %>/in
diff --git a/deployment/repositories/manifests/git_mirror.pp b/deployment/repositories/manifests/git_mirror.pp
new file mode 100644
index 00000000..7384b5a8
--- /dev/null
+++ b/deployment/repositories/manifests/git_mirror.pp
@@ -0,0 +1,16 @@
+class repositories::git_mirror {
+ #file { '/git':
+ # ensure => directory,
+ #}
+
+ #git::mirror { '/git/forum/':
+ # description => "Reference code for forum.${::domain}",
+ # source => "git://git.${::domain}/forum/",
+ #}
+
+ #git::mirror { '/git/initscripts/':
+ # description => 'Reference code for Initscripts',
+ # source => "git://git.${::domain}/initscripts/",
+ #}
+}
+
diff --git a/deployment/repositories/manifests/subversion.pp b/deployment/repositories/manifests/subversion.pp
new file mode 100644
index 00000000..742efe67
--- /dev/null
+++ b/deployment/repositories/manifests/subversion.pp
@@ -0,0 +1,73 @@
+class repositories::subversion {
+
+ # Be sure that any mailing list found in commit_mail here whitelists
+ # "subversion_noreply@ml.mageia.org" as a sender by adding it to
+ # sender_email in its sympa::list::X configuration in
+ # deployment/lists/manifests/init.pp
+
+ subversion::repository { '/svn/adm/':
+ group => 'mga-sysadmin',
+ commit_mail => ["sysadmin-commits@ml.${::domain}"],
+ syntax_check => ['check_puppet_templates','check_puppet'],
+ }
+
+ subversion::repository { '/svn/advisories':
+ group => 'mga-qa-committers',
+ commit_mail => ["qa-commits@ml.${::domain}"],
+ }
+
+ sudo::sudoers_config { 'puppet_update':
+ content => template('repositories/puppet_update.sudoers')
+ }
+
+ subversion::hook::post_commit { '/svn/adm/|puppet_update':
+ content => template('repositories/puppet_update.sh')
+ }
+
+ subversion::repository { '/svn/org/':
+ group => 'mga-board',
+ commit_mail => ["board-commits@ml.${::domain}"],
+ }
+
+ subversion::repository { '/svn/soft/':
+ group => 'mga-packagers',
+ commit_mail => ["soft-commits@ml.${::domain}"],
+ syntax_check => ['check_po'],
+ i18n_mail => ["i18n-reports@ml.${::domain}"],
+ }
+
+ subversion::repository { '/svn/soft_publish/':
+ group => 'mga-packagers',
+ commit_mail => ["soft-commits@ml.${::domain}"],
+ }
+
+ subversion::repository { '/svn/web/':
+ group => 'mga-web',
+ commit_mail => ["atelier-commits@ml.${::domain}"],
+ syntax_check => ['check_php'],
+ }
+
+ subversion::repository { '/svn/packages/':
+ group => 'mga-packagers-committers',
+ no_binary => true,
+ commit_mail => ["packages-commits@ml.${::domain}"],
+ }
+
+ subversion::repository { '/svn/test-irker/':
+ group => 'mga-packagers',
+ no_binary => true,
+ commit_mail => ["tmb@${::domain}"],
+ irker_conf => {
+ project => 'mageia',
+ repo => 'testrepo',
+ tinyifier => 'https://is.gd/create.php?format=simple&url=',
+ urlprefix => "https://svnweb.${::domain}/%(repo)?view=revision&revision=",
+ channels => '{irc://chat.freenode.net/commits, irc://chat.freenode.net/test-irker}',
+ },
+ }
+
+ subversion::repository { '/svn/treasurer/':
+ group => 'mga-treasurer',
+ commit_mail => ["treasurer-commits@ml.${::domain}"],
+ }
+}
diff --git a/deployment/repositories/manifests/svn_mirror.pp b/deployment/repositories/manifests/svn_mirror.pp
new file mode 100644
index 00000000..d71e896d
--- /dev/null
+++ b/deployment/repositories/manifests/svn_mirror.pp
@@ -0,0 +1,17 @@
+class repositories::svn_mirror {
+ file { '/svn':
+ ensure => directory,
+ }
+
+ subversion::mirror_repository {
+ '/svn/adm/': source => "svn://svn.${::domain}/svn/adm/";
+ '/svn/advisories/':source => "svn://svn.${::domain}/svn/advisories/";
+ '/svn/soft/': source => "svn://svn.${::domain}/svn/soft/";
+ '/svn/web/': source => "svn://svn.${::domain}/svn/web/";
+ '/svn/packages/': source => "svn://svn.${::domain}/svn/packages/";
+ '/svn/org/': source => "svn://svn.${::domain}/svn/org/";
+ '/svn/treasurer/': source => "svn://svn.${::domain}/svn/treasurer/";
+ }
+
+ # no binrepos, too big to mirror
+}
diff --git a/deployment/repositories/templates/puppet_update.sh b/deployment/repositories/templates/puppet_update.sh
new file mode 100644
index 00000000..07ff18ab
--- /dev/null
+++ b/deployment/repositories/templates/puppet_update.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
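+# Installed as the post-commit hook for /svn/adm/ (see repositories::subversion);
+# the matching sudoers entry in puppet_update.sudoers allows %mga-sysadmin to
+# run exactly this command.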
+sudo /usr/bin/svn update -q --non-interactive --accept theirs-full /etc/puppet
diff --git a/deployment/repositories/templates/puppet_update.sudoers b/deployment/repositories/templates/puppet_update.sudoers
new file mode 100644
index 00000000..42235771
--- /dev/null
+++ b/deployment/repositories/templates/puppet_update.sudoers
@@ -0,0 +1 @@
+%mga-sysadmin ALL= NOPASSWD: /usr/bin/svn update -q --non-interactive --accept theirs-full /etc/puppet
diff --git a/deployment/shadow/manifests/init.pp b/deployment/shadow/manifests/init.pp
new file mode 100644
index 00000000..c24c36bf
--- /dev/null
+++ b/deployment/shadow/manifests/init.pp
@@ -0,0 +1,23 @@
+class shadow {
+ include stdlib
+
+ $login_defs = '/etc/login.defs'
+
+ file { $login_defs:
+ owner => 'root',
+ group => 'shadow',
+ mode => '0640',
+ }
+
+ file_line { 'uid_max':
+ path => $login_defs,
+ line => 'UID_MAX 2000',
+ match => '^UID_MAX\s+',
+ }
+
+ file_line { 'gid_max':
+ path => $login_defs,
+ line => 'GID_MAX 2000',
+ match => '^GID_MAX\s+',
+ }
+}
diff --git a/deployment/softwarekey/manifests/init.pp b/deployment/softwarekey/manifests/init.pp
new file mode 100644
index 00000000..b2c4bcb2
--- /dev/null
+++ b/deployment/softwarekey/manifests/init.pp
@@ -0,0 +1,24 @@
+class softwarekey {
+ $sign_login = 'softwarekey'
+ $sign_home_dir = "/var/lib/${sign_login}"
+ $sign_keydir = "${sign_home_dir}/keys"
+
+ group { $sign_login: }
+
+ user { $sign_login:
+ home => $sign_home_dir,
+ gid => $sign_login,
+ require => Group[$sign_login],
+ }
+
+ gnupg::keys{ 'software':
+ email => "software@${::domain}",
+ #FIXME there should be a variable somewhere to change the
+ # name of the distribution
+ key_name => 'Mageia Software',
+ login => $sign_login,
+ batchdir => "${sign_home_dir}/batches",
+ keydir => $sign_keydir,
+ require => User[$sign_login],
+ }
+}
diff --git a/deployment/tld_redirections/manifests/init.pp b/deployment/tld_redirections/manifests/init.pp
new file mode 100644
index 00000000..18db541c
--- /dev/null
+++ b/deployment/tld_redirections/manifests/init.pp
@@ -0,0 +1,26 @@
+class tld_redirections {
+ define domain {
+ dns::zone { "mageia.${name}": }
+ }
+
+ define redirection($managed_dns = false) {
+
+ if ($managed_dns) {
+ @@tld_redirections::domain { $name: }
+ }
+
+ apache::vhost_redirect { "mageia.${name}":
+ url => "https://www.${::domain}/?fromtld=${name}"
+ }
+
+ apache::vhost_redirect { "www.mageia.${name}":
+ url => "https://www.${::domain}/?fromtld=${name}"
+ }
+ }
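+
+ # Hypothetical usage for a TLD whose DNS zone would also be managed here:
+ #   redirection { 'de': managed_dns => true }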
+
+ # domain owned by Florin Catalin Russen
+ redirection { "ro": }
+
+ # domain owned by the association
+ redirection { "fr": }
+}
diff --git a/deployment/websites/manifests/archives.pp b/deployment/websites/manifests/archives.pp
new file mode 100644
index 00000000..825e082b
--- /dev/null
+++ b/deployment/websites/manifests/archives.pp
@@ -0,0 +1,20 @@
+class websites::archives {
+ include websites::base
+ $vhost = "archives.${::domain}"
+ $vhostdir = "${websites::base::webdatadir}/${vhost}"
+ $git_location = "git://git.${::domain}/web/archives"
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ location => $vhostdir,
+ }
+
+ git::snapshot { $vhostdir:
+ source => $git_location,
+ }
+}
diff --git a/deployment/websites/manifests/base.pp b/deployment/websites/manifests/base.pp
new file mode 100644
index 00000000..1c2dbc64
--- /dev/null
+++ b/deployment/websites/manifests/base.pp
@@ -0,0 +1,9 @@
+class websites::base {
+ $webdatadir = '/var/www/vhosts'
+ file { $webdatadir:
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => root
+ }
+}
diff --git a/deployment/websites/manifests/doc.pp b/deployment/websites/manifests/doc.pp
new file mode 100644
index 00000000..01474af2
--- /dev/null
+++ b/deployment/websites/manifests/doc.pp
@@ -0,0 +1,20 @@
+class websites::doc {
+ include websites::base
+ $vhost = "doc.${::domain}"
+ $vhostdir = "${websites::base::webdatadir}/${vhost}"
+ $git_location = "git://git.${::domain}/web/doc"
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ location => $vhostdir,
+ }
+
+ git::snapshot { $vhostdir:
+ source => $git_location,
+ }
+}
diff --git a/deployment/websites/manifests/forum_proxy.pp b/deployment/websites/manifests/forum_proxy.pp
new file mode 100644
index 00000000..bd8f1fc1
--- /dev/null
+++ b/deployment/websites/manifests/forum_proxy.pp
@@ -0,0 +1,13 @@
+class websites::forum_proxy {
+ $web_domain = "forums.${::domain}"
+
+ apache::vhost::reverse_proxy { $web_domain:
+ url => "http://${web_domain}/",
+ }
+
+ apache::vhost::reverse_proxy { "ssl_${web_domain}":
+ vhost => $web_domain,
+ use_ssl => true,
+ url => "http://${web_domain}/",
+ }
+}
diff --git a/deployment/websites/manifests/git.pp b/deployment/websites/manifests/git.pp
new file mode 100644
index 00000000..e357dfb2
--- /dev/null
+++ b/deployment/websites/manifests/git.pp
@@ -0,0 +1,10 @@
+class websites::git {
+ apache::vhost_redirect { "git.${::domain}":
+ url => "https://gitweb.${::domain}/",
+ }
+ apache::vhost_redirect { "ssl_git.${::domain}":
+ use_ssl => true,
+ vhost => "git.${::domain}",
+ url => "https://gitweb.${::domain}/",
+ }
+}
diff --git a/deployment/websites/manifests/hugs.pp b/deployment/websites/manifests/hugs.pp
new file mode 100644
index 00000000..95246464
--- /dev/null
+++ b/deployment/websites/manifests/hugs.pp
@@ -0,0 +1,16 @@
+class websites::hugs {
+ include websites::base
+
+ $vhostdir = "${websites::base::webdatadir}/hugs.${::domain}"
+ $git_location = "git://git.${::domain}/web/hugs"
+
+ apache::vhost::base { "hugs.${::domain}":
+ location => $vhostdir,
+ }
+
+ git::snapshot { $vhostdir:
+ source => $git_location
+ }
+
+ package { 'php-exif': }
+}
diff --git a/deployment/websites/manifests/init.pp b/deployment/websites/manifests/init.pp
new file mode 100644
index 00000000..afc63997
--- /dev/null
+++ b/deployment/websites/manifests/init.pp
@@ -0,0 +1 @@
+class websites { }
diff --git a/deployment/websites/manifests/meetbot.pp b/deployment/websites/manifests/meetbot.pp
new file mode 100644
index 00000000..04bbcf70
--- /dev/null
+++ b/deployment/websites/manifests/meetbot.pp
@@ -0,0 +1,14 @@
+# We should rather have a dedicated meetbot module to deploy it,
+# set up its backups and manage this website
+class websites::meetbot {
+ $vhost = "meetbot.${::domain}"
+ $vhostdir = "/home/irc_bots/meetings/"
+
+ apache::vhost::other_app { "meetbot.${::domain}":
+ vhost_file => 'websites/vhost_meetbot.conf',
+ }
+
+ file { $vhostdir:
+ ensure => directory,
+ }
+}
diff --git a/deployment/websites/manifests/nav.pp b/deployment/websites/manifests/nav.pp
new file mode 100644
index 00000000..84323c26
--- /dev/null
+++ b/deployment/websites/manifests/nav.pp
@@ -0,0 +1,27 @@
+class websites::nav {
+ include websites::base
+ $vhost = "nav.${::domain}"
+ $vhostdir = "${websites::base::webdatadir}/${vhost}"
+ $git_location = "git://git.${::domain}/web/nav"
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ location => $vhostdir,
+ }
+
+ git::snapshot { $vhostdir:
+ source => $git_location,
+ }
+
+ file { "${vhostdir}/var/tmp/cache":
+ ensure => directory,
+ mode => '0660',
+ group => $apache::var::apache_group,
+ require => Git::Snapshot[$vhostdir],
+ }
+}
diff --git a/deployment/websites/manifests/perl.pp b/deployment/websites/manifests/perl.pp
new file mode 100644
index 00000000..2b4849fb
--- /dev/null
+++ b/deployment/websites/manifests/perl.pp
@@ -0,0 +1,54 @@
+class websites::perl {
+ include websites::base
+ $vhost = "perl.${::domain}"
+ $vhostdir = "${websites::base::webdatadir}/${vhost}"
+ $statsdir = "${vhostdir}/stats"
+ $login = 'pkgcpan'
+ $homedir = "/var/lib/${login}"
+
+ user { $login:
+ managehome => true,
+ home => $homedir,
+ }
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ options => [ 'Indexes' ],
+ }
+
+ file { $vhostdir:
+ ensure => directory,
+ owner => $login,
+ group => $login,
+ }
+
+ package { ['perl-Module-Packaged-Generator', 'magpie']: }
+
+ cron { 'update cpanpkg':
+ hour => 23,
+ minute => 0,
+ require => Package['perl-Module-Packaged-Generator'],
+ command => "pkgcpan -q -f ${vhostdir}/cpan_Mageia.db -d Mageia && chmod 644 ${vhostdir}/cpan_Mageia.db",
+ user => $login,
+ }
+
+ file { "${vhostdir}/cpan_Mageia.db":
+ owner => $login,
+ group => $login,
+ }
+
+ file { $statsdir:
+ ensure => directory,
+ owner => $login,
+ group => $login,
+ }
+
+ # https://www.mageia.org/pipermail/mageia-sysadm/2012-March/004337.html
+ cron { 'update pkgcpan stats':
+ hour => 23,
+ minute => 30,
+ require => [ Package['magpie'], File[$statsdir] ],
+ command => "magpie webstatic -qq -d ${statsdir}",
+ user => $login,
+ }
+}
diff --git a/deployment/websites/manifests/releases.pp b/deployment/websites/manifests/releases.pp
new file mode 100644
index 00000000..2b25c8ec
--- /dev/null
+++ b/deployment/websites/manifests/releases.pp
@@ -0,0 +1,22 @@
+class websites::releases {
+ include websites::base
+ $vhost = "releases.${::domain}"
+ $vhostdir = "${websites::base::webdatadir}/${vhost}"
+ $git_location = "git://git.${::domain}/web/releases"
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ options => [ 'FollowSymLinks' ],
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ location => $vhostdir,
+ options => [ 'FollowSymLinks' ],
+ }
+
+ git::snapshot { $vhostdir:
+ source => $git_location,
+ }
+}
diff --git a/deployment/websites/manifests/start.pp b/deployment/websites/manifests/start.pp
new file mode 100644
index 00000000..9d5b77e5
--- /dev/null
+++ b/deployment/websites/manifests/start.pp
@@ -0,0 +1,11 @@
+class websites::start {
+ include websites::base
+ apache::vhost_redirect { "start.${::domain}":
+ url => "https://www.${::domain}/community/",
+ }
+ apache::vhost_redirect { "ssl_start.${::domain}":
+ use_ssl => true,
+ vhost => "start.${::domain}",
+ url => "https://www.${::domain}/community/",
+ }
+}
diff --git a/deployment/websites/manifests/static.pp b/deployment/websites/manifests/static.pp
new file mode 100644
index 00000000..66711329
--- /dev/null
+++ b/deployment/websites/manifests/static.pp
@@ -0,0 +1,16 @@
+class websites::static {
+ include websites::base
+ $vhostdir = "${websites::base::webdatadir}/static.${::domain}"
+
+ apache::vhost::other_app { "static.${::domain}":
+ vhost_file => 'websites/vhost_static.conf',
+ }
+
+ file { $vhostdir:
+ ensure => directory,
+ }
+
+ git::snapshot { "${vhostdir}":
+ source => "git://git.${::domain}/web/www",
+ }
+}
diff --git a/deployment/websites/manifests/svn.pp b/deployment/websites/manifests/svn.pp
new file mode 100644
index 00000000..973c012d
--- /dev/null
+++ b/deployment/websites/manifests/svn.pp
@@ -0,0 +1,10 @@
+class websites::svn {
+ apache::vhost_redirect { "svn.${::domain}":
+ url => "https://svnweb.${::domain}/",
+ }
+ apache::vhost_redirect { "ssl_svn.${::domain}":
+ use_ssl => true,
+ vhost => "svn.${::domain}",
+ url => "https://svnweb.${::domain}/",
+ }
+}
diff --git a/deployment/websites/manifests/www.pp b/deployment/websites/manifests/www.pp
new file mode 100644
index 00000000..08c232f2
--- /dev/null
+++ b/deployment/websites/manifests/www.pp
@@ -0,0 +1,64 @@
+class websites::www {
+ include websites::base
+ $vhost = "www.${::domain}"
+ $vhostdir = "${websites::base::webdatadir}/${vhost}"
+ $git_location = "git://git.${::domain}/web/www"
+
+ include apache::var
+ include apache::mod::php
+
+ # for mailman reverse proxy, on ssl
+ include apache::mod::proxy
+ include apache::mod::ssl
+
+ git::snapshot { $vhostdir:
+ source => $git_location,
+ }
+
+ file { [ "${vhostdir}/var",
+ "${vhostdir}/var/tmp",
+ "${vhostdir}/var/tmp/cache" ] :
+ ensure => directory,
+ group => $apache::var::apache_group,
+ mode => '0660',
+ require => Git::Snapshot[$vhostdir],
+ }
+
+ file { [ "${vhostdir}/_nav",
+ "${vhostdir}/_nav/var",
+ "${vhostdir}/_nav/var/tmp",
+ "${vhostdir}/_nav/var/tmp/cache" ] :
+ ensure => directory,
+ group => $apache::var::apache_group,
+ mode => '0660',
+ require => Git::Snapshot[$vhostdir],
+ }
+
+ apache::vhost::base { $vhost:
+ content => template('websites/vhost_www.conf',
+ 'websites/vhost_www_rewrite.conf'),
+ location => $vhostdir,
+ options => ['FollowSymLinks'],
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ use_ssl => true,
+ vhost => $vhost,
+ content => template('websites/vhost_www.conf',
+ 'websites/vhost_www_rewrite.conf'),
+ location => $vhostdir,
+ options => ['FollowSymLinks'],
+ }
+
+ apache::vhost_redirect { "${::domain}":
+ url => "https://www.${::domain}/",
+ }
+
+ apache::vhost_redirect { "ssl_${::domain}":
+ use_ssl => true,
+ vhost => "${::domain}",
+ url => "https://www.${::domain}/",
+ }
+
+ package { ['php-mbstring', 'php-mcrypt', 'php-gettext']: }
+}
diff --git a/deployment/websites/templates/vhost_meetbot.conf b/deployment/websites/templates/vhost_meetbot.conf
new file mode 100644
index 00000000..40a0f92a
--- /dev/null
+++ b/deployment/websites/templates/vhost_meetbot.conf
@@ -0,0 +1,36 @@
+<VirtualHost *:80>
+ ServerAdmin sysadm@mageia.org
+ ServerName meetbot.<%= @domain %>
+ DocumentRoot <%= scope.lookupvar("websites::meetbot::vhostdir") %>
+
+ CustomLog /var/log/httpd/access_meetbot_log combined
+ ErrorLog /var/log/httpd/error_meetbot_log
+ <Directory <%= scope.lookupvar("websites::meetbot::vhostdir") %>>
+ Allow from all
+ <IfModule mod_authz_core.c>
+ Require all granted
+ </IfModule>
+ Options +Indexes
+ IndexIgnore .htaccess *.bak *~ *.txt *.log.html
+ </Directory>
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerAdmin sysadm@mageia.org
+ ServerName meetbot.<%= @domain %>
+ DocumentRoot <%= scope.lookupvar("websites::meetbot::vhostdir") %>
+
+ CustomLog /var/log/httpd/access_meetbot_log combined
+ ErrorLog /var/log/httpd/error_meetbot_log
+
+<%= scope.function_template(["apache/vhost_ssl.conf"]) %>
+
+ <Directory <%= scope.lookupvar("websites::meetbot::vhostdir") %>>
+ Allow from all
+ <IfModule mod_authz_core.c>
+ Require all granted
+ </IfModule>
+ Options +Indexes
+ IndexIgnore .htaccess *.bak *~ *.txt *.log.html
+ </Directory>
+</VirtualHost>
diff --git a/deployment/websites/templates/vhost_static.conf b/deployment/websites/templates/vhost_static.conf
new file mode 100644
index 00000000..fcadc425
--- /dev/null
+++ b/deployment/websites/templates/vhost_static.conf
@@ -0,0 +1,83 @@
+<VirtualHost *:80>
+ ServerName static.<%= @domain %>
+
+ DocumentRoot <%= scope.lookupvar("websites::static::vhostdir") %>
+ CustomLog /var/log/httpd/static_log combined
+ ErrorLog /var/log/httpd/error_static_log
+
+ FileETag none
+ Header unset ETag
+ ExpiresActive On
+ ExpiresByType text/css "access plus 1 month"
+ ExpiresByType image/gif "access plus 2 months"
+ ExpiresByType image/png "access plus 2 months"
+ ExpiresByType image/jpeg "access plus 2 months"
+ ExpiresByType image/x-icon "access plus 2 months"
+ ExpiresByType application/x-javascript "access plus 1 month"
+ ExpiresByType text/javascript "access plus 1 month"
+ AddOutputFilterByType DEFLATE text/html text/plain text/xml text/css
+ AddOutputFilterByType DEFLATE application/json text/javascript application/javascript application/x-javascript
+
+ <Location />
+ Deny from all
+ </Location>
+
+ <Location /g/>
+ Allow from all
+ </Location>
+
+ <Directory <%= scope.lookupvar("websites::static::vhostdir") %>>
+ Order deny,allow
+ Deny from All
+ AllowOverride None
+ </Directory>
+
+ <Directory <%= scope.lookupvar("websites::static::vhostdir") %>/g>
+ Order deny,allow
+ Allow from All
+ AllowOverride None
+ </Directory>
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerName static.<%= @domain %>
+
+ DocumentRoot <%= scope.lookupvar("websites::static::vhostdir") %>
+ CustomLog /var/log/httpd/static_log combined
+ ErrorLog /var/log/httpd/error_static_log
+
+<%= scope.function_template(["apache/vhost_ssl.conf"]) %>
+
+ FileETag none
+ Header unset ETag
+ ExpiresActive On
+ ExpiresByType text/css "access plus 1 month"
+ ExpiresByType image/gif "access plus 2 months"
+ ExpiresByType image/png "access plus 2 months"
+ ExpiresByType image/jpeg "access plus 2 months"
+ ExpiresByType image/x-icon "access plus 2 months"
+ ExpiresByType application/x-javascript "access plus 1 month"
+ ExpiresByType text/javascript "access plus 1 month"
+ AddOutputFilterByType DEFLATE text/html text/plain text/xml text/css
+ AddOutputFilterByType DEFLATE application/json text/javascript application/javascript application/x-javascript
+
+ <Location />
+ Deny from all
+ </Location>
+
+ <Location /g/>
+ Allow from all
+ </Location>
+
+ <Directory <%= scope.lookupvar("websites::static::vhostdir") %>>
+ Order deny,allow
+ Deny from All
+ AllowOverride None
+ </Directory>
+
+ <Directory <%= scope.lookupvar("websites::static::vhostdir") %>/g>
+ Order deny,allow
+ Allow from All
+ AllowOverride None
+ </Directory>
+</VirtualHost>
diff --git a/deployment/websites/templates/vhost_www.conf b/deployment/websites/templates/vhost_www.conf
new file mode 100644
index 00000000..399681be
--- /dev/null
+++ b/deployment/websites/templates/vhost_www.conf
@@ -0,0 +1,13 @@
+Redirect /wiki https://wiki.mageia.org/#
+
+# Everything under /g/ is static content to be served by a secondary host
+RewriteEngine On
+RewriteRule ^g/(.+)$ https://static.mageia.org/g/$1 [R,L,QSA]
+
+ErrorDocument 404 /404.php
+
+php_value short_open_tag false
+
+# switch all to https
+RewriteCond %{HTTPS} !=on
+RewriteRule ^/?(.*) https://%{SERVER_NAME}/$1 [R,L]
diff --git a/deployment/websites/templates/vhost_www_rewrite.conf b/deployment/websites/templates/vhost_www_rewrite.conf
new file mode 100644
index 00000000..c7bb2fd5
--- /dev/null
+++ b/deployment/websites/templates/vhost_www_rewrite.conf
@@ -0,0 +1,22 @@
+RewriteEngine On
+RewriteRule ^/mailman/listinfo/mageia-announce https://ml.mageia.org/l/info/announce [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-artwork https://ml.mageia.org/l/info/atelier-discuss [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-bugsquad https://ml.mageia.org/l/info/bugsquad-discuss [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-dev https://ml.mageia.org/l/info/dev [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-discuss https://ml.mageia.org/l/info/discuss [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-i18n https://ml.mageia.org/l/info/i18n-discuss [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-marketing https://ml.mageia.org/l/info/atelier-discuss [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-sysadm https://ml.mageia.org/l/info/sysadmin-discuss [R=301,L]
+RewriteRule ^/mailman/listinfo/mageia-webteam https://ml.mageia.org/l/info/atelier-discuss [R=301,L]
+RewriteRule ^/mailman https://ml.mageia.org/ [R=301,L]
+
+RewriteRule ^/pipermail/mageia-announce/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-announce/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-artwork/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-artwork/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-bugsquad/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-bugsquad/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-dev/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-dev/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-discuss/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-discuss/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-i18n/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-i18n/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-marketing/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-marketing/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-sysadm/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-sysadm/$1 [R=301,L]
+RewriteRule ^/pipermail/mageia-webteam/?(.*)$ https://archives.mageia.org/zarb-ml/mageia-webteam/$1 [R=301,L]
+RewriteRule ^/pipermail https://archives.mageia.org/zarb-ml/ [R=301,L]
diff --git a/deployment/wikis/manifests/init.pp b/deployment/wikis/manifests/init.pp
new file mode 100644
index 00000000..c34b06d5
--- /dev/null
+++ b/deployment/wikis/manifests/init.pp
@@ -0,0 +1,30 @@
+class wikis {
+ $wikis_root = '/srv/wiki'
+ $wikis_templates = '/srv/wiki-templates'
+ class { 'mediawiki::config':
+ pgsql_password => extlookup('mediawiki_pgsql','x'),
+ secretkey => extlookup('mediawiki_secretkey','x'),
+ ldap_password => extlookup('mediawiki_ldap','x'),
+ root => $wikis_root,
+ vhost => false,
+ }
+
+ git::snapshot { $wikis_templates:
+ source => "git://git.${::domain}/web/templates/mediawiki"
+ }
+
+ $wiki_languages = [ 'en','de', 'fr' ]
+ mediawiki::instance { $wiki_languages:
+ title => 'Mageia wiki',
+ wiki_settings => template('wikis/wiki_settings'),
+ skinsdir => "${wikis_templates}/skins",
+ }
+
+ apache::vhost::redirect_ssl { "wiki.${::domain}": }
+
+ apache::vhost::base { "ssl_wiki.${::domain}":
+ use_ssl => true,
+ vhost => "wiki.${::domain}",
+ content => template('wikis/wiki_vhost.conf'),
+ }
+}
diff --git a/deployment/wikis/templates/wiki_settings b/deployment/wikis/templates/wiki_settings
new file mode 100644
index 00000000..ec6e647d
--- /dev/null
+++ b/deployment/wikis/templates/wiki_settings
@@ -0,0 +1,46 @@
+$wgEnableUploads = true;
+$wgFileExtensions = array('png','gif','jpg','jpeg','pdf','tiff','ps','odt','ods','odp','odg', 'ogm', 'txt');
+
+$wgGroupPermissions['*']['edit'] = false;
+$wgGroupPermissions['*']['createtalk'] = false;
+$wgGroupPermissions['*']['createpage'] = false;
+$wgGroupPermissions['*']['writeapi'] = false;
+$wgGroupPermissions['*']['createaccount'] = false;
+$wgGroupPermissions['user']['edit'] = true;
+$wgGroupPermissions['*']['autocreateaccount'] = true;
+
+$wgScriptPath = "/mw-$wgLanguageCode";
+$wgArticlePath = "/$wgLanguageCode/$1";
+$wgUsePathInfo = true;
+$wgStylePath = "$wgScriptPath/skins";
+$wgStyleDirectory = '<%= @wikis_templates %>/skins';
+$wgLogo = "";
+$wgDefaultSkin = 'vector';
+$wgFavicon = '/mw-en/skins/cavendish/favicon.png';
+$wgRightsIcon = 'https://i.creativecommons.org/l/by-sa/3.0/88x31.png';
+
+define('NS_FEATURE', 100);
+define('NS_FEATURE_TALK', 101);
+$wgExtraNamespaces[NS_FEATURE] = 'Feature';
+$wgExtraNamespaces[NS_FEATURE_TALK] = 'Feature_Talk';
+$wgContentNamespaces[] = NS_FEATURE;
+$wgNamespacesToBeSearchedDefault[NS_FEATURE] = true;
+
+define('NS_QA_PROCEDURE', 102);
+define('NS_QA_PROCEDURE_TALK', 103);
+$wgExtraNamespaces[NS_QA_PROCEDURE] = 'QA_procedure';
+$wgExtraNamespaces[NS_QA_PROCEDURE_TALK] = 'QA_procedure_Talk';
+$wgContentNamespaces[] = NS_QA_PROCEDURE;
+$wgNamespacesToBeSearchedDefault[NS_QA_PROCEDURE] = true;
+
+wfLoadExtension('Nuke');
+wfLoadExtension('SpamBlacklist');
+wfLoadExtension('TitleBlacklist');
+$wgTitleBlacklistSources = array(
+ array(
+ 'type' => 'localpage',
+ 'src' => 'MediaWiki:Titleblacklist'
+ )
+);
+
+# $wgReadOnly = 'This wiki is currently read-only';
diff --git a/deployment/wikis/templates/wiki_vhost.conf b/deployment/wikis/templates/wiki_vhost.conf
new file mode 100644
index 00000000..4e1355bc
--- /dev/null
+++ b/deployment/wikis/templates/wiki_vhost.conf
@@ -0,0 +1,19 @@
+<Directory <%= @wikis_root %>>
+ Options +FollowSymLinks
+</Directory>
+
+RewriteEngine On
+RewriteRule ^/?$ /en/ [R]
+
+Alias /robots.txt <%= @wikis_root %>/robots.txt
+
+<%- for lang in wiki_languages -%>
+
+<Directory <%= @wikis_root %>/<%= lang %>/images>
+ SetHandler default-handler
+</Directory>
+
+Alias /<%= lang %> <%= @wikis_root %>/<%= lang %>/index.php
+Alias /mw-<%= lang %> <%= @wikis_root %>/<%= lang %>
+
+<%- end -%>
diff --git a/external/.gitignore b/external/.gitignore
new file mode 100644
index 00000000..f6289ea5
--- /dev/null
+++ b/external/.gitignore
@@ -0,0 +1 @@
+hiera
diff --git a/modules/concat/CHANGELOG b/external/concat/CHANGELOG
index 2f8aecc3..2f8aecc3 100644
--- a/modules/concat/CHANGELOG
+++ b/external/concat/CHANGELOG
diff --git a/modules/concat/README.markdown b/external/concat/README.markdown
index 3f325097..3f325097 100644
--- a/modules/concat/README.markdown
+++ b/external/concat/README.markdown
diff --git a/modules/concat/files/concatfragments.sh b/external/concat/files/concatfragments.sh
index b486047d..b486047d 100755
--- a/modules/concat/files/concatfragments.sh
+++ b/external/concat/files/concatfragments.sh
diff --git a/modules/concat/files/null/.gitignore b/external/concat/files/null/.gitignore
index e69de29b..e69de29b 100644
--- a/modules/concat/files/null/.gitignore
+++ b/external/concat/files/null/.gitignore
diff --git a/modules/concat/manifests/fragment.pp b/external/concat/manifests/fragment.pp
index 890d43a4..ba546766 100644
--- a/modules/concat/manifests/fragment.pp
+++ b/external/concat/manifests/fragment.pp
@@ -1,5 +1,5 @@
# Puts a file fragment into a directory previous setup using concat
-#
+#
# OPTIONS:
# - target The file that these fragments belong to
# - content If present puts the content into the file
@@ -31,17 +31,17 @@ define concat::fragment($target, $content='', $source='', $order=10, $ensure = "
}
}
}
- default: { File{ source => $source } }
+ default: { File{ source => $source } }
}
}
default: { File{ content => $content } }
}
file{"${fragdir}/fragments/${order}_${safe_name}":
+ ensure => $ensure,
mode => $mode,
owner => $owner,
group => $group,
- ensure => $ensure,
backup => $backup,
alias => "concat_fragment_${name}",
notify => Exec["concat_${target}"]
diff --git a/modules/concat/manifests/init.pp b/external/concat/manifests/init.pp
index b94411c2..c2039349 100644
--- a/modules/concat/manifests/init.pp
+++ b/external/concat/manifests/init.pp
@@ -1,13 +1,13 @@
# A system to construct files using fragments from other files or templates.
#
-# This requires at least puppet 0.25 to work correctly as we use some
+# This requires at least puppet 0.25 to work correctly as we use some
# enhancements in recursive directory management and regular expressions
# to do the work here.
#
# USAGE:
# The basic use case is as below:
#
-# concat{"/etc/named.conf":
+# concat{"/etc/named.conf":
# notify => Service["named"]
# }
#
@@ -17,7 +17,7 @@
# content => template("named_conf_zone.erb")
# }
#
-# # add a fragment not managed by puppet so local users
+# # add a fragment not managed by puppet so local users
# # can add content to managed file
# concat::fragment{"foo.com_user_config":
# target => "/etc/named.conf",
@@ -25,7 +25,7 @@
# ensure => "/etc/named.conf.local"
# }
#
-# This will use the template named_conf_zone.erb to build a single
+# This will use the template named_conf_zone.erb to build a single
# bit of config up and put it into the fragments dir. The file
# will have an number prefix of 10, you can use the order option
# to control that and thus control the order the final file gets built in.
@@ -39,16 +39,16 @@
# There's some regular expression magic to figure out the puppet version but
# if you're on an older 0.24 version just set $puppetversion = 24
#
-# Before you can use any of the concat features you should include the
+# Before you can use any of the concat features you should include the
# class concat::setup somewhere on your node first.
#
# DETAIL:
# We use a helper shell script called concatfragments.sh that gets placed
-# in /usr/local/bin to do the concatenation. While this might seem more
+# in /usr/local/bin to do the concatenation. While this might seem more
# complex than some of the one-liner alternatives you might find on the net
-# we do a lot of error checking and safety checks in the script to avoid
+# we do a lot of error checking and safety checks in the script to avoid
# problems that might be caused by complex escaping errors etc.
-#
+#
# LICENSE:
# Apache Version 2
#
@@ -56,13 +56,13 @@
# http://github.com/ripienaar/puppet-concat/
#
# CONTACT:
-# R.I.Pienaar <rip@devco.net>
+# R.I.Pienaar <rip@devco.net>
# Volcane on freenode
# @ripienaar on twitter
# www.devco.net
-# Sets up so that you can use fragments to build a final config file,
+# Sets up so that you can use fragments to build a final config file,
#
# OPTIONS:
# - mode The mode of the final file
@@ -78,15 +78,15 @@
# - Creates fragment directories if it didn't exist already
# - Executes the concatfragments.sh script to build the final file, this script will create
# directory/fragments.concat. Execution happens only when:
-# * The directory changes
-# * fragments.concat != final destination, this means rebuilds will happen whenever
+# * The directory changes
+# * fragments.concat != final destination, this means rebuilds will happen whenever
# someone changes or deletes the final file. Checking is done using /usr/bin/cmp.
# * The Exec gets notified by something else - like the concat::fragment define
# - Copies the file over to the final destination using a file resource
#
# ALIASES:
# - The exec can notified using Exec["concat_/path/to/file"] or Exec["concat_/path/to/directory"]
-# - The final file can be referened as File["/path/to/file"] or File["concat_/path/to/file"]
+# - The final file can be referenced as File["/path/to/file"] or File["concat_/path/to/file"]
define concat($mode = 0644, $owner = "root", $group = "root", $warn = "false", $force = "false", $backup = "puppet") {
$safe_name = regsubst($name, '/', '_', 'G')
$concatdir = $concat::setup::concatdir
@@ -123,7 +123,7 @@ define concat($mode = 0644, $owner = "root", $group = "root", $warn = "false", $
file{$fragdir:
ensure => directory;
- "${fragdir}/fragments":
+ "${fragdir}/fragments":
ensure => directory,
recurse => true,
purge => true,
@@ -135,19 +135,19 @@ define concat($mode = 0644, $owner = "root", $group = "root", $warn = "false", $
},
notify => Exec["concat_${name}"];
- "${fragdir}/fragments.concat":
+ "${fragdir}/fragments.concat":
ensure => present;
- "${fragdir}/${concat_name}":
+ "${fragdir}/${concat_name}":
ensure => present;
- $name:
+ $name:
+ ensure => present,
source => "${fragdir}/${concat_name}",
owner => $owner,
group => $group,
checksum => md5,
mode => $mode,
- ensure => present,
alias => "concat_${name}";
}
diff --git a/modules/concat/manifests/setup.pp b/external/concat/manifests/setup.pp
index 9676fb66..68beb901 100644
--- a/modules/concat/manifests/setup.pp
+++ b/external/concat/manifests/setup.pp
@@ -7,29 +7,29 @@
# $puppetversion should be either 24 or 25 to enable a 24 compatible
# mode, in 24 mode you might see phantom notifies this is a side effect
# of the method we use to clear the fragments directory.
-#
+#
# The regular expression below will try to figure out your puppet version
# but this code will only work in 0.24.8 and newer.
#
# It also copies out the concatfragments.sh file to /usr/local/bin
class concat::setup {
- $concatdir = "/var/lib/puppet/concat"
+ $concatdir = '/var/lib/puppet/concat'
$majorversion = regsubst($puppetversion, '^[0-9]+[.]([0-9]+)[.][0-9]+$', '\1')
- file{"/usr/local/bin/concatfragments.sh":
+ file{'/usr/local/bin/concatfragments.sh':
owner => root,
group => root,
- mode => 755,
+ mode => '0755',
source => $majorversion ? {
24 => "puppet:///concat/concatfragments.sh",
default => "puppet:///modules/concat/concatfragments.sh"
};
- $concatdir:
+ $concatdir:
ensure => directory,
owner => root,
group => root,
- mode => 755;
+ mode => '0755';
}
}
diff --git a/external/sshkeys/COPYING b/external/sshkeys/COPYING
new file mode 100644
index 00000000..94a9ed02
--- /dev/null
+++ b/external/sshkeys/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/external/sshkeys/README.rst b/external/sshkeys/README.rst
new file mode 100644
index 00000000..73b136d6
--- /dev/null
+++ b/external/sshkeys/README.rst
@@ -0,0 +1,73 @@
+sshkeys puppet module
+=====================
+
+The sshkeys puppet module allows the creation and installation of SSH keys.
+
+
+How it works
+============
+
+With the sshkeys module, you define a key that will be generated on the
+puppet master. You then declare where that key should be installed: as a
+client key pair, and in a user's authorized_keys file.
+
+When a key has not been generated yet, you may need to run puppet twice:
+the key is generated on the first run and installed on the second.
+
+
+Usage
+=====
+
+In order to tell which node will generate the keys, you need to include
+the `sshkeys::keymaster` class on the puppet master node::
+
+ include sshkeys::keymaster
+
+Before installing the key, we need to create it. This is done with the
+`create_key` resource, on the puppet master node. We can create the key
+`key1`::
+
+ sshkeys::create_key{key1: }
+
+If we want to install the `key1` key pair for user `user1`, we can use
+the `set_client_key_pair` resource::
+
+ sshkeys::set_client_key_pair{'key1-for-user1':
+ keyname => 'key1',
+ home => '/home/user1',
+ user => 'user1',
+ }
+
+The `key1` private and public keys should now be installed for user
+`user1` on the node on which we created this resource.
+
+If we want to allow the key `key1` to connect to the `user2` account,
+we use the `set_authorized_keys` resource::
+
+ sshkeys::set_authorized_keys{'key1-to-user2':
+ keyname => 'key1',
+ user => 'user2',
+ home => '/home/user2',
+ }
+
+Now, `user1` should have the `key1` key pair installed in their account,
+and be able to log in to the `user2` account.
+
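+Putting it all together, here is a minimal end-to-end sketch (for
+illustration only, not part of the module itself; the node names below are
+hypothetical, while the key and user names follow the examples above)::
+
+    # on the puppet master: store keys and generate 'key1'
+    node 'puppetmaster' {
+        include sshkeys::keymaster
+        sshkeys::create_key { 'key1': }
+    }
+
+    # on the client: install the key1 key pair for user1
+    node 'client' {
+        sshkeys::set_client_key_pair { 'key1-for-user1':
+            keyname => 'key1',
+            user    => 'user1',
+            home    => '/home/user1',
+        }
+    }
+
+    # on the server: allow key1 to log in to the user2 account
+    node 'server' {
+        sshkeys::set_authorized_keys { 'key1-to-user2':
+            keyname => 'key1',
+            user    => 'user2',
+            home    => '/home/user2',
+        }
+    }
+
+As explained above, the key pair only exists after the puppet master's
+first run, so the client and server nodes may pick it up on a later run.
+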
+
+License
+=======
+
+This module is released under the GNU General Public License version 3:
+http://www.gnu.org/licenses/gpl-3.0.txt
+
+
+Authors
+=======
+
+The sshkeys module is based on the ssh::auth module written by
+Andrew E. Schulman <andrex at alumni dot utexas dot net>.
+
+The original ssh::auth module is available at this URL:
+http://projects.puppetlabs.com/projects/1/wiki/Module_Ssh_Auth_Patterns
+
diff --git a/external/sshkeys/manifests/create_key.pp b/external/sshkeys/manifests/create_key.pp
new file mode 100644
index 00000000..23a74e8c
--- /dev/null
+++ b/external/sshkeys/manifests/create_key.pp
@@ -0,0 +1,29 @@
+define sshkeys::create_key (
+ $ensure = "present",
+ $filename = "",
+ $force = false,
+ $keytype = "rsa",
+ $length = 2048,
+ $maxdays = "",
+ $mindate = ""
+) {
+ sshkeys::namecheck { "${title}-title": parm => "title", value => $title }
+
+ # apply defaults
+ $_filename = $filename ? { "" => "id_${keytype}", default => $filename }
+ $_length = $keytype ? { "rsa" => $length, "dsa" => 1024 }
+
+ sshkeys::namecheck { "${title}-filename":
+ parm => "filename",
+ value => $_filename,
+ }
+
+ @sshkeys::setup_key_master { $title:
+ ensure => $ensure,
+ force => $force,
+ keytype => $keytype,
+ length => $_length,
+ maxdays => $maxdays,
+ mindate => $mindate,
+ }
+}
diff --git a/external/sshkeys/manifests/init.pp b/external/sshkeys/manifests/init.pp
new file mode 100644
index 00000000..4ab92bff
--- /dev/null
+++ b/external/sshkeys/manifests/init.pp
@@ -0,0 +1,2 @@
+class sshkeys {
+}
diff --git a/external/sshkeys/manifests/keymaster.pp b/external/sshkeys/manifests/keymaster.pp
new file mode 100644
index 00000000..3fc2a1fb
--- /dev/null
+++ b/external/sshkeys/manifests/keymaster.pp
@@ -0,0 +1,13 @@
+# Keymaster host:
+# Create key storage; create, regenerate, and remove key pairs
+class sshkeys::keymaster {
+ include sshkeys::var
+ file { $sshkeys::var::keymaster_storage:
+ ensure => directory,
+ owner => puppet,
+ group => puppet,
+ mode => '0644',
+ }
+ # Realize all virtual master keys
+ Sshkeys::Setup_key_master <| |>
+}
diff --git a/external/sshkeys/manifests/namecheck.pp b/external/sshkeys/manifests/namecheck.pp
new file mode 100644
index 00000000..52c6e51f
--- /dev/null
+++ b/external/sshkeys/manifests/namecheck.pp
@@ -0,0 +1,12 @@
+# Check a name (e.g. key title or filename) for the allowed form
+define sshkeys::namecheck (
+ $parm,
+ $value
+) {
+ if $value !~ /^[A-Za-z0-9]/ {
+ fail("sshkeys::key: ${parm} '${value}' not allowed: must begin with a letter or digit")
+ }
+ if $value !~ /^[A-Za-z0-9_.:@-]+$/ {
+ fail("sshkeys::key: ${parm} '${value}' not allowed: may only contain the characters A-Za-z0-9_.:@-")
+ }
+}
diff --git a/external/sshkeys/manifests/set_authorized_keys.pp b/external/sshkeys/manifests/set_authorized_keys.pp
new file mode 100644
index 00000000..894f8069
--- /dev/null
+++ b/external/sshkeys/manifests/set_authorized_keys.pp
@@ -0,0 +1,58 @@
+# Install a public key into a server user's authorized_keys(5) file.
+define sshkeys::set_authorized_keys (
+ $keyname = '',
+ $ensure = 'present',
+ $group = '',
+ $home = '',
+ $options = '',
+ $user
+) {
+ include sshkeys::var
+ $_keyname = $keyname ? { '' => $title, default => $keyname }
+ $_home = $home ? { "" => "/home/${user}", default => $home }
+ # on the keymaster:
+ $key_src_dir = "${sshkeys::var::keymaster_storage}/${_keyname}"
+ $key_src_file = "${key_src_dir}/key.pub"
+ # on the server:
+ $key_tgt_file = "${_home}/.ssh/authorized_keys"
+
+ File {
+ owner => $user,
+ group => $group ? { "" => $user, default => $group },
+ require => User[$user],
+ mode => '0600',
+ }
+ Ssh_authorized_key {
+ user => $user,
+ target => $key_tgt_file,
+ }
+
+ if $ensure == "absent" {
+ ssh_authorized_key { $title:
+ ensure => "absent",
+ }
+ } else {
+ $key_src_content = file($key_src_file, "/dev/null")
+ if ! $key_src_content {
+ notify {
+ "Public key file ${key_src_file} for key ${_keyname} not found on keymaster; skipping ensure => present":
+ }
+ } else {
+ if $ensure == "present" and $key_src_content !~ /^(ssh-...) ([^ ]*)/ {
+ err("Can't parse public key file ${key_src_file}")
+ notify {
+ "Can't parse public key file ${key_src_file} for key ${_keyname} on the keymaster: skipping ensure => ${ensure}":
+ }
+ } else {
+ $keytype = $1
+ $modulus = $2
+ ssh_authorized_key { $title:
+ ensure => "present",
+ type => $keytype,
+ key => $modulus,
+ options => $options ? { "" => undef, default => $options },
+ }
+ }
+ }
+ }
+}
diff --git a/external/sshkeys/manifests/set_client_key_pair.pp b/external/sshkeys/manifests/set_client_key_pair.pp
new file mode 100644
index 00000000..27780eb4
--- /dev/null
+++ b/external/sshkeys/manifests/set_client_key_pair.pp
@@ -0,0 +1,39 @@
+# Install a key pair into a user's account.
+define sshkeys::set_client_key_pair (
+ $keyname = '',
+ $ensure = 'present',
+ $filename = 'id_rsa',
+ $group = '',
+ $home = '',
+ $user
+) {
+ include sshkeys::var
+ File {
+ owner => $user,
+ group => $group ? { '' => $user, default => $group },
+ mode => '0600',
+ require => [ User[$user], File[$home]],
+ }
+
+ $_keyname = $keyname ? { '' => $title, default => $keyname }
+ $_home = $home ? { '' => "/home/${user}", default => $home }
+ $key_src_file = "${sshkeys::var::keymaster_storage}/${_keyname}/key" # on the keymaster
+ $key_tgt_file = "${_home}/.ssh/${filename}" # on the client
+
+ $key_src_content_pub = file("${key_src_file}.pub", "/dev/null")
+ if $ensure == "absent" or $key_src_content_pub =~ /^(ssh-...) ([^ ]+)/ {
+ $keytype = $1
+ $modulus = $2
+ file {
+ $key_tgt_file:
+ ensure => $ensure,
+ content => file($key_src_file, "/dev/null");
+ "${key_tgt_file}.pub":
+ ensure => $ensure,
+ content => "${keytype} ${modulus} ${title}\n",
+ mode => '0644';
+ }
+ } else {
+ notify { "Private key file ${key_src_file} for key ${title} not found on keymaster; skipping ensure => present": }
+ }
+}
diff --git a/external/sshkeys/manifests/setup_key_master.pp b/external/sshkeys/manifests/setup_key_master.pp
new file mode 100644
index 00000000..9dcd5f86
--- /dev/null
+++ b/external/sshkeys/manifests/setup_key_master.pp
@@ -0,0 +1,87 @@
+# Create/regenerate/remove a key pair on the keymaster.
+# This definition is private, i.e. it is not intended to be called
+# directly by users. sshkeys::create_key calls it to create virtual
+# keys, which are realized in sshkeys::keymaster.
+define sshkeys::setup_key_master (
+ $ensure,
+ $force,
+ $keytype,
+ $length,
+ $maxdays,
+ $mindate
+) {
+ include sshkeys::var
+ Exec { path => "/usr/bin:/usr/sbin:/bin:/sbin" }
+ File {
+ owner => puppet,
+ group => puppet,
+ mode => '0600',
+ }
+
+ $keydir = "${sshkeys::var::keymaster_storage}/${title}"
+ $keyfile = "${keydir}/key"
+
+ file {
+ "${keydir}":
+ ensure => directory,
+ mode => '0644';
+ "${keyfile}":
+ ensure => $ensure;
+ "${keyfile}.pub":
+ ensure => $ensure,
+ mode => '0644';
+ }
+
+ if $ensure == "present" {
+
+ # Remove the existing key pair, if
+ # * $force is true, or
+ # * $maxdays or $mindate criteria aren't met, or
+ # * $keytype or $length have changed
+
+ $keycontent = file("${keyfile}.pub", "/dev/null")
+ if $keycontent {
+
+ if $force {
+ $reason = "force=true"
+ }
+ if !$reason and $mindate and
+ generate("/usr/bin/find", $keyfile, "!", "-newermt", "${mindate}") {
+ $reason = "created before ${mindate}"
+ }
+ if !$reason and $maxdays and
+ generate("/usr/bin/find", $keyfile, "-mtime", "+${maxdays}") {
+ $reason = "older than ${maxdays} days"
+ }
+ if !$reason and $keycontent =~ /^ssh-... [^ ]+ (...) (\d+)$/ {
+ if $keytype != $1 {
+ $reason = "keytype changed: ${1} -> ${keytype}"
+ } else {
+ if $length != $2 {
+ $reason = "length changed: ${2} -> ${length}"
+ }
+ }
+ }
+ if $reason {
+ exec { "Revoke previous key ${title}: ${reason}":
+ command => "rm ${keyfile} ${keyfile}.pub",
+ before => Exec["Create key ${title}: ${keytype}, ${length} bits"],
+ }
+ }
+ }
+
+ # Create the key pair.
+ # We "repurpose" the comment field in public keys on the keymaster to
+ # store data about the key, i.e. $keytype and $length. This avoids
+ # having to rerun ssh-keygen -l on every key at every run to determine
+ # the key length.
+ exec { "Create key ${title}: ${keytype}, ${length} bits":
+ command => "ssh-keygen -t ${keytype} -b ${length} -f ${keyfile} -C \"${keytype} ${length}\" -N \"\"",
+ user => "puppet",
+ group => "puppet",
+ creates => $keyfile,
+ require => File[$keydir],
+ before => File[$keyfile, "${keyfile}.pub"],
+ }
+ }
+}
diff --git a/external/sshkeys/manifests/var.pp b/external/sshkeys/manifests/var.pp
new file mode 100644
index 00000000..13f6992e
--- /dev/null
+++ b/external/sshkeys/manifests/var.pp
@@ -0,0 +1,4 @@
+class sshkeys::var(
+ $keymaster_storage = '/var/lib/puppet-sshkeys'
+) {
+}
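sshkeys::var only carries the storage location, so the other manifests include it and read $sshkeys::var::keymaster_storage. A site that wanted the keys elsewhere could override the parameter with a resource-like declaration; the path below is an assumption, not something this change sets:

    # Hypothetical override of the default /var/lib/puppet-sshkeys.
    class { 'sshkeys::var':
      keymaster_storage => '/srv/puppet-sshkeys',
    }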
diff --git a/manifests/common.pp b/manifests/common.pp
deleted file mode 100644
index 8f839c79..00000000
--- a/manifests/common.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-# to not repeat the setting everywhere
-Exec { path => "/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin/" }
-
-
-class base_packages {
- # packages installed everywhere
- # asked by misc : screen, vim-enhanced, htop, lsof, tcpdump, less
- # asked by nanar : rsync
- $package_list = ['screen', 'vim-enhanced', 'htop', 'lsof', 'tcpdump', 'rsync', 'less']
-
- package { $package_list:
- ensure => installed;
- }
-}
-
-class default_ssh_root_key {
- ssh_authorized_key { "ssh key misc":
- type => "ssh-rsa",
- key => "AAAAB3NzaC1yc2EAAAABIwAAAgEA4fpjTvcL09Yzv7iV40TPjiXGHOOS5MldSh5ezSk7AMLVjAAloiidl8O3xwlxwUnjUx5zv1+RlbV76sdiSD32lBht72OZPg0UqQIB8nHeVJBdJ8YpnQ3LynNPPYJ65dvdr0uE2KRlN/1emi2N+O+f2apwc1YiL8nySEK/zLvCKO5xj16bIVuGFilDdp75X/t3C/PDsZU+CUyWL5Ly3T2+ljGc+nEAK9P0PNnvl9bRK9dqu457xjca8nXwWVI1fd6Jnt1jISFdQXy6/+9326Z6aAxvWKCrCvmdg+tAUN3fEj0WXZEPZQ1Ot0tBxKYl+xhV1Jv/ILLbInT0JZkSEKNBnJn4G7O4v+syoMqA7myHre73oGn/ocRWGJskIM33aXrJkZkJ4LkF1GLJPFI4y7bzj024sPAVvBwDrV7inwsOy0DSQ5tCbfX25TTXbK+WMXzz0pBbSi6mPgjtzlSYsLZrTa7ARYhggDG2miuOAFrup8vP7/aH2yZ+hZuF70FsMh4lf/eXDGwfypyfYrjfVSkFfY0ZU294ouTBn3HtHmgFu82vOMvLNtI9UyERluCBpLBXOT8xgM97aWFeUJEKrVxkGIiNwVNylMEYp8r16njv810NdTLA3jZu9CLueVvME6GLsGve5idtGmaYuGYNRnSRx3PQuJZl1Nj7uQHsgAaWdiM=",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key dams":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAACBAM+23oU9t9ghBUX2uG3hCeUwVpGoPIdnV3a8eiHf7HgUS0JqyZ/uv4LbVHyFRInt7qV4ZcChmzk6NmLfG3QnfR/3NdPJX4WxPz78BuoaI6t+fA2SdHLEY+Yz3kvI04gHuIUQ+X8e2j5wledg4r4qfkJZPCRhX1pmZFoimsCU99PJAAAAFQCOYJBWQIbV6bwtxIhj1SBR7zgA+wAAAIAA32DgUWrQUMZnXqNzo+AL/xbprE3UxEj8O2nICepTVVJboVPVek37VlKnjChl6mjya3+FkhHqfOW1UUi/L/W6C5sNsn/Ep/TxGjOLAOgG5RaXHS5RQ/Ttfs4EyPllbRmkRwCGkgx15wDo5kKuLbHCw8pLJKrxzBO4Sf6i3n4KswAAAIEAzR/EkWv6sVB6ehEO1G6L21VQQLQ7zHoJQeemuSVD6Yq8z4zcNhdrdMWZfQSiZe554G1l3lQdDmF+5kJ+1BFxXJGnZXb5/hdEBfiYeeZEQO3FvyPpnyWC73UxFksJNzFTGM8IExZ9aV4/JqdisZWMa7CRIDijeq2nQgytNcDCqRs=",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key blino":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAAEBAIuMeFTbzLwcxlKfqSUDmrh2lFVOZyjotQsUm4EGZIh8killmHCBmB8uYvh3ncwvcC8ZwfRU9O8jX6asKJckFIZ37cdHaTQR7fh5ozG4ab652dPND2yKCg1LCwf2x0/Ef1VtyF7jpTG/L9ZaGpeXQ8rykoH4rRnfSdYF0xT7ua9F/J/9ss5FtzQYbQLFMzV3SlXRWp5lzbF4lCyoTyijc8cDrTKeDTu/D5cTpqYxfKUQguGGx0hqUjE3br8r4MPOECqpxAk3gkDr+9mIGftKz07T9aMnHVNNI+hDnjACbbZcG4hZnP99wKmWQ4Pqq7Bten6Z/Hi10E5RiYFyIK8hrR0AAAAVALwhZE/KgdoAM7OV5zxOfOvKrLwJAAABADRU1t5V2XhG07IKgu4PGp9Zgu3v9UkqqPU7F+C8mp2wUw7yTgKaIety8ijShv0qQkF+3YNGj9UnNYeSDWJ62mhMfP6QNQd3RAcbEggPYDjIexoLus44fPGOHtyzvwgSHAGkhBAG9U6GrxTOCUE4ZcZ82r2AdXGzngqnxgvihs9X/thTZu6MuPATueTL6yKShPsFRamgkWmqjJTKP4ggCPHK3FqCiLkrMNbwZ7WECEuodBGou6yCTTGkUXIxGv3/FU96u9FMhqtswClZEElxu+Gajw8gNF8kLnGUSlbubHocfhIAraxfc6s31T+b3Kq6a2JeLhODdgERFM2z/yMbsMMAAAEACqUvqpak3+am+Xz1KOOgTnprpjs8y9cbBU+BzkyhISGNINVSv9fEVEuOIwxW8EZ1gHLORYwAx9onk3WXUKX48DHlMHLwgpahQJnMsuUsJn2QknTiGuML+9MzNrE4ZEoipTEL11UayVcCFYGEB1X0IghX+XmLTGhji6DUBUmepzWN3FXvYMJH50sFLjCok9JszJCgzh8jILp37n8HXgG/FPG5soGG095lHand41s9qdeq4pGchKGDOEia9KAPL6Px5o48dQxxJkMoI8gljFcwVphc0QMmQSqN1paZgnzzwkGp4smuWNxZ+kWdJOceyrlULOsgi9LEkItHZyZtDzufmg==",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key nanar":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAACBAMLWdzwlo5b9yr1IR5XbbYpESJQpTiZH4gTzVaUIdtbU6S2R41Enn/xZSLgbWcCX79WEcQlfKDS3BcrjWybpwCQD+i1yIA4wmYaQ3KwYBaIsTe5UtPF41hs8Jb8MhTPe9z9hNi5E1R6QQ2wPu3vDAi4zTZ4415ctr6xtW+IDYNOLAAAAFQC9ku78wdBEZKurZj5hJmhU0GSOjwAAAIAeGorkIHQ0Q8iAzKmFQA5PcuuD6X7vaflerTM3srnJOdfMa/Ac7oLV+n5oWj0BhuV09w8dB678rRxl/yVLOgHR9absSicKDkYMZlLU7K1oNFwM4taCdZZ1iyEpJVzzUOVCo8LqK6OZJhbFI0zbarq4YM/1Sr+MIiGv5FK7SCpheAAAAIEAwP95amGY7BgPzyDDFeOkeBPJQA/l7w0dEfG8A+2xui679mGJibhlXiUWqE0NqeDkD17Oc+eOV/ou5DA62tMDSus119JjqYhDEOs0l5dvA6aTzObZDhiUDQbNoS9AIPxgsqdc2vBRxonHUm/7maV8jvWVSy1429CNhnyWKuTe2qU=",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key dmorgan":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAACBAOsCjs1EionxMBkyCOXqhDlGUvT/ZORSjqrEhZrro2oPdnMvj3A7IHf1R8+CVVrJlnOHFEwfdC3SB5LYhmUi/XaBq1eqUiVFQLFURrYlrWFh1xSqGUFvvUfMFXOZCn4f9eJYDVaRtWBL7IZCijwZS6bbE0FLW0f6pPzhHtMkSRW/AAAAFQCyg7km5gCZ6W4iRKqr87Wy+LajMwAAAIBZ3+oM/hQ9MS2QkMa8wZk9taEO9PJQHXO3IHyo3wMUj7DYnwgyHQIIeTgPwrE+z0TkM3K3pQlf8xQmsQo7T2kQHCLFZnueEoNB+y+LySLtLDoptYlkqJ9Db0kJti+W8EFc8I+s87HuVdkXpqid222zmRfzYufjbosb8abtGUODXAAAAIBWlhkUEZsbQXkimAnfelHb7EYFnwUgHPSzrzB4xhybma9ofOfM3alZubx9acv94OrAnlvSTfgETKyT0Q+JYvtxZr9srcueSogFq8D8tQoCFJIqpEvjTxjSlg1Fws0zHBH7uO7Kp8zhnuTalhQC1XorFPJD3z40fe62fO6a02EUCQ==",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key coling":
- type => "ssh-rsa",
- key => "AAAAB3NzaC1yc2EAAAABIwAAAIEAr04pPIWNWxihA2UxlN+I6jubWofbRMlIhvqsADJjEWSr5YBDpEpWEsdtCjBrzbrrYfpGWwpeSL1mbKhmO8+pxygyzWBVcNHEcyp8DzfwT0b2tGiCox+owkyjtyOoogTu8tLvPSvMOhDgfP4WCcMuBZwRVhMR1NKJyk73T9W8qtM=",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key severine":
- type => "ssh-rsa",
- key => "AAAAB3NzaC1yc2EAAAABIwAAAgEAt9VHEteitx7bR2bg6KPfqkxgnTl/2QsqAZipqvI2axdi+gDDov+JIQP2q7HE7ZgUhlXKqHz6O0Bs894vTYtuT9hu6DaeFwuMELmH+M80CoCbJROvuQMjW7AeSXuE4llk464ubZmhyPzVHMUeKymtJxiMu5AxIV7KGoVO+dSgEMqJ66IeXLwho5uVJ/HELizY4LDm2yzbr4/gXAkYEI151PlKDMR/4FVPsGGp/vFZqIq68C4bSGeFv4e3OE9mBJQQukN1zdm0q0ssb50dEk0QU1ZWoChTip+b8FpuouQbXME8KDaNlCN9CHZwD8IfavY+urZBq5ofluihUewqzjNKPoUA6dj3MzyFZ5vQEYSwwDrSrKLXr92NrDb8QbSCLb7IqsbmXFhOa0JY4BGmqRz2r+ifinK4maZs73q1f15yj/dqBZfCCiKJsbs5GUBN2mqp2kijdpz5gpVTbBIZ3Smio0gF++VjZqVpc3e86/jJ4RwFh6I8fdalQxTIlBTkTk7TkHt0UN+7bSeV7MhpTx2FkKl2hqLCNs50c0KHomFtTrhwRi2czv/cJc+LLPPnjMFPSFv4kP8JTgSTxndPkDb6xMXIwcnk3JsPE45N6PM3zC9FoU2sY8x9U9ZZf1xtI08A+N68xGvSTxxjXJTnWU2ySCcYL3wStAewsLAJxE3O7ys=",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key boklm":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAACBAIGfoferrHXi7m8Hw3wY3HzIvWzlBKRu4aUpOjFgFTw+aPiS842F8B2bqjzUyLVAv13zHB5QjVeAB0YQ1TvMQbew+7CRAgAVWrY/ckMJxSdNk6eKnxlnLA295xBnyc+jdMhdTKisywtlkLP6Au+2eA/sDKELO8tiIQzSUithppU/AAAAFQCP/IlvpJjhxQwgA4UW1Mg7W3MPVwAAAIAc8BA7W9qDaA8/sQiOu6sSueEVnf7QmJzTJuT0ZJ9HDSB39+fQrwjPZqxiTpAfSboBTC0KiuG9ncCZyh6fAmn2i9WSZ6HYkoLBjHU3nu3u18qlT8LqwajUjgp15jgUKWB8OxvO1dPNaLEsvP1BKPTfDoPNPeUeQmb3WaX9S+pVGwAAAIA63gRktdobLeeuRFAfPdQQ7Imi1GwrfKa2QUgowksDxwgBBo796HN41+yF0W2AOZ2lx25KQRF0Wgc5Abm/TV8u3WbzosYbZgUBiGDqyVhIPU/xF+yPEHPYx3G3nwjEZAaxxf+LaeZkY1Yp15O6NAZAzdyV00iG/tO/ciWBPCMeJA==",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key buchan":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAACBALpYDQtkZcfXdOILynCGa7IAbW4+etmzpIMjw6BfvZOfLT6UPfDwajhDBMBNSbgigxkxxEdsa0/UMIE3Yrpr8YivhbL79sFw2N/FeWCs3Vk8JXNjBGA6itAIz9nwfh6qCDUj2t8LTdOQdYrSFOO7x2dFgeCwi21V27Ga2vqsvkUnAAAAFQD708pfON6Itq/5S+4kkNdNNDKWCwAAAIEAkRQeugul6KmOC0C2EmgVJvKK1qImlwHir08W1LTESnujmRIWLRst8sDoKjJpNevFuHGybPQ3palvM9qTQ84k3NMsJYJZSjSexsKydHJbD4ErKk8W6k+Xo7GAtH4nUcNskbnLHUpfvzm0jWs2yeHS0TCrljuTQwX1UsvGKJanzEoAAACBAIurf3TAfN2FKKIpKt5vyNv2ENBVcxAHN36VH8JP4uDUERg/T0OyLrIxW8px9naI6AQ1o+fPLquJ3Byn9A1RZsvWAQJI/J0oUit1KQM5FKBtXNBuFhIMSLPwbtp5pZ+m0DAFo6IcY1pl1TimGa20ajrToUhDh1NpE2ZK//8fw2i7",
- user => "root"
- }
-
- ssh_authorized_key { "ssh key tmb":
- type => "ssh-dss",
- key => "AAAAB3NzaC1kc3MAAACBAMFaCUsen6ZYH8hsjGK0tlaguduw4YT2KD3TaDEK24ltKzvQ+NDiPRms1zPhTpRL0p0U5QVdIMxm/asAtuiMLMxdmU+Crry6s110mKKY2930ZEk6N4YJ4DbqSiYe2JBmpJVIEJ6Betgn7yZRR2mRM7j134PddAl8BGG+RUvzib7JAAAAFQDzu/G2R+6oe3vjIbbFpOTyR3PAbwAAAIEAmqXAGybY9CVgGChSztPEdvaZ1xOVGJtmxmlWvitWGpu8m5JBf57VhzdpT4Fsf4fiVZ7NWiwPm1DzqNX7xCH7IPLPK0jQSd937xG9Un584CguNB76aEQXv0Yl5VjOrC3DggIEfZ1KLV7GcpOukw0RerxKz99rYAThp6+qzBIrv38AAACBAKhXi7uNlajescWFjiCZ3fpnxdyGAgtKzvlz60mGKwwNyaQCVmPSmYeBI2tg1qk+0I5K6LZUxWkdhuE1UfvAbIrEdwyD8p53dPg1J9DpdQ1KqApeKqLxO02KJtfomuy3cRQXmdfOTovYN7zAu1NCp51uUNTzhIpDHx0MZ6bsWSFv",
- user => "root"
- }
-}
-
-class urpmi_update {
- cron { urpmi_update:
- user => root,
- hour => '*/4',
- minute => 0,
- command => "urpmi.update -a",
- }
-}
-
-class default_mageia_server {
- include timezone
-
- include openssh
- include default_ssh_root_key
- include base_packages
- include ntp
- include postfix::simple_relay
- include urpmi_update
- include puppet::client
-}
-
diff --git a/manifests/defaults.pp b/manifests/defaults.pp
new file mode 100644
index 00000000..85f3f31c
--- /dev/null
+++ b/manifests/defaults.pp
@@ -0,0 +1,32 @@
+# To avoid repeating these settings everywhere
+Exec {
+ path => '/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/usr/local/sbin/',
+}
+
+Package {
+ ensure => present,
+}
+
+File {
+ ensure => present,
+ owner => 'root',
+ group => 'root',
+  # on directories, this becomes 0755
+ # see http://docs.puppetlabs.com/references/2.7.0/type.html#file
+ mode => '0644',
+}
+
+Group {
+ ensure => present,
+}
+
+User {
+ ensure => present,
+ managehome => true,
+ shell => '/bin/bash',
+}
+
+Service {
+  ensure   => running,
+  provider => systemd,
+}
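These are top-scope resource defaults, so resources declared elsewhere in the manifests can stay terse. A rough illustration of their effect, with arbitrary example resources (none of them are declared by this change):

    package { 'rsync': }        # picks up ensure => present
    file { '/etc/motd':         # picks up owner/group root and mode 0644
      content => "managed by puppet\n",
    }
    service { 'sshd': }         # picks up ensure => running, provider => systemd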
diff --git a/manifests/extlookup.pp b/manifests/extlookup.pp
index 77dc7809..0837818b 100644
--- a/manifests/extlookup.pp
+++ b/manifests/extlookup.pp
@@ -1,4 +1,4 @@
-# see http://www.devco.net/archives/2009/08/31/complex_data_and_puppet.php
-$extlookup_datadir = "/etc/puppet/extdata"
-$extlookup_precedence = ["%{fqdn}", "common"]
+# see https://www.devco.net/archives/2009/08/31/complex_data_and_puppet.php
+$extlookup_datadir = '/etc/puppet/extdata'
+$extlookup_precedence = ['%{fqdn}', 'common']
diff --git a/manifests/nodes.pp b/manifests/nodes.pp
index 1a2ac33b..61a76ee8 100644
--- a/manifests/nodes.pp
+++ b/manifests/nodes.pp
@@ -1,117 +1,7 @@
-# svn, big important server
-node valstar {
-# Location: IELO datacenter (marseille)
-#
-# TODO:
-# - GIT server
-# - setup urli build scheduler
-# - setup youri
-# - setup restricted shell access to allow "mdvsys submit" to work
-# - setup maintainers database (with web interface)
-# - mirroring (Nanar)
-#
- include default_mageia_server
- timezone::timezone { "Europe/Paris": }
- include rsyncd
- include mirror
- include openldap::master
- include subversion::client
- include subversion::server
- include puppet::master
- include buildsystem::mainnode
-
- subversion::snapshot { "/etc/puppet":
- source => "svn://svn.mageia.org/adm/puppet/"
- }
-}
-
-# web apps
-node alamut {
-# Location: IELO datacenter (marseille)
-#
-# TODO:
-# - Review board
-# - nagios
-# - api
-# - mail server
-# - mailing list server
-# - wiki
-# - pastebin
-# - LDAP slave
-#
- include default_mageia_server
- include bind::bind_master
- include postgresql
- bind::zone_master { "mageia.org": }
- bind::zone_master { "mageia.fr": }
- timezone::timezone { "Europe/Paris": }
-
- include catdap
- include mga-mirrors
- include epoll
- include transifex
- include bugzilla
-}
-
-# buildnode
-node jonund {
-# Location: IELO datacenter (marseille)
-#
- include default_mageia_server
- include buildsystem::buildnode
- timezone::timezone { "Europe/Paris": }
- include shorewall
- include shorewall::default_firewall
- include testvm
-}
-
-node ecosse {
-# Location: IELO datacenter (marseille)
-#
- include default_mageia_server
- include buildsystem::buildnode
- timezone::timezone { "Europe/Paris": }
+# not a real node called "default": this is the config applied to any
+# node that has no dedicated node definition of its own
+node default {
+ include common::default_mageia_server
}
-
-# backup server
-node fiona {
-# Location: IELO datacenter (marseille)
-#
-# TODO:
-# - buy the server
-# - install the server in datacenter
-# - install a backup system
- include default_mageia_server
-}
-
-# gandi-vm
-node krampouezh {
-# Location: gandi VM
-#
-# TODO:
-# - secondary MX
-# - LDAP slave (for external traffic maybe)
-#
- include default_mageia_server
- include bind::bind_master
- bind::zone_master { "mageia.org": }
- bind::zone_master { "mageia.fr": }
- timezone::timezone { "Europe/Paris": }
-# Other services running on this server :
-# - meetbot
-}
-
-node champagne {
-# Location: gandi VM
-#
-# TODO:
-# - setup mageia.org web site
-# - setup blog
-#
- include default_mageia_server
- timezone::timezone { "Europe/Paris": }
- include blog
-}
-
-
+import 'nodes/*.pp'
diff --git a/manifests/nodes/armlet1.pp b/manifests/nodes/armlet1.pp
new file mode 100644
index 00000000..0d731f08
--- /dev/null
+++ b/manifests/nodes/armlet1.pp
@@ -0,0 +1,7 @@
+node armlet1 {
+# Location: Scaleway (Iliad/Online datacenter)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/armlet2.pp b/manifests/nodes/armlet2.pp
new file mode 100644
index 00000000..7566249f
--- /dev/null
+++ b/manifests/nodes/armlet2.pp
@@ -0,0 +1,7 @@
+node armlet2 {
+# Location: Scaleway (Iliad/Online datacenter)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/duvel.pp b/manifests/nodes/duvel.pp
new file mode 100644
index 00000000..772e43dc
--- /dev/null
+++ b/manifests/nodes/duvel.pp
@@ -0,0 +1,56 @@
+node duvel {
+# Location: IELO datacenter (marseille)
+#
+# TODO:
+# - GIT server
+# - setup maintainers database (with web interface)
+#
+ include common::default_mageia_server
+ timezone::timezone { 'Europe/Paris': }
+ include main_mirror
+ include openldap::master
+ include git::client
+ include subversion::client
+ include subversion::server
+ include puppet::master
+ #include reports::ii
+
+ include sshkeys::keymaster
+ include mga_buildsystem::mainnode
+ include softwarekey
+ include mgasoft
+ include spec-tree-reports
+
+ include access_classes::committers
+ include restrictshell::allow_git
+ include restrictshell::allow_svn
+ include restrictshell::allow_pkgsubmit
+ include restrictshell::allow_maintdb
+ include restrictshell::allow_upload_bin
+ include openssh::ssh_keys_from_ldap
+
+ include repositories::subversion
+
+ # include irkerd
+
+ include websites::svn
+ include websites::git
+
+ class { 'mga-advisories':
+ vhost => "advisories.${::domain}",
+ }
+
+ git::snapshot { '/etc/puppet':
+ source => "git://git.${::domain}/infrastructure/puppet/"
+ }
+
+ mirror_cleaner::orphans { 'cauldron':
+ base => '/distrib/bootstrap/distrib/',
+ }
+
+ class { 'mgagit':
+ ldap_server => "ldap.${::domain}",
+ binddn => 'cn=mgagit-valstar,ou=System Accounts,dc=mageia,dc=org',
+ bindpw => extlookup('mgagit_ldap','x'),
+ }
+}
diff --git a/manifests/nodes/ec2aa1.pp b/manifests/nodes/ec2aa1.pp
new file mode 100644
index 00000000..f000db8a
--- /dev/null
+++ b/manifests/nodes/ec2aa1.pp
@@ -0,0 +1,7 @@
+node ec2aa1 {
+# Location: Amazon (eu-central-1a)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/ec2aa2.pp b/manifests/nodes/ec2aa2.pp
new file mode 100644
index 00000000..a4e1e27f
--- /dev/null
+++ b/manifests/nodes/ec2aa2.pp
@@ -0,0 +1,7 @@
+node ec2aa2 {
+# Location: Amazon (eu-central-1b)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/ec2aa3.pp b/manifests/nodes/ec2aa3.pp
new file mode 100644
index 00000000..763675d7
--- /dev/null
+++ b/manifests/nodes/ec2aa3.pp
@@ -0,0 +1,7 @@
+node ec2aa3 {
+# Location: Amazon (eu-central-1b)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/ec2x1.pp b/manifests/nodes/ec2x1.pp
new file mode 100644
index 00000000..4a0f5a0f
--- /dev/null
+++ b/manifests/nodes/ec2x1.pp
@@ -0,0 +1,7 @@
+node ec2x1 {
+# Location: Amazon (eu-central-1b)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/ec2x2.pp b/manifests/nodes/ec2x2.pp
new file mode 100644
index 00000000..bf25cf8e
--- /dev/null
+++ b/manifests/nodes/ec2x2.pp
@@ -0,0 +1,7 @@
+node ec2x2 {
+# Location: Amazon (eu-central-1a)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/ecosse.pp b/manifests/nodes/ecosse.pp
new file mode 100644
index 00000000..c7fa95e5
--- /dev/null
+++ b/manifests/nodes/ecosse.pp
@@ -0,0 +1,7 @@
+node ecosse {
+# Location: IELO datacenter (marseille)
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/fiona.pp b/manifests/nodes/fiona.pp
new file mode 100644
index 00000000..2093001a
--- /dev/null
+++ b/manifests/nodes/fiona.pp
@@ -0,0 +1,10 @@
+# backup server
+node fiona {
+# Location: IELO datacenter (marseille)
+#
+# TODO:
+# - install a backup system
+ include common::default_mageia_server
+ timezone::timezone { 'Europe/Paris': }
+# include backups::server
+}
diff --git a/manifests/nodes/friteuse.pp b/manifests/nodes/friteuse.pp
new file mode 100644
index 00000000..b096021e
--- /dev/null
+++ b/manifests/nodes/friteuse.pp
@@ -0,0 +1,7 @@
+node friteuse {
+# Location: VM hosted on sucuk
+#
+ include common::default_mageia_server
+ timezone::timezone { 'Europe/Paris': }
+ include forums
+}
diff --git a/manifests/nodes/ncaa1.pp b/manifests/nodes/ncaa1.pp
new file mode 100644
index 00000000..b512939a
--- /dev/null
+++ b/manifests/nodes/ncaa1.pp
@@ -0,0 +1,7 @@
+node ncaa1 {
+# Location: Netcup, Vienna
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/neru.pp b/manifests/nodes/neru.pp
new file mode 100644
index 00000000..66958059
--- /dev/null
+++ b/manifests/nodes/neru.pp
@@ -0,0 +1,45 @@
+node neru {
+# Location: Scaleway Paris
+#
+ include common::default_mageia_server_no_smtp
+ timezone::timezone { 'Europe/Paris': }
+ include postfix::server::secondary
+ include blog::base
+ include blog::db_backup
+ include blog::files_bots
+ include blog::files_backup
+ include mysql::server
+ include dns::server
+
+ include planet
+ include websites::archives
+ include websites::static
+ include websites::hugs
+ include websites::releases
+ include websites::www
+ include websites::doc
+ include websites::start
+ include websites::meetbot
+ include dashboard
+ include access_classes::web
+ include openssh::ssh_keys_from_ldap
+
+ # temporary redirects for madb (2024-11) until it gets hosted on Mageia infra
+ apache::vhost_redirect { "madb.${::domain}":
+ url => "https://madb.mageialinux-online.org/",
+ }
+ apache::vhost_redirect { "ssl_madb.${::domain}":
+ use_ssl => true,
+ vhost => "madb.${::domain}",
+ url => "https://madb.mageialinux-online.org/",
+ }
+
+ openldap::slave_instance { '1':
+ rid => 1,
+ }
+
+ # http server for meetbot logs
+ include apache::base
+}
+# Other services running on this server :
+# - meetbot
diff --git a/manifests/nodes/ociaa1.pp b/manifests/nodes/ociaa1.pp
new file mode 100644
index 00000000..ce476665
--- /dev/null
+++ b/manifests/nodes/ociaa1.pp
@@ -0,0 +1,7 @@
+node ociaa1 {
+# Location: ?
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/pktaa1.pp b/manifests/nodes/pktaa1.pp
new file mode 100644
index 00000000..31f649c4
--- /dev/null
+++ b/manifests/nodes/pktaa1.pp
@@ -0,0 +1,7 @@
+node pktaa1 {
+# Location: Equinix Metal / SV - SJC1
+#
+ include common::default_mageia_server
+ include mga_buildsystem::buildnode
+ timezone::timezone { 'Europe/Paris': }
+}
diff --git a/manifests/nodes/rabbit.pp b/manifests/nodes/rabbit.pp
new file mode 100644
index 00000000..2436219b
--- /dev/null
+++ b/manifests/nodes/rabbit.pp
@@ -0,0 +1,32 @@
+node rabbit {
+# Location: IELO datacenter (marseille)
+#
+# - used to create ISOs (and live images, and so on)
+#
+ include common::default_mageia_server
+ timezone::timezone { 'Europe/Paris': }
+ include bcd::base
+ #include bcd::web
+ include bcd::rsync
+ include mga_buildsystem::buildnode
+ include draklive
+ include git::svn
+ include access_classes::iso_makers
+ include openssh::ssh_keys_from_ldap
+ # include mirror::mageia
+ include releasekey
+
+ youri-check::config {'config_cauldron':
+ version => 'cauldron',
+ }
+ youri-check::check {'check_cauldron':
+ version => 'cauldron',
+ hour => '1-23/2',
+ minute => 30
+ }
+
+ # for testing iso quickly
+ # include libvirtd::kvm
+ # libvirtd::group_access { 'mga-iso_makers': }
+
+}
diff --git a/manifests/nodes/sucuk.pp b/manifests/nodes/sucuk.pp
new file mode 100644
index 00000000..e56fd113
--- /dev/null
+++ b/manifests/nodes/sucuk.pp
@@ -0,0 +1,131 @@
+# server for various tasks
+node sucuk {
+# Location: IELO datacenter (marseille)
+ include common::default_mageia_server_no_smtp
+ timezone::timezone { 'Europe/Paris': }
+
+ include openssh::ssh_keys_from_ldap
+ include access_classes::admin
+
+ include postgresql::server
+ postgresql::tagged { 'default': }
+
+ class {'epoll::var':
+ db_password => extlookup('epoll_pgsql','x'),
+ password => extlookup('epoll_password','x'),
+ }
+
+ #include epoll
+ #include epoll::create_db
+
+ include sympa::server
+ include postfix::server::primary
+ include lists
+
+ include catdap
+ include mga-mirrors
+
+ include wikis
+ include websites::perl
+ include websites::www
+ include websites::nav
+
+ include bugzilla
+
+ # gitweb
+ include repositories::git_mirror
+ include cgit
+ include gitmirror
+
+ include repositories::svn_mirror
+ include viewvc
+
+# include mirrorbrain
+
+ include dns::server
+
+ include xymon::server
+ apache::vhost_simple { "xymon.${::domain}":
+ location => '/usr/share/xymon/www',
+ }
+
+ class { 'mgapeople':
+ ldap_server => "ldap.${::domain}",
+ binddn => 'cn=mgapeople-alamut,ou=System Accounts,dc=mageia,dc=org',
+ bindpw => extlookup('mgapeople_ldap','x'),
+ vhost => "people.${::domain}",
+ vhostdir => "/var/www/vhosts/people.${::domain}",
+ maintdburl => "https://pkgsubmit.${::domain}/data/maintdb.txt",
+ }
+
+ class { 'mga-treasurer':
+ vhost => "treasurer.${::domain}",
+ vhostdir => "/var/www/vhosts/treasurer.${::domain}",
+ }
+
+ youri-check::report_www { 'check': }
+
+ youri-check::createdb_user {'config_cauldron':
+ version => 'cauldron',
+ }
+
+ youri-check::config {'config_cauldron':
+ version => 'cauldron',
+ }
+ youri-check::report { 'report_cauldron':
+ version => 'cauldron',
+ hour => '*/2',
+ minute => '0'
+ }
+
+ youri-check::createdb_user {'config_9':
+ version => '9',
+ }
+
+ youri-check::config {'config_9':
+ version => '9',
+ }
+
+ youri-check::report {'report_9':
+ version => '9',
+ hour => '*/4',
+ minute => '56'
+ }
+
+ include tld_redirections
+
+  # temporary, only for as long as the VM is running there
+ host { 'friteuse':
+ ensure => 'present',
+ ip => '192.168.122.131',
+ host_aliases => [ "friteuse.${::domain}", "forums.${::domain}" ],
+ }
+
+  # to create all phpbb databases on sucuk
+ phpbb::databases { $fqdn: }
+
+ apache::vhost::redirect_ssl { "forums.${::domain}": }
+ apache::vhost_redirect { "forum.${::domain}":
+ url => "https://forums.${::domain}/",
+ }
+ apache::vhost_redirect { "ssl_forum.${::domain}":
+ url => "https://forums.${::domain}/",
+ vhost => "forum.${::domain}",
+ use_ssl => true,
+ }
+
+  # forums runs in a VM on this machine, so https to the backend isn't necessary
+ apache::vhost::reverse_proxy { "ssl_forums.${::domain}":
+ url => "http://forums.${::domain}/",
+ vhost => "forums.${::domain}",
+ use_ssl => true,
+ content => '
+ RewriteEngine On
+ RewriteCond %{QUERY_STRING} mode=register
+ RewriteRule .*ucp.php - [forbidden]
+ ',
+ }
+
+ include libvirtd::kvm
+
+}
diff --git a/manifests/nodes_ip.pp b/manifests/nodes_ip.pp
new file mode 100644
index 00000000..38553b61
--- /dev/null
+++ b/manifests/nodes_ip.pp
@@ -0,0 +1,70 @@
+# Nodes IP addresses
+
+$nodes_ipaddr = {
+ neru => {
+ ipv4 => '163.172.148.228',
+ ipv6 => '2001:bc8:710:175f:dc00:ff:fe2d:c0ff',
+ },
+ ecosse => {
+ ipv4 => '212.85.158.148',
+ ipv6 => '2a02:2178:2:7::4',
+ },
+ fiona => {
+ ipv4 => '212.85.158.150',
+ ipv6 => '2a02:2178:2:7::6',
+ },
+ sucuk => {
+ ipv4 => '212.85.158.151',
+ ipv6 => '2a02:2178:2:7::7',
+ },
+ rabbit => {
+ ipv4 => '212.85.158.152',
+ ipv6 => '2a02:2178:2:7::8',
+ },
+ duvel => {
+ ipv4 => '212.85.158.153',
+ ipv6 => '2a02:2178:2:7::9',
+ },
+ armlet1 => {
+ ipv4 => '163.172.148.228',
+ },
+ armlet2 => {
+ ipv4 => '163.172.148.228',
+ },
+ friteuse => {
+ ipv4 => '192.168.122.131',
+ },
+ ec2aa1 => {
+ ipv6 => '2a05:d014:e9:2c02:98ca:ec83:c601:371a',
+ },
+ ec2aa2 => {
+ ipv6 => '2a05:d014:e9:2c03:b7e1:fda8:eab9:6692',
+ },
+ ec2aa3 => {
+ ipv6 => '2a05:d014:e9:2c03:17a8:1204:6df6:662c',
+ },
+ ec2aaauto => {
+ ipv6 => '2a05:d014:e9:2c03:c80d:e2d9:658d:4c28',
+ },
+ ec2x1 => {
+ ipv6 => '2a05:d014:e9:2c03:ce2e:f80a:bc2b:da0d',
+ },
+ ec2x2 => {
+ ipv6 => '2a05:d014:e9:2c02:42e4:6e93:ed55:7b2a',
+ },
+ pktaa1 => {
+ ipv4 => '147.75.69.246',
+ },
+ ociaa1 => {
+ ipv6 => '2603:c026:c101:f00::1:1',
+ },
+ ociaa2 => {
+ ipv6 => '2603:c026:c101:f00::1:2',
+ },
+ ncaa1 => {
+ ipv4 => '89.58.19.166',
+ ipv6 => '2a0a:4cc0:0:61c::1',
+ }
+}
+
+# vim: sw=2
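The hash sits at top scope, so node manifests and templates can look addresses up by node name. No consumer is shown in this change; a minimal sketch of what a lookup might look like (the variable names on the left are assumptions):

    # Hypothetical lookups against the hash above.
    $duvel_v4 = $nodes_ipaddr['duvel']['ipv4']   # '212.85.158.153'
    $ncaa1_v6 = $nodes_ipaddr['ncaa1']['ipv6']   # '2a0a:4cc0:0:61c::1'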
diff --git a/manifests/site.pp b/manifests/site.pp
index 247f04cf..376c4213 100644
--- a/manifests/site.pp
+++ b/manifests/site.pp
@@ -1,3 +1,4 @@
-import "extlookup"
-import "common"
-import "nodes"
+import 'extlookup'
+import 'defaults'
+import 'nodes_ip'
+import 'nodes'
diff --git a/modules/amavis/manifests/init.pp b/modules/amavis/manifests/init.pp
new file mode 100644
index 00000000..57af5bd9
--- /dev/null
+++ b/modules/amavis/manifests/init.pp
@@ -0,0 +1,13 @@
+class amavis {
+ package { 'amavisd-new': }
+
+ service { 'amavisd':
+ subscribe => Package['amavisd-new'],
+ }
+
+ file { '/etc/amavisd/amavisd.conf':
+ require => Package['amavisd-new'],
+ content => template('amavis/amavisd.conf'),
+ notify => Service['amavisd'],
+ }
+}
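Note that the package and service resources above spell out no ensure; assuming the top-scope defaults from manifests/defaults.pp apply, the service effectively behaves like the expanded form below (illustration only, not a change to the module):

    service { 'amavisd':
      ensure    => running,               # from the Service default
      provider  => systemd,               # from the Service default
      subscribe => Package['amavisd-new'],
    }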
diff --git a/modules/amavis/templates/amavisd.conf b/modules/amavis/templates/amavisd.conf
new file mode 100644
index 00000000..84a44944
--- /dev/null
+++ b/modules/amavis/templates/amavisd.conf
@@ -0,0 +1,782 @@
+use strict;
+
+# a minimalistic configuration file for amavisd-new with all necessary settings
+#
+# see amavisd.conf-default for a list of all variables with their defaults;
+# see amavisd.conf-sample for a traditional-style commented file;
+# for more details see documentation in INSTALL, README_FILES/*
+# and at http://www.ijs.si/software/amavisd/amavisd-new-docs.html
+
+
+# COMMONLY ADJUSTED SETTINGS:
+
+# @bypass_virus_checks_maps = (1); # controls running of anti-virus code
+# @bypass_spam_checks_maps = (1); # controls running of anti-spam code
+# $bypass_decode_parts = 1; # controls running of decoders&dearchivers
+
+$max_servers = 2; # num of pre-forked children (2..30 is common), -m
+$daemon_user = 'amavis'; # (no default; customary: vscan or amavis), -u
+$daemon_group = 'amavis'; # (no default; customary: vscan or amavis), -g
+
+(my $__hn,$mydomain) = split (/\./, $myhostname, 2); # try to discover the domain name,
+  # used as a convenient default for other settings; it could also be set to
+  # localhost.localdomain, or changed to suit your needs
+
+$MYHOME = '/run/amavis'; # a convenient default for other settings, -H
+$TEMPBASE = "$MYHOME/tmp"; # working directory, needs to exist, -T
+$ENV{TMPDIR} = $TEMPBASE; # environment variable TMPDIR, used by SA, etc.
+$QUARANTINEDIR = '/var/spool/amavis/virusmails'; # -Q
+# $quarantine_subdir_levels = 1; # add level of subdirs to disperse quarantine
+# $release_format = 'resend'; # 'attach', 'plain', 'resend'
+# $report_format = 'arf'; # 'attach', 'plain', 'resend', 'arf'
+
+# $daemon_chroot_dir = $MYHOME; # chroot directory or undef, -R
+
+# $db_home = "$MYHOME/db"; # dir for bdb nanny/cache/snmp databases, -D
+# $helpers_home = "$MYHOME/var"; # working directory for SpamAssassin, -S
+# $lock_file = "$MYHOME/var/lib/amavisd.lock"; # -L
+# $pid_file = "$MYHOME/var/lib/amavisd.pid"; # -P
+
+#NOTE: create directories $MYHOME/tmp, $MYHOME/var, $MYHOME/db manually
+
+$log_level = 0; # verbosity 0..5, -d
+$log_recip_templ = undef; # disable by-recipient level-0 log entries
+$DO_SYSLOG = 1; # log via syslogd (preferred)
+$syslog_facility = 'mail'; # Syslog facility as a string
+ # e.g.: mail, daemon, user, local0, ... local7
+$syslog_priority = 'debug'; # Syslog base (minimal) priority as a string,
+ # choose from: emerg, alert, crit, err, warning, notice, info, debug
+
+$enable_db = 1; # enable use of BerkeleyDB/libdb (SNMP and nanny)
+$enable_global_cache = 1; # enable use of libdb-based cache if $enable_db=1
+$nanny_details_level = 2; # nanny verbosity: 1: traditional, 2: detailed
+$enable_dkim_verification = 1; # enable DKIM signatures verification
+$enable_dkim_signing = 1; # load DKIM signing code, keys defined by dkim_key
+
+@local_domains_maps = ( [".$mydomain"] ); # list of all local domains
+
+@mynetworks = qw( 127.0.0.0/8 [::1] [FE80::]/10 [FEC0::]/10
+ 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 );
+
+$unix_socketname = "$MYHOME/amavisd.sock"; # amavisd-release or amavis-milter
+ # option(s) -p overrides $inet_socket_port and $unix_socketname
+
+$inet_socket_port = 10025; # listen on this local TCP port(s)
+# $inet_socket_port = [10024,10026]; # listen on multiple TCP ports
+
+$policy_bank{'MYNETS'} = { # mail originating from @mynetworks
+ originating => 1, # is true in MYNETS by default, but let's make it explicit
+ os_fingerprint_method => undef, # don't query p0f for internal clients
+};
+
+# it is up to MTA to re-route mail from authenticated roaming users or
+# from internal hosts to a dedicated TCP port (such as 10026) for filtering
+$interface_policy{'10026'} = 'ORIGINATING';
+
+$policy_bank{'ORIGINATING'} = { # mail supposedly originating from our users
+ originating => 1, # declare that mail was submitted by our smtp client
+ allow_disclaimers => 1, # enables disclaimer insertion if available
+ # notify administrator of locally originating malware
+ virus_admin_maps => ["virusalert\@$mydomain"],
+ spam_admin_maps => ["virusalert\@$mydomain"],
+ warnbadhsender => 1,
+ # forward to a smtpd service providing DKIM signing service
+ forward_method => 'smtp:[127.0.0.1]:10027',
+ # force MTA conversion to 7-bit (e.g. before DKIM signing)
+ smtpd_discard_ehlo_keywords => ['8BITMIME'],
+ bypass_banned_checks_maps => [1], # allow sending any file names and types
+ terminate_dsn_on_notify_success => 0, # don't remove NOTIFY=SUCCESS option
+};
+
+$interface_policy{'SOCK'} = 'AM.PDP-SOCK'; # only applies with $unix_socketname
+
+# Use with amavis-release over a socket or with Petr Rehor's amavis-milter.c
+# (with amavis-milter.c from this package or old amavis.c client use 'AM.CL'):
+$policy_bank{'AM.PDP-SOCK'} = {
+ protocol => 'AM.PDP',
+ auth_required_release => 0, # do not require secret_id for amavisd-release
+};
+
+$sa_tag_level_deflt = 1.0; # add spam info headers if at, or above that level
+$sa_tag2_level_deflt = 4.7; # add 'spam detected' headers at that level
+$sa_kill_level_deflt = 4.7; # triggers spam evasive actions (e.g. blocks mail)
+$sa_dsn_cutoff_level = 10; # spam level beyond which a DSN is not sent
+$sa_crediblefrom_dsn_cutoff_level = 18; # likewise, but for a likely valid From
+# $sa_quarantine_cutoff_level = 25; # spam level beyond which quarantine is off
+$penpals_bonus_score = 8; # (no effect without a @storage_sql_dsn database)
+$penpals_threshold_high = $sa_kill_level_deflt; # don't waste time on hi spam
+$bounce_killer_score = 100; # spam score points to add for joe-jobbed bounces
+
+$sa_mail_body_size_limit = 512*1024; # don't waste time on SA if mail is larger
+$sa_local_tests_only = 0; # only tests which do not require internet access?
+
+# @lookup_sql_dsn =
+# ( ['DBI:mysql:database=mail;host=127.0.0.1;port=3306', 'user1', 'passwd1'],
+# ['DBI:mysql:database=mail;host=host2', 'username2', 'password2'],
+# ["DBI:SQLite:dbname=$MYHOME/sql/mail_prefs.sqlite", '', ''] );
+# @storage_sql_dsn = @lookup_sql_dsn; # none, same, or separate database
+
+# $timestamp_fmt_mysql = 1; # if using MySQL *and* msgs.time_iso is TIMESTAMP;
+# defaults to 0, which is good for non-MySQL or if msgs.time_iso is CHAR(16)
+
+$virus_admin = ""; # notifications recip.
+
+$mailfrom_notify_admin = "virusalert\@$mydomain"; # notifications sender
+$mailfrom_notify_recip = "virusalert\@$mydomain"; # notifications sender
+$mailfrom_notify_spamadmin = "spam.police\@$mydomain"; # notifications sender
+$mailfrom_to_quarantine = ''; # null return path; uses original sender if undef
+
+@addr_extension_virus_maps = ('virus');
+@addr_extension_banned_maps = ('banned');
+@addr_extension_spam_maps = ('spam');
+@addr_extension_bad_header_maps = ('badh');
+# $recipient_delimiter = '+'; # undef disables address extensions altogether
+# when enabling addr extensions do also Postfix/main.cf: recipient_delimiter=+
+
+$path = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin';
+# $dspam = 'dspam';
+
+$MAXLEVELS = 14;
+$MAXFILES = 1500;
+$MIN_EXPANSION_QUOTA = 100*1024; # bytes (default undef, not enforced)
+$MAX_EXPANSION_QUOTA = 512*1024*1024; # bytes (default undef, not enforced)
+
+$sa_spam_subject_tag = '***SPAM*** ';
+$defang_virus = 1; # MIME-wrap passed infected mail
+$defang_banned = 1; # MIME-wrap passed mail containing banned name
+# for defanging bad headers only turn on certain minor contents categories:
+$defang_by_ccat{+CC_BADH.",3"} = 1; # NUL or CR character in header
+$defang_by_ccat{+CC_BADH.",5"} = 1; # header line longer than 998 characters
+$defang_by_ccat{+CC_BADH.",6"} = 1; # header field syntax error
+
+
+# OTHER MORE COMMON SETTINGS (defaults may suffice):
+
+# $myhostname = 'host.example.com'; # must be a fully-qualified domain name!
+
+$notify_method = 'smtp:[127.0.0.1]:10026';
+$forward_method = 'smtp:[127.0.0.1]:10026'; # set to undef with milter!
+
+# $final_virus_destiny = D_DISCARD;
+# $final_banned_destiny = D_BOUNCE;
+# $final_spam_destiny = D_PASS;
+# $final_bad_header_destiny = D_PASS;
+# $bad_header_quarantine_method = undef;
+
+# $os_fingerprint_method = 'p0f:*:2345'; # to query p0f-analyzer.pl
+
+## hierarchy by which a final setting is chosen:
+## policy bank (based on port or IP address) -> *_by_ccat
+## *_by_ccat (based on mail contents) -> *_maps
+## *_maps (based on recipient address) -> final configuration value
+
+
+# SOME OTHER VARIABLES WORTH CONSIDERING (see amavisd.conf-default for all)
+
+# $warnbadhsender,
+# $warnvirusrecip, $warnbannedrecip, $warnbadhrecip, (or @warn*recip_maps)
+#
+# @bypass_virus_checks_maps, @bypass_spam_checks_maps,
+# @bypass_banned_checks_maps, @bypass_header_checks_maps,
+#
+# @virus_lovers_maps, @spam_lovers_maps,
+# @banned_files_lovers_maps, @bad_header_lovers_maps,
+#
+# @blacklist_sender_maps, @score_sender_maps,
+#
+# $clean_quarantine_method, $virus_quarantine_to, $banned_quarantine_to,
+# $bad_header_quarantine_to, $spam_quarantine_to,
+#
+# $defang_bad_header, $defang_undecipherable, $defang_spam
+
+
+# REMAINING IMPORTANT VARIABLES ARE LISTED HERE BECAUSE OF LONGER ASSIGNMENTS
+
+@keep_decoded_original_maps = (new_RE(
+ qr'^MAIL$', # retain full original message for virus checking
+ qr'^MAIL-UNDECIPHERABLE$', # recheck full mail if it contains undecipherables
+ qr'^(ASCII(?! cpio)|text|uuencoded|xxencoded|binhex)'i,
+# qr'^Zip archive data', # don't trust Archive::Zip
+));
+
+
+# for $banned_namepath_re (a new-style of banned table) see amavisd.conf-sample
+
+$banned_filename_re = new_RE(
+
+### BLOCKED ANYWHERE
+# qr'^UNDECIPHERABLE$', # is or contains any undecipherable components
+ qr'^\.(exe-ms|dll)$', # banned file(1) types, rudimentary
+# qr'^\.(exe|lha|tnef|cab|dll)$', # banned file(1) types
+
+### BLOCK THE FOLLOWING, EXCEPT WITHIN UNIX ARCHIVES:
+# [ qr'^\.(gz|bz2)$' => 0 ], # allow any in gzip or bzip2
+ [ qr'^\.(rpm|cpio|tar)$' => 0 ], # allow any in Unix-type archives
+
+ qr'.\.(pif|scr)$'i, # banned extensions - rudimentary
+# qr'^\.zip$', # block zip type
+
+### BLOCK THE FOLLOWING, EXCEPT WITHIN ARCHIVES:
+# [ qr'^\.(zip|rar|arc|arj|zoo)$'=> 0 ], # allow any within these archives
+
+ qr'^application/x-msdownload$'i, # block these MIME types
+ qr'^application/x-msdos-program$'i,
+ qr'^application/hta$'i,
+
+# qr'^message/partial$'i, # rfc2046 MIME type
+# qr'^message/external-body$'i, # rfc2046 MIME type
+
+# qr'^(application/x-msmetafile|image/x-wmf)$'i, # Windows Metafile MIME type
+# qr'^\.wmf$', # Windows Metafile file(1) type
+
+ # block certain double extensions in filenames
+ qr'\.[^./]*[A-Za-z][^./]*\.\s*(exe|vbs|pif|scr|bat|cmd|com|cpl|dll)[.\s]*$'i,
+
+# qr'\{[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}\}?'i, # Class ID CLSID, strict
+# qr'\{[0-9a-z]{4,}(-[0-9a-z]{4,}){0,7}\}?'i, # Class ID extension CLSID, loose
+
+ qr'.\.(exe|vbs|pif|scr|cpl)$'i, # banned extension - basic
+# qr'.\.(exe|vbs|pif|scr|cpl|bat|cmd|com)$'i, # banned extension - basic+cmd
+# qr'.\.(ade|adp|app|bas|bat|chm|cmd|com|cpl|crt|emf|exe|fxp|grp|hlp|hta|
+# inf|ins|isp|js|jse|lnk|mda|mdb|mde|mdw|mdt|mdz|msc|msi|msp|mst|
+# ops|pcd|pif|prg|reg|scr|sct|shb|shs|vb|vbe|vbs|
+# wmf|wsc|wsf|wsh)$'ix, # banned ext - long
+# qr'.\.(ani|cur|ico)$'i, # banned cursors and icons filename
+# qr'^\.ani$', # banned animated cursor file(1) type
+
+# qr'.\.(mim|b64|bhx|hqx|xxe|uu|uue)$'i, # banned extension - WinZip vulnerab.
+);
+# See http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262631
+# and http://www.cknow.com/vtutor/vtextensions.htm
+
+
+# ENVELOPE SENDER SOFT-WHITELISTING / SOFT-BLACKLISTING
+
+@score_sender_maps = ({ # a by-recipient hash lookup table,
+ # results from all matching recipient tables are summed
+
+# ## per-recipient personal tables (NOTE: positive: black, negative: white)
+# 'user1@example.com' => [{'bla-mobile.press@example.com' => 10.0}],
+# 'user3@example.com' => [{'.ebay.com' => -3.0}],
+# 'user4@example.com' => [{'cleargreen@cleargreen.com' => -7.0,
+# '.cleargreen.com' => -5.0}],
+
+ ## site-wide opinions about senders (the '.' matches any recipient)
+ '.' => [ # the _first_ matching sender determines the score boost
+
+ new_RE( # regexp-type lookup table, just happens to be all soft-blacklist
+ [qr'^(bulkmail|offers|cheapbenefits|earnmoney|foryou)@'i => 5.0],
+ [qr'^(greatcasino|investments|lose_weight_today|market\.alert)@'i=> 5.0],
+ [qr'^(money2you|MyGreenCard|new\.tld\.registry|opt-out|opt-in)@'i=> 5.0],
+ [qr'^(optin|saveonlsmoking2002k|specialoffer|specialoffers)@'i => 5.0],
+ [qr'^(stockalert|stopsnoring|wantsome|workathome|yesitsfree)@'i => 5.0],
+ [qr'^(your_friend|greatoffers)@'i => 5.0],
+ [qr'^(inkjetplanet|marketopt|MakeMoney)\d*@'i => 5.0],
+ ),
+
+# read_hash("/var/lib/amavis/sender_scores_sitewide"),
+
+ { # a hash-type lookup table (associative array)
+ 'nobody@cert.org' => -3.0,
+ 'cert-advisory@us-cert.gov' => -3.0,
+ 'owner-alert@iss.net' => -3.0,
+ 'slashdot@slashdot.org' => -3.0,
+ 'securityfocus.com' => -3.0,
+ 'ntbugtraq@listserv.ntbugtraq.com' => -3.0,
+ 'security-alerts@linuxsecurity.com' => -3.0,
+ 'mailman-announce-admin@python.org' => -3.0,
+ 'amavis-user-admin@lists.sourceforge.net'=> -3.0,
+ 'amavis-user-bounces@lists.sourceforge.net' => -3.0,
+ 'spamassassin.apache.org' => -3.0,
+ 'notification-return@lists.sophos.com' => -3.0,
+ 'owner-postfix-users@postfix.org' => -3.0,
+ 'owner-postfix-announce@postfix.org' => -3.0,
+ 'owner-sendmail-announce@lists.sendmail.org' => -3.0,
+ 'sendmail-announce-request@lists.sendmail.org' => -3.0,
+ 'donotreply@sendmail.org' => -3.0,
+ 'ca+envelope@sendmail.org' => -3.0,
+ 'noreply@freshmeat.net' => -3.0,
+ 'owner-technews@postel.acm.org' => -3.0,
+ 'ietf-123-owner@loki.ietf.org' => -3.0,
+ 'cvs-commits-list-admin@gnome.org' => -3.0,
+ 'rt-users-admin@lists.fsck.com' => -3.0,
+ 'clp-request@comp.nus.edu.sg' => -3.0,
+ 'surveys-errors@lists.nua.ie' => -3.0,
+ 'emailnews@genomeweb.com' => -5.0,
+ 'yahoo-dev-null@yahoo-inc.com' => -3.0,
+ 'returns.groups.yahoo.com' => -3.0,
+ 'clusternews@linuxnetworx.com' => -3.0,
+ lc('lvs-users-admin@LinuxVirtualServer.org') => -3.0,
+ lc('owner-textbreakingnews@CNNIMAIL12.CNN.COM') => -5.0,
+
+ # soft-blacklisting (positive score)
+ 'sender@example.net' => 3.0,
+ '.example.net' => 1.0,
+
+ },
+ ], # end of site-wide tables
+});
+
+
+@decoders = (
+ ['mail', \&do_mime_decode],
+ ['asc', \&do_ascii],
+ ['uue', \&do_ascii],
+ ['hqx', \&do_ascii],
+ ['ync', \&do_ascii],
+ ['F', \&do_uncompress, ['unfreeze','freeze -d','melt','fcat'] ],
+ ['Z', \&do_uncompress, ['uncompress','gzip -d','zcat'] ],
+ ['gz', \&do_uncompress, 'gzip -d'],
+ ['gz', \&do_gunzip],
+ ['bz2', \&do_uncompress, 'bzip2 -d'],
+ ['lzo', \&do_uncompress, 'lzop -d'],
+ ['rpm', \&do_uncompress, ['rpm2cpio.pl','rpm2cpio'] ],
+ ['cpio', \&do_pax_cpio, ['pax','gcpio','cpio'] ],
+ ['tar', \&do_pax_cpio, ['pax','gcpio','cpio'] ],
+ ['deb', \&do_ar, 'ar'],
+# ['a', \&do_ar, 'ar'], # unpacking .a seems an overkill
+ ['zip', \&do_unzip],
+ ['7z', \&do_7zip, ['7zr','7za','7z'] ],
+ ['rar', \&do_unrar, ['rar','unrar'] ],
+ ['arj', \&do_unarj, ['arj','unarj'] ],
+ ['arc', \&do_arc, ['nomarch','arc'] ],
+ ['zoo', \&do_zoo, ['zoo','unzoo'] ],
+ ['lha', \&do_lha, 'lha'],
+# ['doc', \&do_ole, 'ripole'],
+ ['cab', \&do_cabextract, 'cabextract'],
+ ['tnef', \&do_tnef_ext, 'tnef'],
+ ['tnef', \&do_tnef],
+# ['sit', \&do_unstuff, 'unstuff'], # broken/unsafe decoder
+ ['exe', \&do_executable, ['rar','unrar'], 'lha', ['arj','unarj'] ],
+);
+
+
+@av_scanners = (
+
+# ### http://www.clanfield.info/sophie/ (http://www.vanja.com/tools/sophie/)
+# ['Sophie',
+# \&ask_daemon, ["{}/\n", '/var/run/sophie'],
+# qr/(?x)^ 0+ ( : | [\000\r\n]* $)/m, qr/(?x)^ 1 ( : | [\000\r\n]* $)/m,
+# qr/(?x)^ [-+]? \d+ : (.*?) [\000\r\n]* $/m ],
+
+# ### http://www.csupomona.edu/~henson/www/projects/SAVI-Perl/
+# ['Sophos SAVI', \&sophos_savi ],
+
+# ### http://www.clamav.net/
+# ['ClamAV-clamd',
+# \&ask_daemon, ["CONTSCAN {}\n", "/var/lib/clamav/clamd.socket"],
+# qr/\bOK$/m, qr/\bFOUND$/m,
+# qr/^.*?: (?!Infected Archive)(.*) FOUND$/m ],
+# # NOTE: run clamd under the same user as amavisd, or run it under its own
+# # uid such as clamav, add user clamav to the amavis group, and then add
+# # AllowSupplementaryGroups to clamd.conf;
+# # NOTE: match socket name (LocalSocket) in clamav.conf to the socket name in
+# # this entry; when running chrooted one may prefer socket "$MYHOME/clamd".
+
+# ### http://www.clamav.net/ and CPAN (memory-hungry! clamd is preferred)
+# # note that Mail::ClamAV requires perl to be build with threading!
+# ['Mail::ClamAV', \&ask_clamav, "*", [0], [1], qr/^INFECTED: (.+)/m ],
+
+# ### http://www.openantivirus.org/
+# ['OpenAntiVirus ScannerDaemon (OAV)',
+# \&ask_daemon, ["SCAN {}\n", '127.0.0.1:8127'],
+# qr/^OK/m, qr/^FOUND: /m, qr/^FOUND: (.+)/m ],
+
+# ### http://www.vanja.com/tools/trophie/
+# ['Trophie',
+# \&ask_daemon, ["{}/\n", '/var/run/trophie'],
+# qr/(?x)^ 0+ ( : | [\000\r\n]* $)/m, qr/(?x)^ 1 ( : | [\000\r\n]* $)/m,
+# qr/(?x)^ [-+]? \d+ : (.*?) [\000\r\n]* $/m ],
+
+# ### http://www.grisoft.com/
+# ['AVG Anti-Virus',
+# \&ask_daemon, ["SCAN {}\n", '127.0.0.1:55555'],
+# qr/^200/m, qr/^403/m, qr/^403 .*?: ([^\r\n]+)/m ],
+
+# ### http://www.f-prot.com/
+# ['F-Prot fpscand', # F-PROT Antivirus for BSD/Linux/Solaris, version 6
+# \&ask_daemon,
+# ["SCAN FILE {}/*\n", '127.0.0.1:10200'],
+# qr/^(0|8|64) /m,
+# qr/^([1235679]|1[01345]) |<[^>:]*(?i)(infected|suspicious|unwanted)/m,
+# qr/(?i)<[^>:]*(?:infected|suspicious|unwanted)[^>:]*: ([^>]*)>/m ],
+
+# ### http://www.f-prot.com/
+# ['F-Prot f-protd', # old version
+# \&ask_daemon,
+# ["GET {}/*?-dumb%20-archive%20-packed HTTP/1.0\r\n\r\n",
+# ['127.0.0.1:10200', '127.0.0.1:10201', '127.0.0.1:10202',
+# '127.0.0.1:10203', '127.0.0.1:10204'] ],
+# qr/(?i)<summary[^>]*>clean<\/summary>/m,
+# qr/(?i)<summary[^>]*>infected<\/summary>/m,
+# qr/(?i)<name>(.+)<\/name>/m ],
+
+# ### http://www.sald.com/, http://www.dials.ru/english/, http://www.drweb.ru/
+# ['DrWebD', \&ask_daemon, # DrWebD 4.31 or later
+# [pack('N',1). # DRWEBD_SCAN_CMD
+# pack('N',0x00280001). # DONT_CHANGEMAIL, IS_MAIL, RETURN_VIRUSES
+# pack('N', # path length
+# length("$TEMPBASE/amavis-yyyymmddTHHMMSS-xxxxx/parts/pxxx")).
+# '{}/*'. # path
+# pack('N',0). # content size
+# pack('N',0),
+# '/var/drweb/run/drwebd.sock',
+# # '/var/lib/amavis/var/run/drwebd.sock', # suitable for chroot
+# # '/usr/local/drweb/run/drwebd.sock', # FreeBSD drweb ports default
+# # '127.0.0.1:3000', # or over an inet socket
+# ],
+# qr/\A\x00[\x10\x11][\x00\x10]\x00/sm, # IS_CLEAN,EVAL_KEY; SKIPPED
+# qr/\A\x00[\x00\x01][\x00\x10][\x20\x40\x80]/sm,# KNOWN_V,UNKNOWN_V,V._MODIF
+# qr/\A.{12}(?:infected with )?([^\x00]+)\x00/sm,
+# ],
+# # NOTE: If using amavis-milter, change length to:
+# # length("$TEMPBASE/amavis-milter-xxxxxxxxxxxxxx/parts/pxxx").
+
+ ### http://www.kaspersky.com/ (kav4mailservers)
+# ['KasperskyLab AVP - aveclient',
+# ['/usr/local/kav/bin/aveclient','/usr/local/share/kav/bin/aveclient',
+# '/opt/kav/5.5/kav4mailservers/bin/aveclient','aveclient'],
+# '-p /var/run/aveserver -s {}/*',
+# [0,3,6,8], qr/\b(INFECTED|SUSPICION|SUSPICIOUS)\b/m,
+# qr/(?:INFECTED|WARNING|SUSPICION|SUSPICIOUS) (.+)/m,
+# ],
+ # NOTE: one may prefer [0],[2,3,4,5], depending on how suspicious,
+ # corrupted or protected archives are to be handled
+
+ ### http://www.kaspersky.com/
+# ['KasperskyLab AntiViral Toolkit Pro (AVP)', ['avp'],
+# '-* -P -B -Y -O- {}', [0,3,6,8], [2,4], # any use for -A -K ?
+# qr/infected: (.+)/m,
+# sub {chdir('/opt/AVP') or die "Can't chdir to AVP: $!"},
+# sub {chdir($TEMPBASE) or die "Can't chdir back to $TEMPBASE $!"},
+# ],
+
+  ### The kavdaemon and AVPDaemonClient have been removed from Kaspersky
+ ### products and replaced by aveserver and aveclient
+# ['KasperskyLab AVPDaemonClient',
+# [ '/opt/AVP/kavdaemon', 'kavdaemon',
+# '/opt/AVP/AvpDaemonClient', 'AvpDaemonClient',
+# '/opt/AVP/AvpTeamDream', 'AvpTeamDream',
+# '/opt/AVP/avpdc', 'avpdc' ],
+# "-f=$TEMPBASE {}", [0,8], [3,4,5,6], qr/infected: ([^\r\n]+)/m ],
+ # change the startup-script in /etc/init.d/kavd to:
+ # DPARMS="-* -Y -dl -f=/var/lib/amavis /var/lib/amavis"
+ # (or perhaps: DPARMS="-I0 -Y -* /var/lib/amavis" )
+ # adjusting /var/lib/amavis above to match your $TEMPBASE.
+ # The '-f=/var/lib/amavis' is needed if not running it as root, so it
+ # can find, read, and write its pid file, etc., see 'man kavdaemon'.
+ # defUnix.prf: there must be an entry "*/var/lib/amavis" (or whatever
+ # directory $TEMPBASE specifies) in the 'Names=' section.
+ # cd /opt/AVP/DaemonClients; configure; cd Sample; make
+ # cp AvpDaemonClient /opt/AVP/
+ # su - vscan -c "${PREFIX}/kavdaemon ${DPARMS}"
+
+ ### http://www.centralcommand.com/
+# ['CentralCommand Vexira (new) vascan',
+# ['vascan','/usr/lib/Vexira/vascan'],
+# "-a s --timeout=60 --temp=$TEMPBASE -y $QUARANTINEDIR ".
+# "--log=/var/log/vascan.log {}",
+# [0,3], [1,2,5],
+# qr/(?x)^\s* (?:virus|iworm|macro|mutant|sequence|trojan)\ found:\ ( [^\]\s']+ )\ \.\.\.\ /m ],
+ # Adjust the path of the binary and the virus database as needed.
+ # 'vascan' does not allow to have the temp directory to be the same as
+ # the quarantine directory, and the quarantine option can not be disabled.
+ # If $QUARANTINEDIR is not used, then another directory must be specified
+ # to appease 'vascan'. Move status 3 to the second list if password
+ # protected files are to be considered infected.
+
+ ### http://www.avira.com/
+ ### Avira AntiVir (formerly H+BEDV) or (old) CentralCommand Vexira Antivirus
+# ['Avira AntiVir', ['antivir','vexira'],
+# '--allfiles -noboot -nombr -rs -s -z {}', [0], qr/ALERT:|VIRUS:/m,
+# qr/(?x)^\s* (?: ALERT: \s* (?: \[ | [^']* ' ) |
+# (?i) VIRUS:\ .*?\ virus\ '?) ( [^\]\s']+ )/m ],
+ # NOTE: if you only have a demo version, remove -z and add 214, as in:
+ # '--allfiles -noboot -nombr -rs -s {}', [0,214], qr/ALERT:|VIRUS:/,
+
+ ### http://www.commandsoftware.com/
+# ['Command AntiVirus for Linux', 'csav',
+# '-all -archive -packed {}', [50], [51,52,53],
+# qr/Infection: (.+)/m ],
+
+ ### http://www.symantec.com/
+# ['Symantec CarrierScan via Symantec CommandLineScanner',
+# 'cscmdline', '-a scan -i 1 -v -s 127.0.0.1:7777 {}',
+# qr/^Files Infected:\s+0$/m, qr/^Infected\b/m,
+# qr/^(?:Info|Virus Name):\s+(.+)/m ],
+
+ ### http://www.symantec.com/
+# ['Symantec AntiVirus Scan Engine',
+# 'savsecls', '-server 127.0.0.1:7777 -mode scanrepair -details -verbose {}',
+# [0], qr/^Infected\b/m,
+# qr/^(?:Info|Virus Name):\s+(.+)/m ],
+ # NOTE: check options and patterns to see which entry better applies
+
+# ### http://www.f-secure.com/products/anti-virus/ version 4.65
+# ['F-Secure Antivirus for Linux servers',
+# ['/opt/f-secure/fsav/bin/fsav', 'fsav'],
+# '--delete=no --disinf=no --rename=no --archive=yes --auto=yes '.
+# '--dumb=yes --list=no --mime=yes {}', [0], [3,6,8],
+# qr/(?:infection|Infected|Suspected): (.+)/m ],
+
+ ### http://www.f-secure.com/products/anti-virus/ version 5.52
+# ['F-Secure Antivirus for Linux servers',
+# ['/opt/f-secure/fsav/bin/fsav', 'fsav'],
+# '--virus-action1=report --archive=yes --auto=yes '.
+# '--dumb=yes --list=no --mime=yes {}', [0], [3,4,6,8],
+# qr/(?:infection|Infected|Suspected|Riskware): (.+)/m ],
+ # NOTE: internal archive handling may be switched off by '--archive=no'
+ # to prevent fsav from exiting with status 9 on broken archives
+
+# ### http://www.avast.com/
+# ['avast! Antivirus daemon',
+# \&ask_daemon, # greets with 220, terminate with QUIT
+# ["SCAN {}\015\012QUIT\015\012", '/var/run/avast4/mailscanner.sock'],
+# qr/\t\[\+\]/m, qr/\t\[L\]\t/m, qr/\t\[L\]\t([^[ \t\015\012]+)/m ],
+
+# ### http://www.avast.com/
+# ['avast! Antivirus - Client/Server Version', 'avastlite',
+# '-a /var/run/avast4/mailscanner.sock -n {}', [0], [1],
+# qr/\t\[L\]\t([^[ \t\015\012]+)/m ],
+
+# ['CAI InoculateIT', 'inocucmd', # retired product
+# '-sec -nex {}', [0], [100],
+# qr/was infected by virus (.+)/m ],
+ # see: http://www.flatmtn.com/computer/Linux-Antivirus_CAI.html
+
+ ### http://www3.ca.com/Solutions/Product.asp?ID=156 (ex InoculateIT)
+# ['CAI eTrust Antivirus', 'etrust-wrapper',
+# '-arc -nex -spm h {}', [0], [101],
+# qr/is infected by virus: (.+)/m ],
+ # NOTE: requires suid wrapper around inocmd32; consider flag: -mod reviewer
+ # see http://marc.theaimsgroup.com/?l=amavis-user&m=109229779912783
+
+ ### http://mks.com.pl/english.html
+# ['MkS_Vir for Linux (beta)', ['mks32','mks'],
+# '-s {}/*', [0], [1,2],
+# qr/--[ \t]*(.+)/m ],
+
+ ### http://mks.com.pl/english.html
+# ['MkS_Vir daemon', 'mksscan',
+# '-s -q {}', [0], [1..7],
+# qr/^... (\S+)/m ],
+
+# ### http://www.nod32.com/, version v2.52 (old)
+# ['ESET NOD32 for Linux Mail servers',
+# ['/opt/eset/nod32/bin/nod32cli', 'nod32cli'],
+# '--subdir --files -z --sfx --rtp --adware --unsafe --pattern --heur '.
+# '-w -a --action-on-infected=accept --action-on-uncleanable=accept '.
+# '--action-on-notscanned=accept {}',
+# [0,3], [1,2], qr/virus="([^"]+)"/m ],
+
+# ### http://www.eset.com/, version v2.7 (old)
+# ['ESET NOD32 Linux Mail Server - command line interface',
+# ['/usr/bin/nod32cli', '/opt/eset/nod32/bin/nod32cli', 'nod32cli'],
+# '--subdir {}', [0,3], [1,2], qr/virus="([^"]+)"/m ],
+
+# ### http://www.eset.com/, version 2.71.12
+# ['ESET Software ESETS Command Line Interface',
+# ['/usr/bin/esets_cli', 'esets_cli'],
+# '--subdir {}', [0], [1,2,3], qr/virus="([^"]+)"/m ],
+
+ ### http://www.eset.com/, version 3.0
+# ['ESET Software ESETS Command Line Interface',
+# ['/usr/bin/esets_cli', 'esets_cli'],
+# '--subdir {}', [0], [1,2,3],
+# qr/:\s*action="(?!accepted)[^"]*"\n.*:\s*virus="([^"]*)"/m ],
+
+ ## http://www.nod32.com/, NOD32LFS version 2.5 and above
+# ['ESET NOD32 for Linux File servers',
+# ['/opt/eset/nod32/sbin/nod32','nod32'],
+# '--files -z --mail --sfx --rtp --adware --unsafe --pattern --heur '.
+# '-w -a --action=1 -b {}',
+# [0], [1,10], qr/^object=.*, virus="(.*?)",/m ],
+
+# Experimental, based on posting from Rado Dibarbora (Dibo) on 2002-05-31
+# ['ESET Software NOD32 Client/Server (NOD32SS)',
+# \&ask_daemon2, # greets with 200, persistent, terminate with QUIT
+# ["SCAN {}/*\r\n", '127.0.0.1:8448' ],
+# qr/^200 File OK/m, qr/^201 /m, qr/^201 (.+)/m ],
+
+ ### http://www.norman.com/products_nvc.shtml
+# ['Norman Virus Control v5 / Linux', 'nvcc',
+# '-c -l:0 -s -u -temp:$TEMPBASE {}', [0,10,11], [1,2,14],
+# qr/(?i).* virus in .* -> \'(.+)\'/m ],
+
+ ### http://www.pandasoftware.com/
+# ['Panda CommandLineSecure 9 for Linux',
+# ['/opt/pavcl/usr/bin/pavcl','pavcl'],
+# '-auto -aex -heu -cmp -nbr -nor -nos -eng -nob {}',
+# qr/Number of files infected[ .]*: 0+(?!\d)/m,
+# qr/Number of files infected[ .]*: 0*[1-9]/m,
+# qr/Found virus :\s*(\S+)/m ],
+ # NOTE: for efficiency, start the Panda in resident mode with 'pavcl -tsr'
+ # before starting amavisd - the bases are then loaded only once at startup.
+ # To reload bases in a signature update script:
+ # /opt/pavcl/usr/bin/pavcl -tsr -ulr; /opt/pavcl/usr/bin/pavcl -tsr
+ # Please review other options of pavcl, for example:
+ # -nomalw, -nojoke, -nodial, -nohackt, -nospyw, -nocookies
+
+# ### http://www.pandasoftware.com/
+# ['Panda Antivirus for Linux', ['pavcl'],
+# '-TSR -aut -aex -heu -cmp -nbr -nor -nso -eng {}',
+# [0], [0x10, 0x30, 0x50, 0x70, 0x90, 0xB0, 0xD0, 0xF0],
+# qr/Found virus :\s*(\S+)/m ],
+
+# GeCAD AV technology is acquired by Microsoft; RAV has been discontinued.
+# Check your RAV license terms before fiddling with the following two lines!
+# ['GeCAD RAV AntiVirus 8', 'ravav',
+# '--all --archive --mail {}', [1], [2,3,4,5], qr/Infected: (.+)/m ],
+# # NOTE: the command line switches changed with scan engine 8.5 !
+# # (btw, assigning stdin to /dev/null causes RAV to fail)
+
+ ### http://www.nai.com/
+# ['NAI McAfee AntiVirus (uvscan)', 'uvscan',
+# '--secure -rv --mime --summary --noboot - {}', [0], [13],
+# qr/(?x) Found (?:
+# \ the\ (.+)\ (?:virus|trojan) |
+# \ (?:virus|trojan)\ or\ variant\ ([^ ]+) |
+# :\ (.+)\ NOT\ a\ virus)/m,
+ # sub {$ENV{LD_PRELOAD}='/lib/libc.so.6'},
+ # sub {delete $ENV{LD_PRELOAD}},
+# ],
+ # NOTE1: with RH9: force the dynamic linker to look at /lib/libc.so.6 before
+ # anything else by setting environment variable LD_PRELOAD=/lib/libc.so.6
+ # and then clear it when finished to avoid confusing anything else.
+ # NOTE2: to treat encrypted files as viruses replace the [13] with:
+ # qr/^\s{5,}(Found|is password-protected|.*(virus|trojan))/
+
+ ### http://www.virusbuster.hu/en/
+# ['VirusBuster', ['vbuster', 'vbengcl'],
+# "{} -ss -i '*' -log=$MYHOME/vbuster.log", [0], [1],
+# qr/: '(.*)' - Virus/m ],
+ # VirusBuster Ltd. does not support the daemon version for the workstation
+ # engine (vbuster-eng-1.12-linux-i386-libc6.tgz) any longer. The names of
+ # binaries, some parameters AND return codes have changed (from 3 to 1).
+ # See also the new Vexira entry 'vascan' which is possibly related.
+
+# ### http://www.virusbuster.hu/en/
+# ['VirusBuster (Client + Daemon)', 'vbengd',
+# '-f -log scandir {}', [0], [3],
+# qr/Virus found = (.*);/m ],
+# # HINT: for an infected file it always returns 3,
+# # although the man-page tells a different story
+
+ ### http://www.cyber.com/
+# ['CyberSoft VFind', 'vfind',
+# '--vexit {}/*', [0], [23], qr/##==>>>> VIRUS ID: CVDL (.+)/m,
+ # sub {$ENV{VSTK_HOME}='/usr/lib/vstk'},
+# ],
+
+ ### http://www.avast.com/
+# ['avast! Antivirus', ['/usr/bin/avastcmd','avastcmd'],
+# '-a -i -n -t=A {}', [0], [1], qr/\binfected by:\s+([^ \t\n\[\]]+)/m ],
+
+ ### http://www.ikarus-software.com/
+# ['Ikarus AntiVirus for Linux', 'ikarus',
+# '{}', [0], [40], qr/Signature (.+) found/m ],
+
+ ### http://www.bitdefender.com/
+# ['BitDefender', 'bdscan', # new version
+# '--action=ignore --no-list {}', qr/^Infected files\s*:\s*0+(?!\d)/m,
+# qr/^(?:Infected files|Identified viruses|Suspect files)\s*:\s*0*[1-9]/m,
+# qr/(?:suspected|infected)\s*:\s*(.*)(?:\033|$)/m ],
+
+ ### http://www.bitdefender.com/
+# ['BitDefender', 'bdc', # old version
+# '--arc --mail {}', qr/^Infected files *:0+(?!\d)/m,
+# qr/^(?:Infected files|Identified viruses|Suspect files) *:0*[1-9]/m,
+# qr/(?:suspected|infected): (.*)(?:\033|$)/m ],
+ # consider also: --all --nowarn --alev=15 --flev=15. The --all argument may
+ # not apply to your version of bdc, check documentation and see 'bdc --help'
+
+ ### ArcaVir for Linux and Unix http://www.arcabit.pl/
+# ['ArcaVir for Linux', ['arcacmd','arcacmd.static'],
+# '-v 1 -summary 0 -s {}', [0], [1,2],
+# qr/(?:VIR|WIR):[ \t]*(.+)/m ],
+
+# ### a generic SMTP-client interface to a SMTP-based virus scanner
+# ['av_smtp', \&ask_av_smtp,
+# ['{}', 'smtp:[127.0.0.1]:5525', 'dummy@localhost'],
+# qr/^2/, qr/^5/, qr/^\s*(.*?)\s*$/m ],
+
+# ['File::Scan', sub {Amavis::AV::ask_av(sub{
+# use File::Scan; my($fn)=@_;
+# my($f)=File::Scan->new(max_txt_size=>0, max_bin_size=>0);
+# my($vname) = $f->scan($fn);
+# $f->error ? (2,"Error: ".$f->error)
+# : ($vname ne '') ? (1,"$vname FOUND") : (0,"Clean")}, @_) },
+# ["{}/*"], [0], [1], qr/^(.*) FOUND$/m ],
+
+# ### fully-fledged checker for JPEG marker segments of invalid length
+# ['check-jpeg',
+# sub { use JpegTester (); Amavis::AV::ask_av(\&JpegTester::test_jpeg, @_) },
+# ["{}/*"], undef, [1], qr/^(bad jpeg: .*)$/m ],
+# # NOTE: place file JpegTester.pm somewhere where Perl can find it,
+# # for example in /usr/local/lib/perl5/site_perl
+
+ ['always-clean', sub {0}],
+);
+
+
+@av_scanners_backup = (
+
+ ### http://www.clamav.net/ - backs up clamd or Mail::ClamAV
+ ['ClamAV-clamscan', 'clamscan',
+ "--stdout --no-summary -r --tempdir=$TEMPBASE {}",
+ [0], qr/:.*\sFOUND$/m, qr/^.*?: (?!Infected Archive)(.*) FOUND$/m ],
+
+ ### http://www.f-prot.com/ - backs up F-Prot Daemon, V6
+ ['F-PROT Antivirus for UNIX', ['fpscan'],
+ '--report --mount --adware {}', # consider: --applications -s 4 -u 3 -z 10
+ [0,8,64], [1,2,3, 4+1,4+2,4+3, 8+1,8+2,8+3, 12+1,12+2,12+3],
+ qr/^\[Found\s+[^\]]*\]\s+<([^ \t(>]*)/m ],
+
+ ### http://www.f-prot.com/ - backs up F-Prot Daemon (old)
+ ['FRISK F-Prot Antivirus', ['f-prot','f-prot.sh'],
+ '-dumb -archive -packed {}', [0,8], [3,6], # or: [0], [3,6,8],
+ qr/(?:Infection:|security risk named) (.+)|\s+contains\s+(.+)$/m ],
+
+ ### http://www.trendmicro.com/ - backs up Trophie
+ ['Trend Micro FileScanner', ['/etc/iscan/vscan','vscan'],
+ '-za -a {}', [0], qr/Found virus/m, qr/Found virus (.+) in/m ],
+
+ ### http://www.sald.com/, http://drweb.imshop.de/ - backs up DrWebD
+ ['drweb - DrWeb Antivirus', # security LHA hole in Dr.Web 4.33 and earlier
+ ['/usr/local/drweb/drweb', '/opt/drweb/drweb', 'drweb'],
+ '-path={} -al -go -ot -cn -upn -ok-',
+ [0,32], [1,9,33], qr' infected (?:with|by)(?: virus)? (.*)$'m ],
+
+ ### http://www.kaspersky.com/
+ ['Kaspersky Antivirus v5.5',
+ ['/opt/kaspersky/kav4fs/bin/kav4fs-kavscanner',
+ '/opt/kav/5.5/kav4unix/bin/kavscanner',
+ '/opt/kav/5.5/kav4mailservers/bin/kavscanner', 'kavscanner'],
+ '-i0 -xn -xp -mn -R -ePASBME {}/*', [0,10,15], [5,20,21,25],
+ qr/(?:INFECTED|WARNING|SUSPICION|SUSPICIOUS) (.*)/m,
+# sub {chdir('/opt/kav/bin') or die "Can't chdir to kav: $!"},
+# sub {chdir($TEMPBASE) or die "Can't chdir back to $TEMPBASE $!"},
+ ],
+
+# Commented out because the name 'sweep' clashes with Debian and FreeBSD
+# package/port of an audio editor. Make sure the correct 'sweep' is found
+# in the path when enabling.
+#
+# ### http://www.sophos.com/ - backs up Sophie or SAVI-Perl
+# ['Sophos Anti Virus (sweep)', 'sweep',
+# '-nb -f -all -rec -ss -sc -archive -cab -mime -oe -tnef '.
+# '--no-reset-atime {}',
+# [0,2], qr/Virus .*? found/m,
+# qr/^>>> Virus(?: fragment)? '?(.*?)'? found/m,
+# ],
+# # other options to consider: -idedir=/usr/local/sav
+
+# Always succeeds and considers mail clean.
+# Potentially useful when all other scanners fail and it is desirable
+# to let mail continue to flow with no virus checking (when uncommented).
+ ['always-clean', sub {0}],
+
+);
+
+
+1;  # ensure a defined return value
diff --git a/modules/apache/manifests/base.pp b/modules/apache/manifests/base.pp
new file mode 100644
index 00000000..4e1d6ed4
--- /dev/null
+++ b/modules/apache/manifests/base.pp
@@ -0,0 +1,37 @@
+class apache::base {
+ include apache::var
+
+ $conf_d = '/etc/httpd/conf/conf.d'
+
+ package { 'apache':
+ alias => 'apache-server',
+ }
+
+ service { 'httpd':
+ alias => 'apache',
+ subscribe => [ Package['apache-server'] ],
+ }
+
+ exec { 'apachectl configtest':
+ refreshonly => true,
+ notify => Service['apache'],
+ }
+
+ apache::config {
+ "${conf_d}/no_hidden_file_dir.conf":
+ content => template('apache/no_hidden_file_dir.conf'),
+ require => Package[$apache::var::pkg_conf];
+ "${conf_d}/customization.conf":
+ content => template('apache/customization.conf'),
+ require => Package[$apache::var::pkg_conf];
+ '/etc/httpd/conf/vhosts.d/00_default_vhosts.conf':
+ content => template('apache/00_default_vhosts.conf'),
+ require => Package[$apache::var::pkg_conf];
+ '/etc/httpd/conf/modules.d/50_mod_deflate.conf':
+ content => template('apache/50_mod_deflate.conf');
+ }
+
+ file { '/etc/logrotate.d/httpd':
+ content => template('apache/logrotate')
+ }
+}
diff --git a/modules/apache/manifests/config.pp b/modules/apache/manifests/config.pp
new file mode 100644
index 00000000..0ff0962c
--- /dev/null
+++ b/modules/apache/manifests/config.pp
@@ -0,0 +1,6 @@
+define apache::config($content) {
+ file { $name:
+ content => $content,
+ notify => Exec['apachectl configtest'],
+ }
+}
diff --git a/modules/apache/manifests/cve-2011-3192.pp b/modules/apache/manifests/cve-2011-3192.pp
new file mode 100644
index 00000000..1e39ac04
--- /dev/null
+++ b/modules/apache/manifests/cve-2011-3192.pp
@@ -0,0 +1,9 @@
+class apache::cve-2011-3192 {
+ include apache::base
+ # temporary protection against CVE-2011-3192
+ # https://httpd.apache.org/security/CVE-2011-3192.txt
+ apache::config {
+ "${apache::base::conf_d}/CVE-2011-3192.conf":
+ content => template('apache/CVE-2011-3192.conf'),
+ }
+}
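
For illustration only (not part of the commit): a minimal sketch of how a node might pull in this temporary mitigation. The node name is invented; the class name mirrors the manifest above.

    # hypothetical node definition; only the class name comes from the manifest above
    node 'webserver.example.org' {
      include apache::cve-2011-3192
    }
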
diff --git a/modules/apache/manifests/init.pp b/modules/apache/manifests/init.pp
index e8f7a575..40779d4d 100644
--- a/modules/apache/manifests/init.pp
+++ b/modules/apache/manifests/init.pp
@@ -1,156 +1,25 @@
class apache {
-
- class base {
- package { "apache-mpm-prefork":
- alias => apache,
- ensure => installed
- }
-
- service { httpd:
- alias => apache,
- ensure => running,
- subscribe => [ Package['apache-mpm-prefork'] ],
- }
-
- file { "customization.conf":
- ensure => present,
- path => "/etc/httpd/conf.d/customization.conf",
- content => template("apache/customization.conf"),
- require => Package["apache"],
- notify => Service["apache"],
- owner => root,
- group => root,
- mode => 644,
- }
-
- file { "00_default_vhosts.conf":
- path => "/etc/httpd/conf/vhosts.d/00_default_vhosts.conf",
- ensure => "present",
- owner => root,
- group => root,
- mode => 644,
- notify => Service['apache'],
- content => template("apache/00_default_vhosts.conf")
- }
- }
-
- class mod_php inherits base {
- package { "apache-mod_php":
- ensure => installed
- }
- }
-
- class mod_perl inherits base {
- package { "apache-mod_perl":
- ensure => installed
- }
- }
-
- class mod_fcgid inherits base {
- package { "apache-mod_fcgid":
- ensure => installed
- }
- }
-
- class mod_fastcgi inherits base {
- package { "apache-mod_fastcgi":
- ensure => installed
- }
- }
-
- class mod_ssl inherits base {
- package { "apache-mod_ssl":
- ensure => installed
- }
- }
-
- class mod_wsgi inherits base {
- package { "apache-mod_wsgi":
- ensure => installed
- }
-
- file { "/usr/local/lib/wsgi":
- ensure => directory,
- owner => root,
- group => root,
- mode => 644,
+ define vhost_simple($location) {
+ include apache::base
+ apache::vhost::base { $name:
+ location => $location,
}
- }
-
- define vhost_redirect_ssl() {
- file { "redirect_ssl_$name.conf":
- path => "/etc/httpd/conf/vhosts.d/redirect_ssl_$name.conf",
- ensure => "present",
- owner => root,
- group => root,
- mode => 644,
- notify => Service['apache'],
- content => template("apache/vhost_ssl_redirect.conf")
+ apache::vhost::base { "ssl_${name}":
+ vhost => $name,
+ use_ssl => true,
+ location => $location,
}
}
- define vhost_catalyst_app($script, $location = '', $process = 4, $use_ssl = false) {
-
- include apache::mod_fastcgi
-
- file { "$name.conf":
- path => "/etc/httpd/conf/vhosts.d/$name.conf",
- ensure => "present",
- owner => root,
- group => root,
- mode => 644,
- notify => Service['apache'],
- content => template("apache/vhost_catalyst_app.conf")
+ define vhost_redirect($url,
+ $vhost = false,
+ $use_ssl = false) {
+ include apache::base
+ apache::vhost::base { $name:
+ use_ssl => $use_ssl,
+ vhost => $vhost,
+ content => template("apache/vhost_redirect.conf"),
}
}
- define vhost_django_app($module, $module_path = '/usr/share') {
- include apache::mod_wsgi
-
- file { "$name.conf":
- path => "/etc/httpd/conf/vhosts.d/$name.conf",
- ensure => "present",
- owner => root,
- group => root,
- mode => 644,
- notify => Service['apache'],
- content => template("apache/vhost_django_app.conf")
- }
-
- # fichier django wsgi
- file { "$name.wsgi":
- path => "/usr/local/lib/wsgi/$name.wsgi",
- ensure => "present",
- owner => root,
- group => root,
- mode => 755,
- notify => Service['apache'],
- content => template("apache/django.wsgi")
- }
- }
-
- define vhost_other_app($vhost_file) {
- file { "$name.conf":
- path => "/etc/httpd/conf/vhosts.d/$name.conf",
- ensure => "present",
- owner => root,
- group => root,
- mode => 644,
- notify => Service['apache'],
- content => template($vhost_file)
- }
- }
-
- define webapp_other($webapp_file) {
- $webappname = $name
- file { "webapp_$name.conf":
- path => "/etc/httpd/conf/webapps.d/$webappname.conf",
- ensure => "present",
- owner => root,
- group => root,
- mode => 644,
- notify => Service['apache'],
- content => template($webapp_file)
- }
- }
}
diff --git a/modules/apache/manifests/mod/fastcgi.pp b/modules/apache/manifests/mod/fastcgi.pp
new file mode 100644
index 00000000..2b421291
--- /dev/null
+++ b/modules/apache/manifests/mod/fastcgi.pp
@@ -0,0 +1,5 @@
+class apache::mod::fastcgi {
+ include apache::base
+ package { 'apache-mod_fastcgi': }
+}
+
diff --git a/modules/apache/manifests/mod/fcgid.pp b/modules/apache/manifests/mod/fcgid.pp
new file mode 100644
index 00000000..b8186a64
--- /dev/null
+++ b/modules/apache/manifests/mod/fcgid.pp
@@ -0,0 +1,11 @@
+class apache::mod::fcgid {
+ include apache::base
+ package { 'apache-mod_fcgid': }
+
+ file { 'urlescape':
+ path => '/usr/local/bin/urlescape',
+ mode => '0755',
+ notify => Service['apache'],
+ content => template('apache/urlescape'),
+ }
+}
diff --git a/modules/apache/manifests/mod/geoip.pp b/modules/apache/manifests/mod/geoip.pp
new file mode 100644
index 00000000..7f5516bc
--- /dev/null
+++ b/modules/apache/manifests/mod/geoip.pp
@@ -0,0 +1,4 @@
+class apache::mod::geoip {
+ include apache::base
+ package { 'apache-mod_geoip': }
+}
diff --git a/modules/apache/manifests/mod/perl.pp b/modules/apache/manifests/mod/perl.pp
new file mode 100644
index 00000000..2c52bf50
--- /dev/null
+++ b/modules/apache/manifests/mod/perl.pp
@@ -0,0 +1,4 @@
+class apache::mod::perl {
+ include apache::base
+ package { 'apache-mod_perl': }
+}
diff --git a/modules/apache/manifests/mod/php.pp b/modules/apache/manifests/mod/php.pp
new file mode 100644
index 00000000..2c8d6733
--- /dev/null
+++ b/modules/apache/manifests/mod/php.pp
@@ -0,0 +1,10 @@
+class apache::mod::php {
+ include apache::base
+ $php_date_timezone = 'UTC'
+
+ package { 'apache-mod_php': }
+
+ apache::config { "${apache::base::conf_d}/mod_php.conf":
+ content => template('apache/mod/php.conf'),
+ }
+}
diff --git a/modules/apache/manifests/mod/proxy.pp b/modules/apache/manifests/mod/proxy.pp
new file mode 100644
index 00000000..80180d62
--- /dev/null
+++ b/modules/apache/manifests/mod/proxy.pp
@@ -0,0 +1,4 @@
+class apache::mod::proxy {
+ include apache::base
+ package { 'apache-mod_proxy': }
+}
diff --git a/modules/apache/manifests/mod/public_html.pp b/modules/apache/manifests/mod/public_html.pp
new file mode 100644
index 00000000..b5691b53
--- /dev/null
+++ b/modules/apache/manifests/mod/public_html.pp
@@ -0,0 +1,4 @@
+class apache::mod::public_html {
+ include apache::base
+ package { 'apache-mod_public_html': }
+}
diff --git a/modules/apache/manifests/mod/ssl.pp b/modules/apache/manifests/mod/ssl.pp
new file mode 100644
index 00000000..ab3d24e4
--- /dev/null
+++ b/modules/apache/manifests/mod/ssl.pp
@@ -0,0 +1,20 @@
+class apache::mod::ssl {
+ include apache::base
+ file { '/etc/ssl/apache/':
+ ensure => directory
+ }
+
+ openssl::self_signed_cert{ 'localhost':
+ directory => '/etc/ssl/apache/',
+ before => Apache::Config['/etc/httpd/conf/vhosts.d/01_default_ssl_vhost.conf'],
+ }
+
+ package { 'apache-mod_ssl': }
+
+ apache::config {
+ '/etc/httpd/conf/vhosts.d/01_default_ssl_vhost.conf':
+ content => template('apache/01_default_ssl_vhost.conf');
+ "${apache::base::conf_d}/ssl_vhost.conf":
+ content => template('apache/mod/ssl_vhost.conf');
+ }
+}
diff --git a/modules/apache/manifests/mod/wsgi.pp b/modules/apache/manifests/mod/wsgi.pp
new file mode 100644
index 00000000..7f4fb719
--- /dev/null
+++ b/modules/apache/manifests/mod/wsgi.pp
@@ -0,0 +1,12 @@
+class apache::mod::wsgi {
+ include apache::base
+ package { 'apache-mod_wsgi': }
+
+ file { '/usr/local/lib/wsgi':
+ ensure => directory,
+ }
+
+ apache::config { "${apache::base::conf_d}/mod_wsgi.conf":
+ content => template('apache/mod/wsgi.conf'),
+ }
+}
diff --git a/modules/apache/manifests/var.pp b/modules/apache/manifests/var.pp
new file mode 100644
index 00000000..4a6d68eb
--- /dev/null
+++ b/modules/apache/manifests/var.pp
@@ -0,0 +1,12 @@
+# $httpdlogs_rotate:
+#   number of times the log files are rotated before being removed
+# $default_vhost_redirect:
+# URL to redirect to in case of unknown vhost
+class apache::var(
+ $httpdlogs_rotate = '24',
+ $apache_user = 'apache',
+ $apache_group = 'apache',
+ $default_vhost_redirect = ''
+) {
+ $pkg_conf = 'apache'
+}
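
For illustration only: a sketch of overriding the apache::var defaults above with a parameterised class declaration; the values shown are invented.

    # hypothetical override of the apache::var defaults
    class { 'apache::var':
      httpdlogs_rotate       => '12',                       # keep 12 rotations instead of 24
      default_vhost_redirect => 'https://www.example.org/', # invented redirect target
    }
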
diff --git a/modules/apache/manifests/vhost/base.pp b/modules/apache/manifests/vhost/base.pp
new file mode 100644
index 00000000..27a19998
--- /dev/null
+++ b/modules/apache/manifests/vhost/base.pp
@@ -0,0 +1,50 @@
+define apache::vhost::base ($content = '',
+ $location = '/dev/null',
+ $use_ssl = false,
+ $vhost = false,
+ $aliases = {},
+ $server_aliases = [],
+ $access_logfile = false,
+ $error_logfile = false,
+ $options = [],
+ $enable_public_html = false,
+ $enable_location = true) {
+ include apache::base
+ $httpd_logdir = '/var/log/httpd'
+ $filename = "${name}.conf"
+
+ if ! $vhost {
+ $real_vhost = $name
+ } else {
+ $real_vhost = $vhost
+ }
+
+ if ! $access_logfile {
+ $real_access_logfile = "${httpd_logdir}/${real_vhost}-access_log"
+ } else {
+ $real_access_logfile = $access_logfile
+ }
+ if ! $error_logfile {
+ $real_error_logfile = "${httpd_logdir}/${real_vhost}-error_log"
+ } else {
+ $real_error_logfile = $error_logfile
+ }
+
+ if $use_ssl {
+ include apache::mod::ssl
+ if $wildcard_sslcert != true {
+ openssl::self_signed_cert{ $real_vhost:
+ directory => '/etc/ssl/apache/',
+ before => Apache::Config["/etc/httpd/conf/vhosts.d/${filename}"],
+ }
+ }
+ }
+
+ if $enable_public_html {
+ include apache::mod::public_html
+ }
+
+ apache::config { "/etc/httpd/conf/vhosts.d/${filename}":
+ content => template('apache/vhost_base.conf')
+ }
+}
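
For illustration only: a sketch of a vhost declared with this define, using invented names. When no logfiles are given, they fall back to /var/log/httpd/<vhost>-access_log and -error_log as computed above.

    # hypothetical vhost built on apache::vhost::base
    apache::vhost::base { 'www.example.org':
      location       => '/var/www/vhosts/www.example.org',
      server_aliases => ['example.org'],
      options        => ['FollowSymLinks'],
      use_ssl        => true,   # also pulls in apache::mod::ssl per the define above
    }
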
diff --git a/modules/apache/manifests/vhost/catalyst_app.pp b/modules/apache/manifests/vhost/catalyst_app.pp
new file mode 100644
index 00000000..1ce40747
--- /dev/null
+++ b/modules/apache/manifests/vhost/catalyst_app.pp
@@ -0,0 +1,24 @@
+define apache::vhost::catalyst_app( $script,
+ $location = '',
+ $process = 4,
+ $use_ssl = false,
+ $aliases = {},
+ $vhost = false) {
+ include apache::mod::fcgid
+ if ($location) {
+ $aliases['/static'] = "${location}/root/static"
+ }
+
+ $script_aliases = {
+ '/' => "$script/",
+ }
+
+ apache::vhost::base { $name:
+ vhost => $vhost,
+ use_ssl => $use_ssl,
+ content => template('apache/vhost_fcgid.conf'),
+ aliases => $aliases,
+ }
+}
+
+
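For illustration only: a hypothetical Catalyst application wired through mod_fcgid with this define; the script and location paths are invented.

    # hypothetical Catalyst application
    apache::vhost::catalyst_app { 'app.example.org':
      script   => '/usr/local/bin/app_fastcgi.pl',  # FastCGI entry point (invented)
      location => '/usr/share/app',                 # /static gets aliased to <location>/root/static
      process  => 2,
    }
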
diff --git a/modules/apache/manifests/vhost/django_app.pp b/modules/apache/manifests/vhost/django_app.pp
new file mode 100644
index 00000000..91974acd
--- /dev/null
+++ b/modules/apache/manifests/vhost/django_app.pp
@@ -0,0 +1,22 @@
+define apache::vhost::django_app ($module = false,
+ $module_path = false,
+ $use_ssl = false,
+ $aliases= {}) {
+ include apache::mod::wsgi
+ apache::vhost::base { $name:
+ use_ssl => $use_ssl,
+ content => template('apache/vhost_django_app.conf'),
+ aliases => $aliases,
+ }
+
+  # 'module' is a Ruby reserved keyword and cannot be used in templates
+ $django_module = $module
+ file { "${name}.wsgi":
+ path => "/usr/local/lib/wsgi/${name}.wsgi",
+ mode => '0755',
+ notify => Service['apache'],
+ content => template('apache/django.wsgi'),
+ }
+}
+
+
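For illustration only: a hypothetical Django application using this define; django.wsgi iterates over module_path, so an array of invented paths is passed.

    # hypothetical Django application
    apache::vhost::django_app { 'djangoapp.example.org':
      module      => 'djangoapp',              # becomes DJANGO_SETTINGS_MODULE 'djangoapp.settings'
      module_path => ['/usr/share/djangoapp'], # invented path, appended to sys.path by django.wsgi
    }
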
diff --git a/modules/apache/manifests/vhost/other_app.pp b/modules/apache/manifests/vhost/other_app.pp
new file mode 100644
index 00000000..f5a71574
--- /dev/null
+++ b/modules/apache/manifests/vhost/other_app.pp
@@ -0,0 +1,6 @@
+define apache::vhost::other_app($vhost_file) {
+ include apache::base
+ apache::config { "/etc/httpd/conf/vhosts.d/${name}.conf":
+ content => template($vhost_file),
+ }
+}
diff --git a/modules/apache/manifests/vhost/redirect_ssl.pp b/modules/apache/manifests/vhost/redirect_ssl.pp
new file mode 100644
index 00000000..22a4d4f6
--- /dev/null
+++ b/modules/apache/manifests/vhost/redirect_ssl.pp
@@ -0,0 +1,6 @@
+define apache::vhost::redirect_ssl() {
+ apache::vhost::base { "redirect_ssl_${name}":
+ vhost => $name,
+ content => template('apache/vhost_ssl_redirect.conf')
+ }
+}
diff --git a/modules/apache/manifests/vhost/reverse_proxy.pp b/modules/apache/manifests/vhost/reverse_proxy.pp
new file mode 100644
index 00000000..a32aaff0
--- /dev/null
+++ b/modules/apache/manifests/vhost/reverse_proxy.pp
@@ -0,0 +1,11 @@
+define apache::vhost::reverse_proxy($url,
+ $vhost = false,
+ $use_ssl = false,
+ $content = '') {
+ include apache::mod::proxy
+ apache::vhost::base { $name:
+ use_ssl => $use_ssl,
+ vhost => $vhost,
+ content => template('apache/vhost_reverse_proxy.conf')
+ }
+}
diff --git a/modules/apache/manifests/vhost/wsgi.pp b/modules/apache/manifests/vhost/wsgi.pp
new file mode 100644
index 00000000..291c6d71
--- /dev/null
+++ b/modules/apache/manifests/vhost/wsgi.pp
@@ -0,0 +1,10 @@
+define apache::vhost::wsgi ($wsgi_path,
+ $aliases = {},
+ $server_aliases = []) {
+ include apache::mod::wsgi
+ apache::vhost::base { $name:
+ aliases => $aliases,
+ server_aliases => $server_aliases,
+ content => template('apache/vhost_wsgi.conf'),
+ }
+}
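
For illustration only: a hypothetical WSGI vhost using this define; the script path is invented.

    # hypothetical WSGI application
    apache::vhost::wsgi { 'wsgiapp.example.org':
      wsgi_path => '/usr/local/lib/wsgi/wsgiapp.wsgi',  # invented script path
    }
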
diff --git a/modules/apache/manifests/webapp_other.pp b/modules/apache/manifests/webapp_other.pp
new file mode 100644
index 00000000..147a2370
--- /dev/null
+++ b/modules/apache/manifests/webapp_other.pp
@@ -0,0 +1,7 @@
+define apache::webapp_other($webapp_file) {
+ include apache::base
+ $webappname = $name
+ apache::config { "/etc/httpd/conf/webapps.d/${webappname}.conf":
+ content => template($webapp_file),
+ }
+}
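
For illustration only: a hypothetical webapp snippet dropped into /etc/httpd/conf/webapps.d via this define; the module and template names are invented.

    # hypothetical webapp configuration
    apache::webapp_other { 'examplewebapp':
      webapp_file => 'examplemodule/webapp_examplewebapp.conf',  # invented template reference
    }
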
diff --git a/modules/apache/templates/00_default_vhosts.conf b/modules/apache/templates/00_default_vhosts.conf
index 25f59b5e..9a5f586c 100644
--- a/modules/apache/templates/00_default_vhosts.conf
+++ b/modules/apache/templates/00_default_vhosts.conf
@@ -3,5 +3,13 @@
<Location />
Allow from all
</Location>
- Redirect / http://www.<%= domain %>/
+ <%-
+ default_redirect = scope.lookupvar('apache::var::default_vhost_redirect')
+ if default_redirect == ''
+ -%>
+ Redirect 404 /
+ ErrorDocument 404 "Page Not Found"
+ <%- else -%>
+ Redirect / <%= default_redirect %>
+ <%- end -%>
</VirtualHost>
diff --git a/modules/apache/templates/01_default_ssl_vhost.conf b/modules/apache/templates/01_default_ssl_vhost.conf
new file mode 100644
index 00000000..323bf145
--- /dev/null
+++ b/modules/apache/templates/01_default_ssl_vhost.conf
@@ -0,0 +1,169 @@
+<IfDefine HAVE_SSL>
+ <IfModule !mod_ssl.c>
+ LoadModule ssl_module modules/mod_ssl.so
+ </IfModule>
+</IfDefine>
+
+<IfModule mod_ssl.c>
+
+##
+## SSL Virtual Host Context
+##
+
+<VirtualHost _default_:443>
+
+# General setup for the virtual host
+DocumentRoot "/var/www/html"
+#ServerName localhost:443
+ServerAdmin root@<%= @domain %>
+ErrorLog logs/ssl_error_log
+
+<IfModule mod_log_config.c>
+ TransferLog logs/ssl_access_log
+</IfModule>
+
+# SSL Engine Switch:
+# Enable/Disable SSL for this virtual host.
+SSLEngine on
+
+# SSL Cipher Suite:
+# List the ciphers that the client is permitted to negotiate.
+# See the mod_ssl documentation for a complete list.
+SSLHonorCipherOrder On
+SSLCipherSuite ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS
+
+
+# SSL Protocol support:
+# List the enable protocol levels with which clients will be able to
+# connect. Disable SSLv2/v3 access by default:
+SSLProtocol ALL -SSLv2 -SSLv3
+
+<%- if @wildcard_sslcert == 'true' then -%>
+SSLCertificateFile /etc/ssl/wildcard.<%= @domain %>.crt
+SSLCertificateKeyFile /etc/ssl/wildcard.<%= @domain %>.key
+SSLCACertificateFile /etc/ssl/wildcard.<%= @domain %>.pem
+SSLVerifyClient None
+<%- else -%>
+SSLCertificateFile /etc/ssl/apache/localhost.pem
+SSLCertificateKeyFile /etc/ssl/apache/localhost.pem
+#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt
+#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt
+<%- end -%>
+
+# Certificate Revocation Lists (CRL):
+# Set the CA revocation path where to find CA CRLs for client
+# authentication or alternatively one huge file containing all
+# of them (file must be PEM encoded)
+# Note: Inside SSLCARevocationPath you need hash symlinks
+# to point to the certificate files. Use the provided
+# Makefile to update the hash symlinks after changes.
+#SSLCARevocationPath /etc/pki/tls/certs/ssl.crl
+#SSLCARevocationFile /etc/pki/tls/certs/ca-bundle.crl
+
+# Client Authentication (Type):
+# Client certificate verification type and depth. Types are
+# none, optional, require and optional_no_ca. Depth is a
+# number which specifies how deeply to verify the certificate
+# issuer chain before deciding the certificate is not valid.
+#SSLVerifyClient require
+#SSLVerifyDepth 10
+
+# Access Control:
+# With SSLRequire you can do per-directory access control based
+# on arbitrary complex boolean expressions containing server
+# variable checks and other lookup directives. The syntax is a
+# mixture between C and Perl. See the mod_ssl documentation
+# for more details.
+#<Location />
+#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \
+# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \
+# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \
+# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \
+# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \
+# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/
+#</Location>
+
+# SSL Engine Options:
+# Set various options for the SSL engine.
+# o FakeBasicAuth:
+# Translate the client X.509 into a Basic Authorisation. This means that
+# the standard Auth/DBMAuth methods can be used for access control. The
+# user name is the `one line' version of the client's X.509 certificate.
+# Note that no password is obtained from the user. Every entry in the user
+# file needs this password: `xxj31ZMTZzkVA'.
+# o ExportCertData:
+# This exports two additional environment variables: SSL_CLIENT_CERT and
+# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
+# server (always existing) and the client (only existing when client
+# authentication is used). This can be used to import the certificates
+# into CGI scripts.
+# o StdEnvVars:
+# This exports the standard SSL/TLS related `SSL_*' environment variables.
+# Per default this exportation is switched off for performance reasons,
+# because the extraction step is an expensive operation and is usually
+# useless for serving static content. So one usually enables the
+# exportation for CGI and SSI requests only.
+# o StrictRequire:
+# This denies access when "SSLRequireSSL" or "SSLRequire" applied even
+# under a "Satisfy any" situation, i.e. when it applies access is denied
+# and no other module can change it.
+# o OptRenegotiate:
+# This enables optimized SSL connection renegotiation handling when SSL
+# directives are used in per-directory context.
+#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
+
+<FilesMatch "\.(cgi|shtml|phtml|php)$">
+ SSLOptions +StdEnvVars
+</FilesMatch>
+
+<Directory "/var/www/cgi-bin">
+ SSLOptions +StdEnvVars
+</Directory>
+
+# SSL Protocol Adjustments:
+# The safe and default but still SSL/TLS standard compliant shutdown
+# approach is that mod_ssl sends the close notify alert but doesn't wait for
+# the close notify alert from client. When you need a different shutdown
+# approach you can use one of the following variables:
+# o ssl-unclean-shutdown:
+# This forces an unclean shutdown when the connection is closed, i.e. no
+# SSL close notify alert is send or allowed to received. This violates
+# the SSL/TLS standard but is needed for some brain-dead browsers. Use
+# this when you receive I/O errors because of the standard approach where
+# mod_ssl sends the close notify alert.
+# o ssl-accurate-shutdown:
+# This forces an accurate shutdown when the connection is closed, i.e. a
+# SSL close notify alert is send and mod_ssl waits for the close notify
+# alert of the client. This is 100% SSL/TLS standard compliant, but in
+# practice often causes hanging connections with brain-dead browsers. Use
+# this only for browsers where you know that their SSL implementation
+# works correctly.
+# Notice: Most problems of broken clients are also related to the HTTP
+# keep-alive facility, so you usually additionally want to disable
+# keep-alive for those clients, too. Use variable "nokeepalive" for this.
+# Similarly, one has to force some clients to use HTTP/1.0 to workaround
+# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
+# "force-response-1.0" for this.
+
+<IfModule mod_setenvif.c>
+ BrowserMatch ".*MSIE.*" nokeepalive ssl-unclean-shutdown \
+ downgrade-1.0 force-response-1.0
+</IfModule>
+
+# Per-Server Logging:
+# The home of a custom SSL log file. Use this when you want a
+# compact non-error SSL logfile on a virtual host basis.
+
+<IfModule mod_log_config.c>
+ CustomLog logs/ssl_request_log \
+ "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b"
+</IfModule>
+
+<IfModule mod_rewrite.c>
+ RewriteEngine On
+ RewriteOptions inherit
+</IfModule>
+
+</VirtualHost>
+
+</IfModule>
diff --git a/modules/apache/templates/50_mod_deflate.conf b/modules/apache/templates/50_mod_deflate.conf
new file mode 100644
index 00000000..5192bf6e
--- /dev/null
+++ b/modules/apache/templates/50_mod_deflate.conf
@@ -0,0 +1,36 @@
+<IfModule mod_deflate.c>
+ # Compress HTML, CSS, JavaScript, JSON, Text, XML and fonts
+ AddOutputFilterByType DEFLATE application/javascript
+ AddOutputFilterByType DEFLATE application/json
+ AddOutputFilterByType DEFLATE application/rss+xml
+ AddOutputFilterByType DEFLATE application/vnd.ms-fontobject
+ AddOutputFilterByType DEFLATE application/x-font
+ AddOutputFilterByType DEFLATE application/x-font-opentype
+ AddOutputFilterByType DEFLATE application/x-font-otf
+ AddOutputFilterByType DEFLATE application/x-font-truetype
+ AddOutputFilterByType DEFLATE application/x-font-ttf
+ AddOutputFilterByType DEFLATE application/x-javascript
+ AddOutputFilterByType DEFLATE application/xhtml+xml
+ AddOutputFilterByType DEFLATE application/xml
+ AddOutputFilterByType DEFLATE font/opentype
+ AddOutputFilterByType DEFLATE font/otf
+ AddOutputFilterByType DEFLATE font/ttf
+ AddOutputFilterByType DEFLATE image/svg+xml
+ AddOutputFilterByType DEFLATE image/x-icon
+ AddOutputFilterByType DEFLATE text/css
+ AddOutputFilterByType DEFLATE text/html
+ AddOutputFilterByType DEFLATE text/javascript
+ AddOutputFilterByType DEFLATE text/plain
+ AddOutputFilterByType DEFLATE text/xml
+
+ # Level of compression (9=highest compression level)
+ DeflateCompressionLevel 1
+
+ # Do not compress certain file types
+ SetEnvIfNoCase Request_URI \.(?:gif|jpe?g|png|heif|heic|webp|mp4|mov|mpg|webm|avi)$ no-gzip dont-vary
+ SetEnvIfNoCase Request_URI \.(?:exe|t?gz|zip|bz2|xz|zst|lzo|lzma|sit|rar|cab|rpm)$ no-gzip dont-vary
+ SetEnvIfNoCase Request_URI \.pdf$ no-gzip dont-vary
+
+ # Make sure proxies don't deliver the wrong content
+ Header append Vary User-Agent env=!dont-vary
+</IfModule>
diff --git a/modules/apache/templates/CVE-2011-3192.conf b/modules/apache/templates/CVE-2011-3192.conf
new file mode 100644
index 00000000..25751adc
--- /dev/null
+++ b/modules/apache/templates/CVE-2011-3192.conf
@@ -0,0 +1,12 @@
+ # Drop the Range header when more than 5 ranges.
+ # CVE-2011-3192
+ SetEnvIf Range (?:,.*?){5,5} bad-range=1
+ RequestHeader unset Range env=bad-range
+
+ # We always drop Request-Range; as this is a legacy
+ # dating back to MSIE3 and Netscape 2 and 3.
+ #
+ RequestHeader unset Request-Range
+
+ # optional logging.
+ CustomLog logs/range-CVE-2011-3192.log common env=bad-range
diff --git a/modules/apache/templates/customization.conf b/modules/apache/templates/customization.conf
index 81424c42..41e15e3a 100644
--- a/modules/apache/templates/customization.conf
+++ b/modules/apache/templates/customization.conf
@@ -1,2 +1 @@
NameVirtualHost *:80
-NameVirtualHost *:443
diff --git a/modules/apache/templates/django.wsgi b/modules/apache/templates/django.wsgi
index 90521653..2188e1e7 100644
--- a/modules/apache/templates/django.wsgi
+++ b/modules/apache/templates/django.wsgi
@@ -1,7 +1,16 @@
#!/usr/bin/python
import os, sys
-sys.path.append('<%= module_path %>')
-os.environ['DJANGO_SETTINGS_MODULE'] = '<%= module %>.settings'
+<%- for m in module_path -%>
+path = '<%= m %>'
+if path not in sys.path:
+ sys.path.append(path)
+<%- end -%>
+
+<%- if @django_module -%>
+os.environ['DJANGO_SETTINGS_MODULE'] = '<%= @django_module %>.settings'
+<%- else -%>
+os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
+<%- end -%>
import django.core.handlers.wsgi
diff --git a/modules/apache/templates/logrotate b/modules/apache/templates/logrotate
new file mode 100644
index 00000000..823989eb
--- /dev/null
+++ b/modules/apache/templates/logrotate
@@ -0,0 +1,23 @@
+/var/log/httpd/*_log /var/log/httpd/apache_runtime_status /var/log/httpd/ssl_mutex {
+<% if @hostname == 'duvel' %>
+ rotate 60
+ daily
+<% elsif @hostname == 'friteuse' %>
+ # The virtual disk is very small so keep log sizes down
+ rotate 26
+ weekly
+<% elsif @hostname == 'sucuk' %>
+ rotate 52
+ weekly
+<% else %>
+ rotate <%= scope.lookupvar('apache::var::httpdlogs_rotate') %>
+ monthly
+<% end %>
+ missingok
+ notifempty
+ sharedscripts
+ compress
+ postrotate
+ /bin/systemctl restart httpd.service > /dev/null 2>/dev/null || true
+ endscript
+}
diff --git a/modules/apache/templates/mod/php.conf b/modules/apache/templates/mod/php.conf
new file mode 100644
index 00000000..8bc20078
--- /dev/null
+++ b/modules/apache/templates/mod/php.conf
@@ -0,0 +1,5 @@
+# as php insists on having this value set, let's
+# set it explicitly here
+php_value date.timezone "<%= @php_date_timezone %>"
+php_admin_value sendmail_path "/usr/sbin/sendmail -t -i -f root@<%= @domain %>"
+
diff --git a/modules/apache/templates/mod/ssl_vhost.conf b/modules/apache/templates/mod/ssl_vhost.conf
new file mode 100644
index 00000000..bcfe8201
--- /dev/null
+++ b/modules/apache/templates/mod/ssl_vhost.conf
@@ -0,0 +1 @@
+NameVirtualHost *:443
diff --git a/modules/apache/templates/mod/wsgi.conf b/modules/apache/templates/mod/wsgi.conf
new file mode 100644
index 00000000..18678bc6
--- /dev/null
+++ b/modules/apache/templates/mod/wsgi.conf
@@ -0,0 +1,12 @@
+# https://code.google.com/p/modwsgi/wiki/ApplicationIssues
+# mainly for viewvc at the moment, when doing a diff
+WSGIRestrictStdout Off
+# again for viewvc:
+# mod_wsgi (pid=20083): Callback registration for signal 15 ignored.
+# no bug reported upstream yet :/
+# WSGIRestrictSignal Off
+# re-enabled, as this prevents apache from restarting properly
+
+# make sure the transifex client works fine, as we need wsgi to pass the authorisation
+# header to django (otherwise, this just shows error 401)
+WSGIPassAuthorization On
diff --git a/modules/apache/templates/no_hidden_file_dir.conf b/modules/apache/templates/no_hidden_file_dir.conf
new file mode 100644
index 00000000..dce78912
--- /dev/null
+++ b/modules/apache/templates/no_hidden_file_dir.conf
@@ -0,0 +1,4 @@
+#
+# don't serve up any hidden files or dirs like .git*, .svn, ...
+#
+RedirectMatch 404 /\..*$
diff --git a/modules/apache/templates/urlescape b/modules/apache/templates/urlescape
new file mode 100644
index 00000000..8feb7fa4
--- /dev/null
+++ b/modules/apache/templates/urlescape
@@ -0,0 +1,9 @@
+#!/usr/bin/python3 -u
+# URL escape each path given on stdin
+import sys
+import urllib.parse
+while True:
+ l = sys.stdin.readline()
+ if not l:
+ break
+ print(urllib.parse.quote(l.rstrip("\n")))
diff --git a/modules/apache/templates/vhost_base.conf b/modules/apache/templates/vhost_base.conf
new file mode 100644
index 00000000..da26b683
--- /dev/null
+++ b/modules/apache/templates/vhost_base.conf
@@ -0,0 +1,53 @@
+<%- if @use_ssl then
+ port = 443
+else
+ port = 80
+end
+-%>
+
+<VirtualHost *:<%= port %>>
+<%- if @use_ssl then -%>
+<%= scope.function_template(["apache/vhost_ssl.conf"]) %>
+<%- end -%>
+ ServerName <%= @real_vhost %>
+<%- @server_aliases.each do |key| -%>
+ ServerAlias <%= key %>
+<%- end -%>
+ DocumentRoot <%= @location %>
+
+ CustomLog <%= @real_access_logfile %> combined
+ ErrorLog <%= @real_error_logfile %>
+
+<%- if @enable_public_html -%>
+ #TODO add the rest
+ UserDir public_html
+<%- else -%>
+<IfModule mod_userdir.c>
+ UserDir disabled
+</IfModule>
+<%- end -%>
+
+<%- @aliases.keys.sort {|a,b| a.size <=> b.size }.reverse.each do |key| -%>
+ Alias <%= key %> <%= @aliases[key] %>
+<%- end -%>
+
+ <%= @content %>
+
+<%- if @options.length > 0 -%>
+ <Directory <%= @location %>>
+ Options <%= @options.join(" ") %>
+ </Directory>
+<%- end -%>
+
+<%- if @enable_location -%>
+ <Location />
+ <IfModule mod_authz_core.c>
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ Allow from all
+ </IfModule>
+ </Location>
+<%- end -%>
+</VirtualHost>
+
diff --git a/modules/apache/templates/vhost_catalyst_app.conf b/modules/apache/templates/vhost_catalyst_app.conf
deleted file mode 100644
index 57867fc4..00000000
--- a/modules/apache/templates/vhost_catalyst_app.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-<% if use_ssl then
- port = 443
-else
- port = 80
-end
-%>
-
-<VirtualHost *:<%= port %>>
-<% if use_ssl then %>
- SSLEngine on
- #TODO deploy SNI later
- SSLCertificateFile /etc/ssl/apache/apache.pem
- SSLCertificateKeyFile /etc/ssl/apache/apache.pem
-<% end %>
- ServerName <%= name %>
- # Serve static content directly
- DocumentRoot /dev/null
-# header
-
-<% if location then %>
- Alias /static <%= location %>/root/static
-<% end %>
- Alias / <%= script %>/
- FastCgiServer <%= script %> -processes <%= process %> -idle-timeout 30
-
- <Location />
- Allow from all
- </Location>
-</VirtualHost>
-
diff --git a/modules/apache/templates/vhost_django_app.conf b/modules/apache/templates/vhost_django_app.conf
index 9d64865f..d85cf7a9 100644
--- a/modules/apache/templates/vhost_django_app.conf
+++ b/modules/apache/templates/vhost_django_app.conf
@@ -1,12 +1 @@
-<VirtualHost *:80>
- ServerName <%= name %>
- # Serve static content directly
- DocumentRoot /dev/null
-
- WSGIScriptAlias / /usr/local/lib/wsgi/<%= name %>.wsgi
-#footer
- <Location />
- Allow from all
- </Location>
-</VirtualHost>
-
+WSGIScriptAlias / /usr/local/lib/wsgi/<%= @name %>.wsgi
diff --git a/modules/apache/templates/vhost_fcgid.conf b/modules/apache/templates/vhost_fcgid.conf
new file mode 100644
index 00000000..fefa4a49
--- /dev/null
+++ b/modules/apache/templates/vhost_fcgid.conf
@@ -0,0 +1,6 @@
+AddHandler fcgid-script .pl
+<%- @script_aliases.keys.sort {|a,b| a.size <=> b.size }.reverse.each do |key| -%>
+ ScriptAlias <%= key %> <%= @script_aliases[key] %>
+<%- end -%>
+FcgidMinProcessesPerClass <%= @process %>
+FcgidIdleTimeout 30
diff --git a/modules/apache/templates/vhost_fcgid_norobot.conf b/modules/apache/templates/vhost_fcgid_norobot.conf
new file mode 100644
index 00000000..0643cac9
--- /dev/null
+++ b/modules/apache/templates/vhost_fcgid_norobot.conf
@@ -0,0 +1,45 @@
+AddHandler fcgid-script .pl
+<%- @script_aliases.keys.sort {|a,b| a.size <=> b.size }.reverse.each do |key| -%>
+ ScriptAlias <%= key %> <%= @script_aliases[key] %>
+<%- end -%>
+FcgidMinProcessesPerClass <%= @process %>
+FcgidIdleTimeout 30
+
+# These robots were scraping the whole of svnweb in 2024-04, causing severe
+# load, so they are banned. It's not clear whether they obey robots.txt or
+# not (we didn't give them enough of a chance to find out), so we could
+# consider giving them a chance to redeem themselves at some point in the
+# future.
+RewriteEngine on
+RewriteCond %{HTTP_USER_AGENT} ClaudeBot|Amazonbot
+RewriteRule . - [R=403,L]
+
+# Block expensive SVN operations on all common robots ("spider" covers a
+# bunch). "Expensive" is considered to be most operations other than showing a
+# directory or downloading a specific version of a file.
+# Note: eliminating view=log and annotate= doesn't make much difference to the
+# CPU load when robots are hitting the server in real world operation.
+#RewriteCond %{QUERY_STRING} pathrev=|r1=
+# Treat anything other than a plain path as "expensive"
+RewriteCond %{QUERY_STRING} .
+RewriteCond %{HTTP_USER_AGENT} "Googlebot|GoogleOther|bingbot|Yahoo! Slurp|ClaudeBot|Amazonbot|YandexBot|SemrushBot|Barkrowler|DataForSeoBot|PetalBot|facebookexternalhit|GPTBot|ImagesiftBot|spider|Spider|iPod|Trident|Presto"
+RewriteRule . - [R=403,L]
+
+# Only let expensive operations through when a cookie is set. If no cookie is
+# set, redirect to a page where it will be set using JavaScript and redirect
+# back. This will block requests from user agents that do not support
+# JavaScript, which includes many robots.
+RewriteMap urlescape prg:/usr/local/bin/urlescape
+#RewriteCond %{QUERY_STRING} pathrev=|r1=
+# Treat anything other than a plain path as "expensive"
+RewriteCond %{QUERY_STRING} .
+RewriteCond %{REQUEST_URI} !/_check
+RewriteCond %{HTTP_COOKIE} !session=([^;]+) [novary]
+RewriteRule . %{REQUEST_SCHEME}://%{SERVER_NAME}:%{SERVER_PORT}/_check?to=%{REQUEST_URI}?${urlescape:%{QUERY_STRING}} [R=302,L]
+
+# Block abusive spiders by IP address who don't identify themselves in the
+# User-Agent: string
+RewriteCond expr "-R '47.76.0.0/14' || -R '47.80.0.0/14' || -R '47.208.0.0/16' || -R '47.238.0.0/16' || -R '8.210.0.0/16' || -R '8.218.0.0/16' || -R '188.239.0.0/18' || -R '166.108.192.0/18' || -R '124.243.160.0/19' || -R '101.46.0.0/20'"
+RewriteRule . - [R=403,L]
+
+ErrorDocument 403 "<html><body>Impolite robots are not allowed</body></html>"
diff --git a/modules/apache/templates/vhost_redirect.conf b/modules/apache/templates/vhost_redirect.conf
new file mode 100644
index 00000000..c787311e
--- /dev/null
+++ b/modules/apache/templates/vhost_redirect.conf
@@ -0,0 +1,2 @@
+Redirect / <%= @url %>
+
diff --git a/modules/apache/templates/vhost_reverse_proxy.conf b/modules/apache/templates/vhost_reverse_proxy.conf
new file mode 100644
index 00000000..4859bda3
--- /dev/null
+++ b/modules/apache/templates/vhost_reverse_proxy.conf
@@ -0,0 +1,15 @@
+<%= @content %>
+
+ ProxyRequests Off
+ ProxyPreserveHost On
+
+ <Proxy *>
+ Order deny,allow
+ Allow from all
+ </Proxy>
+<%- if @url =~ /^https/ -%>
+ SSLProxyEngine On
+<%- end -%>
+ ProxyPass / <%= @url %>
+ ProxyPassReverse / <%= @url %>
+
diff --git a/modules/apache/templates/vhost_simple.conf b/modules/apache/templates/vhost_simple.conf
new file mode 100644
index 00000000..77b55287
--- /dev/null
+++ b/modules/apache/templates/vhost_simple.conf
@@ -0,0 +1,14 @@
+<VirtualHost *:80>
+ ServerName <%= @name %>
+ DocumentRoot <%= @location %>
+
+ <Location />
+ <IfModule mod_authz_core.c>
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ Allow from all
+ </IfModule>
+ </Location>
+</VirtualHost>
+
diff --git a/modules/apache/templates/vhost_ssl.conf b/modules/apache/templates/vhost_ssl.conf
new file mode 100644
index 00000000..0cb52eca
--- /dev/null
+++ b/modules/apache/templates/vhost_ssl.conf
@@ -0,0 +1,13 @@
+ SSLEngine on
+ SSLProtocol ALL -SSLv2 -SSLv3
+ SSLHonorCipherOrder On
+ SSLCipherSuite ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS
+ <%- if @wildcard_sslcert == 'true' then -%>
+ SSLCertificateFile /etc/ssl/wildcard.<%= @domain %>.crt
+ SSLCertificateKeyFile /etc/ssl/wildcard.<%= @domain %>.key
+ SSLCACertificateFile /etc/ssl/wildcard.<%= @domain %>.pem
+ SSLVerifyClient None
+ <%- else -%>
+ SSLCertificateFile /etc/ssl/apache/<%= @real_vhost %>.pem
+ SSLCertificateKeyFile /etc/ssl/apache/<%= @real_vhost %>.pem
+ <%- end -%>
diff --git a/modules/apache/templates/vhost_ssl_redirect.conf b/modules/apache/templates/vhost_ssl_redirect.conf
index bb22a2c8..23a7eabe 100644
--- a/modules/apache/templates/vhost_ssl_redirect.conf
+++ b/modules/apache/templates/vhost_ssl_redirect.conf
@@ -1,4 +1 @@
-<VirtualHost *:80>
- ServerName <%= name %>
- Redirect / https://<%= name %>/
-</VirtualHost>
+Redirect / https://<%= @name %>/
diff --git a/modules/apache/templates/vhost_wsgi.conf b/modules/apache/templates/vhost_wsgi.conf
new file mode 100644
index 00000000..2f1ba585
--- /dev/null
+++ b/modules/apache/templates/vhost_wsgi.conf
@@ -0,0 +1,3 @@
+WSGIScriptAlias / <%= @wsgi_path %>
+
+
diff --git a/modules/auto_installation/manifests/download.rb b/modules/auto_installation/manifests/download.rb
new file mode 100644
index 00000000..12cc53bf
--- /dev/null
+++ b/modules/auto_installation/manifests/download.rb
@@ -0,0 +1,21 @@
+define "auto_installation::download::netboot_images", :path, :versions, :archs, :mirror_path, :files do
+ # example :
+ # mandriva :
+ # ftp://ftp.free.fr/pub/Distributions_Linux/MandrivaLinux/devel/%{version}/%{arch}/isolinux/alt0/
+ for a in @archs do
+ for v in @versions do
+      # uncomment when ruby 1.9 is stable and in use
+ # mirror_file_path = @mirror_path % { :arch => a, :version => v }
+ mirror_file_path = @mirror_path.gsub(/%{arch}/, a)
+ mirror_file_path = mirror_file_path.gsub(/%{version}/, v)
+ for f in @files do
+ file_name = "#{@path}/#{@name}_#{v}_#{a}_#{f}"
+ create_resource(:exec, "wget -q #{mirror_file_path}/#{f} -O #{file_name}",
+ :creates => file_name)
+ end
+ end
+ end
+end
+
+
+
diff --git a/modules/auto_installation/manifests/init.pp b/modules/auto_installation/manifests/init.pp
new file mode 100644
index 00000000..642cddfd
--- /dev/null
+++ b/modules/auto_installation/manifests/init.pp
@@ -0,0 +1,140 @@
+# what should be possible :
+# install a base system
+# - mandriva
+# - mageia
+# - others ? ( for testing package ? )
+
+# install a server
+# - by name, with a valstar clone
+
+class auto_installation {
+ class variables {
+ $pxe_dir = "/var/lib/pxe"
+        # m/ for menu. There is a limitation on the path length, so even
+        # though we will likely not hit it, a short name keeps paths well below the limit
+ $pxe_menu_dir = "${pxe_dir}/pxelinux.cfg/m/"
+ }
+
+ class download {
+ import "download.rb"
+ }
+
+ class pxe_menu inherits variables {
+ package { 'syslinux':
+
+ }
+
+ file { $pxe_dir:
+ ensure => directory,
+ }
+
+ file { "${pxe_dir}/pxelinux.0":
+ ensure => "/usr/lib/syslinux/pxelinux.0",
+ }
+
+ file { "${pxe_dir}/menu.c32":
+ ensure => "/usr/lib/syslinux/menu.c32"
+ }
+
+ file { "${pxe_dir}/pxelinux.cfg":
+ ensure => directory,
+ }
+        # m for menu: there is some limitation on the path length, so a
+        # short directory name is preferred
+ file { "${pxe_menu_dir}":
+ ensure => directory,
+ }
+
+ # TODO make it tag aware
+ $menu_entries = list_exported_ressources('Auto_installation::Pxe_menu_base')
+ # default file should have exported resources
+ file { "${pxe_dir}/pxelinux.cfg/default":
+ ensure => present,
+ content => template('auto_installation/default'),
+ }
+ Auto_installation::Pxe_menu_base <<| tag == $fqdn |>>
+ }
+
+ define pxe_menu_base($content) {
+ include auto_installation::variables
+ file { "${auto_installation::variables::pxe_menu_dir}/${name}":
+ ensure => present,
+ content => $content,
+ }
+ }
+
+ define pxe_menu_entry($kernel_path, $append, $label) {
+ @@auto_installation::pxe_menu_base { $name:
+ tag => $fqdn,
+ content => template('auto_installation/menu'),
+ }
+ }
+
+ # define pxe_linux_entry
+ # meant to be exported
+ # name
+ # label
+ # kernel
+ # append
+ class netinst_storage {
+ # to ease the creation of test iso
+ $netinst_path = "/var/lib/libvirt/netinst"
+
+ file { $netinst_path:
+ ensure => directory,
+ require => Package[libvirt-utils],
+ }
+
+ libvirtd::storage { "netinst":
+ path => $netinst_path,
+ require => File[$netinst_path],
+ }
+ }
+
+ define download_file($destination_path, $download_url) {
+ exec { "wget -q -O ${destination_path}/${name} ${download_url}/${name}":
+ creates => "${destination_path}/${name}",
+ }
+ }
+
+ define mandriva_installation_entry($version, $arch = 'x86_64') {
+ include netinst_storage
+ $protocol = "ftp"
+ $server = "ftp.free.fr"
+ $mirror_url_base = "/pub/Distributions_Linux/MandrivaLinux/"
+ $mirror_url_middle = $version ? {
+ "cooker" => "devel/cooker/${arch}/",
+ default => "official/${version}/${arch}/"
+ }
+ $mirror_url = "${mirror_url_base}/${mirror_url_middle}"
+
+ $mirror_url_end = "isolinux/alt0"
+
+ $destination_path = "${netinst_storage::netinst_path}/${name}"
+
+ file { "${destination_path}":
+ ensure => directory,
+ }
+
+ $download_url = "${protocol}\\://${server}/${mirror_url}/${mirror_url_end}"
+
+
+ download_file { ['all.rdz','vmlinuz']:
+ destination_path => $destination_path,
+ download_url => $download_url,
+ require => File[$destination_path],
+ }
+
+ pxe_menu_entry { "mandriva_${version}_${arch}":
+ kernel_path => "${name}/vmlinuz",
+ label => "Mandriva ${version} ${arch}",
+ #TODO add autoinst.cfg
+ append => "${name}/all.rdz useless_thing_accepted=1 lang=fr automatic=int:eth0,netw:dhcp,met:${protocol},ser:${server},dir:${mirror_url} ",
+ }
+ }
+ #
+ # define a template for autoinst
+ # - basic installation
+ # - server installation ( with server name as a parameter )
+
+}
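
For illustration only: a hypothetical entry created with the define above; it downloads all.rdz and vmlinuz from the mirror and exports a matching PXE menu entry.

    # hypothetical network-install entry
    auto_installation::mandriva_installation_entry { 'cooker_x86_64':
      version => 'cooker',
      arch    => 'x86_64',
    }
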
diff --git a/modules/auto_installation/templates/default b/modules/auto_installation/templates/default
new file mode 100644
index 00000000..a9ea8de3
--- /dev/null
+++ b/modules/auto_installation/templates/default
@@ -0,0 +1,15 @@
+DEFAULT menu.c32
+PROMPT 10
+TIMEOUT 100
+NOESCAPE 1
+
+MENU SHIFTKEY 1
+MENU TITLE PXE Boot on <%= fqdn %>
+
+LABEL local
+ MENU LABEL Local
+ localboot 0
+
+<% for m in menu_entries %>
+INCLUDE pxelinux.cfg/m/<%= m %>
+<% end %>
diff --git a/modules/auto_installation/templates/menu b/modules/auto_installation/templates/menu
new file mode 100644
index 00000000..3d0ce6fa
--- /dev/null
+++ b/modules/auto_installation/templates/menu
@@ -0,0 +1,5 @@
+LABEL <%= name %>
+ MENU DEFAULT
+ MENU LABEL Install <%= label %>
+ kernel <%= kernel_path %>
+ append <%= append %>
diff --git a/modules/bcd/manifests/base.pp b/modules/bcd/manifests/base.pp
new file mode 100644
index 00000000..d515f3e1
--- /dev/null
+++ b/modules/bcd/manifests/base.pp
@@ -0,0 +1,29 @@
+class bcd::base {
+ include sudo
+ include bcd
+
+ group { $bcd::login: }
+
+ user { $bcd::login:
+ home => $bcd::home,
+ comment => 'User for creating ISOs',
+ }
+
+ file { [$bcd::public_isos, '/var/lib/bcd']:
+ ensure => directory,
+ owner => $bcd::login,
+ group => $bcd::login,
+ mode => '0755',
+ }
+
+ # svn version is used for now
+ #package { bcd: }
+
+    # needed for qemu over ssh
+ package { 'xauth': }
+
+ $isomakers_group = 'mga-iso_makers'
+ sudo::sudoers_config { 'bcd':
+ content => template('bcd/sudoers.bcd')
+ }
+}
diff --git a/modules/bcd/manifests/init.pp b/modules/bcd/manifests/init.pp
new file mode 100644
index 00000000..1ff57144
--- /dev/null
+++ b/modules/bcd/manifests/init.pp
@@ -0,0 +1,5 @@
+class bcd {
+ $login = 'bcd'
+ $home = '/home/bcd'
+ $public_isos = "${home}/public_html/isos"
+}
diff --git a/modules/bcd/manifests/rsync.pp b/modules/bcd/manifests/rsync.pp
new file mode 100644
index 00000000..0a9ccc34
--- /dev/null
+++ b/modules/bcd/manifests/rsync.pp
@@ -0,0 +1,7 @@
+class bcd::rsync {
+ include bcd::base
+ $public_isos = $bcd::public_isos
+ class { 'rsyncd':
+ rsyncd_conf => 'bcd/rsyncd.conf',
+ }
+}
diff --git a/modules/bcd/manifests/web.pp b/modules/bcd/manifests/web.pp
new file mode 100644
index 00000000..d670cf5d
--- /dev/null
+++ b/modules/bcd/manifests/web.pp
@@ -0,0 +1,9 @@
+class bcd::web {
+ include bcd::base
+ $location = "${bcd::home}/public_html"
+
+ apache::vhost::base { "bcd.${::domain}":
+ location => $location,
+ content => template('bcd/vhost_bcd.conf'),
+ }
+}
diff --git a/modules/bcd/templates/rsyncd.conf b/modules/bcd/templates/rsyncd.conf
new file mode 100644
index 00000000..75c7d335
--- /dev/null
+++ b/modules/bcd/templates/rsyncd.conf
@@ -0,0 +1,12 @@
+# $Id: rsyncd.conf 1419 2011-03-29 17:04:07Z nanardon $
+
+uid = nobody
+gid = nogroup
+
+[isos]
+ path = <%= scope.lookupvar("bcd::public_isos") %>
+ comment = Mageia ISOs
+ exclude = .htaccess .htpasswd
+ read only = yes
+ auth users = isoqa
+ secrets file = /etc/rsyncd.secrets
diff --git a/modules/bcd/templates/sudoers.bcd b/modules/bcd/templates/sudoers.bcd
new file mode 100644
index 00000000..c462bffd
--- /dev/null
+++ b/modules/bcd/templates/sudoers.bcd
@@ -0,0 +1,10 @@
+<%= scope.lookupvar('bcd::login') %> ALL=(root) NOPASSWD:/bin/mount, /bin/umount, \
+/usr/sbin/chroot, \
+/usr/sbin/urpmi, \
+/usr/sbin/urpmi.addmedia, \
+/usr/sbin/urpmi.removemedia, \
+/usr/sbin/urpmi.update, \
+/usr/bin/urpmq, \
+/bin/rm
+
+%<%= @isomakers_group %> ALL=(<%= scope.lookupvar('bcd::login') %>) SETENV: NOPASSWD: ALL
diff --git a/modules/bcd/templates/vhost_bcd.conf b/modules/bcd/templates/vhost_bcd.conf
new file mode 100644
index 00000000..c89955e2
--- /dev/null
+++ b/modules/bcd/templates/vhost_bcd.conf
@@ -0,0 +1,12 @@
+<Directory <%= @location %>>
+ AuthUserFile <%= scope.lookupvar('bcd::home') %>/htpasswd
+ AuthGroupFile /dev/null
+ AuthName "QA test isos, restricted access"
+ ErrorDocument 403 "For the password, please contact the QA team ( https://wiki.<%= @domain %>/en/QA_Team )"
+
+ AuthType Basic
+ require valid-user
+
+ Options FollowSymlinks
+ Options Indexes
+</Directory>
diff --git a/modules/bind/manifests/init.pp b/modules/bind/manifests/init.pp
index 60ef7a04..a5d20c09 100644
--- a/modules/bind/manifests/init.pp
+++ b/modules/bind/manifests/init.pp
@@ -1,56 +1,25 @@
class bind {
- class bind_base {
- package { bind:
- ensure => installed
- }
+ package { 'bind': }
- service { named:
- ensure => running,
- path => "/etc/init.d/named",
- subscribe => [ Package["bind"]]
- }
-
- file { '/etc/named.conf':
- ensure => "/var/lib/named/etc/named.conf",
- owner => root,
- group => root,
- mode => 644
- }
+ service { 'named':
+ restart => 'service named restart',
+ subscribe => Package['bind'],
}
-
- file { '/var/lib/named/etc/named.conf':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["bind"],
- content => "",
- notify => [Service['named']]
+ file { '/etc/named.conf':
+ ensure => link,
+ target => '/var/lib/named/etc/named.conf',
+ require => Package['bind'],
}
- define zone_master {
- file { "/var/lib/named/var/named/master/$name.zone":
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("bind/zones/$name.zone"),
- require => Package[bind],
- notify => Service[named]
- }
+ exec { 'named_reload':
+ command => 'service named reload',
+ refreshonly => true,
}
- class bind_master inherits bind_base {
- file { '/var/lib/named/etc/named.conf':
- content => template("bind/named_base.conf", "bind/named_master.conf"),
- }
- }
-
- class bind_slave inherits bind_base {
- file { '/var/lib/named/etc/named.conf':
- content => template("bind/named_base.conf", "bind/named_slave.conf"),
- }
+ file { '/var/lib/named/etc/named.conf':
+ require => Package['bind'],
+ content => '',
+ notify => Service['named'],
}
-
}
diff --git a/modules/bind/manifests/master.pp b/modules/bind/manifests/master.pp
new file mode 100644
index 00000000..a82d4757
--- /dev/null
+++ b/modules/bind/manifests/master.pp
@@ -0,0 +1,17 @@
+class bind::master inherits bind {
+ Tld_redirections::Domain <<| |>>
+
+ $managed_tlds = list_exported_ressources('Tld_redirections::Domain')
+
+ file { "/var/lib/named/var/named/master":
+ ensure => directory
+ }
+
+ file { "/var/lib/named/var/named/reverse":
+ ensure => directory
+ }
+
+ File['/var/lib/named/etc/named.conf'] {
+ content => template('bind/named_base.conf', 'bind/named_master.conf'),
+ }
+}
diff --git a/modules/bind/manifests/slave.pp b/modules/bind/manifests/slave.pp
new file mode 100644
index 00000000..e446b57a
--- /dev/null
+++ b/modules/bind/manifests/slave.pp
@@ -0,0 +1,6 @@
+class bind::slave inherits bind {
+ $managed_tlds = list_exported_ressources('Tld_redirections::Domain')
+ File['/var/lib/named/etc/named.conf'] {
+ content => template('bind/named_base.conf', 'bind/named_slave.conf'),
+ }
+}
diff --git a/modules/bind/manifests/zone.pp b/modules/bind/manifests/zone.pp
new file mode 100644
index 00000000..17f2075e
--- /dev/null
+++ b/modules/bind/manifests/zone.pp
@@ -0,0 +1,13 @@
+define bind::zone($type, $content = false) {
+ if ! $content {
+ $zone_content = template("bind/zones/${name}.zone")
+ } else {
+ $zone_content = $content
+ }
+
+ file { "/var/named/${type}/${name}.zone":
+ content => $zone_content,
+ require => Package['bind'],
+ notify => Exec['named_reload']
+ }
+}
diff --git a/modules/bind/manifests/zone/master.pp b/modules/bind/manifests/zone/master.pp
new file mode 100644
index 00000000..460f52c6
--- /dev/null
+++ b/modules/bind/manifests/zone/master.pp
@@ -0,0 +1,6 @@
+define bind::zone::master($content = false) {
+ bind::zone { $name :
+ type => 'master',
+ content => $content,
+ }
+}
diff --git a/modules/bind/manifests/zone/reverse.pp b/modules/bind/manifests/zone/reverse.pp
new file mode 100644
index 00000000..400e77f9
--- /dev/null
+++ b/modules/bind/manifests/zone/reverse.pp
@@ -0,0 +1,6 @@
+define bind::zone::reverse($content = false) {
+ bind::zone { $name :
+ type => 'reverse',
+ content => $content,
+ }
+}
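
For illustration only: hypothetical zone declarations built on the defines above; when no content is given, bind::zone falls back to template("bind/zones/<name>.zone").

    # hypothetical zone declarations
    bind::zone::master { 'mageia.org': }   # renders bind/templates/zones/mageia.org.zone
    bind::zone::reverse { '1.0.0.127.in-addr.arpa':   # invented example with inline content
      content => "; dummy reverse zone\n",
    }
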
diff --git a/modules/bind/templates/named_base.conf b/modules/bind/templates/named_base.conf
index 3eb30478..5adba9f3 100644
--- a/modules/bind/templates/named_base.conf
+++ b/modules/bind/templates/named_base.conf
@@ -18,6 +18,10 @@ logging {
acl "trusted_networks" {
127.0.0.1;
212.85.158.144/28;
+ # used for various virtual machines
+ 192.168.0.0/16;
+ 10.0.0.0/8;
+ 172.16.0.0/12;
};
// Enable statistics at http://127.0.0.1:5380/
statistics-channels {
@@ -28,7 +32,6 @@ options {
version "";
directory "/var/named";
dump-file "/var/tmp/named_dump.db";
- pid-file "/var/run/named.pid";
statistics-file "/var/tmp/named.stats";
zone-statistics yes;
// datasize 256M;
@@ -97,37 +100,31 @@ zone "." IN {
zone "localdomain" IN {
type master;
- file "master/localdomain.zone";
+ file "named.localhost";
allow-update { none; };
};
zone "localhost" IN {
type master;
- file "master/localhost.zone";
+ file "named.localhost";
allow-update { none; };
};
-zone "0.0.127.in-addr.arpa" IN {
+zone "1.0.0.127.in-addr.arpa" IN {
type master;
- file "reverse/named.local";
+ file "named.loopback";
allow-update { none; };
};
-zone "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN {
+zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN {
type master;
- file "reverse/named.ip6.local";
- allow-update { none; };
-};
-
-zone "255.in-addr.arpa" IN {
- type master;
- file "reverse/named.broadcast";
+ file "named.loopback";
allow-update { none; };
};
zone "0.in-addr.arpa" IN {
type master;
- file "reverse/named.zero";
+ file "named.empty";
allow-update { none; };
};
diff --git a/modules/bind/templates/named_master.conf b/modules/bind/templates/named_master.conf
index f5219e94..30b3418f 100644
--- a/modules/bind/templates/named_master.conf
+++ b/modules/bind/templates/named_master.conf
@@ -4,11 +4,26 @@ zone "mageia.org" IN {
allow-update { none; };
};
-zone "mageia.fr" IN {
+<%
+for tld in managed_tlds
+%>
+
+zone "mageia.<%= tld %>" IN {
type master;
- file "master/mageia.fr.zone";
+ file "master/mageia.<%= tld %>.zone";
allow-update { none; };
};
+<% end %>
+zone "7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa" IN {
+ type master;
+ file "reverse/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone";
+ allow-update { none; };
+};
+zone "2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa" IN {
+ type master;
+ file "reverse/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone";
+ allow-update { none; };
+};
diff --git a/modules/bind/templates/named_slave.conf b/modules/bind/templates/named_slave.conf
index 2a3a2fad..b59db37f 100644
--- a/modules/bind/templates/named_slave.conf
+++ b/modules/bind/templates/named_slave.conf
@@ -1,14 +1,31 @@
zone "mageia.org" IN {
type slave;
file "slave/mageia.org";
- allow-update { 212.85.158.146; };
+ allow-update { 212.85.158.151; };
};
-zone "mageia.fr" IN {
+<%
+for tld in managed_tlds
+%>
+
+zone "mageia.<%= tld %>" IN {
type master;
- file "master/mageia.fr";
- allow-update { 212.85.158.146; };
+       file "master/mageia.<%= tld %>";
+ allow-update { 212.85.158.151; };
};
+<%
+end
+%>
+zone "7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa" IN {
+ type slave;
+ file "slave/7.0.0.0.2.0.0.0.8.7.1.2.2.0.a.2.ip6.arpa.zone";
+ allow-update { 212.85.158.151; };
+};
+zone "2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa" IN {
+ type slave;
+ file "slave/2.1.0.0.0.0.0.1.b.0.e.0.1.0.a.2.ip6.arpa.zone";
+ allow-update { 212.85.158.151; };
+};
diff --git a/modules/bind/templates/zones/mageia.fr.zone b/modules/bind/templates/zones/mageia.fr.zone
deleted file mode 100644
index 70ecc840..00000000
--- a/modules/bind/templates/zones/mageia.fr.zone
+++ /dev/null
@@ -1,27 +0,0 @@
-; cfengine-distributed file
-; local modifications will be lost
-; $Id$
-$TTL 3D
-@ IN SOA ns0.mageia.org. mageia.fr. (
- 2010110200 ; Serial
- 21600 ; Refresh
- 3600 ; Retry
- 2419200 ; Expire
- 86400 ; Minmun TTL
- )
-
-; nameservers
-@ IN NS ns0.mageia.org.
-@ IN NS ns1.mageia.org.
-
-@ IN MX 10 mx0.zarb.org.
-@ IN MX 20 mx1.zarb.org.
-
-; MX
-;@ IN MX 10 mx0.zarb.org.
-
-; machines
-mageia.fr. IN A 212.85.158.22
-
-; aliases
-www IN CNAME mageia.fr.
diff --git a/modules/bind/templates/zones/mageia.org.zone b/modules/bind/templates/zones/mageia.org.zone
deleted file mode 100644
index 1a9de019..00000000
--- a/modules/bind/templates/zones/mageia.org.zone
+++ /dev/null
@@ -1,87 +0,0 @@
-; puppet-distributed file
-; local modifications will be lost
-; $Id$
-$TTL 3D
-@ IN SOA ns0.mageia.org. root.mageia.org. (
- 2010112201 ; Serial
- 21600 ; Refresh
- 3600 ; Retry
- 2419200 ; Expire
- 86400 ; Minmun TTL
- )
-
-; nameservers
-@ IN NS ns0.mageia.org.
-@ IN NS ns1.mageia.org.
-
-@ IN MX 10 mx0.zarb.org.
-@ IN MX 20 mx1.zarb.org.
-
-ml IN MX 10 alamut.mageia.org.
-ml IN MX 20 krampouezh.mageia.org.
-
-; MX
-;@ IN MX 10 mx0.zarb.org.
-
-; machines
-mageia.org. IN A 212.85.158.22
-www-zarb IN A 212.85.158.22
-; gandi vm 1
-vm-gandi IN A 95.142.164.207
-kouign-amann IN A 95.142.164.207
-krampouezh IN A 95.142.164.207
-champagne IN A 217.70.188.116
-
-www-aufml IN A 91.121.11.63
-forum IN A 88.191.127.89
-
-; lost oasis
-alamut IN A 212.85.158.146
-alamut IN AAAA 2a02:2178:2:7::2
-; since we have a subdomain, we cannot use a CNAME
-ml IN A 212.85.158.146
-ml IN AAAA 2a02:2178:2:7::2
-
-valstar IN A 212.85.158.147
-valstar IN AAAA 2a02:2178:2:7::3
-ecosse IN A 212.85.158.148
-ecosse IN AAAA 2a02:2178:2:7::4
-jonund IN A 212.85.158.149
-jonund IN AAAA 2a02:2178:2:7::5
-fiona IN A 212.85.158.150
-fiona IN AAAA 2a02:2178:2:7::6
-
-; alamut
-ns0 IN A 212.85.158.146
-; krampouezh
-ns1 IN A 95.142.164.207
-
-; aliases
-www IN CNAME www-zarb
-www-test IN CNAME champagne
-blog IN CNAME www-zarb
-blog-test IN CNAME champagne
-rsync IN CNAME www-zarb
-
-ldap IN CNAME valstar
-
-svn IN CNAME valstar
-meetbot IN CNAME krampouezh
-
-donate IN CNAME www-aufml
-donation IN CNAME www-aufml
-
-puppetmaster IN CNAME valstar
-pkgsubmit IN CNAME valstar
-repository IN CNAME valstar
-ldap IN CNAME valstar
-
-identity IN CNAME alamut
-mirrors IN CNAME alamut
-epoll IN CNAME alamut
-pgsql IN CNAME alamut
-bugs IN CNAME alamut
-lists IN CNAME alamut
-; temporary
-;forum IN A 140.211.167.148
-;wiki IN A 88.191.83.84
diff --git a/modules/blog/manifests/init.pp b/modules/blog/manifests/init.pp
index ab7f9ec0..c89a8168 100644
--- a/modules/blog/manifests/init.pp
+++ b/modules/blog/manifests/init.pp
@@ -1,41 +1,97 @@
-#TODO:
-# - add the creation of the user 'blog' in puppet
class blog {
- package { 'mysql':
- ensure => installed
- }
+ class base {
+ $blog_domain = "blog.${::domain}"
+ $blog_location = "/var/www/vhosts/${blog_domain}"
+ $blog_db_backupdir = '/var/lib/backups/blog_db'
+ $blog_files_backupdir = '/var/lib/backups/blog_files'
+ $blog_newpost_email_to = "i18n-reports@ml.${::domain}"
+ $blog_newpost_email_from = "Mageia Blog bot <blog@${::domain}>"
- package { 'wget':
- ensure => installed
+ user { 'blog':
+ groups => apache,
+ comment => 'Mageia Blog bot',
+ home => '/var/lib/blog',
}
+ }
- include apache::mod_php
+ class files_bots inherits base {
+if versioncmp($::lsbdistrelease, '9') < 0 {
+ package { ['php-mysqlnd',
+ 'php-ldap',
+ 'unzip',
+ 'nail']: }
+} else {
+ package { ['php-mysqlnd',
+ 'php-ldap',
+ 'unzip',
+ 's-nail']: }
+}
+
+ mga_common::local_script { 'check_new-blog-post.sh':
+ content => template('blog/check_new-blog-post.sh'),
+ }
+
+ cron { 'Blog bot':
+ user => 'blog',
+ minute => '*/15',
+ command => '/usr/local/bin/check_new-blog-post.sh',
+ require => Mga_common::Local_script['check_new-blog-post.sh'],
+ }
+
+ include apache::mod::php
+
+ apache::vhost::base { "${blog_domain}":
+ location => $blog_location,
+ content => template('blog/blogs_vhosts.conf'),
+ }
+
+ apache::vhost::base { "ssl_${blog_domain}":
+ use_ssl => true,
+ vhost => $blog_domain,
+ location => $blog_location,
+ content => template('blog/blogs_vhosts.conf'),
+ }
- package { 'php-mysql':
- ensure => installed
- }
+ file { $blog_location:
+ ensure => directory,
+ owner => apache,
+ group => apache,
+ }
+ }
+
+ class db_backup inherits base {
+ file { $blog_db_backupdir:
+ ensure => directory,
+ }
+ mga_common::local_script { 'backup_blog-db.sh':
+ content => template('blog/backup_blog-db.sh'),
+ }
- file { "check_new-blog-post":
- path => "/usr/local/bin/check_new-blog-post.sh",
- ensure => present,
- owner => blog,
- group => blog,
- mode => 755,
- content => template("blog/check_new-blog-post.sh")
- }
+ cron { "Backup DB (blog)":
+ user => root,
+ hour => '23',
+ minute => '42',
+ command => '/usr/local/bin/backup_blog-db.sh',
+ require => Mga_common::Local_script['backup_blog-db.sh'],
+ }
+ }
- file { "/var/lib/blog":
+ class files_backup inherits base {
+ file { $blog_files_backupdir:
ensure => directory,
- owner => blog,
- group => blog,
- mode => 644,
}
- cron { blog:
- user => blog,
- minute => '*/15',
- command => "/usr/local/bin/check_new-blog-post.sh",
- require => File["check_new-blog-post"]
- }
+ mga_common::local_script { 'backup_blog-files.sh':
+ content => template('blog/backup_blog-files.sh'),
+ }
+
+ cron { 'Backup files (blog)':
+ user => root,
+ hour => '23',
+ minute => '42',
+ command => '/usr/local/bin/backup_blog-files.sh',
+ require => Mga_common::Local_script['backup_blog-files.sh'],
+ }
+ }
}
diff --git a/modules/blog/templates/.htaccess b/modules/blog/templates/.htaccess
new file mode 100644
index 00000000..19bee3bd
--- /dev/null
+++ b/modules/blog/templates/.htaccess
@@ -0,0 +1,10 @@
+# BEGIN WordPress
+<IfModule mod_rewrite.c>
+RewriteEngine On
+RewriteBase /
+RewriteCond %{REQUEST_FILENAME} !-f
+RewriteCond %{REQUEST_FILENAME} !-d
+RewriteRule . /index.php [L]
+</IfModule>
+
+# END WordPress
diff --git a/modules/blog/templates/backup_blog-db.sh b/modules/blog/templates/backup_blog-db.sh
new file mode 100755
index 00000000..c497cb8f
--- /dev/null
+++ b/modules/blog/templates/backup_blog-db.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Initialization
+PATH_TO_FILE=${PATH_TO_FILE:-<%= blog_db_backupdir %>}
+[ ! -f $PATH_TO_FILE/count ] && echo 0 > $PATH_TO_FILE/count
+COUNT=$(cat "$PATH_TO_FILE/count")
+# Backup each locale DB
+for locale in de el en es fr it nl pl pt ro ru tr uk
+do
+ if [ ! -d $PATH_TO_FILE/$locale ]
+ then
+ /bin/mkdir $PATH_TO_FILE/$locale
+ fi
+ /usr/bin/mysqldump --add-drop-table -h localhost blog_$locale | bzip2 -c > $PATH_TO_FILE/$locale/mageia_$locale-$COUNT.bak.sql.bz2
+done
+# Check count file to have a week of backup in the directory
+if [ $COUNT -ne 6 ]
+then
+ COUNT=$(expr $COUNT + 1)
+else
+ COUNT="0"
+fi
+echo $COUNT > $PATH_TO_FILE/count
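The count file cycles from 0 to 6 before wrapping, so with the daily cron each locale keeps seven rotating dumps. The same rotation can be written with arithmetic expansion; a sketch only, not part of the deployed script:

    # equivalent week-long rotation of the backup index
    COUNT=$(( (COUNT + 1) % 7 ))
    echo "$COUNT" > "$PATH_TO_FILE/count"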
diff --git a/modules/blog/templates/backup_blog-files.sh b/modules/blog/templates/backup_blog-files.sh
new file mode 100755
index 00000000..e268ad2b
--- /dev/null
+++ b/modules/blog/templates/backup_blog-files.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Initialization
+PATH_TO_FILE=${PATH_TO_FILE:-<%= blog_files_backupdir %>}
+[ ! -f $PATH_TO_FILE/count ] && echo 0 > $PATH_TO_FILE/count
+COUNT=$(cat "$PATH_TO_FILE/count")
+# Backup each locale
+for locale in de el en es fr it nl pl pt ro ru sv tr uk
+do
+ if [ ! -d $PATH_TO_FILE/$locale ]
+ then
+ /bin/mkdir $PATH_TO_FILE/$locale
+ fi
+ # use relative paths to avoid tar's "Removing leading `/' from member names" warning
+ tar -C / -Jcf "$PATH_TO_FILE/$locale/$locale-$COUNT.tar.xz" "$(sed s,^/,, <<< "<%= blog_location %>/$locale")"
+done
+# Check count file to have a week of backup in the directory
+if [ $COUNT -ne 6 ]
+then
+ COUNT=$(expr $COUNT + 1)
+else
+ COUNT="0"
+fi
+echo $COUNT > $PATH_TO_FILE/count
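Since the archives are written with tar -C / and relative member names, a locale can be restored in place with a single extract from the root directory; a sketch, where the locale and slot number are examples:

    # restore the English blog tree from rotation slot 3
    tar -C / -xJf /var/lib/backups/blog_files/en/en-3.tar.xz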
diff --git a/modules/blog/templates/blogs_vhosts.conf b/modules/blog/templates/blogs_vhosts.conf
new file mode 100644
index 00000000..ff3c792f
--- /dev/null
+++ b/modules/blog/templates/blogs_vhosts.conf
@@ -0,0 +1,16 @@
+<Directory <%= blog_location %> >
+ Order deny,allow
+ Allow from All
+ AllowOverride All
+ Options FollowSymlinks
+ Options +Indexes
+</Directory>
+# Add a permanent redirection for 'pt' as it was 'pt-br' before
+# Add a permanent redirection for '/*' as it is now '/en/' for the English blog
+# TO BE REMOVED on May 1st (?)
+<IfModule mod_alias.c>
+ Redirect permanent /pt-br/ /pt/
+ Redirect permanent /wp-content/uploads/ /en/wp-content/uploads/
+ Redirect permanent /wp-includes/images/ /en/wp-includes/images/
+ RedirectMatch permanent ^/?$ /en/
+</IfModule>
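The redirects above can be verified from outside once the vhost is live; a sketch assuming curl is available:

    # both requests should answer with a permanent redirect into /pt/ and /en/ respectively
    curl -sI https://blog.mageia.org/pt-br/ | grep -i '^location'
    curl -sI https://blog.mageia.org/ | grep -i '^location'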
diff --git a/modules/blog/templates/check_new-blog-post.sh b/modules/blog/templates/check_new-blog-post.sh
index c3183375..f2089a52 100755
--- a/modules/blog/templates/check_new-blog-post.sh
+++ b/modules/blog/templates/check_new-blog-post.sh
@@ -2,36 +2,49 @@
# Initialization
PATH_TO_FILE=${PATH_TO_FILE:-/var/lib/blog}
-/usr/bin/wget -qO $PATH_TO_FILE"/RSS_new" http://blog.mageia.org/?feed=rss2
-if [ -n $? ]
+/usr/bin/wget -qO $PATH_TO_FILE"/last_tmp" https://blog.mageia.org/en/?feed=rss2
+if [ $? -ne 0 ]
then
- exit 2
+ exit 2
fi
-# Check if RSS_old exists
-if [ ! -f $PATH_TO_FILE"/RSS_old" ]
+last_title=$(grep "title" $PATH_TO_FILE"/last_tmp" | head -n 2 | sed '1d' | sed 's/<title>//' | sed 's/<\/title>//' | sed 's/^[ \t]*//')
+last_pub=$(grep "pubDate" $PATH_TO_FILE"/last_tmp" | head -n 1 | sed 's/<pubDate>//' | sed 's/<\/pubDate>//' | sed 's/^[ \t]*//')
+last_creator=$(grep "creator" $PATH_TO_FILE"/last_tmp" | head -n 1 | sed 's/<dc:creator>//' | sed 's/<\/dc:creator>//' | sed 's/^[ \t]*//')
+echo -e "$last_title\n$last_pub\n$last_creator" > $PATH_TO_FILE"/last_tmp"
+
+# Check if 'last_entry' exists
+if [ ! -f $PATH_TO_FILE"/last_entry" ]
then
- /bin/mv -f $PATH_TO_FILE"/RSS_new" $PATH_TO_FILE"/RSS_old"
+ /bin/mv -f $PATH_TO_FILE"/last_tmp" $PATH_TO_FILE"/last_entry"
exit 1
fi
+# Add a date file for log
/bin/date +"%d:%m:%Y %H:%M" > $PATH_TO_FILE"/last_check"
# Check if a new blog post on EN needs to be translated on other blogs
-tmp_new=$(/bin/grep 'lastBuildDate' $PATH_TO_FILE"/RSS_new")
-tmp_old=$(/bin/grep 'lastBuildDate' $PATH_TO_FILE"/RSS_old")
+tmp_new=$(cat $PATH_TO_FILE"/last_tmp" | sed -n '1p')
+tmp_old=$(cat $PATH_TO_FILE"/last_entry" | sed -n '1p')
if [ "$tmp_old" = "$tmp_new" ]
then
# Nothing new
- echo "NO" >> $PATH_TO_FILE"/last_check"
+ tmp_new=$(cat $PATH_TO_FILE"/last_tmp" | sed -n '2p')
+ tmp_old=$(cat $PATH_TO_FILE"/last_entry" | sed -n '2p')
+ if [ "$tmp_old" != "$tmp_new" ]
+ then
+ # Modification on latest post
+ echo "YES - Modification" >> $PATH_TO_FILE"/last_check"
+ echo -e "The latest blog post has been modified and needs to be checked!\n\nTitle:\t$last_title\nAuthor:\t$last_creator\n-- \nMail sent by the script '$0' on `hostname`" | /bin/mail -r '<%= blog_newpost_email_from %>' -s "Modification of the latest entry on English Blog" <%= blog_newpost_email_to %>
+ echo $DATE
+ else
+ echo "NO" >> $PATH_TO_FILE"/last_check"
+ fi
else
# New post to translate
- cat $PATH_TO_FILE"/last_check" > $PATH_TO_FILE"/last_need_translation"
- new_post=$(grep "title" $PATH_TO_FILE"/RSS_new" | head -n 2 | sed '1d' | sed 's/<title>//' | sed 's/<\/title>//' | sed 's/^[ \t]*//')
- echo $new_post >> $PATH_TO_FILE"/last_need_translation"
- echo "YES" >> $PATH_TO_FILE"/last_check"
- echo -e "A new blog post is waiting for translation\n\"$new_post\"" | /bin/mail -s "New entry on English Blog" mageia-blogteam@mageia.org
+ echo "YES - New entry" >> $PATH_TO_FILE"/last_check"
+ echo -e "A new blog post is waiting for translation:\n\nTitle:\t$last_title\nAuthor:\t$last_creator\n-- \nMail sent by the script '$0' on `hostname`" | /bin/mail -r '<%= blog_newpost_email_from %>' -s "New entry on English Blog" <%= blog_newpost_email_to %>
echo $DATE
fi
# Clean tmp files and copy RSS_new to RSS_old
-/bin/mv -f $PATH_TO_FILE"/RSS_new" $PATH_TO_FILE"/RSS_old"
+/bin/mv -f $PATH_TO_FILE"/last_tmp" $PATH_TO_FILE"/last_entry"
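The state kept in last_entry is now a three-line file (title, pubDate, creator): a change on line 1 triggers the new-post mail, while a change on line 2 alone triggers the modification mail. Because PATH_TO_FILE is honoured, the checker can be exercised by hand against a scratch state directory; the directory below is only an example:

    # two consecutive runs: the first seeds last_entry, the second performs the comparison
    mkdir -p /tmp/blog-test
    PATH_TO_FILE=/tmp/blog-test /usr/local/bin/check_new-blog-post.sh
    PATH_TO_FILE=/tmp/blog-test /usr/local/bin/check_new-blog-post.sh
    cat /tmp/blog-test/last_check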
diff --git a/modules/bugzilla-dev/manifests/init.pp b/modules/bugzilla-dev/manifests/init.pp
new file mode 100755
index 00000000..c6623872
--- /dev/null
+++ b/modules/bugzilla-dev/manifests/init.pp
@@ -0,0 +1,81 @@
+class bugzilla-dev {
+
+ $bugzilla_dev_location = '/usr/share/bugzilla/'
+
+ package {['graphviz',
+ 'perl-Template-GD', # needed for graphical_report support
+ 'perl-Test-Taint',
+ 'perl-JSON-RPC',
+ 'perl-Email-MIME',
+ 'perl-Email-Sender',
+ 'perl-Math-Random-ISAAC',
+ 'perl-Chart',
+ 'perl-PatchReader',
+ 'perl-ldap',
+ 'perl-SOAP-Lite',
+ 'perl-XMLRPC-Lite',
+ 'perl-CGI']: }
+
+ $pgsql_password = extlookup('bugzilla_pgsql','x')
+ $ldap_password = extlookup('bugzilla_ldap','x')
+
+ postgresql::remote_db_and_user { 'bugs':
+ description => 'Bugzilla database',
+ password => $pgsql_password,
+ }
+
+ file { '/usr/share/bugzilla/localconfig':
+ group => 'apache',
+ mode => '0640',
+ content => template('bugzilla-dev/localconfig')
+ }
+
+
+ file { '/usr/share/bugzilla/data/params.json':
+ group => 'apache',
+ mode => '0640',
+ content => template('bugzilla-dev/params.json')
+ }
+
+ apache::webapp_other { 'bugzilla-dev':
+ webapp_file => 'bugzilla-dev/webapp_bugzilla.conf',
+ }
+
+ $bugs_vhost = "bugs-dev.${::domain}"
+ $vhost_root = '/usr/share/bugzilla'
+
+ apache::vhost::redirect_ssl { $bugs_vhost: }
+
+ apache::vhost::base { $bugs_vhost:
+ content => template('bugzilla-dev/vhost.conf'),
+ aliases => { '/bugzilla/' => $vhost_root },
+ use_ssl => true,
+ location => $vhost_root,
+ vhost => $bugs_vhost,
+ }
+
+ git::snapshot { $bugzilla_dev_location:
+ source => "git://git.${::domain}/web/bugs"
+ }
+
+ file { 'Mageia':
+ ensure => directory,
+ path => '/usr/share/bugzilla',
+ group => 'apache',
+ recurse => true,
+ require => Git::Snapshot[$bugzilla_dev_location],
+ }
+
+ file { '/usr/share/bugzilla/robots.txt':
+ group => 'apache',
+ mode => '0640',
+ content => template('bugzilla-dev/robots.txt')
+ }
+
+ cron { 'collectstats':
+ command => '/usr/share/bugzilla/bin/collectstats.pl',
+ user => 'apache',
+ hour => 2,
+ minute => 30,
+ }
+}
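The manifest deploys localconfig and params.json but never runs checksetup.pl, which is still what creates the database schema and the generated files under data/. A sketch of the remaining manual step, assuming the checkout deployed by git::snapshot:

    # run as root on the bugs-dev host after the manifest has applied
    cd /usr/share/bugzilla && ./checksetup.pl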
diff --git a/modules/bugzilla-dev/templates/localconfig b/modules/bugzilla-dev/templates/localconfig
new file mode 100755
index 00000000..2b7d6035
--- /dev/null
+++ b/modules/bugzilla-dev/templates/localconfig
@@ -0,0 +1,121 @@
+# If you are using Apache as your web server, Bugzilla can create .htaccess
+# files for you, which will keep this file (localconfig) and other
+# confidential files from being read over the web.
+#
+# If this is set to 1, checksetup.pl will create .htaccess files if
+# they don't exist.
+#
+# If this is set to 0, checksetup.pl will not create .htaccess files.
+$create_htaccess = 0;
+
+# The name of the group that your web server runs as. On Red Hat
+# distributions, this is usually "apache". On Debian/Ubuntu, it is
+# usually "www-data".
+#
+# If you have use_suexec turned on below, then this is instead the name
+# of the group that your web server switches to to run cgi files.
+#
+# If this is a Windows machine, ignore this setting, as it does nothing.
+#
+# If you do not have access to the group your scripts will run under,
+# set this to "". If you do set this to "", then your Bugzilla installation
+# will be _VERY_ insecure, because some files will be world readable/writable,
+# and so anyone who can get local access to your machine can do whatever they
+# want. You should only have this set to "" if this is a testing installation
+# and you cannot set this up any other way. YOU HAVE BEEN WARNED!
+#
+# If you set this to anything other than "", you will need to run checksetup.pl
+# as root or as a user who is a member of the specified group.
+$webservergroup = 'apache';
+
+# Set this to 1 if Bugzilla runs in an Apache SuexecUserGroup environment.
+#
+# If your web server runs control panel software (cPanel, Plesk or similar),
+# or if your Bugzilla is to run in a shared hosting environment, then you are
+# almost certainly in an Apache SuexecUserGroup environment.
+#
+# If this is a Windows box, ignore this setting, as it does nothing.
+#
+# If set to 0, checksetup.pl will set file permissions appropriately for
+# a normal webserver environment.
+#
+# If set to 1, checksetup.pl will set file permissions so that Bugzilla
+# works in a SuexecUserGroup environment.
+$use_suexec = 0;
+
+# What SQL database to use. Default is mysql. List of supported databases
+# can be obtained by listing Bugzilla/DB directory - every module corresponds
+# to one supported database and the name of the module (before ".pm")
+# corresponds to a valid value for this variable.
+$db_driver = 'pg';
+
+# The DNS name or IP address of the host that the database server runs on.
+$db_host = 'pg.mageia.org';
+
+# The name of the database. For Oracle, this is the database's SID. For
+# SQLite, this is a name (or path) for the DB file.
+$db_name = 'bugs';
+
+# Who we connect to the database as.
+$db_user = 'bugs';
+
+# Enter your database password here. It's normally advisable to specify
+# a password for your bugzilla database user.
+# If you use apostrophe (') or a backslash (\) in your password, you'll
+# need to escape it by preceding it with a '\' character. (\') or (\)
+# (It is far simpler to just not use those characters.)
+$db_pass = '<%= pgsql_password %>';
+
+# Sometimes the database server is running on a non-standard port. If that's
+# the case for your database server, set this to the port number that your
+# database server is running on. Setting this to 0 means "use the default
+# port for my database server."
+$db_port = 0;
+
+# MySQL Only: Enter a path to the unix socket for MySQL. If this is
+# blank, then MySQL's compiled-in default will be used. You probably
+# want that.
+$db_sock = '';
+
+# Should checksetup.pl try to verify that your database setup is correct?
+# With some combinations of database servers/Perl modules/moonphase this
+# doesn't work, and so you can try setting this to 0 to make checksetup.pl
+# run.
+$db_check = 1;
+
+# Path to a PEM file with a list of trusted SSL CA certificates.
+# The file must be readable by web server user.
+$db_mysql_ssl_ca_file = '';
+
+# Path to a directory containing trusted SSL CA certificates in PEM format.
+# Directory and files inside must be readable by the web server user.
+$db_mysql_ssl_ca_path = '';
+
+# Full path to the client SSL certificate in PEM format we will present to the DB server.
+# The file must be readable by web server user.
+$db_mysql_ssl_client_cert = '';
+
+# Full path to the private key corresponding to the client SSL certificate.
+# The file must not be password-protected and must be readable by web server user.
+$db_mysql_ssl_client_key = '';
+
+# Most web servers will allow you to use index.cgi as a directory
+# index, and many come preconfigured that way, but if yours doesn't
+# then you'll need an index.html file that provides redirection
+# to index.cgi. Setting $index_html to 1 below will allow
+# checksetup.pl to create an index.html for you if it doesn't exist.
+# NOTE: checksetup.pl will not replace an existing file, so if you
+# wish to have checksetup.pl create one for you, you must
+# make sure that index.html doesn't already exist.
+$index_html = 0;
+
+# If you want to use the "Difference Between Two Patches" feature of the
+# Patch Viewer, please specify the full path to the "interdiff" executable
+# here.
+$interdiffbin = '/usr/bin/interdiff';
+
+# For the "Difference Between Two Patches" feature to work, we need to know
+# what directory the "diff" bin is in. (You only need to set this if you
+# are using that feature of the Patch Viewer.)
+$diffpath = '/usr/bin';
+
diff --git a/modules/bugzilla-dev/templates/params.json b/modules/bugzilla-dev/templates/params.json
new file mode 100644
index 00000000..b51b4c00
--- /dev/null
+++ b/modules/bugzilla-dev/templates/params.json
@@ -0,0 +1,104 @@
+{
+ "LDAPBaseDN" : "ou=People,<%= dc_suffix %>",
+ "LDAPbinddn" : "cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= ldap_password %>",
+ "LDAPfilter" : "",
+ "LDAPmailattribute" : "mail",
+ "LDAPserver" : "ldap.<%= domain %>",
+ "LDAPstarttls" : "1",
+ "LDAPuidattribute" : "uid",
+ "RADIUS_NAS_IP" : "",
+ "RADIUS_email_suffix" : "",
+ "RADIUS_secret" : "",
+ "RADIUS_server" : "",
+ "ajax_user_autocompletion" : "1",
+ "allow_attachment_deletion" : "0",
+ "allow_attachment_display" : "1",
+ "allowbugdeletion" : "0",
+ "allowemailchange" : "0",
+ "allowuserdeletion" : "0",
+ "announcehtml" : "",
+ "attachment_base" : "",
+ "auth_env_email" : "",
+ "auth_env_id" : "",
+ "auth_env_realname" : "",
+ "chartgroup" : "editbugs",
+ "collapsed_comment_tags" : "obsolete, spam",
+ "comment_taggers_group" : "editbugs",
+ "commentonchange_resolution" : "1",
+ "commentonduplicate" : "0",
+ "confirmuniqueusermatch" : "1",
+ "cookiedomain" : "",
+ "cookiepath" : "/",
+ "createemailregexp" : ".*",
+ "debug_group" : "editbugs",
+ "default_search_limit" : "500",
+ "defaultopsys" : "Linux",
+ "defaultplatform" : "All",
+ "defaultpriority" : "Normal",
+ "defaultquery" : "bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&order=Importance&long_desc_type=substring",
+ "defaultseverity" : "normal",
+ "duplicate_or_move_bug_status" : "RESOLVED",
+ "emailregexp" : "^[\\w\\.\\+\\-=]+@[\\w\\.\\-]+\\.[\\w\\-]+$",
+ "emailregexpdesc" : "A legal address must contain exactly one '@', and at least one '.' after the @.",
+ "emailsuffix" : "",
+ "font_file" : "",
+ "globalwatchers" : "bugs-dev@ml.mageia.org",
+ "inbound_proxies" : "",
+ "insidergroup" : "secteam",
+ "last_visit_keep_days" : "10",
+ "letsubmitterchoosemilestone" : "1",
+ "letsubmitterchoosepriority" : "1",
+ "mail_delivery_method" : "SMTP",
+ "mailfrom" : "bugzilla-daemon@<%= domain %>",
+ "maintainer" : "sysadmin@group.<%= domain %>",
+ "makeproductgroups" : "0",
+ "max_search_results" : "10000",
+ "maxattachmentsize" : "1000",
+ "maxlocalattachment" : "0",
+ "maxusermatches" : "1000",
+ "memcached_namespace" : "bugzilla:",
+ "memcached_servers" : "",
+ "musthavemilestoneonaccept" : "0",
+ "mybugstemplate" : "buglist.cgi?bug_status=UNCONFIRMED&amp;bug_status=NEW&amp;bug_status=ASSIGNED&amp;bug_status=REOPENED&amp;emailassigned_to1=1&amp;emailreporter1=1&amp;emailtype1=exact&amp;email1=%userid%&amp;field0-0-0=bug_status&amp;type0-0-0=notequals&amp;value0-0-0=UNCONFIRMED&amp;field0-0-1=reporter&amp;type0-0-1=equals&amp;value0-0-1=%userid%",
+ "noresolveonopenblockers" : "0",
+ "or_groups" : "0",
+ "password_check_on_login" : "1",
+ "password_complexity" : "no_constraints",
+ "proxy_url" : "",
+ "querysharegroup" : "editbugs",
+ "quip_list_entry_control" : "open",
+ "rememberlogin" : "on",
+ "requirelogin" : "0",
+ "search_allow_no_criteria" : "0",
+ "shadowdb" : "",
+ "shadowdbhost" : "",
+ "shadowdbport" : "3306",
+ "shadowdbsock" : "",
+ "shutdownhtml" : "",
+ "smtp_debug" : "0",
+ "smtp_password" : "",
+ "smtp_ssl" : "0",
+ "smtp_username" : "",
+ "smtpserver" : "localhost",
+ "ssl_redirect" : "1",
+ "sslbase" : "https://bugs-dev.<%= domain %>/",
+ "strict_isolation" : "0",
+ "strict_transport_security" : "off",
+ "timetrackinggroup" : "",
+ "upgrade_notification" : "latest_stable_release",
+ "urlbase" : "http://bugs-dev.<%= domain %>/",
+ "use_mailer_queue" : "0",
+ "use_see_also" : "1",
+ "useclassification" : "0",
+ "usemenuforusers" : "0",
+ "useqacontact" : "1",
+ "user_info_class" : "CGI",
+ "user_verify_class" : "LDAP",
+ "usestatuswhiteboard" : "1",
+ "usetargetmilestone" : "1",
+ "usevisibilitygroups" : "0",
+ "utf8" : "1",
+ "webdotbase" : "/usr/bin/dot",
+ "webservice_email_filter" : "0",
+ "whinedays" : "0"
+}
diff --git a/modules/bugzilla-dev/templates/robots.txt b/modules/bugzilla-dev/templates/robots.txt
new file mode 100755
index 00000000..63639f02
--- /dev/null
+++ b/modules/bugzilla-dev/templates/robots.txt
@@ -0,0 +1,10 @@
+User-agent: *
+Disallow: /
+Allow: /*index.cgi
+Allow: /*page.cgi
+Allow: /*show_bug.cgi
+Allow: /*describecomponents.cgi
+Disallow: /*show_bug.cgi*ctype=*
+Disallow: /*show_bug.cgi*format=multiple*
+Disallow: /*page.cgi*id=voting*
+Sitemap: https://bugs.mageia.org/page.cgi?id=sitemap/sitemap.xml
diff --git a/modules/bugzilla-dev/templates/vhost.conf b/modules/bugzilla-dev/templates/vhost.conf
new file mode 100755
index 00000000..79eab9fb
--- /dev/null
+++ b/modules/bugzilla-dev/templates/vhost.conf
@@ -0,0 +1,2 @@
+RewriteEngine On
+RewriteRule ^/([0-9]+)$ /show_bug.cgi?id=$1 [R=301,L]
diff --git a/modules/bugzilla-dev/templates/webapp_bugzilla.conf b/modules/bugzilla-dev/templates/webapp_bugzilla.conf
new file mode 100755
index 00000000..a8f37a00
--- /dev/null
+++ b/modules/bugzilla-dev/templates/webapp_bugzilla.conf
@@ -0,0 +1,73 @@
+<%
+path_data_directory = "/var/lib/bugzilla"
+%>
+
+<Directory /usr/share/bugzilla/>
+ AddHandler cgi-script .cgi
+ Options +ExecCGI +FollowSymLinks
+ DirectoryIndex index.cgi index.html
+ AllowOverride All
+</Directory>
+
+# The duplicates.rdf must be accessible, as it is used by
+# duplicates.xul
+<Directory <%= path_data_directory %>>
+ <Files duplicates.rdf>
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order allow,deny
+ Allow from all
+ </IfModule>
+ </Files>
+</Directory>
+
+# The png files created locally must be accessible
+<Directory <%= path_data_directory %>/webdot>
+ <FilesMatch \.png$>
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order allow,deny
+ Allow from all
+ </IfModule>
+ </FilesMatch>
+</Directory>
+
+Alias /graphs/ <%= path_data_directory %>/graphs/
+<Directory <%= path_data_directory %>/graphs>
+ <FilesMatch \.png$>
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order allow,deny
+ Allow from all
+ </IfModule>
+ </FilesMatch>
+</Directory>
+
+# This should work automatically, but perhaps something
+# in our Bugzilla packaging breaks this?
+Alias /extensions/Mageia/web/ <%= scope.lookupvar("bugzilla::extension_location") %>/web/
+<Directory <%= scope.lookupvar("bugzilla::extension_location") %>/web/>
+ <FilesMatch \.png$>
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order allow,deny
+ Allow from all
+ </IfModule>
+ </FilesMatch>
+</Directory>
diff --git a/modules/bugzilla/manifests/init.pp b/modules/bugzilla/manifests/init.pp
index 23a24f1e..e66ddf0e 100644..100755
--- a/modules/bugzilla/manifests/init.pp
+++ b/modules/bugzilla/manifests/init.pp
@@ -1,36 +1,202 @@
class bugzilla {
- package { 'bugzilla':
- ensure => installed;
+ $bugzilla_location = '/usr/share/bugzilla'
+
+ package {['graphviz',
+ 'perl-Template-GD', # needed for graphical_report support
+ 'perl-Test-Taint',
+ 'perl-JSON-RPC',
+ 'perl-JSON-XS',
+ 'perl-Email-MIME',
+ 'perl-Email-Sender',
+ 'perl-Math-Random-ISAAC',
+ 'perl-Chart',
+ 'perl-PatchReader',
+ 'perl-ldap',
+ 'perl-SOAP-Lite',
+ 'perl-XMLRPC-Lite',
+ 'perl-CGI',
+ 'perl-HTML-Scrubber',
+ 'perl-Encode-Detect',
+ 'perl-File-MimeInfo',
+ 'perl-Email-Reply',
+ 'perl-HTML-FormatText-WithLinks',
+ 'perl-Cache-Memcached',
+ 'perl-File-Copy-Recursive',
+ 'perl-Daemon-Generic']: }
+
+ $pgsql_password = extlookup('bugzilla_pgsql','x')
+ $ldap_password = extlookup('bugzilla_ldap','x')
+ $bugzilla_secret_key = extlookup('bugzilla_secret_key','x')
+
+ postgresql::remote_db_and_user { 'bugs':
+ description => 'Bugzilla database',
+ password => $pgsql_password,
}
- $password = extlookup("bugzilla_password")
- $passwordLdap = extlookup("bugzilla_ldap")
+ file { "$bugzilla_location/localconfig":
+ group => 'apache',
+ mode => '0640',
+ content => template('bugzilla/localconfig')
+ }
- file { '/etc/bugzilla/localconfig':
- ensure => present,
- owner => root,
- group => apache,
- mode => 640,
- content => template("bugzilla/localconfig")
+
+ file { "$bugzilla_location/data/params.json":
+ group => 'apache',
+ mode => '0640',
+ content => template('bugzilla/params.json')
+ }
+ file { "$bugzilla_location/graphs":
+ ensure => directory,
+ owner => 'apache',
+ group => 'apache',
+ mode => '0770'
}
+ apache::webapp_other { 'bugzilla':
+ webapp_file => 'bugzilla/webapp_bugzilla.conf',
+ }
- file { '/var/lib/bugzilla/params':
- ensure => present,
- owner => root,
- group => apache,
- mode => 640,
- content => template("bugzilla/params")
+ $bugs_vhost = "bugs.${::domain}"
+ $vhost_root = $bugzilla_location
+
+ apache::vhost::redirect_ssl { $bugs_vhost: }
+
+ apache::vhost::base { $bugs_vhost:
+ content => template('bugzilla/vhost.conf'),
+ aliases => { '/bugzilla/' => $vhost_root },
+ use_ssl => true,
+ location => $vhost_root,
+ vhost => $bugs_vhost,
+ enable_location => false,
+ }
+
+ git::snapshot { $bugzilla_location:
+ source => "git://git.${::domain}/web/bugs"
}
- include apache::mod_fcgid
- apache::webapp_other{"bugzilla":
- webapp_file => "bugzilla/webapp_bugzilla.conf",
- }
+ file { 'Mageia':
+ ensure => directory,
+ path => $bugzilla_location,
+ group => 'apache',
+ recurse => true,
+ require => Git::Snapshot[$bugzilla_location],
+ }
- apache::vhost_other_app { "bugs.$domain":
- vhost_file => "bugzilla/vhost_bugs.conf",
+ file { ["$bugzilla_location/data",
+ "$bugzilla_location/data/mining"]:
+ ensure => directory,
+ owner => 'apache',
+ group => 'apache',
+ mode => '0770'
}
-}
+ file { "$bugzilla_location/data/assets":
+ ensure => directory,
+ owner => 'apache',
+ group => 'apache',
+ mode => '0770'
+ }
+
+ file { "$bugzilla_location/robots.txt":
+ group => 'apache',
+ mode => '0640'
+ }
+
+ file { "$bugzilla_location/data/bugzilla-update.xml":
+ owner => 'apache',
+ group => 'apache',
+ mode => '0640'
+ }
+
+ file { [
+ "$bugzilla_location/admin.cgi",
+ "$bugzilla_location/attachment.cgi",
+ "$bugzilla_location/buglist.cgi",
+ "$bugzilla_location/chart.cgi",
+ "$bugzilla_location/colchange.cgi",
+ "$bugzilla_location/config.cgi",
+ "$bugzilla_location/createaccount.cgi",
+ "$bugzilla_location/describecomponents.cgi",
+ "$bugzilla_location/describekeywords.cgi",
+ "$bugzilla_location/duplicates.cgi",
+ "$bugzilla_location/editclassifications.cgi",
+ "$bugzilla_location/editfields.cgi",
+ "$bugzilla_location/editgroups.cgi",
+ "$bugzilla_location/editmilestones.cgi",
+ "$bugzilla_location/editproducts.cgi",
+ "$bugzilla_location/editusers.cgi",
+ "$bugzilla_location/editversions.cgi",
+ "$bugzilla_location/editworkflow.cgi",
+ "$bugzilla_location/editcomponents.cgi",
+ "$bugzilla_location/editflagtypes.cgi",
+ "$bugzilla_location/editkeywords.cgi",
+ "$bugzilla_location/editparams.cgi",
+ "$bugzilla_location/editsettings.cgi",
+ "$bugzilla_location/editvalues.cgi",
+ "$bugzilla_location/editwhines.cgi",
+ "$bugzilla_location/enter_bug.cgi",
+ "$bugzilla_location/index.cgi",
+ "$bugzilla_location/jsonrpc.cgi",
+ "$bugzilla_location/page.cgi",
+ "$bugzilla_location/post_bug.cgi",
+ "$bugzilla_location/process_bug.cgi",
+ "$bugzilla_location/query.cgi",
+ "$bugzilla_location/quips.cgi",
+ "$bugzilla_location/relogin.cgi",
+ "$bugzilla_location/reports.cgi",
+ "$bugzilla_location/rest.cgi",
+ "$bugzilla_location/search_plugin.cgi",
+ "$bugzilla_location/show_bug.cgi",
+ "$bugzilla_location/showdependencytree.cgi",
+ "$bugzilla_location/testagent.cgi",
+ "$bugzilla_location/userprefs.cgi",
+ "$bugzilla_location/xmlrpc.cgi",
+ "$bugzilla_location/report.cgi",
+ "$bugzilla_location/request.cgi",
+ "$bugzilla_location/sanitycheck.cgi",
+ "$bugzilla_location/show_activity.cgi",
+ "$bugzilla_location/showdependencygraph.cgi",
+ "$bugzilla_location/summarize_time.cgi",
+ "$bugzilla_location/token.cgi",
+ "$bugzilla_location/votes.cgi",
+ "$bugzilla_location/checksetup.pl",
+ "$bugzilla_location/clean-bug-user-last-visit.pl",
+ "$bugzilla_location/collectstats.pl",
+ "$bugzilla_location/email_in.pl",
+ "$bugzilla_location/importxml.pl",
+ "$bugzilla_location/install-module.pl",
+ "$bugzilla_location/jobqueue.pl",
+ "$bugzilla_location/migrate.pl",
+ "$bugzilla_location/runtests.pl",
+ "$bugzilla_location/sanitycheck.pl",
+ "$bugzilla_location/testserver.pl",
+ "$bugzilla_location/whineatnews.pl",
+ "$bugzilla_location/whine.pl",
+ ]:
+ group => 'apache',
+ mode => '0750',
+ }
+
+# Improper file permissions makes this fail, and nobody seems to care
+# cron { 'collectstats':
+# command => "cd $bugzilla_location && ./collectstats.pl",
+# user => 'apache',
+# hour => 2,
+# minute => 30,
+# }
+
+ cron { 'clean-bug-user-last-visit':
+ command => "cd $bugzilla_location && ./clean-bug-user-last-visit.pl",
+ user => 'apache',
+ hour => 3,
+ minute => 0,
+ }
+ cron { 'sanitycheck':
+ command => "cd $bugzilla_location && $bugzilla_location/sanitycheck.pl --login LpSolit@gmail.com",
+ user => 'apache',
+ hour => 21,
+ minute => 0,
+ }
+
+}
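The collectstats cron stays commented out because of the permission problem noted above; running it by hand under the apache user reproduces the failure and shows which path is at fault. A sketch:

    # reproduce the failure interactively instead of waiting for cron
    sudo -u apache sh -c 'cd /usr/share/bugzilla && ./collectstats.pl'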
diff --git a/modules/bugzilla/templates/localconfig b/modules/bugzilla/templates/localconfig
index 23089510..61935552 100644
--- a/modules/bugzilla/templates/localconfig
+++ b/modules/bugzilla/templates/localconfig
@@ -1,51 +1,59 @@
-
# If you are using Apache as your web server, Bugzilla can create .htaccess
-# files for you that will instruct Apache not to serve files that shouldn't
-# be accessed from the web browser (like your local configuration data and non-cgi
-# executable files). For this to work, the directory your Bugzilla
-# installation is in must be within the jurisdiction of a <Directory> block
-# in the httpd.conf file that has 'AllowOverride Limit' in it. If it has
-# 'AllowOverride All' or other options with Limit, that's fine.
-# (Older Apache installations may use an access.conf file to store these
-# <Directory> blocks.)
-# If this is set to 1, Bugzilla will create these files if they don't exist.
-# If this is set to 0, Bugzilla will not create these files.
-$create_htaccess = 0;
-
-# Usually, this is the group your web server runs as.
-# If you have a Windows box, ignore this setting.
-# If you have use_suexec switched on below, this is the group Apache switches
-# to in order to run Bugzilla scripts.
+# files for you, which will keep this file (localconfig) and other
+# confidential files from being read over the web.
+#
+# If this is set to 1, checksetup.pl will create .htaccess files if
+# they don't exist.
+#
+# If this is set to 0, checksetup.pl will not create .htaccess files.
+$create_htaccess = 1;
+
+# The name of the group that your web server runs as. On Red Hat
+# distributions, this is usually "apache". On Debian/Ubuntu, it is
+# usually "www-data".
+#
+# If you have use_suexec turned on below, then this is instead the name
+# of the group that your web server switches to to run cgi files.
+#
+# If this is a Windows machine, ignore this setting, as it does nothing.
+#
# If you do not have access to the group your scripts will run under,
# set this to "". If you do set this to "", then your Bugzilla installation
# will be _VERY_ insecure, because some files will be world readable/writable,
# and so anyone who can get local access to your machine can do whatever they
# want. You should only have this set to "" if this is a testing installation
# and you cannot set this up any other way. YOU HAVE BEEN WARNED!
+#
# If you set this to anything other than "", you will need to run checksetup.pl
-# asroot, or as a user who is a member of the specified group.
+# as root or as a user who is a member of the specified group.
$webservergroup = 'apache';
-# Set this if Bugzilla runs in an Apache SuexecUserGroup environment.
-# (If your web server runs control panel software (cPanel, Plesk or similar),
+# Set this to 1 if Bugzilla runs in an Apache SuexecUserGroup environment.
+#
+# If your web server runs control panel software (cPanel, Plesk or similar),
# or if your Bugzilla is to run in a shared hosting environment, then you are
-# almost certainly in an Apache SuexecUserGroup environment.)
-# If you have a Windows box, ignore this setting.
-# If set to 0, Bugzilla will set file permissions as tightly as possible.
-# If set to 1, Bugzilla will set file permissions so that it may work in an
-# SuexecUserGroup environment. The difference is that static files (CSS,
-# JavaScript and so on) will receive world read permissions.
+# almost certainly in an Apache SuexecUserGroup environment.
+#
+# If this is a Windows box, ignore this setting, as it does nothing.
+#
+# If set to 0, checksetup.pl will set file permissions appropriately for
+# a normal webserver environment.
+#
+# If set to 1, checksetup.pl will set file permissions so that Bugzilla
+# works in a SuexecUserGroup environment.
$use_suexec = 0;
# What SQL database to use. Default is mysql. List of supported databases
# can be obtained by listing Bugzilla/DB directory - every module corresponds
-# to one supported database and the name corresponds to a driver name.
+# to one supported database and the name of the module (before ".pm")
+# corresponds to a valid value for this variable.
$db_driver = 'pg';
-# The DNS name of the host that the database server runs on.
-$db_host = 'pgsql.<%= domain %>';
+# The DNS name or IP address of the host that the database server runs on.
+$db_host = 'pg.mageia.org';
-# The name of the database
+# The name of the database. For Oracle, this is the database's SID. For
+# SQLite, this is a name (or path) for the DB file.
$db_name = 'bugs';
# Who we connect to the database as.
@@ -55,8 +63,8 @@ $db_user = 'bugs';
# a password for your bugzilla database user.
# If you use apostrophe (') or a backslash (\) in your password, you'll
# need to escape it by preceding it with a '\' character. (\') or (\)
-# (Far simpler just not to use those characters.)
-$db_pass = '<%= password %>';
+# (It is far simpler to just not use those characters.)
+$db_pass = '<%= pgsql_password %>';
# Sometimes the database server is running on a non-standard port. If that's
# the case for your database server, set this to the port number that your
@@ -70,35 +78,50 @@ $db_port = 0;
$db_sock = '';
# Should checksetup.pl try to verify that your database setup is correct?
-# (with some combinations of database servers/Perl modules/moonphase this
-# doesn't work)
+# With some combinations of database servers/Perl modules/moonphase this
+# doesn't work, and so you can try setting this to 0 to make checksetup.pl
+# run.
$db_check = 1;
-# With the introduction of a configurable index page using the
-# template toolkit, Bugzilla's main index page is now index.cgi.
+# Path to a PEM file with a list of trusted SSL CA certificates.
+# The file must be readable by web server user.
+$db_mysql_ssl_ca_file = '';
+
+# Path to a directory containing trusted SSL CA certificates in PEM format.
+# Directory and files inside must be readable by the web server user.
+$db_mysql_ssl_ca_path = '';
+
+# Full path to the client SSL certificate in PEM format we will present to the DB server.
+# The file must be readable by web server user.
+$db_mysql_ssl_client_cert = '';
+
+# Full path to the private key corresponding to the client SSL certificate.
+# The file must not be password-protected and must be readable by web server user.
+$db_mysql_ssl_client_key = '';
+
# Most web servers will allow you to use index.cgi as a directory
# index, and many come preconfigured that way, but if yours doesn't
# then you'll need an index.html file that provides redirection
# to index.cgi. Setting $index_html to 1 below will allow
-# checksetup.pl to create one for you if it doesn't exist.
+# checksetup.pl to create an index.html for you if it doesn't exist.
# NOTE: checksetup.pl will not replace an existing file, so if you
# wish to have checksetup.pl create one for you, you must
-# make sure that index.html doesn't already exist
+# make sure that index.html doesn't already exist.
$index_html = 0;
-# For some optional functions of Bugzilla (such as the pretty-print patch
-# viewer), we need the cvs binary to access files and revisions.
-# Because it's possible that this program is not in your path, you can specify
-# its location here. Please specify the full path to the executable.
-$cvsbin = '/usr/bin/cvs';
-
-# For some optional functions of Bugzilla (such as the pretty-print patch
-# viewer), we need the interdiff binary to make diffs between two patches.
-# Because it's possible that this program is not in your path, you can specify
-# its location here. Please specify the full path to the executable.
+# If you want to use the "Difference Between Two Patches" feature of the
+# Patch Viewer, please specify the full path to the "interdiff" executable
+# here.
$interdiffbin = '/usr/bin/interdiff';
-# The interdiff feature needs diff, so we have to have that path.
-# Please specify the directory name only; do not use trailing slash.
+# For the "Difference Between Two Patches" feature to work, we need to know
+# what directory the "diff" bin is in. (You only need to set this if you
+# are using that feature of the Patch Viewer.)
$diffpath = '/usr/bin';
+# This secret key is used by your installation for the creation and
+# validation of encrypted tokens. These tokens are used to implement
+# security features in Bugzilla, to protect against certain types of attacks.
+# A random string is generated by default. It's very important that this key
+# is kept secret. It also must be very long.
+$site_wide_secret = '<%= bugzilla_secret_key %>';
diff --git a/modules/bugzilla/templates/params b/modules/bugzilla/templates/params
index df5c98a2..2e71a39d 100644
--- a/modules/bugzilla/templates/params
+++ b/modules/bugzilla/templates/params
@@ -1,6 +1,6 @@
%param = (
'LDAPBaseDN' => 'ou=People,<%= dc_suffix %>',
- 'LDAPbinddn' => 'cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= passwordLdap %>',
+ 'LDAPbinddn' => 'cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= ldap_password %>',
'LDAPfilter' => '',
'LDAPmailattribute' => 'mail',
'LDAPserver' => 'ldap.<%= domain %>',
@@ -10,11 +10,12 @@
'RADIUS_email_suffix' => '',
'RADIUS_secret' => '',
'RADIUS_server' => '',
+ 'ajax_user_autocompletion' => '1',
'allow_attach_url' => 0,
'allow_attachment_deletion' => 0,
- 'allow_attachment_display' => 0,
+ 'allow_attachment_display' => 1,
'allowbugdeletion' => 0,
- 'allowemailchange' => 1,
+ 'allowemailchange' => 0,
'allowloginid' => '0',
'allowuserdeletion' => 0,
'announcehtml' => '',
@@ -24,33 +25,36 @@
'auth_env_realname' => '',
'bonsai_url' => '',
'chartgroup' => 'editbugs',
- 'commentonchange_resolution' => 0,
+ 'commentonchange_resolution' => 1,
'commentonduplicate' => 0,
'confirmuniqueusermatch' => 1,
'cookiedomain' => '',
'cookiepath' => '/',
- 'createemailregexp' => '.*',
- 'cvsroot' => '',
- 'cvsroot_get' => '',
- 'defaultopsys' => '',
- 'defaultplatform' => '',
- 'defaultpriority' => '---',
- 'defaultquery' => 'bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&order=Importance&long_desc_type=substring',
- 'defaultseverity' => 'enhancement',
- 'docs_urlbase' => 'docs/%lang%/html/',
+ 'createemailregexp' => '.*',
+ 'cvsroot' => '',
+ 'cvsroot_get' => '',
+ 'debug_group' => 'editbugs',
+ 'default_search_limit' => '500',
+ 'defaultopsys' => 'Linux',
+ 'defaultplatform' => 'All',
+ 'defaultpriority' => 'Normal',
+ 'defaultquery' => 'bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&order=Importance&long_desc_type=substring',
+ 'defaultseverity' => 'normal',
+ 'docs_urlbase' => 'https://www.bugzilla.org/docs/4.4/en/html/',
'duplicate_or_move_bug_status' => 'RESOLVED',
'emailregexp' => '^[\\w\\.\\+\\-=]+@[\\w\\.\\-]+\\.[\\w\\-]+$',
'emailregexpdesc' => 'A legal address must contain exactly one \'@\', and at least one \'.\' after the @.',
'emailsuffix' => '',
- 'globalwatchers' => '',
+ 'globalwatchers' => 'bugs@ml.<%= domain %>',
'inbound_proxies' => '',
- 'insidergroup' => '',
+ 'insidergroup' => 'secteam',
'letsubmitterchoosemilestone' => 1,
'letsubmitterchoosepriority' => 1,
'lxr_root' => '',
'lxr_url' => '',
- 'mail_delivery_method' => 'Sendmail',
- 'mailfrom' => 'bugzilla-daemon',
+ 'mail_delivery_method' => 'SMTP',
+ 'mailfrom' => 'bugzilla_noreply@ml.<%= domain %>',
+ 'maintainer' => 'sysadmin@group.<%= domain %>',
'makeproductgroups' => 0,
'maxattachmentsize' => '1000',
'maxlocalattachment' => '0',
@@ -72,36 +76,36 @@
'quip_list_entry_control' => 'open',
'rememberlogin' => 'on',
'requirelogin' => '0',
+ 'search_allow_no_criteria' => '0',
'sendmailnow' => 1,
'shadowdb' => '',
'shadowdbhost' => '',
'shadowdbport' => '3306',
'shadowdbsock' => '',
- 'shutdownhtml' => '',
'smtp_debug' => 0,
'smtp_password' => '',
'smtp_username' => '',
'smtpserver' => 'localhost',
'specific_search_allow_empty_words' => 1,
- 'ssl_redirect' => 0,
- 'sslbase' => '',
+ 'ssl_redirect' => 1,
+ 'sslbase' => 'https://bugs.<%= domain %>/',
'strict_isolation' => 0,
- 'timetrackinggroup' => 'editbugs',
+ 'timetrackinggroup' => '',
'upgrade_notification' => 'latest_stable_release',
- 'urlbase' => 'http://bugs.<%= domain %>/',
+ 'urlbase' => 'https://bugs.<%= domain %>/',
'use_mailer_queue' => 0,
'use_see_also' => 1,
'usebugaliases' => 0,
'useclassification' => 0,
'usemenuforusers' => '0',
- 'useqacontact' => 0,
+ 'useqacontact' => 1,
'user_info_class' => 'CGI',
'user_verify_class' => 'LDAP',
- 'usestatuswhiteboard' => 0,
- 'usetargetmilestone' => 0,
+ 'usestatuswhiteboard' => 1,
+ 'usetargetmilestone' => 1,
'usevisibilitygroups' => 0,
'usevotes' => 0,
'utf8' => 1,
- 'webdotbase' => 'http://www.research.att.com/~north/cgi-bin/webdot.cgi/%urlbase%',
- 'whinedays' => 7
+ 'webdotbase' => '/usr/bin/dot',
+ 'whinedays' => 0
);
diff --git a/modules/bugzilla/templates/params.json b/modules/bugzilla/templates/params.json
new file mode 100644
index 00000000..05325bc7
--- /dev/null
+++ b/modules/bugzilla/templates/params.json
@@ -0,0 +1,104 @@
+{
+ "LDAPBaseDN" : "ou=People,<%= dc_suffix %>",
+ "LDAPbinddn" : "cn=bugzilla-alamut,ou=System Accounts,<%= dc_suffix %>:<%= ldap_password %>",
+ "LDAPfilter" : "",
+ "LDAPmailattribute" : "mail",
+ "LDAPserver" : "ldap.<%= domain %>",
+ "LDAPstarttls" : "1",
+ "LDAPuidattribute" : "uid",
+ "RADIUS_NAS_IP" : "",
+ "RADIUS_email_suffix" : "",
+ "RADIUS_secret" : "",
+ "RADIUS_server" : "",
+ "ajax_user_autocompletion" : "1",
+ "allow_attachment_deletion" : "0",
+ "allow_attachment_display" : "1",
+ "allowbugdeletion" : "0",
+ "allowemailchange" : "0",
+ "allowuserdeletion" : "0",
+ "announcehtml" : "",
+ "attachment_base" : "",
+ "auth_env_email" : "",
+ "auth_env_id" : "",
+ "auth_env_realname" : "",
+ "chartgroup" : "editbugs",
+ "collapsed_comment_tags" : "obsolete, spam, off-topic",
+ "comment_taggers_group" : "editbugs",
+ "commentonchange_resolution" : "1",
+ "commentonduplicate" : "0",
+ "confirmuniqueusermatch" : "1",
+ "cookiedomain" : "",
+ "cookiepath" : "/",
+ "createemailregexp" : ".*",
+ "debug_group" : "admin",
+ "default_search_limit" : "500",
+ "defaultopsys" : "Linux",
+ "defaultplatform" : "All",
+ "defaultpriority" : "Normal",
+ "defaultquery" : "resolution=---&emailassigned_to1=1&emailassigned_to2=1&emailreporter2=1&emailcc2=1&emailqa_contact2=1&emaillongdesc3=1&order=Importance&long_desc_type=substring",
+ "defaultseverity" : "normal",
+ "duplicate_or_move_bug_status" : "RESOLVED",
+ "emailregexp" : "^[\\w\\.\\+\\-=]+@[\\w\\.\\-]+\\.[\\w\\-]+$",
+ "emailregexpdesc" : "A legal address must contain exactly one '@', and at least one '.' after the @.",
+ "emailsuffix" : "",
+ "font_file" : "",
+ "globalwatchers" : "bugs@ml.mageia.org",
+ "inbound_proxies" : "",
+ "insidergroup" : "secteam",
+ "last_visit_keep_days" : "60",
+ "letsubmitterchoosemilestone" : "1",
+ "letsubmitterchoosepriority" : "1",
+ "mail_delivery_method" : "SMTP",
+ "mailfrom" : "bugzilla_noreply@ml.<%= domain %>",
+ "maintainer" : "sysadmin@group.<%= domain %>",
+ "makeproductgroups" : "0",
+ "max_search_results" : "10000",
+ "maxattachmentsize" : "1000",
+ "maxlocalattachment" : "0",
+ "maxusermatches" : "1000",
+ "memcached_namespace" : "bugzilla:",
+ "memcached_servers" : "",
+ "musthavemilestoneonaccept" : "0",
+ "mybugstemplate" : "buglist.cgi?resolution=---&amp;emailassigned_to1=1&amp;emailreporter1=1&amp;emailtype1=exact&amp;email1=%userid%",
+ "noresolveonopenblockers" : "0",
+ "or_groups" : "0",
+ "password_check_on_login" : "1",
+ "password_complexity" : "no_constraints",
+ "proxy_url" : "",
+ "querysharegroup" : "editbugs",
+ "quip_list_entry_control" : "open",
+ "rememberlogin" : "on",
+ "requirelogin" : "0",
+ "search_allow_no_criteria" : "0",
+ "shadowdb" : "",
+ "shadowdbhost" : "",
+ "shadowdbport" : "3306",
+ "shadowdbsock" : "",
+ "shutdownhtml" : "",
+ "smtp_debug" : "0",
+ "smtp_password" : "",
+ "smtp_ssl" : "0",
+ "smtp_username" : "",
+ "smtpserver" : "localhost",
+ "ssl_redirect" : "1",
+ "sslbase" : "https://bugs.<%= domain %>/",
+ "strict_isolation" : "0",
+ "strict_transport_security" : "off",
+ "timetrackinggroup" : "",
+ "upgrade_notification" : "latest_stable_release",
+ "urlbase" : "https://bugs.<%= domain %>/",
+ "use_mailer_queue" : "0",
+ "use_see_also" : "1",
+ "useclassification" : "0",
+ "usemenuforusers" : "0",
+ "useqacontact" : "1",
+ "user_info_class" : "CGI",
+ "user_verify_class" : "LDAP",
+ "usestatuswhiteboard" : "1",
+ "usetargetmilestone" : "1",
+ "usevisibilitygroups" : "0",
+ "utf8" : "1",
+ "webdotbase" : "/usr/bin/dot",
+ "webservice_email_filter" : "0",
+ "whinedays" : "0"
+}
diff --git a/modules/bugzilla/templates/vhost.conf b/modules/bugzilla/templates/vhost.conf
new file mode 100644
index 00000000..fd55e5f2
--- /dev/null
+++ b/modules/bugzilla/templates/vhost.conf
@@ -0,0 +1,14 @@
+RewriteEngine On
+RewriteRule ^/([0-9]+)$ /show_bug.cgi?id=$1 [R=301,L]
+
+ <Directory /usr/share/bugzilla>
+ Require all granted
+ </Directory>
+
+
+ <Directory /usr/share/bugzilla>
+ AllowOverride all
+ AddHandler cgi-script .cgi
+ Options +ExecCGI +FollowSymLinks
+ DirectoryIndex index.cgi index.html
+ </Directory>
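The rewrite at the top lets bare bug numbers in the URL resolve; it can be checked with a HEAD request, where the bug number is only an example:

    # expect a 301 pointing at show_bug.cgi?id=12345
    curl -sI https://bugs.mageia.org/12345 | grep -iE '^(HTTP|location)'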
diff --git a/modules/bugzilla/templates/vhost_bugs.conf b/modules/bugzilla/templates/vhost_bugs.conf
deleted file mode 100644
index 25306b1e..00000000
--- a/modules/bugzilla/templates/vhost_bugs.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-<%
-path_data_directory = lib_dir + "/bugzilla"
-%>
-
-<VirtualHost *:80>
- ServerName bugs.<%= domain %>
- DocumentRoot /usr/share/bugzilla/www
- Alias /bugzilla/data <%= path_data_directory %>
- Alias /bugzilla /usr/share/bugzilla/www
- <Location />
- Allow from all
- </Location>
-</VirtualHost>
diff --git a/modules/bugzilla/templates/webapp_bugzilla.conf b/modules/bugzilla/templates/webapp_bugzilla.conf
index a37760d8..d2e3f395 100644
--- a/modules/bugzilla/templates/webapp_bugzilla.conf
+++ b/modules/bugzilla/templates/webapp_bugzilla.conf
@@ -1,35 +1,11 @@
<%
-path_data_directory = lib_dir + "/bugzilla"
+path_data_directory = "/usr/share/bugzilla/"
%>
-<Directory /usr/share/bugzilla/www>
- Order allow,deny
- Allow from all
-
- Options ExecCGI
- DirectoryIndex index.cgi
-</Directory>
-
-# The duplicates.rdf must be accessible, as it is used by
-# duplicates.xul
-<Directory <%= path_data_directory %>>
- <Files duplicates.rdf>
- Order allow,deny
- Allow from all
- </Files>
+<Directory <%= path_data_directory %> >
+ AddHandler cgi-script .cgi
+ Options +ExecCGI +FollowSymLinks
+ DirectoryIndex index.cgi index.html
+ AllowOverride All
</Directory>
-# The dot files must be accessible to the public webdot server
-# The png files locally created locally must be accessible
-<Directory <%= path_data_directory %>/webdot>
- <FilesMatch \.dot$>
- Order deny,allow
- Deny from all
- Allow from research.att.com
- </FilesMatch>
-
- <FilesMatch \.png$>
- Order allow,deny
- Allow from all
- </FilesMatch>
-</Directory>
diff --git a/modules/buildsystem/files/Mageia.pm b/modules/buildsystem/files/Mageia.pm
new file mode 100644
index 00000000..443f6cb7
--- /dev/null
+++ b/modules/buildsystem/files/Mageia.pm
@@ -0,0 +1,509 @@
+package Youri::Repository::Mageia;
+
+=head1 NAME
+
+Youri::Repository::Mageia - Mageia repository implementation
+
+=head1 DESCRIPTION
+
+This module implements Mageia repository
+
+=cut
+
+use warnings;
+use strict;
+use Carp;
+use Memoize;
+use File::Find 'find';
+use base qw/Youri::Repository/;
+use MDV::Distribconf::Build;
+use SVN::Client;
+use Sys::Hostname;
+
+use constant {
+ PACKAGE_CLASS => 'Youri::Package::RPM::URPM',
+ PACKAGE_CHARSET => 'utf8'
+};
+
+memoize('_get_media_config');
+
+my %translate_arch = (
+ i386 => 'i586',
+ sparc64 => 'sparcv9',
+);
+
+sub _init {
+ my $self = shift;
+ my %options = (
+ noarch => 'i586', # noarch packages policy
+ src => 'i586',
+ install_root => '',
+ test => 0, # test mode
+ verbose => 0, # verbose mode
+ queue => '',
+ rejected => '',
+ @_
+ );
+ foreach my $var ('upload_state') {
+ $self->{"_$var"} = [];
+ foreach my $value (split ' ', $options{$var}) {
+ push @{$self->{"_$var"}}, $value
+ }
+ }
+ print "Initializing repository\n";
+ foreach my $v ('rejected', 'svn', 'queue', 'noarch', 'install_root', 'upload_root', 'verbose') {
+ $self->{"_$v"} = $options{$v}
+ }
+ foreach my $target (@{$options{targets}}) {
+ $self->{$target} = [];
+ print "Adding $target ($options{$target}{arch})\n" if $self->{_verbose};
+ foreach my $value (split ' ', $options{$target}{arch}) {
+ push @{$self->{_arch}{$target}}, $value;
+ push @{$self->{_extra_arches}}, $value
+ }
+ }
+ $self
+}
+
+sub get_group_id {
+ my ($user) = @_;
+ my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = gmtime(time);
+ $year+=1900;
+ $mon++;
+ my ($host) = hostname =~ /([^.]*)/;
+ sprintf "$year%02d%02d%02d%02d%02d.$user.$host.${$}_", $mon, $mday, $hour, $min, $sec;
+}
+
+sub get_target_arch {
+ my ($self, $target) = @_;
+ return $self->{_arch}{$target}
+}
+
+sub set_arch_changed {
+ my ($self, $target, $arch) = @_;
+ if ($arch eq 'noarch') {
+ $self->{_arch_changed}{$_} = 1 foreach @{$self->{_arch}{$target}}
+ } elsif ($arch eq 'src') {
+ $self->{_arch_changed} = $self->{_src}
+ } else {
+ $self->{_arch_changed}{$arch} = 1
+ }
+}
+
+sub get_arch_changed {
+ my ($self, $target) = @_;
+ return [ keys %{$self->{_arch_changed}} ]
+}
+
+sub set_install_dir_changed {
+ my ($self, $install_dir) = @_;
+ $self->{_install_dir_changed}{$install_dir} = 1;
+}
+
+sub get_install_dir_changed {
+ my ($self) = @_;
+ return [ keys %{$self->{_install_dir_changed}} ];
+}
+
+sub _get_media_config {
+ my ($self, $target) = @_;
+ my %media;
+ my $real_target = $target;
+ $real_target =~ s/_force//;
+ foreach my $arch (@{$self->{_arch}{$target}}) {
+ my $root = "$self->{_install_root}/$real_target/$arch";
+ my $distrib = MDV::Distribconf::Build->new($root);
+ print "Getting media config from $root\n" if $self->{_verbose};
+ $self->{distrib}{$arch} = $distrib;
+ $distrib->loadtree or die "$root does not seem to be a distribution tree\n";
+ $distrib->parse_mediacfg;
+ foreach my $media ($distrib->listmedia) {
+ my $rpms = $distrib->getvalue($media, 'rpms');
+ my $debug_for = $distrib->getvalue($media, 'debug_for');
+ my $srpms = $distrib->getvalue($media, 'srpms');
+ my $path = $distrib->getfullpath($media, 'path');
+ if (!$rpms) {
+ if (-d $path) {
+ print "MEDIA defining $media in $path\n" if $self->{_verbose} > 1;
+ $media{$arch}{$media} = $path
+ } else {
+ print "ERROR $path does not exist for media $media on $arch\n"
+ }
+ } else {
+ my ($media) = split ' ', $rpms;
+ if (-d $path) {
+ print "MEDIA defining SOURCE media for $media in $path\n" if $self->{_verbose} > 1;
+ $media{src}{$media} = $path
+ } else {
+ print "ERROR $path does not exist for source media $media on $arch\n"
+ }
+ }
+ }
+ }
+ \%media
+}
+
+sub get_package_class {
+ return PACKAGE_CLASS;
+}
+
+sub get_package_charset {
+ return PACKAGE_CHARSET;
+}
+
+sub get_upload_dir {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+ croak "Not a class method" unless ref $self;
+ my $arch = $package->get_arch();
+ return
+ $self->{_upload_root} .
+ "/$self->{_queue}/$target/" .
+ _get_section($self, $package, $target, $user_context, $app_context) .
+ '/' .
+ ($user_context->{prefix} ? '' : get_group_id($user_context->{user}))
+}
+
+sub get_install_path {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ return $self->_get_path($package, $target, $user_context, $app_context);
+}
+
+
+sub get_distribution_paths {
+ my ($self, $package, $target) = @_;
+
+ return $self->_get_distribution_paths($package, $target);
+}
+
+=head2 get_distribution_roots()
+
+Returns distribution roots (ie install_root + target + arch)
+(it returns a list in case of noarch)
+
+=cut
+
+sub get_distribution_roots {
+ my ($self, $package, $target) = @_;
+ croak "Not a class method" unless ref $self;
+
+ map {
+ $self->_get_dir($self->{_install_root}, $_);
+ } $self->_get_distribution_paths($package, $target);
+}
+
+sub get_archive_path {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ return $self->_get_path($package, $target, $user_context, $app_context);
+}
+
+sub get_reject_path {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ return $self->{_rejected};
+}
+
+
+sub _get_path {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ my $section = $self->_get_section($package, $target, $user_context, $app_context);
+ my $arch = $app_context->{arch} || $package->get_arch();
+ $arch = $translate_arch{$arch} || $arch;
+ if ($arch eq 'noarch') {
+ $arch = $self->{_noarch}
+ } elsif ($arch eq 'src') {
+ return "$target/SRPMS/$section"
+ }
+ "$target/$arch/media/$section"
+}
+
+sub _get_distribution_paths {
+ my ($self, $package, $target) = @_;
+
+ my $arch = $package->get_arch();
+ $arch = $translate_arch{$arch} || $arch;
+ if ($arch eq 'noarch') {
+ map { "$target/$_" } $self->get_target_arches($target);
+ } elsif ($arch eq 'src') {
+ die "no way to get distribution path using a $arch package";
+ } else {
+ "$target/$arch";
+ }
+}
+
+sub get_arch {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+ my $arch = $package->get_arch();
+ $arch = $translate_arch{$arch} || $arch;
+ if ($arch eq 'noarch') {
+ $arch = $self->{_noarch}
+ }
+ $arch
+}
+
+sub get_version_path {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ my $section = $self->_get_section($package, $target, $user_context, $app_context);
+
+ return "$self->{_module}/$section";
+}
+
+=head2 get_replaced_packages($package, $target, $user_context, $app_context)
+
+Overrides parent method to add libified packages.
+
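+For instance (hypothetical package names), submitting
+C<kernel-desktop-6.6.9-1.mga9> would also schedule older
+C<kernel-desktop-*> binaries and the C<*-kernel-*-desktop-*> dkms
+binaries built against them for replacement, in addition to whatever the
+parent method returns.
+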
+=cut
+
+sub get_replaced_packages {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+ croak "Not a class method" unless ref $self;
+
+ my @replaced_packages =
+ $self->SUPER::get_replaced_packages($package, $target, $user_context, $app_context);
+
+ my $name = $package->get_name();
+
+ # kernel packages have the version in the name
+ # binary dkms built for old kernels have to be removed too
+ if ($name =~ /^kernel-([^\d]*-)?([\d.]*)-(.*)$/) { # "desktop", "2.6.28", "2mnb"
+ push(@replaced_packages,
+ map { PACKAGE_CLASS->new(file => $_) }
+ $self->get_files(
+ $self->{_install_root},
+ $self->get_install_path($package, $target, $user_context, $app_context),
+ PACKAGE_CLASS->get_pattern(
+ '(kernel-' . $1 . '\d.*|.*-kernel-[\d.]*-' . $1 . '\d.*)',
+ undef,
+ undef,
+ $package->get_arch()
+ ),
+ )
+ );
+ }
+
+ return @replaced_packages;
+
+}
+
+sub _get_main_section {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ my $section = $self->_get_section($package, $target, $user_context, $app_context);
+ my ($main_section) = $section =~ m,^([^/]+),;
+ $main_section
+}
+
+sub _get_section {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+
+ my $name = $package->get_name();
+ my $cname = $package->get_canonical_name();
+ my $version = $package->get_version();
+ my $release = $package->get_release();
+ my $section = $user_context->{section};
+ my $media = $self->_get_media_config($target);
+ my $arch = $package->get_arch();
+ my $file = $package->as_file();
+ $file =~ s,/+,/,g; # unneeded?
+ # FIXME: use $self->get_arch()
+ $arch = $self->{_noarch} if $arch eq 'noarch';
+ $arch = $translate_arch{$arch} || $arch;
+
+ if (!$section) {
+ $section = $self->{packages}{$file}{section};
+ print "Section undefined, repository says it is '$section' for '$file'\n" if $self->{_verbose};
+ }
+ # FIXME: use debug_for info
+ if ($section && $section !~ m|debug/| && $package->is_debug()) {
+ $section = "debug/$section"
+ }
+
+ # if we already have a section, check that it exists and return immediately
+ if ($section) {
+ print "Using requested section $section\n" if $self->{_verbose};
+ if ($media->{$arch}{$section}) {
+ return $section
+ } else {
+ die "FATAL youri: unknown section $section for target $target for arch $arch\n"
+ }
+ }
+ # else, try to find section automatically
+
+ # pattern to search for a src package with this specific version-release;
+ # it is searched first, because we prefer to find the precise
+ # section a package is already in
+ my $specific_source_pattern = PACKAGE_CLASS->get_pattern(
+ $cname,
+ $version,
+ $release,
+ 'src'
+ );
+
+ my $source_pattern = PACKAGE_CLASS->get_pattern(
+ $cname,
+ undef,
+ undef,
+ 'src'
+ );
+
+ # if a media has no source media configured, or if it is a debug
+ # package, we search in binary media
+
+ # pattern for search when a binary media has no src media configured
+ my $specific_binary_pattern = PACKAGE_CLASS->get_pattern(
+ $name,
+ $version,
+ $release,
+ $arch
+ );
+
+ # last resort pattern: previous existing binary packages
+ my $binary_pattern = PACKAGE_CLASS->get_pattern(
+ $name,
+ undef,
+ undef,
+ $arch
+ );
+
+ # first try to find the section for this specific version, as it is possibly already there;
+ # this is the case when called from Youri::Submit::Action::Archive, to find the
+ # section the package was installed in
+ print "Looking for package $name with version $version-$release\n" if $self->{_verbose};
+ foreach my $m (keys %{$media->{$arch}}) {
+ print " .. section '$m' path '".$media->{$arch}{$m}."'\n" if $self->{_verbose};
+ # - prefer source for non-debug packages, use binary if there is no source media configured
+ # - debug packages must be searched in binary medias, due to their
+ # src section != binary section; NOTE: should/need we search in
+ # src medias and add the 'debug_' prefix?
+ if (!$package->is_debug() && $media->{src}{$m}) {
+ next unless $self->get_files('', $media->{src}{$m}, $specific_source_pattern);
+ } else {
+ next unless $self->get_files('', $media->{$arch}{$m}, $specific_binary_pattern);
+ }
+ $section = $m;
+ last;
+ }
+
+ # if still not found, try finding any version of the package in a
+ # /release subsection (safe default: /release is default for cooker,
+ # should be locked for released distros, and we don't risk wrongly
+ # choosing /backports, /testing, or /updates);
+ # this is the case when called at submit time, to find the section where
+ # the package already resides
+ if (!$section) {
+ # debug packages should be found by previous specific version search
+ # NOTE: as above, should/need we search here and add the 'debug_' prefix?
+ # ... probably... as at least mga-youri-submit-force will process debug packages
+ if ($package->is_debug() && $self->{_verbose}) {
+ print "Warning: debug package $name with version $version-$release not found.\n";
+ }
+
+ print "Warning: Looking for any section with a package $name of any version\n";
+ foreach my $m (keys %{$media->{$arch}}) {
+ print " .. section '$m' path '".$media->{$arch}{$m}."'\n" if $self->{_verbose};
+ # NOTE: the !$package->is_debug() test is a safeguard in case the FATAL error above is ever removed
+ next if $m !~ /release/ || ($m =~ /debug/ && !$package->is_debug());
+ # - prefer source
+ if ($media->{src}{$m}) {
+ next unless $self->get_files('', $media->{src}{$m}, $source_pattern);
+ } else {
+ next unless $self->get_files('', $media->{$arch}{$m}, $binary_pattern);
+ }
+ $section = $m;
+ last;
+ }
+ }
+
+ # FIXME: doing this here is wrong; this way the caller can never know if
+ # a section was actually found or not; should return undef and let the
+ # caller set a default (Note: IIRC PLF|Zarb has this right, see there) -spuk
+ print STDERR "Warning: Can't guess destination: section missing, defaulting to core/release\n" unless $section;
+ $section ||= 'core/release';
+
+ # next time we don't need to search everything again
+ $self->{packages}{$file}{section} = $section;
+
+ print "Section is '$section'.\n" if $self->{_verbose};
+
+ return $section;
+}
+
+sub get_upload_newer_revisions {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+ croak "Not a class method" unless ref $self;
+ my $arch = $package->get_arch();
+ my $name = $package->as_string();
+ $name =~ s/^\@\d+://;
+ my $pattern = $self->get_package_class()->get_pattern($package->get_name(), undef, undef, $arch);
+ my $media = $self->_get_media_config($target);
+ my @packages;
+ foreach my $state (@{$self->{_upload_state}}) {
+ foreach my $m (keys %{$media->{$arch}}) {
+ next if defined($user_context->{section}) and $user_context->{section} ne $m;
+ my $path = "$self->{_upload_root}/$state/$target/$m";
+ print "Looking for package $package revisions for $target in $path (pattern $pattern)\n" if $self->{_verbose};
+ find(
+ sub {
+ s/\d{14}\.[^.]*\.[^.]*\.\d+_//;
+ s/^\@\d+://;
+ return if ! /^$pattern/;
+ return if /\.info$/;
+ print "Find $_\n" if $self->{_verbose} > 1;
+ push @packages, $File::Find::name if $package->check_ranges_compatibility("== $name", "< $_")
+ }, $path);
+ }
+ }
+ return
+ @packages;
+}
+
+sub package_in_svn {
+ my ($self, $srpm_name) = @_;
+ my $ctx = new SVN::Client(
+ auth => [SVN::Client::get_simple_provider(),
+ SVN::Client::get_simple_prompt_provider(\&simple_prompt,2),
+ SVN::Client::get_username_provider()]
+ );
+
+ my $svn_entry = $ctx->ls("$self->{_svn}/$srpm_name", 'HEAD', 0);
+ if ($svn_entry) {
+ print "Package $srpm_name is in the SVN\n" if $self->{_verbose};
+ return 1
+ }
+}
+
+sub get_svn_url {
+ my ($self) = @_;
+ $self->{_svn}
+}
+
+sub reject {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+ croak "Not a class method" unless ref $self;
+
+
+}
+
+sub get_archive_dir {
+ my ($self, $package, $target, $user_context, $app_context) = @_;
+ croak "Not a class method" unless ref $self;
+
+ return
+ $self->{_archive_root}
+}
+
+
+=head1 COPYRIGHT AND LICENSE
+
+Copyright (C) 2002-2006, YOURI project
+Copyright (C) 2006,2007,2009 Mandriva
+Copyright (C) 2011 Nicolas Vigier, Michael Scherer, Pascal Terjan
+
+This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself.
+
+=cut
+
+1;
diff --git a/modules/buildsystem/files/signbot/mga-signpackage b/modules/buildsystem/files/signbot/mga-signpackage
new file mode 100755
index 00000000..199dbe0e
--- /dev/null
+++ b/modules/buildsystem/files/signbot/mga-signpackage
@@ -0,0 +1,31 @@
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+use RPM4::Sign;
+use File::Spec;
+
+sub signpackage {
+ my ($file, $name, $path) = @_;
+
+ # check if parent directory is writable
+ my $parent = (File::Spec->splitpath($file))[1];
+ die "Unsignable package, parent directory is read-only"
+ unless -w $parent;
+
+ my $sign = RPM4::Sign->new(
+ name => $name,
+ path => $path,
+ passphrase => '',
+ );
+
+ $sign->rpmssign($file)
+}
+
+if (@ARGV != 3) {
+ exit 1;
+}
+
+signpackage(@ARGV);
+exit 0
+
diff --git a/modules/buildsystem/files/signbot/sign-check-package b/modules/buildsystem/files/signbot/sign-check-package
new file mode 100644
index 00000000..fc9704fd
--- /dev/null
+++ b/modules/buildsystem/files/signbot/sign-check-package
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+if [ $# != 3 ] ; then
+ echo "missing arguments"
+ echo "usage : $0 file key_number key_directory"
+ exit 1
+fi
+
+file="$1"
+key="$2"
+keydir="$3"
+
+tmpdir=`mktemp -d ${TMPDIR:-/tmp}/signbot-XXXXX`
+tmpfile="$tmpdir/$(basename "$file")"
+cp -pf "$file" "$tmpfile"
+rpm --delsign "$tmpfile"
+/usr/local/bin/mga-signpackage "$tmpfile" "$key" "$keydir"
+nbtry=0
+while rpmsign -Kv "$tmpfile" 2>&1 | grep BAD
+do
+ nbtry=$(($nbtry + 1))
+ if [ $nbtry -ge 30 ]
+ then
+ exit 1
+ fi
+
+ # Archive failed file for further analysis
+ mkdir -p "/tmp/failed-sign/"
+ failedfile="/tmp/failed-sign/$(basename "$file").$(date +%Y%m%d%H%M%S)"
+ cp -pf "$file" "$failedfile"
+
+ cp -pf "$file" "$tmpfile"
+ rpm --delsign "$tmpfile"
+ /usr/local/bin/mga-signpackage "$tmpfile" "$key" "$keydir"
+done
+mv -f "$tmpfile" "$file"
+rmdir "$tmpdir"
diff --git a/modules/buildsystem/files/signbot/signbot-rpmmacros b/modules/buildsystem/files/signbot/signbot-rpmmacros
new file mode 100644
index 00000000..aab7e389
--- /dev/null
+++ b/modules/buildsystem/files/signbot/signbot-rpmmacros
@@ -0,0 +1,3 @@
+%__gpg_sign_cmd %{__gpg} \
+ gpg --batch --force-v3-sigs --no-verbose --no-armor --passphrase-fd 3 --no-secmem-warning \
+ -u "%{_gpg_name}" -sbo %{__signature_filename} %{__plaintext_filename}
diff --git a/modules/buildsystem/manifests/binrepo.pp b/modules/buildsystem/manifests/binrepo.pp
new file mode 100644
index 00000000..5bf16b53
--- /dev/null
+++ b/modules/buildsystem/manifests/binrepo.pp
@@ -0,0 +1,48 @@
+class buildsystem::binrepo {
+ include buildsystem::var::binrepo
+ include buildsystem::var::groups
+ include sudo
+
+ # upload-bin script uses the mailx command provided by nail
+ if versioncmp($::lsbdistrelease, '9') < 0 {
+ package { 'nail':
+ ensure => installed,
+ }
+ } else {
+ package { 's-nail':
+ ensure => installed,
+ }
+ }
+
+ user { $buildsystem::var::binrepo::login:
+ home => $buildsystem::var::binrepo::homedir,
+ }
+
+ file { [$buildsystem::var::binrepo::repodir, $buildsystem::var::binrepo::uploadinfosdir]:
+ ensure => directory,
+ owner => $buildsystem::var::binrepo::login,
+ }
+
+ mga_common::local_script {
+ 'upload-bin':
+ content => template('buildsystem/binrepo/upload-bin');
+ 'wrapper.upload-bin':
+ content => template('buildsystem/binrepo/wrapper.upload-bin');
+ }
+
+ sudo::sudoers_config { 'binrepo':
+ content => template('buildsystem/binrepo/sudoers.binrepo')
+ }
+
+ apache::vhost::base { $buildsystem::var::binrepo::hostname:
+ location => $buildsystem::var::binrepo::repodir,
+ content => template('buildsystem/binrepo/vhost_binrepo.conf'),
+ }
+
+ apache::vhost::base { "ssl_${buildsystem::var::binrepo::hostname}":
+ use_ssl => true,
+ vhost => $buildsystem::var::binrepo::hostname,
+ location => $buildsystem::var::binrepo::repodir,
+ content => template('buildsystem/binrepo/vhost_binrepo.conf'),
+ }
+}
diff --git a/modules/buildsystem/manifests/buildnode.pp b/modules/buildsystem/manifests/buildnode.pp
new file mode 100644
index 00000000..1573c093
--- /dev/null
+++ b/modules/buildsystem/manifests/buildnode.pp
@@ -0,0 +1,12 @@
+class buildsystem::buildnode {
+ include buildsystem::iurt
+ include buildsystem::var::scheduler
+ include buildsystem::var::iurt
+ include buildsystem::sshkeys
+
+ sshkeys::set_authorized_keys { 'iurt-allow-scheduler':
+ keyname => $buildsystem::var::scheduler::login,
+ home => $buildsystem::var::iurt::homedir,
+ user => $buildsystem::var::iurt::login,
+ }
+}
diff --git a/modules/buildsystem/manifests/create_upload_dir.rb b/modules/buildsystem/manifests/create_upload_dir.rb
new file mode 100644
index 00000000..8023ab5d
--- /dev/null
+++ b/modules/buildsystem/manifests/create_upload_dir.rb
@@ -0,0 +1,28 @@
+hostclass "buildsystem::create_upload_dir" do
+ states = ["todo","done","failure","queue","rejected"]
+ owner = scope.lookupvar('buildsystem::var::scheduler::login')
+ group = owner
+ uploads_dir = scope.lookupvar('buildsystem::var::scheduler::homedir') + '/uploads'
+
+ file uploads_dir, :ensure => 'directory', :owner => owner, :group => group
+
+ for st in states do
+ file [uploads_dir, st].join('/'), :ensure => 'directory', :owner => owner, :group => group
+
+ scope.lookupvar('buildsystem::var::distros::distros').each{|rel, distro|
+ file [uploads_dir, st, rel].join('/'), :ensure => 'directory', :owner => owner, :group => group
+ medias = distro['medias']
+ medias.each{|media, m|
+ file [uploads_dir, st, rel, media].join('/'), :ensure => 'directory', :owner => owner, :group => group
+
+ for repo in m['repos'].keys do
+ if st == 'done'
+ file [uploads_dir, st, rel, media, repo].join('/'), :ensure => 'directory', :owner => owner, :group => group, :mode => 0775
+ else
+ file [uploads_dir, st, rel, media, repo].join('/'), :ensure => 'directory', :owner => owner, :group => group
+ end
+ end
+ }
+ }
+ end
+end
diff --git a/modules/buildsystem/manifests/distros.rb b/modules/buildsystem/manifests/distros.rb
new file mode 100644
index 00000000..a298c0a8
--- /dev/null
+++ b/modules/buildsystem/manifests/distros.rb
@@ -0,0 +1,97 @@
+hostclass "buildsystem::distros" do
+ mirror_user = 'root'
+ schedbot_user = scope.lookupvar('buildsystem::var::scheduler::login')
+ bootstrap_reporoot = scope.lookupvar('buildsystem::var::repository::bootstrap_reporoot')
+ scope.lookupvar('buildsystem::var::distros::distros').each{|rel, distro|
+ file [ bootstrap_reporoot, rel ].join('/'), :ensure => 'directory',
+ :owner => mirror_user, :group => mirror_user
+ for arch in distro['arch'] do
+ # As ruby dsl cannot use defined resources, we have to use a
+ # workaround with 'find_resource_type' as described in this
+ # puppet issue: http://projects.puppetlabs.com/issues/11912
+ scope.find_resource_type 'buildsystem::media_cfg'
+ media_cfg_args = {
+ :distro_name => rel,
+ :arch => arch,
+ }
+ if distro['tmpl_media.cfg'] != nil
+ media_cfg_args['templatefile'] = distro['tmpl_media.cfg']
+ end
+ if ! distro['no_media_cfg_update']
+ create_resource 'buildsystem::media_cfg',
+ [ rel, ' ', arch ].join('/'), media_cfg_args
+ end
+ file [ bootstrap_reporoot, rel, arch ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ mediadir = [ bootstrap_reporoot, rel, arch, 'media' ].join('/')
+ file mediadir, :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ file [ mediadir, 'media_info' ].join('/'), :ensure => 'directory',
+ :owner => schedbot_user, :group => schedbot_user
+ file [ mediadir, 'debug' ].join('/'), :ensure => 'directory',
+ :owner => schedbot_user, :group => schedbot_user
+ distro['medias'].each{|media, m|
+ file [ mediadir, media ].join('/'), :ensure => 'directory',
+ :owner => schedbot_user, :group => schedbot_user
+ file [ mediadir, 'debug', media ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ for repo in m['repos'].keys do
+ file [ mediadir, media, repo ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ file [ mediadir, media, repo, 'media_info' ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ file [ mediadir, media, repo, 'repodata' ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ file [ mediadir, 'debug', media, repo ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ file [ mediadir, 'debug', media, repo, 'media_info' ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ file [ mediadir, 'debug', media, repo, 'repodata' ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ end
+ }
+ if distro['based_on'] != nil
+ distro['based_on'].each{|bdistroname, medias|
+ file [ mediadir, bdistroname ].join('/'),
+ :ensure => 'directory', :owner => mirror_user,
+ :group => mirror_user
+ medias.each{|medianame, media|
+ mdir = [ mediadir, bdistroname, medianame ].join('/')
+ file mdir, :ensure => 'directory',
+ :owner => mirror_user, :group => mirror_user
+ for reponame in media
+ file [ mdir, reponame ].join('/'),
+ :ensure => 'link',
+ :target => [
+ '../../../../..', bdistroname, arch,
+ 'media', medianame, reponame ].join('/'),
+ :owner => mirror_user, :group => mirror_user
+ end
+ }
+ }
+ end
+ end
+ # SRPMS
+ srpmsdir = [ bootstrap_reporoot, rel, 'SRPMS' ].join('/')
+ file srpmsdir,
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ distro['medias'].each{|media, m|
+ file [ srpmsdir, media ].join('/'), :ensure => 'directory',
+ :owner => schedbot_user, :group => schedbot_user
+ for repo in m['repos'].keys do
+ file [ srpmsdir, media, repo ].join('/'),
+ :ensure => 'directory', :owner => schedbot_user,
+ :group => schedbot_user
+ end
+ }
+ }
+end
diff --git a/modules/buildsystem/manifests/gatherer.pp b/modules/buildsystem/manifests/gatherer.pp
new file mode 100644
index 00000000..eebfd97e
--- /dev/null
+++ b/modules/buildsystem/manifests/gatherer.pp
@@ -0,0 +1,5 @@
+class buildsystem::gatherer {
+ # emi is in main iurt rpm, should be moved out
+ include iurt::packages
+ include iurt::upload
+}
diff --git a/modules/buildsystem/manifests/init.pp b/modules/buildsystem/manifests/init.pp
index e78468f2..f15b5dbf 100644
--- a/modules/buildsystem/manifests/init.pp
+++ b/modules/buildsystem/manifests/init.pp
@@ -1,83 +1,2 @@
class buildsystem {
-
- class base {
- $build_login = "iurt"
- $build_home_dir = "/home/iurt/"
-
- include ssh::auth
- ssh::auth::key { $build_login: } # declare a key for build bot: RSA, 2048 bits
- }
-
- class mainnode inherits base {
- include iurtuser
- ssh::auth::server { $build_login: }
-
- package { "task-bs-cluster-main":
- ensure => "installed"
- }
- }
-
- class buildnode inherits base {
- include iurt
- }
-
- class scheduler {
- # ulri
- }
-
- class dispatcher {
- # emi
- }
-
- class repsys {
- package { 'repsys':
-
- }
-
-
- }
-
- class iurtuser {
- group {"$build_login":
- ensure => present,
- }
-
- user {"$build_login":
- ensure => present,
- comment => "System user used to run build bots",
- managehome => true,
- gid => $build_login,
- shell => "/bin/bash",
- }
- }
-
- class iurt {
- include sudo
- include iurtuser
- ssh::auth::client { $build_login: }
-
- # build node common settings
- # we could have the following skip list to use less space:
- # '/(drakx-installer-binaries|drakx-installer-advertising|gfxboot|drakx-installer-stage2|mandriva-theme)/'
- $package_list = ['task-bs-cluster-chroot', 'iurt']
- package { $package_list:
- ensure => installed;
- }
-
- file { "$build_home_dir/.iurt.cauldron.conf":
- ensure => present,
- owner => $build_login,
- group => $build_login,
- mode => 644,
- content => template("buildsystem/iurt.cauldron.conf")
- }
-
- file { "/etc/sudoers.d/iurt":
- ensure => present,
- owner => root,
- group => root,
- mode => 440,
- content => template("buildsystem/sudoers.iurt")
- }
- }
}
diff --git a/modules/buildsystem/manifests/iurt.pp b/modules/buildsystem/manifests/iurt.pp
new file mode 100644
index 00000000..231c5373
--- /dev/null
+++ b/modules/buildsystem/manifests/iurt.pp
@@ -0,0 +1,26 @@
+class buildsystem::iurt {
+ include sudo
+ include buildsystem::iurt::user
+ include buildsystem::iurt::packages
+ include buildsystem::var::iurt
+ include buildsystem::var::distros
+
+ # remove old build directory
+ tidy { "${buildsystem::var::iurt::homedir}/iurt":
+ age => '8w',
+ recurse => true,
+ matches => ['[0-9][0-9].*\..*\..*\.[0-9]*','log','*.rpm','*.log','*.mga[0-9]+'],
+ rmdirs => true,
+ }
+
+ file { '/etc/iurt/build':
+ ensure => directory,
+ }
+
+ $distros_list = hash_keys($buildsystem::var::distros::distros)
+ buildsystem::iurt::config { $distros_list: }
+
+ sudo::sudoers_config { 'iurt':
+ content => template('buildsystem/sudoers.iurt')
+ }
+}
diff --git a/modules/buildsystem/manifests/iurt/config.pp b/modules/buildsystem/manifests/iurt/config.pp
new file mode 100644
index 00000000..02f5be63
--- /dev/null
+++ b/modules/buildsystem/manifests/iurt/config.pp
@@ -0,0 +1,50 @@
+define buildsystem::iurt::config() {
+ include buildsystem::var::iurt
+ include buildsystem::var::webstatus
+ include buildsystem::var::repository
+ $distribution = $name
+ # TODO: rename the variable in the template too
+ $build_login = $buildsystem::var::iurt::login
+
+ $build_timeout = {
+ 'default' => 36000,
+ 'atlas' => 57600,
+ 'blender' => 57600,
+ 'chromium-browser-stable' => 172800,
+ 'clang' => 172800,
+ 'cross-gcc' => 115200,
+ 'gcc' => 115200,
+ 'itk' => 115200,
+ 'java-1.8.0-openjdk' => 172800,
+ 'java-17-openjdk' => 172800,
+ 'java-21-openjdk' => 172800,
+ 'java-latest-openjdk' => 172800,
+ 'kernel' => 115200,
+ 'libreoffice' => 432000,
+ 'llvm' => 115200,
+ 'llvm17-suite' => 115200,
+ 'llvm19-suite' => 115200,
+ 'openfoam' => 115200,
+ 'paraview' => 115200,
+ 'qgis' => 57600,
+ 'qtwebengine5' => 115200,
+ 'qtwebengine6' => 172800,
+ 'rust' => 180000,
+ 'salome' => 57600,
+ 'vtk' => 57600,
+ 'webkit' => 57600,
+ 'webkit2' => 115200,
+ 'wrapitk' => 115200,
+ 'rocm-llvm' => 70000,
+ }
+
+ $allow_network_access = [
+ 'libguestfs', # Needs access to the configured mirrors
+ ]
+
+ file { "/etc/iurt/build/${distribution}.conf":
+ owner => $build_login,
+ group => $build_login,
+ content => template("buildsystem/iurt.conf")
+ }
+}
diff --git a/modules/buildsystem/manifests/iurt/packages.pp b/modules/buildsystem/manifests/iurt/packages.pp
new file mode 100644
index 00000000..e814b7c2
--- /dev/null
+++ b/modules/buildsystem/manifests/iurt/packages.pp
@@ -0,0 +1,3 @@
+class buildsystem::iurt::packages {
+ package { 'iurt': }
+}
diff --git a/modules/buildsystem/manifests/iurt/upload.pp b/modules/buildsystem/manifests/iurt/upload.pp
new file mode 100644
index 00000000..5417d36e
--- /dev/null
+++ b/modules/buildsystem/manifests/iurt/upload.pp
@@ -0,0 +1,16 @@
+class buildsystem::iurt::upload {
+ include buildsystem::var::iurt
+ include buildsystem::var::webstatus
+ include buildsystem::var::repository
+ file { '/etc/iurt/upload.conf':
+ require => File['/etc/iurt'],
+ content => template('buildsystem/upload.conf'),
+ notify => Exec['check iurt config'],
+ }
+
+ exec { 'check iurt config':
+ refreshonly => true,
+ command => 'perl -cw /etc/iurt/upload.conf',
+ logoutput => 'on_failure',
+ }
+}
diff --git a/modules/buildsystem/manifests/iurt/user.pp b/modules/buildsystem/manifests/iurt/user.pp
new file mode 100644
index 00000000..a93ac7e7
--- /dev/null
+++ b/modules/buildsystem/manifests/iurt/user.pp
@@ -0,0 +1,11 @@
+class buildsystem::iurt::user {
+ include buildsystem::var::iurt
+
+ buildsystem::sshuser { $buildsystem::var::iurt::login:
+ homedir => $buildsystem::var::iurt::homedir,
+ }
+
+ file { '/etc/iurt':
+ ensure => directory,
+ }
+}
diff --git a/modules/buildsystem/manifests/mainnode.pp b/modules/buildsystem/manifests/mainnode.pp
new file mode 100644
index 00000000..01de764f
--- /dev/null
+++ b/modules/buildsystem/manifests/mainnode.pp
@@ -0,0 +1,23 @@
+class buildsystem::mainnode {
+ include buildsystem::var::repository
+ include buildsystem::var::scheduler
+ include buildsystem::var::distros
+ include buildsystem::iurt::user
+ include buildsystem::scheduler
+ include buildsystem::gatherer
+ include buildsystem::mgarepo
+ include buildsystem::signbot
+ include buildsystem::youri_submit
+ include buildsystem::sshkeys
+ include buildsystem::distros
+
+ sshkeys::set_client_key_pair { $buildsystem::var::scheduler::login:
+ home => $buildsystem::var::scheduler::homedir,
+ user => $buildsystem::var::scheduler::login,
+ }
+ sshkeys::set_authorized_keys { 'scheduler-allow-scheduler':
+ keyname => $buildsystem::var::scheduler::login,
+ home => $buildsystem::var::scheduler::homedir,
+ user => $buildsystem::var::scheduler::login,
+ }
+}
diff --git a/modules/buildsystem/manifests/maintdb.pp b/modules/buildsystem/manifests/maintdb.pp
new file mode 100644
index 00000000..5a961b63
--- /dev/null
+++ b/modules/buildsystem/manifests/maintdb.pp
@@ -0,0 +1,58 @@
+class buildsystem::maintdb {
+ include buildsystem::var::maintdb
+ include buildsystem::var::groups
+ include buildsystem::var::webstatus
+ include sudo
+
+ user { $buildsystem::var::maintdb::login:
+ home => $buildsystem::var::maintdb::homedir,
+ }
+
+ file { [$buildsystem::var::maintdb::homedir,$buildsystem::var::maintdb::dbdir]:
+ ensure => directory,
+ owner => $buildsystem::var::maintdb::login,
+ group => $buildsystem::var::maintdb::login,
+ mode => '0711',
+ require => User[$buildsystem::var::maintdb::login],
+ }
+
+ file { $buildsystem::var::maintdb::binpath:
+ mode => '0755',
+ content => template('buildsystem/maintdb/maintdb.bin')
+ }
+
+ mga_common::local_script { 'wrapper.maintdb':
+ content => template('buildsystem/maintdb/wrapper.maintdb')
+ }
+
+ sudo::sudoers_config { 'maintdb':
+ content => template('buildsystem/maintdb/sudoers.maintdb')
+ }
+
+ file { [$buildsystem::var::maintdb::dump,
+ "${buildsystem::var::maintdb::dump}.new",
+ $buildsystem::var::maintdb::unmaintained,
+ "${buildsystem::var::maintdb::unmaintained}.new"]:
+ owner => $buildsystem::var::maintdb::login,
+ require => File["${buildsystem::var::webstatus::location}/data"],
+ }
+
+ cron { 'update maintdb export':
+ user => $buildsystem::var::maintdb::login,
+ command => "${buildsystem::var::maintdb::binpath} root get > ${buildsystem::var::maintdb::dump}.new; cp -f ${buildsystem::var::maintdb::dump}.new ${buildsystem::var::maintdb::dump}; grep ' nobody\$' ${buildsystem::var::maintdb::dump} | sed 's/ nobody\$//' > ${buildsystem::var::maintdb::unmaintained}.new; cp -f ${buildsystem::var::maintdb::unmaintained}.new ${buildsystem::var::maintdb::unmaintained}",
+ minute => '*/30',
+ require => User[$buildsystem::var::maintdb::login],
+ }
+
+ apache::vhost::base { $buildsystem::var::maintdb::hostname:
+ location => $buildsystem::var::maintdb::dbdir,
+ content => template('buildsystem/maintdb/vhost_maintdb.conf'),
+ }
+
+ apache::vhost::base { "ssl_${buildsystem::var::maintdb::hostname}":
+ use_ssl => true,
+ vhost => $buildsystem::var::maintdb::hostname,
+ location => $buildsystem::var::maintdb::dbdir,
+ content => template('buildsystem/maintdb/vhost_maintdb.conf'),
+ }
+}
diff --git a/modules/buildsystem/manifests/media_cfg.pp b/modules/buildsystem/manifests/media_cfg.pp
new file mode 100644
index 00000000..77fcc8fd
--- /dev/null
+++ b/modules/buildsystem/manifests/media_cfg.pp
@@ -0,0 +1,11 @@
+define buildsystem::media_cfg($distro_name, $arch, $templatefile = 'buildsystem/media.cfg') {
+ include buildsystem::var::repository
+ include buildsystem::var::scheduler
+ include buildsystem::repository
+
+ file { "${buildsystem::var::repository::bootstrap_reporoot}/${distro_name}/${arch}/media/media_info/media.cfg":
+ owner => $buildsystem::var::scheduler::login,
+ group => $buildsystem::var::scheduler::login,
+ content => template($templatefile),
+ }
+}
diff --git a/modules/buildsystem/manifests/mgarepo.pp b/modules/buildsystem/manifests/mgarepo.pp
new file mode 100644
index 00000000..14e11e1a
--- /dev/null
+++ b/modules/buildsystem/manifests/mgarepo.pp
@@ -0,0 +1,36 @@
+class buildsystem::mgarepo {
+ include buildsystem::var::scheduler
+ include buildsystem::var::distros
+ include buildsystem::var::groups
+ include buildsystem::var::binrepo
+ include buildsystem::create_upload_dir
+ $sched_login = $buildsystem::var::scheduler::login
+ $sched_home_dir = $buildsystem::var::scheduler::homedir
+
+ package { ['mgarepo','rpm-build']: }
+
+ file { '/etc/mgarepo.conf':
+ content => template('buildsystem/mgarepo.conf'),
+ }
+
+ file { "${sched_home_dir}/repsys":
+ ensure => 'directory',
+ owner => $sched_login,
+ require => File[$sched_home_dir],
+ }
+
+ file { ["${sched_home_dir}/repsys/tmp", "${sched_home_dir}/repsys/srpms"]:
+ ensure => 'directory',
+ owner => $sched_login,
+ group => $buildsystem::var::groups::packagers,
+ mode => '1775',
+ require => File["${sched_home_dir}/repsys"],
+ }
+
+ # FIXME: disabled temporarily as upload dir is a symlink to /var/lib/repsys/uploads
+ #file { "${sched_home_dir}/uploads":
+ # ensure => "directory",
+ # owner => $sched_login,
+ # require => File[$sched_home_dir],
+ #}
+}
diff --git a/modules/buildsystem/manifests/release.pp b/modules/buildsystem/manifests/release.pp
new file mode 100644
index 00000000..d9feac8e
--- /dev/null
+++ b/modules/buildsystem/manifests/release.pp
@@ -0,0 +1,5 @@
+class buildsystem::release {
+ git::snapshot { '/root/release':
+ source => "git://git.${::domain}/software/infrastructure/release",
+ }
+}
diff --git a/modules/buildsystem/manifests/repoctl.pp b/modules/buildsystem/manifests/repoctl.pp
new file mode 100644
index 00000000..8d44e52c
--- /dev/null
+++ b/modules/buildsystem/manifests/repoctl.pp
@@ -0,0 +1,11 @@
+class buildsystem::repoctl {
+ include buildsystem::var::distros
+ include buildsystem::var::repository
+
+ package{ 'repoctl': }
+
+ file { '/etc/repoctl.conf':
+ content => template('buildsystem/repoctl.conf'),
+ require => Package['repoctl'],
+ }
+}
diff --git a/modules/buildsystem/manifests/repository.pp b/modules/buildsystem/manifests/repository.pp
new file mode 100644
index 00000000..dda90eb2
--- /dev/null
+++ b/modules/buildsystem/manifests/repository.pp
@@ -0,0 +1,11 @@
+class buildsystem::repository {
+ include buildsystem::var::repository
+ file { [ $buildsystem::var::repository::bootstrap_root,
+ $buildsystem::var::repository::bootstrap_reporoot ] :
+ ensure => directory,
+ }
+
+ apache::vhost::other_app { $buildsystem::var::repository::hostname:
+ vhost_file => 'buildsystem/vhost_repository.conf',
+ }
+}
diff --git a/modules/buildsystem/manifests/rpmlint.pp b/modules/buildsystem/manifests/rpmlint.pp
new file mode 100644
index 00000000..388d0bee
--- /dev/null
+++ b/modules/buildsystem/manifests/rpmlint.pp
@@ -0,0 +1,3 @@
+class buildsystem::rpmlint {
+ package { 'rpmlint': }
+}
diff --git a/modules/buildsystem/manifests/scheduler.pp b/modules/buildsystem/manifests/scheduler.pp
new file mode 100644
index 00000000..53b248fc
--- /dev/null
+++ b/modules/buildsystem/manifests/scheduler.pp
@@ -0,0 +1,57 @@
+class buildsystem::scheduler {
+ # until ulri is split from main iurt rpm
+ include buildsystem::iurt::packages
+ include buildsystem::iurt::upload
+ include buildsystem::var::scheduler
+
+ $login = $buildsystem::var::scheduler::login
+ $homedir = $buildsystem::var::scheduler::homedir
+ $logdir = $buildsystem::var::scheduler::logdir
+
+ buildsystem::sshuser { $login:
+ homedir => $homedir,
+ }
+
+ file { $logdir:
+ ensure => directory,
+ mode => '0755',
+ owner => $login,
+ }
+
+ cron { 'dispatch jobs':
+ user => $login,
+ command => "EMI_LOG_FILE=${logdir}/emi.log ULRI_LOG_FILE=${logdir}/ulri.log ulri; EMI_LOG_FILE=${logdir}/emi.log emi",
+ minute => '*',
+ }
+
+ if ($buildsystem::var::scheduler::clean_uploads_logs_age != 0) {
+ cron { 'clean uploads logs':
+ user => $login,
+ # Delete old upload logs
+ command => sprintf("/usr/bin/find %s/uploads -ignore_readdir_race -xdev -depth -type f -ctime +%d -delete", shellquote($homedir), shellquote($buildsystem::var::scheduler::clean_uploads_logs_age)),
+ hour => '*/4',
+ minute => '51',
+ }
+ cron { 'clean uploads dirs':
+ user => $login,
+ # Remove old empty uploads directories. This will take several
+ # passes (over several weeks) to delete a directory hierarchy
+ # because it is looking at ctime instead of mtime, which resets
+ # every time a file/directory underneath it is deleted.
+ # Directories don't take much space, so this shouldn't be a
+ # real issue.
+ command => sprintf("/usr/bin/find %s/uploads -ignore_readdir_race -mindepth 5 -xdev -depth -type d -ctime +%d -empty -delete", shellquote($homedir), shellquote($buildsystem::var::scheduler::clean_uploads_logs_age)),
+ hour => '*/4',
+ minute => '53',
+ }
+ }
+ if ($buildsystem::var::scheduler::clean_uploads_packages_age != 0) {
+ cron { 'clean uploads packages':
+ user => $login,
+ # Delete old upload RPMs
+ command => sprintf("/usr/bin/find %s/uploads -ignore_readdir_race -xdev -depth -type f -name '*.rpm' -ctime +%d -delete", shellquote($homedir), shellquote($buildsystem::var::scheduler::clean_uploads_packages_age)),
+ hour => '*/4',
+ minute => '52',
+ }
+ }
+}
diff --git a/modules/buildsystem/manifests/signbot.pp b/modules/buildsystem/manifests/signbot.pp
new file mode 100644
index 00000000..60c7c318
--- /dev/null
+++ b/modules/buildsystem/manifests/signbot.pp
@@ -0,0 +1,31 @@
+class buildsystem::signbot {
+ include buildsystem::var::scheduler
+ include buildsystem::var::signbot
+ $sched_login = $buildsystem::var::scheduler::login
+
+ buildsystem::sshuser { $buildsystem::var::signbot::login:
+ homedir => $buildsystem::var::signbot::home_dir,
+ groups => [$sched_login],
+ }
+
+ gnupg::keys{ 'packages':
+ email => $buildsystem::var::signbot::keyemail,
+ key_name => $buildsystem::var::signbot::keyname,
+ login => $buildsystem::var::signbot::login,
+ batchdir => "${buildsystem::var::signbot::home_dir}/batches",
+ keydir => $buildsystem::var::signbot::sign_keydir,
+ }
+
+ sudo::sudoers_config { 'signpackage':
+ content => template('buildsystem/signbot/sudoers.signpackage')
+ }
+
+ file { "${home_dir}/.rpmmacros":
+ source => 'puppet:///modules/buildsystem/signbot/signbot-rpmmacros',
+ }
+
+ mga_common::local_script {
+ 'sign-check-package': source => 'puppet:///modules/buildsystem/signbot/sign-check-package';
+ 'mga-signpackage': source => 'puppet:///modules/buildsystem/signbot/mga-signpackage';
+ }
+}
diff --git a/modules/buildsystem/manifests/sshkeys.pp b/modules/buildsystem/manifests/sshkeys.pp
new file mode 100644
index 00000000..5a1b2900
--- /dev/null
+++ b/modules/buildsystem/manifests/sshkeys.pp
@@ -0,0 +1,5 @@
+class buildsystem::sshkeys {
+ include buildsystem::var::scheduler
+
+ sshkeys::create_key { $buildsystem::var::scheduler::login: }
+}
diff --git a/modules/buildsystem/manifests/sshuser.pp b/modules/buildsystem/manifests/sshuser.pp
new file mode 100644
index 00000000..5cad97ad
--- /dev/null
+++ b/modules/buildsystem/manifests/sshuser.pp
@@ -0,0 +1,36 @@
+# $groups: array of secondary groups (only local groups, no ldap)
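+# Example usage (illustrative, mirroring how signbot declares it elsewhere
+# in this module):
+#   buildsystem::sshuser { 'signbot':
+#     homedir => '/var/lib/signbot',
+#     groups  => ['schedbot'],
+#   }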
+define buildsystem::sshuser($homedir, $comment = undef, $groups = []) {
+ group { $name: }
+
+ user { $name:
+ comment => $comment,
+ managehome => true,
+ home => $homedir,
+ gid => $name,
+ groups => $groups,
+ shell => '/bin/bash',
+ notify => Exec["unlock ${name}"],
+ require => Group[$title],
+ }
+
+ # set the password field to '*' to unlock the account while still forbidding password logins
+ exec { "unlock ${name}":
+ command => "usermod -p '*' ${name}",
+ refreshonly => true,
+ }
+
+ file { $homedir:
+ ensure => directory,
+ owner => $name,
+ group => $name,
+ require => User[$name],
+ }
+
+ file { "${homedir}/.ssh":
+ ensure => directory,
+ mode => '0600',
+ owner => $name,
+ group => $name,
+ require => File[$homedir],
+ }
+}
diff --git a/modules/buildsystem/manifests/var/binrepo.pp b/modules/buildsystem/manifests/var/binrepo.pp
new file mode 100644
index 00000000..1431ed25
--- /dev/null
+++ b/modules/buildsystem/manifests/var/binrepo.pp
@@ -0,0 +1,15 @@
+# $uploadmail_from:
+# the address from which the binrepo upload email notifications are sent
+# $uploadmail_to:
+# where binrepo email notifications are sent
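+# Example declaration (illustrative values only):
+#   class { 'buildsystem::var::binrepo':
+#     uploadmail_from => "buildsystem@${::domain}",
+#     uploadmail_to   => "packagers@${::domain}",
+#   }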
+class buildsystem::var::binrepo(
+ $hostname = "binrepo.${::domain}",
+ $login = 'binrepo',
+ $homedir = '/var/lib/binrepo',
+ $uploadmail_from,
+ $uploadmail_to
+) {
+ $repodir = "${homedir}/data"
+ $uploadinfosdir = "${homedir}/infos"
+ $uploadbinpath = '/usr/local/bin/upload-bin'
+}
diff --git a/modules/buildsystem/manifests/var/distros.pp b/modules/buildsystem/manifests/var/distros.pp
new file mode 100644
index 00000000..9e45e2c2
--- /dev/null
+++ b/modules/buildsystem/manifests/var/distros.pp
@@ -0,0 +1,126 @@
+# $default_distro:
+# the name of the default distribution
+# $repo_allow_from_ips:
+# $repo_allow_from_domains:
+# list of IPs or domains allowed to access the repository. If you don't want to
+# filter allowed IPs, don't set those values.
+# $distros:
+# a hash variable containing distribution information indexed by
+# distribution name. Each distribution is itself a hash containing
+# the following information:
+# {
+# # the 'cauldron' distribution
+# 'cauldron' => {
+# # list of arch supported by 'cauldron'
+# 'arch' => [ 'i586', 'x86_64' ],
+# # Set this if you don't want media.cfg to be generated
+# 'no_media_cfg_update' => true,
+# 'medias' => {
+# # the 'core' media
+# 'core' => {
+# 'repos' => {
+# # the 'release' repo in the 'core' media
+# 'release' => {
+# 'media_type' => [ 'release' ],
+# 'noauto' => '1',
+# # the 'release' repo should be listed first in media.cfg
+# 'order' => 0,
+# },
+# # the 'updates' repo
+# 'updates' => {
+# 'media_type' => [ 'updates' ],
+# 'noauto' => '1',
+# # the 'updates' repo requires the 'release' repo
+# 'requires' => [ 'release' ],
+# # the 'updates' repo should be listed after 'release' in media.cfg
+# 'order' => 1,
+# },
+# },
+# # media_type for media.cfg
+# 'media_type' => [ 'official', 'free' ],
+# # if noauto is set to '1' either in medias or repos,
+# # the option will be added to media.cfg
+# 'noauto' => '1',
+# # list 'core' first in media.cfg
+# 'order' => 0,
+# },
+# # the 'non-free' media
+# 'non-free' => {
+# 'repos' => {
+# ...
+# },
+# 'media_type' => [ 'official', 'non-free' ],
+# # the 'non-free' media requires the 'core' media
+# 'requires' => [ 'core' ],
+# # list 'non-free' second
+# 'order' => 1,
+# }
+# },
+# # the list of media used by iurt to build the chroots
+# 'base_medias' => [ 'core/release' ],
+# # optionally, a media.cfg template file can be specified, if
+# # the default one should not be used
+# 'tmpl_media.cfg' => 'buildsystem/something',
+# # branch is Devel or Official. Used in media.cfg.
+# 'branch' => 'Devel',
+# # Version of the distribution
+# 'version' => '3',
+# SVN URLs from which submissions are allowed
+# 'submit_allowed' => 'svn://svn.something/svn/packages/cauldron',
+# rpm macros to set when building the source package
+# 'macros' => {
+# 'distsuffix' => '.mga',
+# 'distribution' => 'Mageia',
+# 'vendor' => 'Mageia.Org',
+# },
+# # set this if the distro is not mirrored. This is used to add
+# # an Alias in the vhost.
+# 'no_mirror' => true,
+# Optionally, the distribution can be based on the repos from an other
+# distribution. In this example we're saying that the distribution is
+# based on 2/core/release and 2/core/updates.
+# 'based_on' => {
+# '2' => {
+# 'core' => [ 'release', 'updates' ],
+# },
+# },
+# 'youri' => {
+# # Configuration for youri-upload
+# 'upload' => {
+# # list of enabled checks, actions and posts
+# 'targets' => {
+# 'checks' => [
+# ...
+# ],
+# 'actions' => [
+# ...
+# ],
+# 'posts' => [
+# ...
+# ],
+# },
+# 'checks' => {
+# # rpmlint checks options
+# 'rpmlint' => {
+# 'config' => '/usr/share/rpmlint/config',
+# 'path' => '/usr/bin/rpmlint',
+# },
+# },
+# # options for actions
+# 'actions' => {
+# ...
+# },
+# },
+# # Configuration for youri-todo
+# 'todo' => {
+# ...
+# },
+# },
+# },
+# }
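+# A minimal declaration might look like this (illustrative; $my_distros is a
+# placeholder for a hash shaped as documented above):
+#   class { 'buildsystem::var::distros':
+#     default_distro          => 'cauldron',
+#     repo_allow_from_ips     => [],
+#     repo_allow_from_domains => [],
+#     distros                 => $my_distros,
+#   }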
+class buildsystem::var::distros(
+ $default_distro,
+ $repo_allow_from_ips,
+ $repo_allow_from_domains,
+ $distros,
+) { }
diff --git a/modules/buildsystem/manifests/var/groups.pp b/modules/buildsystem/manifests/var/groups.pp
new file mode 100644
index 00000000..c0b2c917
--- /dev/null
+++ b/modules/buildsystem/manifests/var/groups.pp
@@ -0,0 +1,9 @@
+# $packagers:
+# name of packagers group, who should be allowed to submit packages
+# $packagers_committers:
+# name of group of users who are allowed to commit on packages
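+# Example declaration (group names are illustrative):
+#   class { 'buildsystem::var::groups':
+#     packagers            => 'mga-packagers',
+#     packagers_committers => 'mga-packagers-committers',
+#   }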
+class buildsystem::var::groups(
+ $packagers,
+ $packagers_committers
+) {
+}
diff --git a/modules/buildsystem/manifests/var/iurt.pp b/modules/buildsystem/manifests/var/iurt.pp
new file mode 100644
index 00000000..fb65a160
--- /dev/null
+++ b/modules/buildsystem/manifests/var/iurt.pp
@@ -0,0 +1,5 @@
+class buildsystem::var::iurt(
+ $login = 'iurt',
+ $homedir = '/home/iurt',
+ $timeout_multiplier = 1,
+) { }
diff --git a/modules/buildsystem/manifests/var/maintdb.pp b/modules/buildsystem/manifests/var/maintdb.pp
new file mode 100644
index 00000000..e0079e40
--- /dev/null
+++ b/modules/buildsystem/manifests/var/maintdb.pp
@@ -0,0 +1,11 @@
+class buildsystem::var::maintdb(
+ $hostname = "maintdb.${::domain}",
+ $login = 'maintdb',
+ $homedir = '/var/lib/maintdb'
+) {
+ include buildsystem::var::webstatus
+ $dbdir = "${homedir}/db"
+ $binpath = '/usr/local/sbin/maintdb'
+ $dump = "${buildsystem::var::webstatus::location}/data/maintdb.txt"
+ $unmaintained = "${buildsystem::var::webstatus::location}/data/unmaintained.txt"
+}
diff --git a/modules/buildsystem/manifests/var/mgarepo.pp b/modules/buildsystem/manifests/var/mgarepo.pp
new file mode 100644
index 00000000..9099c7ee
--- /dev/null
+++ b/modules/buildsystem/manifests/var/mgarepo.pp
@@ -0,0 +1,22 @@
+# $submit_host:
+# hostname used to submit packages
+# $svn_hostname:
+# hostname of the svn server used for packages
+# $svn_root_packages:
+# svn root url of the svn repository for packages
+# $svn_root_packages_ssh:
+# svn+ssh root url of the svn repository for packages
+# $oldurl:
+# svn url where the import logs of the rpm are stored
+# $conf:
+# $conf{'global'} is a hash table of values used in mgarepo.conf in
+# the [global] section
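+# Example declaration (illustrative values; $global_settings is a placeholder
+# for the hash of [global] options written to mgarepo.conf):
+#   class { 'buildsystem::var::mgarepo':
+#     submit_host           => "pkgsubmit.${::domain}",
+#     svn_hostname          => "svn.${::domain}",
+#     svn_root_packages     => "svn://svn.${::domain}/svn/packages",
+#     svn_root_packages_ssh => "svn+ssh://svn.${::domain}/svn/packages",
+#     oldurl                => "svn://svn.${::domain}/svn/packages/old",
+#     conf                  => { 'global' => $global_settings },
+#   }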
+class buildsystem::var::mgarepo(
+ $submit_host,
+ $svn_hostname,
+ $svn_root_packages,
+ $svn_root_packages_ssh,
+ $oldurl,
+ $conf
+) {
+}
diff --git a/modules/buildsystem/manifests/var/repository.pp b/modules/buildsystem/manifests/var/repository.pp
new file mode 100644
index 00000000..0ea1058c
--- /dev/null
+++ b/modules/buildsystem/manifests/var/repository.pp
@@ -0,0 +1,9 @@
+class buildsystem::var::repository(
+ $hostname = "repository.${::domain}",
+ $bootstrap_root = '/distrib/bootstrap',
+ $mirror_root = '/distrib/mirror',
+ $distribdir = 'distrib'
+) {
+ $bootstrap_reporoot = "${bootstrap_root}/${distribdir}"
+ $mirror_reporoot = "${mirror_root}/${distribdir}"
+}
diff --git a/modules/buildsystem/manifests/var/scheduler.pp b/modules/buildsystem/manifests/var/scheduler.pp
new file mode 100644
index 00000000..b431594c
--- /dev/null
+++ b/modules/buildsystem/manifests/var/scheduler.pp
@@ -0,0 +1,31 @@
+# $admin_mail:
+# the email address from which the build failure notifications
+# will be sent
+# $pkg_uphost:
+# hostname of the server where submitted packages are uploaded
+# $build_nodes:
+# a hash containing available build nodes indexed by architecture
+# $build_nodes_aliases:
+# a hash containing build nodes indexed by their alias
+# $build_src_node:
+# hostname of the server building the initial src.rpm
+# $clean_uploads_logs_age:
+# old logs are cleaned when they are older than a given number of days.
+# This variable defines that number of days. Set it to
+# 14 for two weeks, 2 for two days, or 0 if you don't want to
+# clean old logs at all
+# $clean_uploads_packages_age:
+# same as $clean_uploads_logs_age but for old RPMs
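+# Example declaration (illustrative; only the parameters without defaults
+# are required):
+#   class { 'buildsystem::var::scheduler':
+#     build_nodes    => { 'x86_64' => ['build1', 'build2'], 'i586' => ['build1'] },
+#     build_src_node => 'build1',
+#   }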
+class buildsystem::var::scheduler(
+ $admin_mail = "root@${::domain}",
+ $pkg_uphost = "pkgsubmit.${::domain}",
+ $build_nodes,
+ $build_nodes_aliases = {},
+ $build_src_node,
+ $clean_uploads_logs_age = 14,
+ $clean_uploads_packages_age = 7
+){
+ $login = 'schedbot'
+ $homedir = "/var/lib/${login}"
+ $logdir = "/var/log/${login}"
+}
diff --git a/modules/buildsystem/manifests/var/signbot.pp b/modules/buildsystem/manifests/var/signbot.pp
new file mode 100644
index 00000000..7d92a324
--- /dev/null
+++ b/modules/buildsystem/manifests/var/signbot.pp
@@ -0,0 +1,15 @@
+# $keyid:
+# the key id of the gnupg key used to sign packages
+# $keyemail:
+# email address of the key used to sign packages
+# $keyname:
+# name of the key used to sign packages
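+# Example declaration (illustrative values, not a real key):
+#   class { 'buildsystem::var::signbot':
+#     keyid    => '12345678',
+#     keyemail => "packages@${::domain}",
+#     keyname  => 'Example Packages Signing Key',
+#   }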
+class buildsystem::var::signbot(
+ $keyid,
+ $keyemail,
+ $keyname
+) {
+ $login = 'signbot'
+ $home_dir = "/var/lib/${login}"
+ $sign_keydir = "${home_dir}/keys"
+}
diff --git a/modules/buildsystem/manifests/var/webstatus.pp b/modules/buildsystem/manifests/var/webstatus.pp
new file mode 100644
index 00000000..21f8d59f
--- /dev/null
+++ b/modules/buildsystem/manifests/var/webstatus.pp
@@ -0,0 +1,25 @@
+# $git_url:
+# git url where the sources of webstatus are located
+# $hostname:
+# vhost name of the webstatus page
+# $location:
+# path of the directory where the webstatus files are located
+# $package_commit_url:
+# url to view a commit on a package. %d is replaced by the commit id.
+# $max_modified:
+# how much history should we display, in days
+# $theme_name:
+# name of the webstatus theme
+# $themes_dir:
+# path of the directory where the themes are located. If you want
+# to use a theme not included in webstatus, you need to change this.
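+# Example declaration (illustrative; %d in package_commit_url is replaced by
+# the commit id, as noted above):
+#   class { 'buildsystem::var::webstatus':
+#     package_commit_url => "http://svnweb.${::domain}/packages?view=revision&revision=%d",
+#   }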
+class buildsystem::var::webstatus(
+ $git_url = "git://git.${::domain}/web/pkgsubmit",
+ $hostname = "pkgsubmit.${::domain}",
+ $location = '/var/www/bs',
+ $package_commit_url,
+ $max_modified = '2',
+ $theme_name = 'mageia',
+ $themes_dir = '/var/www/bs/themes/'
+) {
+}
diff --git a/modules/buildsystem/manifests/var/youri.pp b/modules/buildsystem/manifests/var/youri.pp
new file mode 100644
index 00000000..f20b6c7b
--- /dev/null
+++ b/modules/buildsystem/manifests/var/youri.pp
@@ -0,0 +1,401 @@
+# The youri configuration files are created using information from three
+# different hash variables:
+# - the $youri_conf_default variable defined in this class, containing
+# the default configuration for youri. It contains the repository
+# configuration, and the definitions of the checks, actions and posts.
+# - the $youri_conf parameter passed to this class. The values defined
+# in this hash override the values defined in the default configuration.
+# - for each distribution defined in the hash variable $distros from
+# buildsystem::var::distros, the hash defined under the 'youri' index contains
+# some distro-specific options for youri checks, actions or posts. It
+# also contains for each distribution the list of active checks,
+# actions and posts.
+#
+# Each of those variables contains the configuration for youri submit-todo
+# (in index 'todo') and youri submit-upload (in index 'upload').
+#
+#
+# Parameters:
+# $tmpl_youri_upload_conf:
+# template file for youri submit-upload.conf
+# $tmpl_youri_todo_conf:
+# template file for youri submit-todo.conf
+# $packages_archivedir:
+# the directory where youri will archive old packages when they are
+# replaced by a new version
+# $youri_conf:
+# a hash containing the youri configuration
+class buildsystem::var::youri(
+ $tmpl_youri_upload_conf = 'buildsystem/youri/submit.conf',
+ $tmpl_youri_todo_conf = 'buildsystem/youri/submit.conf',
+ $packages_archivedir,
+ $youri_conf = {}
+) {
+ include buildsystem::var::repository
+ include buildsystem::var::mgarepo
+ include buildsystem::var::distros
+ include buildsystem::var::signbot
+ include buildsystem::var::scheduler
+
+ $check_tag = { 'class' => 'Youri::Submit::Check::Tag', }
+ $check_recency = { 'class' => 'Youri::Submit::Check::Recency', }
+ $check_queue_recency = { 'class' => 'Youri::Submit::Check::Queue_recency', }
+ $check_host = {
+ 'class' => 'Youri::Submit::Check::Host',
+ 'options' => {
+ 'host_file' => '/etc/youri/host.conf',
+ },
+ }
+ $check_rpmlint = { 'class' => 'Youri::Submit::Check::Rpmlint', }
+ $check_acl = {
+ 'class' => 'Youri::Submit::Check::ACL',
+ 'options' => {
+ 'acl_file' => '/etc/youri/acl.conf',
+ },
+ }
+ $check_source = { 'class' => 'Youri::Submit::Check::Source', }
+ $check_version = {
+ 'class' => 'Youri::Submit::Check::Version',
+ 'options' => {},
+ }
+
+ $youri_conf_default = {
+ 'upload' => {
+ 'repository' => {
+ 'class' => 'Youri::Repository::Mageia',
+ 'options' => {
+ 'install_root' => $buildsystem::var::repository::bootstrap_reporoot,
+ 'upload_root' => '$home/uploads/',
+ 'archive_root' => $packages_archivedir,
+ 'upload_state' => 'queue',
+ 'queue' => 'queue',
+ 'noarch' => 'x86_64',
+ 'svn' => "${buildsystem::var::mgarepo::svn_root_packages_ssh}/${buildsystem::var::distros::default_distro}",
+ },
+ },
+ 'checks' => {
+ 'tag' => $check_tag,
+ 'recency' => $check_recency,
+ 'queue_recency' => $check_queue_recency,
+ 'host' => $check_host,
+ 'section' => {
+ 'class' => 'Youri::Submit::Check::Section',
+ },
+ 'rpmlint' => $check_rpmlint,
+ 'svn' => {
+ 'class' => 'Youri::Submit::Check::SVN',
+ },
+ 'acl' => $check_acl,
+ 'history' => {
+ 'class' => 'Youri::Submit::Check::History',
+ },
+ 'source' => $check_source,
+ 'precedence' => {
+ 'class' => 'Youri::Submit::Check::Precedence',
+ 'options' => {
+ 'target' => $buildsystem::var::distros::default_distro,
+ },
+ },
+ 'version' => $check_version,
+ },
+ 'actions' => {
+ 'install' => {
+ 'class' => 'Youri::Submit::Action::Install',
+ },
+ 'markrelease' => {
+ 'class' => 'Youri::Submit::Action::Markrelease',
+ },
+ 'link' => {
+ 'class' => 'Youri::Submit::Action::Link',
+ },
+ 'archive' => {
+ 'class' => 'Youri::Submit::Action::Archive',
+ },
+ 'clean' => {
+ 'class' => 'Youri::Submit::Action::Clean',
+ },
+ 'sign' => {
+ 'class' => 'Youri::Submit::Action::Sign',
+ 'options' => {
+ 'signuser' => $buildsystem::var::signbot::login,
+ 'path' => $buildsystem::var::signbot::sign_keydir,
+ 'name' => $buildsystem::var::signbot::keyid,
+ 'signscript' => '/usr/local/bin/sign-check-package',
+ },
+ },
+ 'unpack_gfxboot_theme' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'mageia-gfxboot-theme',
+ 'source_subdir' => '/usr/share/gfxboot/themes/Mageia/install/',
+ 'dest_directory' => 'isolinux',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_meta_task' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'meta-task',
+ 'source_subdir' => '/usr/share/meta-task',
+ 'dest_directory' => 'media/media_info',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_installer_images' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'drakx-installer-images',
+ 'source_subdir' => '/usr/lib*/drakx-installer-images',
+ 'dest_directory' => '.',
+ 'preclean_directory' => 'install/images/alternatives',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_installer_images_nonfree' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'drakx-installer-images-nonfree',
+ 'source_subdir' => '/usr/lib*/drakx-installer-images',
+ 'dest_directory' => '.',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_installer_stage2' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'drakx-installer-stage2',
+ 'source_subdir' => '/usr/lib*/drakx-installer-stage2',
+ 'dest_directory' => '.',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_installer_advertising' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'drakx-installer-advertising',
+ 'source_subdir' => '/usr/share/drakx-installer-advertising',
+ 'dest_directory' => '.',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_installer_rescue' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'drakx-installer-rescue',
+ 'source_subdir' => '/usr/lib*/drakx-installer-rescue',
+ 'dest_directory' => 'install/stage2',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_release_notes' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'mageia-release-common',
+ 'source_subdir' => '/usr/share/doc/mageia-release-common',
+ 'grep_files' => 'release-notes.*',
+ 'dest_directory' => '.',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_syslinux' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'syslinux',
+ 'source_subdir' => '/usr/lib/syslinux/',
+ 'grep_files' => '\\(hdt\\|ifcpu\\|ldlinux\\|libcom32\\|libgpl\\|libmenu\\|libutil\\).c32',
+ 'dest_directory' => 'isolinux',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'unpack_pci_usb_ids' => {
+ 'class' => 'Youri::Submit::Action::Unpack',
+ 'options' => {
+ 'name' => 'ldetect-lst',
+ 'source_subdir' => '/usr/share/',
+ 'grep_files' => '\\(pci\\|usb\\).ids',
+ 'dest_directory' => 'isolinux',
+ 'unpack_inside_distribution_root' => '1',
+ },
+ },
+ 'mail' => {
+ 'class' => 'Youri::Submit::Action::Mail',
+ 'options' => {
+ 'mta' => '/usr/sbin/sendmail',
+ },
+ },
+ 'maintdb' => {
+ 'class' => 'Youri::Submit::Action::UpdateMaintDb',
+ },
+ 'rebuild' => {
+ 'class' => 'Youri::Submit::Action::RebuildPackage',
+ 'options' => {
+ 'rules' => {
+ 'drakx-installer-binaries' => ['drakx-installer-images'],
+ 'drakx-kbd-mouse-x11' => ['drakx-installer-stage2'],
+ 'drakx-net' => ['drakx-installer-stage2'],
+ 'kernel-desktop-latest' => ['drakx-installer-images', 'kmod-virtualbox', 'kmod-xtables-addons'],
+ 'kernel-desktop586-latest' => ['drakx-installer-images', 'kmod-virtualbox', 'kmod-xtables-addons'],
+ 'kernel-server-latest' => ['kmod-virtualbox', 'kmod-xtables-addons'],
+ 'ldetect-lst' => ['drakx-installer-stage2'],
+ 'meta-task' => ['drakx-installer-stage2'],
+ 'perl' => ['drakx-installer-stage2'],
+ 'perl-URPM' => ['drakx-installer-stage2'],
+ 'rpm' => ['drakx-installer-stage2'],
+ 'rpm-mageia-setup' => ['drakx-installer-stage2'],
+ 'urpmi' => ['drakx-installer-stage2'],
+ },
+ },
+ },
+ },
+ 'posts' => {
+ 'genhdlist2' => {
+ 'class' => 'Youri::Submit::Post::Genhdlist2',
+ 'options' => {
+ 'command' => '/usr/bin/genhdlist2 --xml-info-filter ".lzma:xz -T4" --synthesis-filter ".cz:xz -7 -T8"',
+ },
+ },
+ 'genhdlist2_zstd' => {
+ 'class' => 'Youri::Submit::Post::Genhdlist2',
+ 'options' => {
+ 'command' => '/usr/bin/genhdlist2 --xml-info-filter ".lzma:xz -T4" --synthesis-filter ".cz:zstd -19 -T8"',
+ },
+ },
+ 'createrepo_mga6' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => 'createrepo_c --no-database --update --workers=10',
+ },
+ },
+ 'createrepo_mga7' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => 'createrepo_c --no-database --update --workers=10 --zck --zck-dict-dir /usr/share/mageia-repo-zdicts/mga7/',
+ },
+ },
+ 'createrepo_mga8' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => 'createrepo_c --no-database --update --workers=10 --zck --zck-dict-dir /usr/share/mageia-repo-zdicts/mga8/',
+ },
+ },
+ 'createrepo_mga9' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => 'createrepo_c --no-database --update --workers=10 --zck --zck-dict-dir /usr/share/mageia-repo-zdicts/mga9/',
+ },
+ },
+ 'createrepo_cauldron' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => 'createrepo_c --no-database --update --workers=10',
+ },
+ },
+ 'appstream_mga6' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => '/distrib/appstream/appstream-6-modifyrepo.sh',
+ },
+ },
+ 'appstream_mga7' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => '/distrib/appstream/appstream-7-modifyrepo.sh',
+ },
+ },
+ 'appstream_mga8' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => '/distrib/appstream/appstream-8-modifyrepo.sh',
+ },
+ },
+ 'appstream_mga9' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => '/distrib/appstream/appstream-9-modifyrepo.sh',
+ },
+ },
+ 'appstream_cauldron' => {
+ 'class' => 'Youri::Submit::Post::RunOnModifiedMedia',
+ 'options' => {
+ 'command' => '/distrib/appstream/appstream-cauldron-modifyrepo.sh',
+ },
+ },
+ 'clean_rpmsrate' => {
+ 'class' => 'Youri::Submit::Post::CleanRpmsrate',
+ },
+ 'mirror' => {
+ 'class' => 'Youri::Submit::Post::Mirror',
+ 'options' => {
+ 'destination' => $buildsystem::var::repository::mirror_reporoot,
+ },
+ },
+ },
+ },
+ 'todo' => {
+ 'repository' => {
+ 'class' => 'Youri::Repository::Mageia',
+ 'options' => {
+ 'install_root' => $buildsystem::var::repository::bootstrap_reporoot,
+ 'upload_root' => '$home/uploads/',
+ 'upload_state' => 'todo done queue',
+ 'queue' => 'todo',
+ 'noarch' => 'x86_64',
+ 'svn' => "${buildsystem::var::mgarepo::svn_root_packages_ssh}/${buildsystem::var::distros::default_distro}",
+ },
+ },
+ 'checks' => {
+ 'tag' => $check_tag,
+ 'recency' => $check_recency,
+ 'queue_recency' => $check_queue_recency,
+ 'host' => $check_host,
+ 'rpmlint' => $check_rpmlint,
+ 'acl' => $check_acl,
+ 'source' => $check_source,
+ 'version' => $check_version,
+ 'deps' => {
+ 'class' => 'Youri::Submit::Check::Deps',
+ },
+ },
+ 'actions' => {
+ 'send' => {
+ 'class' => 'Youri::Submit::Action::Send',
+ 'options' => {
+ 'user' => $buildsystem::var::scheduler::login,
+ 'keep_svn_release' => 'yes',
+ 'uphost' => $buildsystem::var::scheduler::pkg_uphost,
+ 'root' => '$home/uploads',
+ 'ssh_key' => '$home/.ssh/id_rsa',
+ },
+ },
+ 'dependencies' => {
+ 'class' => 'Youri::Submit::Action::Dependencies',
+ 'options' => {
+ 'user' => $buildsystem::var::scheduler::login,
+ 'uphost' => $buildsystem::var::scheduler::pkg_uphost,
+ 'root' => '$home/uploads',
+ 'ssh_key' => '$home/.ssh/id_rsa',
+ },
+ },
+ 'rpminfo' => {
+ 'class' => 'Youri::Submit::Action::Rpminfo',
+ 'options' => {
+ 'user' => $buildsystem::var::scheduler::login,
+ 'uphost' => $buildsystem::var::scheduler::pkg_uphost,
+ 'root' => '$home/uploads',
+ 'ssh_key' => '$home/.ssh/id_rsa',
+ },
+ },
+ 'ulri' => {
+ 'class' => 'Youri::Submit::Action::Ulri',
+ 'options' => {
+ 'user' => $buildsystem::var::scheduler::login,
+ 'uphost' => $buildsystem::var::scheduler::pkg_uphost,
+ 'ssh_key' => '$home/.ssh/id_rsa',
+ },
+ },
+ },
+ 'posts' => {
+ },
+ },
+ }
+}
diff --git a/modules/buildsystem/manifests/webstatus.pp b/modules/buildsystem/manifests/webstatus.pp
new file mode 100644
index 00000000..49346dbc
--- /dev/null
+++ b/modules/buildsystem/manifests/webstatus.pp
@@ -0,0 +1,44 @@
+class buildsystem::webstatus {
+ include buildsystem::var::webstatus
+ include buildsystem::var::scheduler
+ include apache::mod::php
+
+ file { [ $buildsystem::var::webstatus::location, "${buildsystem::var::webstatus::location}/data" ]:
+ ensure => directory,
+ }
+
+ $vhost = $buildsystem::var::webstatus::hostname
+ apache::vhost::base { $vhost:
+ aliases => {
+ '/uploads' => "${buildsystem::var::scheduler::homedir}/uploads",
+ '/autobuild/cauldron/x86_64/core/log/status.core.log' => "${buildsystem::var::webstatus::location}/autobuild/broken.php",
+ '/themes' => $buildsystem::var::webstatus::themes_dir,
+ },
+ location => $buildsystem::var::webstatus::location,
+ content => template('buildsystem/vhost_webstatus.conf'),
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ aliases => {
+ '/uploads' => "${buildsystem::var::scheduler::homedir}/uploads",
+ '/autobuild/cauldron/x86_64/core/log/status.core.log' => "${buildsystem::var::webstatus::location}/autobuild/broken.php",
+ '/themes' => $buildsystem::var::webstatus::themes_dir,
+ },
+ location => $buildsystem::var::webstatus::location,
+ content => template('buildsystem/vhost_webstatus.conf'),
+ }
+
+ git::snapshot { $buildsystem::var::webstatus::location:
+ source => $buildsystem::var::webstatus::git_url,
+ }
+
+ file { '/etc/bs-webstatus.conf':
+ ensure => present,
+ content => template('buildsystem/bs-webstatus.conf'),
+ mode => '0644',
+ owner => root,
+ group => root,
+ }
+}
diff --git a/modules/buildsystem/manifests/youri_submit.pp b/modules/buildsystem/manifests/youri_submit.pp
new file mode 100644
index 00000000..6b4d7dc2
--- /dev/null
+++ b/modules/buildsystem/manifests/youri_submit.pp
@@ -0,0 +1,83 @@
+class buildsystem::youri_submit {
+ include sudo
+ include buildsystem::rpmlint
+ include buildsystem::repository
+ include buildsystem::var::scheduler
+ include buildsystem::var::youri
+
+ mga_common::local_script {
+ 'mga-youri-submit':
+ content => template('buildsystem/mga-youri-submit');
+ 'mga-youri-submit.wrapper':
+ content => template('buildsystem/mga-youri-submit.wrapper');
+ 'submit_package':
+ content => template('buildsystem/submit_package.pl');
+ 'mga-clean-distrib':
+ content => template('buildsystem/cleaner.rb');
+ }
+
+ sudo::sudoers_config { 'mga-youri-submit':
+ content => template('buildsystem/sudoers.youri')
+ }
+ $release_managers = group_members('mga-release_managers')
+ # ordering is automatic:
+ # https://docs.puppetlabs.com/learning/ordering.html#autorequire
+ file {
+ '/etc/youri/':
+ ensure => 'directory';
+ '/etc/youri/acl.conf':
+ content => template('buildsystem/youri/acl.conf');
+ '/etc/youri/host.conf':
+ content => template('buildsystem/youri/host.conf');
+ }
+
+ buildsystem::youri_submit_conf{ 'upload':
+ tmpl_file => $buildsystem::var::youri::tmpl_youri_upload_conf,
+ }
+ buildsystem::youri_submit_conf{ 'todo':
+ tmpl_file => $buildsystem::var::youri::tmpl_youri_todo_conf,
+ }
+
+ cron { 'Archive orphan packages from cauldron':
+ command => "/usr/local/bin/mga-clean-distrib --auto -v cauldron -d ${buildsystem::var::youri::packages_archivedir} -l ${buildsystem::var::scheduler::homedir}/tmp/upload",
+ hour => 5,
+ minute => 30,
+ user => $buildsystem::var::scheduler::login,
+ }
+
+ file { $buildsystem::var::youri::packages_archivedir:
+ ensure => 'directory',
+ owner => $buildsystem::var::scheduler::login,
+ require => File[$buildsystem::var::scheduler::homedir],
+ }
+
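+ # Remove archived RPMs once their ctime is older than one week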
+ tidy { $buildsystem::var::youri::packages_archivedir:
+ type => 'ctime',
+ recurse => true,
+ age => '1w',
+ matches => '*.rpm',
+ }
+
+ include mga_common::var::perl
+ file { [ "${mga_common::var::perl::site_perl_dir}/Youri",
+ "${mga_common::var::perl::site_perl_dir}/Youri/Repository"]:
+ ensure => directory,
+ mode => '0755',
+ owner => root,
+ group => root,
+ }
+ file { "${mga_common::var::perl::site_perl_dir}/Youri/Repository/Mageia.pm":
+ source => 'puppet:///modules/buildsystem/Mageia.pm',
+ }
+
+ $package_list = ['perl-SVN',
+ 'mdv-distrib-tools',
+ 'perl-Youri-Media',
+ 'perl-Youri-Package',
+ 'perl-Youri-Repository',
+ 'perl-Youri-Utils',
+ 'perl-Youri-Config',
+ 'mga-youri-submit']
+
+ package { $package_list: }
+}
diff --git a/modules/buildsystem/manifests/youri_submit_conf.pp b/modules/buildsystem/manifests/youri_submit_conf.pp
new file mode 100644
index 00000000..28b911d9
--- /dev/null
+++ b/modules/buildsystem/manifests/youri_submit_conf.pp
@@ -0,0 +1,6 @@
+define buildsystem::youri_submit_conf($tmpl_file) {
+ $conf_name = $name
+ file { "/etc/youri/submit-${conf_name}.conf":
+ content => template($tmpl_file),
+ }
+}
diff --git a/modules/buildsystem/templates/binrepo/sudoers.binrepo b/modules/buildsystem/templates/binrepo/sudoers.binrepo
new file mode 100644
index 00000000..c20810cf
--- /dev/null
+++ b/modules/buildsystem/templates/binrepo/sudoers.binrepo
@@ -0,0 +1 @@
+%<%= scope.lookupvar('buildsystem::var::groups::packagers_committers') %> ALL =(<%= scope.lookupvar('buildsystem::var::binrepo::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::binrepo::uploadbinpath') %>
diff --git a/modules/buildsystem/templates/binrepo/upload-bin b/modules/buildsystem/templates/binrepo/upload-bin
new file mode 100755
index 00000000..7cad5838
--- /dev/null
+++ b/modules/buildsystem/templates/binrepo/upload-bin
@@ -0,0 +1,32 @@
+#!/bin/sh
+set -e
+
+binrepodir=<%= scope.lookupvar('buildsystem::var::binrepo::repodir') %>
+uploadinfosdir=<%= scope.lookupvar('buildsystem::var::binrepo::uploadinfosdir') %>
+tmpfile=$(mktemp)
+mail_from="<%= scope.lookupvar('buildsystem::var::binrepo::uploadmail_from') %>"
+mail_dest="<%= scope.lookupvar('buildsystem::var::binrepo::uploadmail_to') %>"
+
+test $# = 2 || exit 3
+username="$1"
+comment="$2"
+
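+# The uploaded file is read from standard input and stored under its SHA-1 checksum.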
+/bin/cat > "$tmpfile"
+sha1sum=$(/usr/bin/sha1sum "$tmpfile" | sed 's/ .*$//')
+test -n "$sha1sum"
+if [ -f "$binrepodir/$sha1sum" ]
+then
+ echo "File $sha1sum already exists." >&2
+ /bin/rm -f "$tmpfile"
+ exit 2
+fi
+/bin/mv "$tmpfile" "$binrepodir/$sha1sum"
+/bin/chmod 644 "$binrepodir/$sha1sum"
+echo "$username:$comment" > "$uploadinfosdir/$sha1sum"
+echo "User $username uploaded file $sha1sum: $comment"
+
+echo "User $username uploaded file $sha1sum: $comment" | \
+ /usr/bin/mailx -s "New file uploaded: $sha1sum - $comment" -S "from=$username <$mail_from>" "$mail_dest"
+
+exit 0
+
diff --git a/modules/buildsystem/templates/binrepo/vhost_binrepo.conf b/modules/buildsystem/templates/binrepo/vhost_binrepo.conf
new file mode 100644
index 00000000..f411c07a
--- /dev/null
+++ b/modules/buildsystem/templates/binrepo/vhost_binrepo.conf
@@ -0,0 +1,3 @@
+<Directory <%= scope.lookupvar('buildsystem::var::binrepo::repodir') %>>
+ Options None
+</Directory>
diff --git a/modules/buildsystem/templates/binrepo/wrapper.upload-bin b/modules/buildsystem/templates/binrepo/wrapper.upload-bin
new file mode 100644
index 00000000..3def84a0
--- /dev/null
+++ b/modules/buildsystem/templates/binrepo/wrapper.upload-bin
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+binrepouser="<%= scope.lookupvar('buildsystem::var::binrepo::login') %>"
+uploadbinpath="<%= scope.lookupvar('buildsystem::var::binrepo::uploadbinpath') %>"
+packagerscommittersgroup="<%= scope.lookupvar('buildsystem::var::groups::packagers_committers') %>"
+
+function isingroup()
+{
+ grp="$1"
+ for group in `groups`
+ do if [ "$grp" = "$group" ]
+ then
+ return 0
+ fi
+ done
+ return 1
+}
+
+if ! isingroup "$packagerscommittersgroup"
+then
+ echo "You are not in $packagerscommittersgroup group."
+ exit 1
+fi
+
+sudo -u "$binrepouser" "$uploadbinpath" "$(whoami)" "$@"
+
diff --git a/modules/buildsystem/templates/bs-webstatus.conf b/modules/buildsystem/templates/bs-webstatus.conf
new file mode 100644
index 00000000..9f37a990
--- /dev/null
+++ b/modules/buildsystem/templates/bs-webstatus.conf
@@ -0,0 +1,32 @@
+<?php
+
+/** Where is the current app located. */
+$g_webapp_dir = '<%= scope.lookupvar('buildsystem::var::webstatus::location') %>';
+
+/** Full system path where packages are uploaded. */
+$upload_dir = '<%= scope.lookupvar('buildsystem::var::scheduler::homedir') %>/uploads';
+
+/** How long a history should we keep, in days. */
+$max_modified = <%= scope.lookupvar('buildsystem::var::webstatus::max_modified') %>;
+
+/** How many nodes are available. */
+$g_nodes_count = 2;
+
+/** html > body > h1 title */
+$title = 'Build system status';
+
+/** Should crawlers index this page or not? meta[robots] tag.*/
+$robots = 'index,nofollow,nosnippet,noarchive';
+
+/** Base URL of this web status application. */
+$g_root_url = 'https://<%= scope.lookupvar('buildsystem::var::webstatus::hostname') %>/';
+
+/** URL to view a package svn revision. %d is replaced by the revision */
+$package_commit_url = '<%= scope.lookupvar('buildsystem::var::webstatus::package_commit_url') %>';
+
+/** name of the theme */
+$theme_name = '<%= scope.lookupvar('buildsystem::var::webstatus::theme_name') %>';
+
+/** themes directory */
+$themes_dir = '<%= scope.lookupvar('buildsystem::var::webstatus::themes_dir') %>';
+
diff --git a/modules/buildsystem/templates/cleaner.rb b/modules/buildsystem/templates/cleaner.rb
new file mode 100755
index 00000000..fa0d08ca
--- /dev/null
+++ b/modules/buildsystem/templates/cleaner.rb
@@ -0,0 +1,235 @@
+#!/usr/bin/ruby
+
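+# Typical unattended invocation (matches the cron job set up in buildsystem::youri_submit):
+#   mga-clean-distrib --auto -v cauldron -d <packages_archivedir> -l <scheduler_home>/tmp/upload
+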
+def usage
+ puts "Usage: #{$0} [options]"
+ puts "Moves obsolete packages"
+ puts
+ puts "-h, --help show this help"
+ puts "-a, --archs <arch1>,<arch2>,... list of architectures to clean"
+ puts "-a, --auto do not ask confirmation"
+ puts "-p, --base <path> base path to the repository"
+ puts "-m, --media <media1>,<media2>,... list of media to clean (default: core/release,tainted/release,nonfree/release)"
+ puts "-d, --destination <path> path to the old packages storage"
+ puts "-v, --version <version> version to clean (default: cauldron)"
+end
+
+require 'fileutils'
+require 'getoptlong'
+require 'readline'
+
+def process
+ opts = GetoptLong.new(
+ [ '--help', '-h', GetoptLong::NO_ARGUMENT ],
+ [ '--archs', '-a', GetoptLong::REQUIRED_ARGUMENT ],
+ [ '--auto', '-A', GetoptLong::NO_ARGUMENT ],
+ [ '--base', '-p', GetoptLong::REQUIRED_ARGUMENT ],
+ [ '--media', '-m', GetoptLong::REQUIRED_ARGUMENT ],
+ [ '--destination', '-d', GetoptLong::REQUIRED_ARGUMENT ],
+ [ '--version', '-v', GetoptLong::REQUIRED_ARGUMENT ],
+ [ '--lockfile', '-l', GetoptLong::REQUIRED_ARGUMENT ],
+ )
+
+ base_path = "<%= scope.lookupvar('buildsystem::var::repository::bootstrap_root') %>/distrib"
+ archs = [ "x86_64", "i686", "aarch64", "armv7hl" ]
+ medias = ["core/release", "tainted/release", "nonfree/release"]
+ old_path = "<%= scope.lookupvar('buildsystem::var::youri::packages_archivedir') %>"
+ version = "cauldron"
+ auto = false
+ lockfile = nil
+
+ opts.each do |opt, arg|
+ case opt
+ when '--help'
+ usage
+ exit 0
+ when '--destination'
+ old_path = arg
+ when '--media'
+ medias = arg.split(",")
+ when '--archs'
+ archs = arg.split(",")
+ when '--auto'
+ auto = true
+ when '--base'
+ base_path = arg
+ when '--version'
+ version = arg
+ when '--lockfile'
+ lockfile = arg
+ end
+ end
+
+ take_upload_lock(lockfile) if lockfile
+
+ medias.each{|media|
+ src_path = "#{base_path}/#{version}/SRPMS/#{media}"
+
+ $used_srcs = {}
+ $old_srcs = {}
+ $srcs = {}
+ $srcages = {}
+ $noarch = {}
+
+ # Get a list of all src.rpm and their build time
+ `urpmf --synthesis "#{src_path}/media_info/synthesis.hdlist.cz" --qf '%filename:%buildtime:%buildarchs' "."`.each_line{|l|
+ l2 = l.split(':')
+ filename = l2[0]
+ buildtime = l2[1].to_i
+ buildarch = l2[2].rstrip
+ name = name_from_filename(filename)
+ if $srcages[name] then
+ if buildtime < $srcages[name][1] then
+ # This src.rpm is older, ignore it and store it in the list to be deleted
+ $old_srcs[filename] = true
+ next
+ else
+ # This src.rpm has an older version, ignore that version and store it in the list to be deleted
+ old_filename = $srcages[name][0]
+ $old_srcs[old_filename] = true
+ $srcs.delete(old_filename)
+ end
+ end
+ $srcages[name] = [ filename, buildtime ]
+ $srcs[filename] = true
+ $noarch[name] = true if buildarch == 'noarch'
+ }
+ archs.each{|arch|
+ bin_path = "#{base_path}/#{version}/#{arch}/media/#{media}"
+ debug_path = bin_path.sub("/media/", "/media/debug/")
+ old_packages = check_binaries(arch, $srcs, $srcages, src_path, bin_path, $used_srcs)
+ old_debug_packages = check_binaries(arch, $srcs, {}, src_path, debug_path, nil)
+ move_packages(bin_path, old_path, old_packages, auto)
+ move_packages(debug_path, old_path, old_debug_packages, auto)
+ }
+ $used_srcs.keys.each{|s| $srcs.delete(s)}
+
+ move_packages(src_path, old_path, $srcs.keys + $old_srcs.keys, auto)
+ }
+end
+
+def take_upload_lock(path)
+ start_time = Time.new
+ has_lock = false
+ at_exit {
+ if File.exist?(path)
+ if File.readlines(path)[0].to_i == Process.pid
+ File.delete(path)
+ end
+ end
+ }
+ until has_lock
+ while File.exist?(path)
+ if Time.new - start_time > 2*3600.0
+ puts "Could not acquire upload lock for more than 2h, giving up"
+ end
+ sleep(5)
+ end
+ File.write(path, Process.pid)
+ if File.readlines(path)[0].to_i == Process.pid
+ has_lock = true
+ end
+ end
+end
+
+def move_packages(src, dst, list, auto)
+ list.reject!{|f| !File.exist?(src + "/" + f)}
+ return if list.empty?
+ list.each{|b|
+ puts b
+ }
+ puts "The #{list.length} listed packages will be moved from #{src} to #{dst}."
+ line = Readline::readline('Are you sure [Yn]? ') unless auto
+ if auto || line =~ /^y?$/i
+ list.each{|s|
+ oldfile = src + "/" + s
+ newfile = dst + "/" + s
+ next unless File.exist?(oldfile)
+ if (File.exist?(newfile))
+ File.unlink(oldfile)
+ else
+ FileUtils.mv(oldfile, newfile)
+ end
+ }
+ end
+end
+
+# For each binary media:
+# - Check if we have the src.rpm (else the binary package is obsolete)
+# * If we don't have the src.rpm, check if we have a newer version
+# - If there is a new version:
+# * check if this architecture has packages from it to avoid deleting armv7hl packages before the new one gets rebuilt
+# * check if the new version is old enough to allow rebuilding everything (7d?)
+# - Mark used src.rpm (if one is never marked, the src.rpm is obsolete)
+
+def packages(path)
+ `urpmf --synthesis "#{path}/media_info/synthesis.hdlist.cz" --qf '%sourcerpm:%filename:%buildtime' ":"`.each_line{|l|
+ l2 = l.split(':')
+ sourcerpm = l2[0]
+ filename = l2[1]
+ buildtime = l2[2].to_i
+ yield(sourcerpm, filename, buildtime)
+ }
+end
+
+def name_from_filename(filename)
+ filename.sub(/-[^-]*-[^-]*$/, '')
+end
+
+def arch_wanted(src, arch)
+ exclusive_arch = `rpmquery -p #{src} --qf '[%{EXCLUSIVEARCH} ]'`.rstrip
+ if exclusive_arch != "" then
+ if !exclusive_arch.split(/ /).include?(arch) then
+ return false
+ end
+ end
+ exclude_arch = `rpmquery -p #{src} --qf '[%{EXCLUDEARCH} ]'`.rstrip
+ if exclude_arch != "" then
+ if exclude_arch.split(/ /).include?(arch) then
+ return false
+ end
+ end
+
+ return true
+end
+
+def check_binaries(arch, srcs, srcages, src_path, path, used_srcs)
+ used_here_srcs = {}
+ all_versions = {}
+ packages(path) {|src, filename, buildtime|
+ used_srcs[src] = true if used_srcs != nil
+ if filename =~ /noarch.rpm$/ then
+ # We need to mark the src.rpm present on this arch only for full noarch packages
+ used_here_srcs[src] = true if $noarch[name_from_filename(src)]
+ else
+ used_here_srcs[src] = true
+ end
+ name = name_from_filename(filename)
+ if all_versions[name] then
+ all_versions[name] << src
+ else
+ all_versions[name] = [src]
+ end
+ }
+ old_binaries = []
+ packages(path) {|src, filename, buildtime|
+ if ! srcs[src] then
+ srcname = name_from_filename(src)
+ if srcages[srcname] then
+ # The src.rpm is gone but there is a different version of it
+ latestsrc = srcages[srcname][0]
+ # Only delete old binaries after 7d or if there is a new version
+ name = name_from_filename(filename)
+ next unless (srcages[srcname][1] < Time.now.to_i - 24*60*60*7 || all_versions[name].include?(latestsrc))
+ # Do not delete if the new version of the package hasn't been built for this arch yet
+ # but still delete it if it is no longer expected to be built.
+ next unless (used_here_srcs[latestsrc] || !arch_wanted("#{src_path}/#{latestsrc}", arch))
+ end
+ old_binaries << filename
+ end
+ }
+ old_binaries
+end
+
+if __FILE__ == $0 then
+ process
+end
diff --git a/modules/buildsystem/templates/cleaner_test.rb b/modules/buildsystem/templates/cleaner_test.rb
new file mode 100644
index 00000000..804bd1b5
--- /dev/null
+++ b/modules/buildsystem/templates/cleaner_test.rb
@@ -0,0 +1,83 @@
+require 'cleaner'
+require "test/unit"
+
+class TestCleaner < Test::Unit::TestCase
+
+ @pkgs = []
+
+ def setpackages(pkgs)
+ @pkgs = pkgs
+ end
+
+ def packages(path)
+ @pkgs.map{|p|
+ l2 = p.split(':')
+ sourcerpm = l2[0]
+ filename = l2[1]
+ buildtime = l2[2].to_i
+ yield(sourcerpm, filename, buildtime)
+ }
+ end
+
+ def test_old
+ # Package was built on this arch and src.rpm for new version is 15d old
+ setpackages(['foo-43-1.src.rpm:libfoo2-43-1.armv7hl.rpm:43', 'foo-42-1.src.rpm:libfoo1-42-1.armv7hl.rpm:42'])
+ srcages = {}
+ srcages['foo'] = [ 'foo-43-1.src.rpm', Time.now.to_i - 15*24*3600 ]
+ srcs = {}
+ srcs['foo-43-1.src.rpm'] = true
+ assert_equal(['libfoo1-42-1.armv7hl.rpm'], check_binaries('armv7hl', srcs, srcages, '', '', nil))
+ end
+
+ def test_recent
+ # Package was built on this arch but src.rpm for new version is only 1d old
+ setpackages(['foo-43-1.src.rpm:foo-43-1.armv7hl.rpm:43', 'foo-42-1.src.rpm:foo-42-1.armv7hl.rpm:42'])
+ srcages = {}
+ srcages['foo'] = [ 'foo-43.src.rpm', Time.now.to_i - 24*3600 ]
+ srcs = {}
+ srcs['foo-43-1.src.rpm'] = true
+ assert_equal([], check_binaries('armv7hl', srcs, srcages, '', '', nil))
+ end
+
+ def test_arm_late
+ # Package was not yet built on this arch
+ setpackages(['foo-42-1.src.rpm:foo-42-1.armv7hl.rpm:42'])
+ srcages = {}
+ srcages['foo'] = [ 'foo-43.src.rpm', Time.now.to_i - 24*3600 ]
+ srcs = {}
+ srcs['foo-43-1.src.rpm'] = true
+ assert_equal([], check_binaries('armv7hl', srcs, srcages, '', '', nil))
+ end
+
+ def test_multiple_versions
+ # Old package remains (usually happens to noarch due to youri bug)
+ $noarch = { 'foo' => true }
+ setpackages(['foo-42-1.src.rpm:foo-42-1.noarch.rpm:42', 'foo-42-2.src.rpm:foo-42-2.noarch.rpm:43'])
+ srcages = {}
+ srcages['foo'] = [ 'foo-42-2.src.rpm', Time.now.to_i - 24*3600 ]
+ srcs = {}
+ srcs['foo-42-2.src.rpm'] = true
+ assert_equal(['foo-42-1.noarch.rpm'], check_binaries('i586', srcs, srcages, '', '', nil))
+ end
+
+ def test_icu
+ $noarch = {}
+ now = Time.now.to_i
+ srctime = now - 3600
+ oldbintime = now - 10*24*3600
+ newbintime = now - 3200
+ setpackages([
+ "icu-71.1-2.mga9.src.rpm:icu71-data-71.1-2.mga9.noarch.rpm:#{oldbintime}",
+ "icu-71.1-2.mga9.src.rpm:lib64icu71-71.1-2.mga9.aarch64.rpm:#{oldbintime}",
+ "icu-72.1-1.mga9.src.rpm:icu72-data-72.1-1.mga9.noarch.rpm:#{newbintime}",
+ "icu-72.1-1.mga9.src.rpm:lib64icu-devel-72.1-1.mga9.aarch64.rpm:#{newbintime}",
+ "icu-72.1-1.mga9.src.rpm:lib64icu72-72.1-1.mga9.aarch64.rpm:#{newbintime}"
+ ])
+ srcages = {}
+ srcages['icu'] = [ 'icu-71.1-2.mga9.src.rpm', srctime ]
+ srcs = {}
+ srcs['icu-71.1-2.mga9.src.rpm'] = true
+ assert_equal([], check_binaries('aarch64', srcs, srcages, '', '', nil))
+ end
+
+end
diff --git a/modules/buildsystem/templates/iurt.cauldron.conf b/modules/buildsystem/templates/iurt.cauldron.conf
deleted file mode 100644
index 5a7f047e..00000000
--- a/modules/buildsystem/templates/iurt.cauldron.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- supported_arch => [ 'i586', 'x86_64' ],
- all_media => { 'main' => [ 'release' ], 'contrib' => [ 'release' ] },
- upload => 'schedbot@pkgsubmit:~/uploads/',
- upload_queue => 'schedbot@pkgsubmit:~/uploads/queue/',
- unwanted_packages => '^monotone-',
- repository => '/mnt/BIG/dis/',
- rsync_to => 'schedbot@pkgsubmit:/mnt/BIG/dis/uploads/build/',
- log_url => 'http://pkgsubmit.mageia.org/queue/build/',
- admin => 'mageia-sysadm@mageia.org',
- iurt_root_command => '/home/buildbot/iurt-trunk/iurt_root_command',
- packager => 'Iurt the rebuild bot <mageia-sysadm@mageia.org>',
- sendmail => 0,
- build_timeout => {
- 'default' => 18000,
- 'gcc' => 57600,
- 'paraview' => 115200,
- 'salome' => 57600,
- 'itk' => 115200,
- 'wrapitk' => 115200,
- 'kernel-rt' => 57600,
- 'kernel-xen' => 57600,
- 'kernel-tmb' => 57600,
- 'openoffice.org' => 345600,
- 'openoffice.org64' => 345600,
- 'openoffice.org-go-ooo' => 345600,
- 'openoffice.org64-go-ooo' => 345600
- },
-}
-
diff --git a/modules/buildsystem/templates/iurt.conf b/modules/buildsystem/templates/iurt.conf
new file mode 100644
index 00000000..2dd8bf0e
--- /dev/null
+++ b/modules/buildsystem/templates/iurt.conf
@@ -0,0 +1,37 @@
+<%- distro = scope.lookupvar('buildsystem::var::distros::distros')[@distribution] -%>
+{
+ supported_arch => [ '<%= distro['arch'].join("', '") %>' ],
+ all_media =>{
+<%- distro['medias'].keys.sort.each{|media| -%>
+ '<%= media %>' => [ '<%=
+ distro['medias'][media]['repos'].keys.sort.join("', '") %>' ],
+<%-
+} -%>
+ },
+ distribution => '<%= distro['macros']['distribution'] %>',
+ vendor => '<%= distro['macros']['vendor'] %>',
+ base_media => [ '<%= distro['base_media'].join("', '") %>' ],
+ upload => '<%= build_login %>@pkgsubmit:~/uploads/',
+ upload_queue => '<%= build_login %>@pkgsubmit:~/uploads/queue/',
+ unwanted_packages => '^monotone-',
+ repository => 'http://<%= scope.lookupvar('buildsystem::var::repository::hostname') %>/<%= scope.lookupvar('buildsystem::var::repository::distribdir') %>/',
+ log_url => 'https://<%= scope.lookupvar('buildsystem::var::webstatus::hostname') %>/queue/build/',
+ admin => 'mageia-sysadm@mageia.org',
+ packager => 'Iurt the rebuild bot <mageia-sysadm@mageia.org>',
+ sendmail => 0,
+ log_size_limit => '600M',
+ build_timeout => {
+<%- build_timeout.keys.sort.each{|package| -%>
+ '<%= package %>' => <%= (build_timeout[package].to_f * scope.lookupvar('buildsystem::var::iurt::timeout_multiplier').to_f).to_i %>,
+<%-
+} -%>
+ },
+ use_netns => {
+ 'default' => 1,
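+ # packages listed in allow_network_access are set to 0 below, which presumably
+ # opts them out of the isolated network namespace so their builds can reach the network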
+<%- allow_network_access.sort.each{|package| -%>
+ '<%= package %>' => 0,
+<%-
+} -%>
+ },
+}
+
diff --git a/modules/buildsystem/templates/maintdb/maintdb.bin b/modules/buildsystem/templates/maintdb/maintdb.bin
new file mode 100755
index 00000000..903ee009
--- /dev/null
+++ b/modules/buildsystem/templates/maintdb/maintdb.bin
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+MAINTDBDIR="<%= scope.lookupvar('buildsystem::var::maintdb::dbdir') %>"
+
+function checkname()
+{
+ if [ -z "$1" ] ||
+ echo "$1" | grep -q '[/*{}%]' ||
+ echo "$1" | fgrep -q '..'
+ then
+ echo "Error: invalid package name." >&2
+ exit 1
+ fi
+}
+
+function maintnew()
+{
+ if [ "$user" != "root" ]; then
+ echo "Error: new is only allowed to root." >&2
+ exit 1
+ fi
+ checkname "$1"
+ maintfile="$MAINTDBDIR/$1"
+ if [ -f "$maintfile" ]; then
+ exit 0
+ fi
+ echo "$2" > "$maintfile"
+}
+
+function maintset()
+{
+ checkname "$1"
+ maintfile="$MAINTDBDIR/$1"
+ newmaint="$2"
+ if [ ! -f "$maintfile" ]; then
+ echo "Error: package $1 does not exist in maintdb." >&2
+ exit 1
+ fi
+ curmaint=$(cat "$maintfile")
+ if [ "$newmaint" = "nobody" ] || [[ "$newmaint" = *-team ]]; then
+ if [ "$curmaint" = "$user" ]; then
+ echo "$newmaint" > "$maintfile"
+ exit 0
+ else
+ echo "Error: cannot set maintainer for $1." >&2
+ exit 1
+ fi
+ elif [ "$newmaint" = "$user" ]; then
+ if [ "$curmaint" = "nobody" ] || [[ "$curmaint" = *-team ]]; then
+ echo "$newmaint" > "$maintfile"
+ exit 0
+ else
+ echo "Error: cannot set maintainer for $1." >&2
+ exit 1
+ fi
+ else
+ echo "Error: cannot set someone else as maintainer." >&2
+ exit 1
+ fi
+}
+
+function maintgetall()
+{
+ cd "$MAINTDBDIR"
+ for file in *; do
+ echo "$file $(cat $file)"
+ done
+ exit 0
+}
+
+function maintget()
+{
+ if [ -z "$1" ]; then
+ maintgetall
+ fi
+ checkname "$1"
+ maintfile="$MAINTDBDIR/$1"
+ if [ -f "$maintfile" ]; then
+ cat "$maintfile"
+ else
+ echo "Error: package $1 does not exist in maintdb." >&2
+ exit 1
+ fi
+}
+
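+# Called through sudo from the maintdb wrapper: $1 is the invoking user name
+# (passed by the wrapper via whoami), $2 the action (new/set/get), then its arguments.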
+user="$1"
+action="$2"
+
+if [ "$action" = "new" ]; then
+ maintnew "$3" "$4"
+elif [ "$action" = "set" ]; then
+ maintset "$3" "$4"
+elif [ "$action" = "get" ]; then
+ maintget "$3"
+else
+ echo "Error: unknown command." >&2
+ exit 2
+fi
diff --git a/modules/buildsystem/templates/maintdb/sudoers.maintdb b/modules/buildsystem/templates/maintdb/sudoers.maintdb
new file mode 100644
index 00000000..91c88e47
--- /dev/null
+++ b/modules/buildsystem/templates/maintdb/sudoers.maintdb
@@ -0,0 +1,4 @@
+%<%= scope.lookupvar('buildsystem::var::groups::packagers') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* get
+%<%= scope.lookupvar('buildsystem::var::groups::packagers') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* [gs]et [a-zA-Z0-9]*
+%<%= scope.lookupvar('buildsystem::var::groups::packagers') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* set [a-zA-Z0-9]* [a-z]*
+<%= scope.lookupvar('buildsystem::var::scheduler::login') %> ALL =(<%= scope.lookupvar('buildsystem::var::maintdb::login') %>) NOPASSWD: <%= scope.lookupvar('buildsystem::var::maintdb::binpath') %> [a-z]* new [a-zA-Z0-9]* [a-z]*
diff --git a/modules/buildsystem/templates/maintdb/vhost_maintdb.conf b/modules/buildsystem/templates/maintdb/vhost_maintdb.conf
new file mode 100644
index 00000000..146413a7
--- /dev/null
+++ b/modules/buildsystem/templates/maintdb/vhost_maintdb.conf
@@ -0,0 +1,3 @@
+<Directory <%= scope.lookupvar('buildsystem::var::maintdb::dbdir') %>>
+ Options None
+</Directory>
diff --git a/modules/buildsystem/templates/maintdb/wrapper.maintdb b/modules/buildsystem/templates/maintdb/wrapper.maintdb
new file mode 100644
index 00000000..fcf69dab
--- /dev/null
+++ b/modules/buildsystem/templates/maintdb/wrapper.maintdb
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+maintdbuser="<%= scope.lookupvar('buildsystem::var::maintdb::login') %>"
+maintdbpath="<%= scope.lookupvar('buildsystem::var::maintdb::binpath') %>"
+packagersgroup="<%= scope.lookupvar('buildsystem::var::groups::packagers') %>"
+
+function isingroup()
+{
+ grp="$1"
+ for group in `groups`
+ do if [ "$grp" = "$group" ]
+ then
+ return 0
+ fi
+ done
+ return 1
+}
+
+if ! isingroup "$packagersgroup"
+then
+ echo "You are not in $packagersgroup group."
+ exit 1
+fi
+
+sudo -u "$maintdbuser" "$maintdbpath" $(whoami) "$@"
diff --git a/modules/buildsystem/templates/media.cfg b/modules/buildsystem/templates/media.cfg
new file mode 100644
index 00000000..64757a2b
--- /dev/null
+++ b/modules/buildsystem/templates/media.cfg
@@ -0,0 +1,142 @@
+<%-
+def media_name(media, repo, type, archname)
+ name = [ media.capitalize ]
+ if archname != nil
+ name += [ archname ]
+ end
+ for r in repo.split('_') do
+ name += [ r.capitalize ]
+ end
+ if type != nil
+ name += [ type.capitalize ]
+ end
+ return name.join(' ')
+end
+
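+# media_out renders one "[<name>]" ini-style section from the given hash,
+# emitting only the keys whose value is not nil, sorted by key.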
+def media_out(name, media_hash)
+ media_out = "[%s]\n" % name
+ media_hash.keys.sort.each{|key|
+ value = media_hash[key]
+ if value != nil
+ media_out += "%s=%s\n" % [ key, value ]
+ end
+ }
+ return media_out
+end
+distro = scope.lookupvar('buildsystem::var::distros::distros')[@distro_name]
+-%>
+[media_info]
+version=<%= distro['version'] %>
+mediacfg_version=2
+branch=<%= distro['branch'] %>
+<%-
+if @arch != 'armv7hl'
+-%>
+arch=<%= @arch %>
+<%-
+end
+-%>
+xml-info=1
+
+<%-
+distro['medias'].keys.sort { |x,y| distro['medias'][x]['order'] <=> distro['medias'][y]['order'] }.each{|medianame|
+ media = distro['medias'][medianame]
+ media['repos'].keys.sort { |x,y| media['repos'][x]['order'] <=> media['repos'][y]['order'] }.each{|reponame|
+ repo = media['repos'][reponame]
+ media_type = []
+ if media['media_type'] != nil
+ media_type += media['media_type']
+ end
+ if repo['media_type'] != nil
+ media_type += repo['media_type']
+ end
+ noauto=nil
+ if (media['noauto'] == '1') or (repo['noauto'] == '1')
+ noauto='1'
+ end
+ updates_for = nil
+ if repo['updates_for'] != nil
+ updates_for = [ medianame, repo['updates_for'] ].join('/')
+ end
+ -%><%=
+ media_out [ medianame, reponame ].join('/'),
+ :hdlist => [ 'hdlist', medianame, reponame ].join('_') + '.cz',
+ :name => media_name(medianame, reponame, nil, nil),
+ :srpms => [ '../../SRPMS', medianame, reponame ].join('/'),
+ :media_type => media_type.join(':'),
+ :updates_for => updates_for,
+ :noauto => noauto
+
+ %>
+ <%-# debug -%>
+ <%-
+ debug_media_type = media_type + [ 'debug' ]
+ -%><%=
+ media_out [ 'debug', medianame, reponame ].join('/'),
+ :hdlist => [ 'hdlist_debug', medianame, reponame ].join('_') + '.cz',
+ :name => media_name(medianame, reponame, 'debug', nil),
+ :srpms => [ '../../SRPMS', medianame, reponame ].join('/'),
+ :media_type => debug_media_type.join(':'),
+ :noauto => '1'
+
+ %>
+ <%-# source -%>
+ <%-
+ source_media_type = media_type + [ 'source' ]
+ -%><%=
+ media_out [ '../../SRPMS', medianame, reponame ].join('/'),
+ :hdlist => [ 'hdlist', medianame, reponame ].join('_') + '.src.cz',
+ :name => media_name(medianame, reponame, 'sources', nil),
+ :rpms => [ medianame, reponame ].join('/'),
+ :media_type => source_media_type.join(':'),
+ :noauto => '1'
+
+ %>
+ <%-# we add 32bit media if arch is x86_64 -%>
+ <%-
+ if @arch == 'x86_64' and distro['arch'].include?('i586')
+ medianame32 = [ medianame, '32' ].join('')
+ -%><%=
+ media_out [ '../../i586/media', medianame, reponame ].join('/'),
+ :hdlist => [ 'hdlist', medianame32, reponame ].join('_') + '.src.cz',
+ :name => media_name(medianame, reponame, nil, '32bit'),
+ :media_type => media_type.join(':'),
+ :noauto => noauto
+
+ %>
+ <%-
+ end
+ if @arch == 'x86_64' and distro['arch'].include?('i686')
+ medianame32 = [ medianame, '32' ].join('')
+ -%><%=
+ media_out [ '../../i686/media', medianame, reponame ].join('/'),
+ :hdlist => [ 'hdlist', medianame32, reponame ].join('_') + '.src.cz',
+ :name => media_name(medianame, reponame, nil, '32bit'),
+ :media_type => media_type.join(':'),
+ :noauto => noauto
+
+ %>
+ <%-
+ end
+ }
+}
+if distro['based_on'] != nil
+ distro['based_on'].keys.sort.each{|bdistroname|
+ bdistro = distro['based_on'][bdistroname]
+ bdistro.keys.sort.each{|medianame|
+ media = bdistro[medianame]
+ for reponame in media
+ -%><%=
+ media_out [ bdistroname, medianame, reponame ].join('/'),
+ :hdlist => [ 'hdlist', bdistroname, medianame,
+ reponame ].join('_'),
+ :name => media_name([ medianame, bdistroname].join(''), reponame, nil, nil),
+ :media_type => 'base_distro',
+ :noauto => 1
+ %>
+ <%-
+ end
+ }
+ }
+end
+-%>
diff --git a/modules/buildsystem/templates/mga-youri-submit b/modules/buildsystem/templates/mga-youri-submit
new file mode 100755
index 00000000..0d29d462
--- /dev/null
+++ b/modules/buildsystem/templates/mga-youri-submit
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec sudo /usr/local/bin/mga-youri-submit.wrapper "$@"
diff --git a/modules/buildsystem/templates/mga-youri-submit.wrapper b/modules/buildsystem/templates/mga-youri-submit.wrapper
new file mode 100755
index 00000000..66fc59bc
--- /dev/null
+++ b/modules/buildsystem/templates/mga-youri-submit.wrapper
@@ -0,0 +1,36 @@
+#!/usr/bin/perl
+# youri-submit wrapper
+
+use strict;
+use warnings;
+use Fcntl ':mode';
+use File::Basename;
+use MDK::Common;
+
+my $log_dir = "$ENV{HOME}/submit-logs";
+
+my $sudo_user = $ENV{SUDO_USER} or die "should be run through sudo";
+my @prog = ('perl', '-I/usr/share/mga-youri-submit/lib', '/usr/share/mga-youri-submit/bin/youri-submit');
+
+my @options;
+foreach my $arg (@ARGV) {
+ if ($arg =~ /^-?-(\S+)/) {
+ # drop prohibited options
+ if ($arg =~ /-c/ || $arg =~ /-s/) {
+ print STDERR "prohibited option $arg, skipping\n";
+ next;
+ }
+ }
+ push(@options, $arg);
+}
+
+# logging for bug #30315 -spuk, 2007-05-29
+mkdir_p($log_dir);
+open(STDERR, "| tee -a $log_dir/$sudo_user.err >&2");
+open(STDOUT, "| tee -a $log_dir/$sudo_user.out");
+
+# call wrapped program
+print "Executing @prog --config /etc/youri/submit-todo.conf --define user=$sudo_user @options (sudo_user $sudo_user)\n";
+my $err = system(@prog, "--config", "/etc/youri/submit-todo.conf", "--define", "user=$sudo_user", @options) && ($? >> 8 || 1);
+
+exit $err;
diff --git a/modules/buildsystem/templates/mgarepo.conf b/modules/buildsystem/templates/mgarepo.conf
new file mode 100644
index 00000000..fbe5109c
--- /dev/null
+++ b/modules/buildsystem/templates/mgarepo.conf
@@ -0,0 +1,88 @@
+<%-
+ default_distro = scope.lookupvar('buildsystem::var::distros::default_distro')
+ distros = scope.lookupvar('buildsystem::var::distros::distros')
+-%>
+[global]
+verbose = no
+default_parent = <%= scope.lookupvar('buildsystem::var::mgarepo::svn_root_packages') %>/<%= default_distro %>
+#url-map = svn\+ssh://svn\.mageia\.org/(.*) file:///\1
+tempdir = <%= sched_home_dir %>/repsys/tmp
+trunk-dir = <%= default_distro %>
+<%-
+conf = scope.lookupvar('buildsystem::var::mgarepo::conf')
+if conf['global'] != nil
+ conf['global'].keys.sort.each{|key|
+ value = conf['global'][key]
+ -%><%= key %> = <%= value %>
+<%-
+ }
+end
+-%>
+
+
+[log]
+oldurl = <%= scope.lookupvar('buildsystem::var::mgarepo::oldurl') %>
+# controls up to which revision the rpm changelog
+# will be constructed (default zero, i.e., oldest
+# commit)
+# revision-offset = 0
+# commit lines containing this string won't be shown in the changelog:
+ignore-string = SILENT
+
+[template]
+path = /usr/share/mgarepo/default.chlog
+
+[users]
+iurt = Mageia build bot <mageia-sysadm@<%= domain %>>
+
+[submit]
+default = <%= default_distro %>
+host = <%= scope.lookupvar('buildsystem::var::mgarepo::submit_host') %>
+
+<%-
+ distros.keys.sort.each{|d|
+ distro = distros[d]
+-%>
+[submit <%= d %>]
+target = <%= sched_home_dir %>/repsys/srpms
+allowed = <%= distro['submit_allowed'] %> <%= distro['backports_allowed'] %>
+rpm-macros = global <%= d %>
+
+<%-
+ }
+-%>
+
+[macros global]
+# mkrel definition to be removed when rpm-setup is updated on main build node
+mkrel(c:) = %{-c: 0.%{-c*}.}%{1}%{?subrel:.%subrel}%{?distsuffix:%distsuffix}%{?!distsuffix:.mga}%{?distro_release:%distro_release}
+dist = %{?distsuffix:%distsuffix}%{?!distsuffix:.mga}%{?distro_release:%distro_release}
+
+<%-
+ distros.keys.sort.each{|d|
+ distro = distros[d]
+-%>
+[macros <%= d %>]
+distro_release = <%= distro['version'] %>
+<%-
+ distro['macros'].keys.sort.each{|macro|
+ value = distro['macros'][macro]
+ -%><%= macro %> = <%= value %>
+ <%- } %>
+<%- }
+%>
+
+[helper]
+create-srpm = /usr/share/repsys/create-srpm
+upload-srpm = /usr/local/bin/mga-youri-submit
+# needed by mdvsys 2.0
+install-buildrequires = sudo rurpmi --auto --no-suggests
+upload-bin = /usr/local/bin/wrapper.upload-bin
+
+[srpm]
+run-prep = yes
+
+[binrepo]
+<%- binrepo_hostname = scope.lookupvar('buildsystem::var::binrepo::hostname') -%>
+download_url = http://<%= binrepo_hostname %>/
+upload_host = <%= binrepo_hostname %>
+
diff --git a/modules/buildsystem/templates/repoctl.conf b/modules/buildsystem/templates/repoctl.conf
new file mode 100644
index 00000000..14506a25
--- /dev/null
+++ b/modules/buildsystem/templates/repoctl.conf
@@ -0,0 +1,40 @@
+<%-
+distribdir = scope.lookupvar('buildsystem::var::repository::distribdir')
+distros = scope.lookupvar('buildsystem::var::distros::distros')
+arches = {}
+distrosections = {}
+sectionsrepos = {}
+distros.each{|distroname, distro|
+ distro['medias'].each{|medianame, media|
+ distrosections[medianame] = 1
+ media['repos'].each{|reponame, repo|
+ sectionsrepos[reponame] = 1
+ }
+ }
+ distro['arch'].each{|arch|
+ arches[arch] = 1
+ }
+}
+-%>
+dryrun=echo
+if [ -z $SUDO_USER ]
+then
+ requestuser="$USER"
+else
+ requestuser="$SUDO_USER"
+fi
+lockdir=/var/lib/repoctl/locks
+hdlistsdir=/var/lib/repoctl/hdlists
+rootdir=<%= scope.lookupvar('buildsystem::var::repository::bootstrap_root') %>
+finalrootdir=<%= scope.lookupvar('buildsystem::var::repository::mirror_root') %>
+distribdir=$rootdir/<%= distribdir %>
+finaldistribdir=$finalrootdir/<%= distribdir %>
+distroreleases='<%= distros.keys.sort.join(' ') -%>'
+distrosections='<%= distrosections.keys.sort.join(' ') -%>'
+sectionsrepos='<%= sectionsrepos.keys.sort.join(' ') -%>'
+arches='<%= arches.keys.sort.join(' ') -%>'
+mirror_rsync_options="-v --delete -alH"
+timestampfile="mageia_timestamp"
+sha1sumfile="mageia_sha1sum"
+sha1sumsigfile="mageia_sha1sum.gpg"
+sign_mirror_sha1sum=/bin/true
diff --git a/modules/buildsystem/templates/rpmlint.conf b/modules/buildsystem/templates/rpmlint.conf
new file mode 100644
index 00000000..b81f169b
--- /dev/null
+++ b/modules/buildsystem/templates/rpmlint.conf
@@ -0,0 +1,7 @@
+from Config import *
+execfile('/etc/rpmlint/extracted.d/distribution.exceptions.conf')
+
+for i in open('/etc/rpmlint/extracted.d/distribution.error.list').readlines():
+ setBadness(i.strip(), 10)
+
+
diff --git a/modules/buildsystem/templates/signbot/sudoers.signpackage b/modules/buildsystem/templates/signbot/sudoers.signpackage
new file mode 100644
index 00000000..4ea30238
--- /dev/null
+++ b/modules/buildsystem/templates/signbot/sudoers.signpackage
@@ -0,0 +1,2 @@
+<%= sched_login %> ALL =(<%= scope.lookupvar('buildsystem::var::signbot::login') %>) NOPASSWD: /usr/local/bin/mga-signpackage
+<%= sched_login %> ALL =(<%= scope.lookupvar('buildsystem::var::signbot::login') %>) NOPASSWD: /usr/local/bin/sign-check-package
diff --git a/modules/buildsystem/templates/submit_package.pl b/modules/buildsystem/templates/submit_package.pl
new file mode 100755
index 00000000..1fdf7749
--- /dev/null
+++ b/modules/buildsystem/templates/submit_package.pl
@@ -0,0 +1,18 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+
+my $svn_server = '<%= scope.lookupvar('buildsystem::var::mgarepo::svn_hostname') %>';
+my $packagersgroup="<%= scope.lookupvar('buildsystem::var::groups::packagers') %>";
+
+my $login = getpwuid($<);
+my (undef, undef, undef, $members) = getgrnam $packagersgroup;
+if (not $members =~ /\b$login\b/) {
+ print "You are not in $packagersgroup group\n";
+ exit 1;
+}
+
+# for bug 914
+# https://bugs.mageia.org/show_bug.cgi?id=914
+map { $_ =~ s|^svn\+ssh://$svn_server/|svn://$svn_server/| } @ARGV;
+exec "/usr/share/mgarepo/create-srpm", @ARGV;
diff --git a/modules/buildsystem/templates/sudoers.iurt b/modules/buildsystem/templates/sudoers.iurt
index 266f301c..21e81e87 100644
--- a/modules/buildsystem/templates/sudoers.iurt
+++ b/modules/buildsystem/templates/sudoers.iurt
@@ -1 +1 @@
-<%= build_login %> ALL = NOPASSWD: /usr/sbin/iurt_root_command
+<%= scope.lookupvar('buildsystem::var::iurt::login') %> ALL = NOPASSWD: /usr/sbin/iurt_root_command
diff --git a/modules/buildsystem/templates/sudoers.youri b/modules/buildsystem/templates/sudoers.youri
new file mode 100644
index 00000000..3bc7cc2d
--- /dev/null
+++ b/modules/buildsystem/templates/sudoers.youri
@@ -0,0 +1,6 @@
+<%- sched_login = scope.lookupvar('buildsystem::var::scheduler::login') -%>
+Cmnd_Alias YOURI = /usr/local/bin/mga-youri-submit.wrapper
+Defaults!YOURI always_set_home
+Defaults!YOURI runas_default = <%= sched_login %>
+Defaults!YOURI !requiretty
+%<%= scope.lookupvar('buildsystem::var::groups::packagers') -%> ALL = (<%= sched_login %>) NOPASSWD: YOURI
diff --git a/modules/buildsystem/templates/upload.conf b/modules/buildsystem/templates/upload.conf
new file mode 100644
index 00000000..af610c92
--- /dev/null
+++ b/modules/buildsystem/templates/upload.conf
@@ -0,0 +1,131 @@
+###
+#
+# Do not disable the host without appropriate warning
+# to somebody able to fix the machine
+#
+# Please run 'perl -cw .upload.conf' in order to check the file is OK.
+#
+###
+
+<%-
+ build_nodes = scope.lookupvar('buildsystem::var::scheduler::build_nodes')
+-%>
+my %nodes = (
+<%-
+ build_nodes.keys.sort.each{|arch|
+-%>
+ <%= arch -%> => [ '<%= build_nodes[arch].join("', '") -%>' ],
+<%-
+ }
+-%>
+);
+my $repository = "http://<%= scope.lookupvar('buildsystem::var::repository::hostname') %>/<%= scope.lookupvar('buildsystem::var::repository::distribdir') %>/";
+my $homedir = "<%= scope.lookupvar('buildsystem::var::iurt::homedir') %>";
+
+{
+ bot => {
+ (map {
+ my $arch = $_;
+ $arch => {
+ map {
+ my $node = $_;
+ ($node => {
+ iurt => {
+ user => '<%= scope.lookupvar('buildsystem::var::iurt::login') %>',
+# (spuk, 2007-08-16) disabled iurt_cache additional media, locks trying to mount -o bind
+# command => "iurt --copy-srpm --group -v 6 --config local_spool $homedir/iurt/__DIR__ --no_rsync --chrooted-urpmi -m __MEDIA__ -- $repository --additional-media -m __MEDIA__ -- file://$homedir/cache/ -p \"__PACKAGER__\" -r __TARGET__ __ARCH__",
+ command => "iurt --copy_srpm --group --rpmmacros \"%distro_section __SECTION__\" --config local_spool $homedir/iurt/__DIR__ --no_rsync --chrooted-urpmi -m __MEDIA__ -- $repository -p \"__PACKAGER__\" -r __TARGET__ __ARCH__",
+ packages => "$homedir/iurt/",
+ },
+ });
+ } @{$nodes{$arch}},
+ };
+ } keys %nodes),
+ },
+ media => {
+ <%-
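+ # repo_deps returns, as hash keys, every "media/repo" combination (plus
+ # "distro/media/repo" entries for based_on parent distros) that the given
+ # media/repo depends on, following the media and repo 'requires' lists recursively.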
+ def repo_deps(distros, dname, mname, rname)
+ deps = {}
+ distro = distros[dname]
+ if distro['based_on'] != nil
+ distro['based_on'].each{|bdistro, bmedias|
+ if bmedias[mname] != nil and \
+ bmedias[mname].include?(rname) then
+ deps[ [ bdistro, mname, rname ].join('/') ] = 1
+ end
+ }
+ end
+ if distro['medias'][mname] != nil \
+ and distro['medias'][mname]['repos'][rname] != nil
+ then
+ deps[ [ mname, rname ].join('/') ] = 1
+ else
+ return deps
+ end
+ mlist = distro['medias'][mname]['requires']
+ mlist = mlist == nil ? [ mname ] : [ mname ] + mlist
+ mlist.each{|mreq|
+ rlist = distro['medias'][mname]['repos'][rname]['requires']
+ rlist = [] if rlist == nil
+ rlist += [ rname ] if mreq != mname
+ rlist.each{|rreq|
+ deps.merge!(repo_deps(distros, dname, mreq, rreq))
+ }
+ }
+ return deps
+ end
+ distros = scope.lookupvar('buildsystem::var::distros::distros')
+ distros.keys.sort.each{|distroname|
+ -%>
+ '<%= distroname -%>' => {
+ <%-
+ distro = distros[distroname]
+ distro['medias'].keys.sort.each{|medianame|
+ media = distro['medias'][medianame]
+ media['repos'].keys.sort.each{|reponame|
+ deps = repo_deps(distros, distroname, medianame, reponame)
+ -%>
+ "<%= [ medianame, reponame ].join('/') %>" => [ "<%=
+ deps.keys.sort.join('", "')
+ %>" ],
+ <%-
+ }
+ }
+ -%>
+ },
+ <%-
+ }
+ -%>
+ },
+ admin => '<%= scope.lookupvar('buildsystem::var::scheduler::admin_mail') %>',
+ http_queue => 'https://<%= scope.lookupvar('buildsystem::var::webstatus::hostname') %>/uploads',
+ upload_user => '<%= scope.lookupvar('buildsystem::var::scheduler::login') %>',
+ email_domain => '<%= domain %>',
+ arch => {
+ <%-
+ distros.keys.sort.each{|distroname|
+ -%>
+ <%= distroname -%> => [ '<%= distros[distroname]['arch'].join("', '") %>' ],
+ <%-
+ }
+ -%>
+ default => [ 'i586', 'x86_64' ],
+ },
+ mandatory_arch => {
+ <%-
+ distros.keys.sort.each{|distroname|
+ if distros[distroname]['mandatory_arch'] != nil
+ march = distros[distroname]['mandatory_arch']
+ else
+ march = distros[distroname]['arch']
+ end
+ -%>
+ <%= distroname -%> => [ '<%= march.join("', '") %>' ],
+ <%-
+ }
+ -%>
+ default => [ 'i586', 'x86_64' ],
+ },
+ ssh_options => "-o ServerAliveInterval=10 -o ConnectTimeout=20 -o BatchMode=yes",
+ faildelay => 360000,
+}
diff --git a/modules/buildsystem/templates/vhost_repository.conf b/modules/buildsystem/templates/vhost_repository.conf
new file mode 100644
index 00000000..e082ffca
--- /dev/null
+++ b/modules/buildsystem/templates/vhost_repository.conf
@@ -0,0 +1,73 @@
+<%-
+mirror_root = scope.lookupvar('buildsystem::var::repository::mirror_root')
+mirror_reporoot = scope.lookupvar('buildsystem::var::repository::mirror_reporoot')
+bootstrap_reporoot = scope.lookupvar('buildsystem::var::repository::bootstrap_reporoot')
+distribdir = scope.lookupvar('buildsystem::var::repository::distribdir')
+repo_allow_from_ips = scope.lookupvar('buildsystem::var::distros::repo_allow_from_ips')
+repo_allow_from_domains = scope.lookupvar('buildsystem::var::distros::repo_allow_from_domains')
+distros = scope.lookupvar('buildsystem::var::distros::distros')
+-%>
+<VirtualHost *:80>
+ ServerName <%= scope.lookupvar('buildsystem::var::repository::hostname') %>
+ DocumentRoot <%= mirror_root %>
+
+ # Some simple API to check existence of SRPMs for QA
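+ # For example (package name is hypothetical), /qa/checksrpm/update/9/core/foo-1.0-1.mga9
+ # answers 302 if 9/SRPMS/core/updates_testing/foo-1.0-1.mga9.src.rpm exists under the
+ # bootstrap tree, and 410 Gone otherwise.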
+ RewriteEngine On
+
+ RewriteCond /distrib/bootstrap/distrib/$2/SRPMS/$3/$1s_testing/$4.src.rpm -f
+ RewriteRule ^/qa/checksrpm/(update|backport)/([1-9][0-9]*)/([a-z_]+)/([^/]+)$ http://repository.mageia.org/qa/checksrpm/found [L,R=302]
+
+ RewriteRule ^/qa/checksrpm/ - [L,G]
+
+<%-
+ if repo_allow_from_ips != nil || repo_allow_from_domains != nil then
+ access_requires = [ 'all denied' ]
+ if repo_allow_from_ips != nil then
+ for allow in repo_allow_from_ips do
+ access_requires << 'ip ' + allow
+ end
+ end
+ if repo_allow_from_domains != nil then
+ for allow in repo_allow_from_domains do
+ access_requires << 'host ' + allow
+ end
+ end
+ else
+ access_requires = [ 'all granted' ]
+ end
+%>
+ Alias /bootstrap/ "<%= bootstrap_reporoot %>/"
+<%-
+ distros.keys.sort.each{|distroname|
+ distro = distros[distroname]
+ if distro['no_mirror'] -%>
+ Alias /<%= distribdir %>/<%= distroname %>/ "<%= bootstrap_reporoot %>/<%= distroname %>/"
+<%-
+ end
+ }
+-%>
+
+ <Directory <%= bootstrap_reporoot %>>
+ Header append Cache-Control "public, must-revalidate"
+<%-
+ for req in access_requires do
+-%>
+ Require <%= req %>
+<%-
+ end
+-%>
+ Options Indexes FollowSymLinks
+ </Directory>
+
+ <Directory <%= mirror_root %>>
+ Header append Cache-Control "public, must-revalidate"
+<%-
+ for req in access_requires do
+-%>
+ Require <%= req %>
+<%-
+ end
+-%>
+ Options Indexes FollowSymLinks
+ </Directory>
+</VirtualHost>
diff --git a/modules/buildsystem/templates/vhost_webstatus.conf b/modules/buildsystem/templates/vhost_webstatus.conf
new file mode 100644
index 00000000..3b0e6246
--- /dev/null
+++ b/modules/buildsystem/templates/vhost_webstatus.conf
@@ -0,0 +1,13 @@
+<Location /uploads>
+ Require all granted
+ Options Indexes
+ IndexOptions NameWidth=*
+</Location>
+<Location /autobuild>
+ Require all granted
+ Options Indexes
+ IndexOptions NameWidth=*
+</Location>
+<Directory /var/www/bs/autobuild>
+ Options FollowSymLinks
+</Directory>
diff --git a/modules/buildsystem/templates/youri/acl.conf b/modules/buildsystem/templates/youri/acl.conf
new file mode 100644
index 00000000..f0949f8a
--- /dev/null
+++ b/modules/buildsystem/templates/youri/acl.conf
@@ -0,0 +1 @@
+.* .* .* ^glib$ ^blacklisted$
diff --git a/modules/buildsystem/templates/youri/host.conf b/modules/buildsystem/templates/youri/host.conf
new file mode 100644
index 00000000..bf4fa086
--- /dev/null
+++ b/modules/buildsystem/templates/youri/host.conf
@@ -0,0 +1,23 @@
+<%-
+ aliases = scope.lookupvar('buildsystem::var::scheduler::build_nodes_aliases')
+ nodes = {}
+ nodes['src'] = [ scope.lookupvar('buildsystem::var::scheduler::build_src_node') ]
+ scope.lookupvar('buildsystem::var::scheduler::build_nodes').each{|arch,n|
+ a = arch + '|noarch|src'
+ nodes[a] = []
+ n.each{|node|
+ if aliases[node] != nil
+ nodes[a] += [ aliases[node] ]
+ else
+ nodes[a] += [ node ]
+ end
+ }
+ }
+ str = ''
+ nodes.keys.sort.each{|arch|
+ nodes[arch].sort.uniq.each{|node|
+ str += node + ' ' + arch + "\n"
+ }
+ }
+-%>
+<%= str -%>
diff --git a/modules/buildsystem/templates/youri/submit.conf b/modules/buildsystem/templates/youri/submit.conf
new file mode 100644
index 00000000..0d7cf927
--- /dev/null
+++ b/modules/buildsystem/templates/youri/submit.conf
@@ -0,0 +1,140 @@
+<%
+Puppet::Parser::Functions.autoloader.loadall
+sched_home_dir = scope.lookupvar('buildsystem::var::scheduler::homedir')
+
+conf = scope.lookupvar('buildsystem::var::youri::youri_conf')
+conf_default = scope.lookupvar('buildsystem::var::youri::youri_conf_default')
+distros = scope.lookupvar('buildsystem::var::distros::distros')
+
+def line(text, indent)
+ res = ''
+ i = 0
+ while i < indent
+ res += ' '
+ i += 1
+ end
+ res += text + "\n"
+end
+
+def array_text(array, indent)
+ res = ''
+ array.each{|a|
+ res += line('- ' + a, indent)
+ }
+ return res
+end
+
+def hash_text(hash, indent)
+ res = ''
+ curindent = indent
+ hash.keys.sort.each{|key|
+ if hash[key].instance_of? Hash
+ res += line(key + ':', indent)
+ res += hash_text(hash[key], indent + 4)
+ elsif hash[key].instance_of? Array
+ res += line(key + ':', indent)
+ res += array_text(hash[key], indent + 4)
+ elsif hash[key].instance_of? String
+ res += line(key + ': ' + hash[key], indent)
+ end
+ }
+ return res
+end
+
+def class_hash(conf, conf_default)
+ res = {}
+ res['class'] = get_conf(conf, ['class']) == nil ?
+ conf_default['class'] : conf['class']
+ res['options'] = get_conf(conf_default, ['options']) == nil ? {} :
+ conf_default['options'].dup
+ if get_conf(conf, ['options']) != nil
+ res['options'].merge!(conf['options'])
+ end
+ return res
+end
+
+def get_conf(conf, path)
+ res = conf
+ path.each{|p|
+ if res == nil
+ return nil
+ end
+ res = res[p]
+ }
+ return res
+end
+
+def get_distros_conf(distros, conf_name, path)
+ res = {}
+ distros.keys.each{|distro|
+ t = get_conf(distros[distro], [ 'youri', conf_name ] + path)
+ if t != nil
+ res[distro] = t.dup
+ end
+ }
+ return res
+end
+
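+# Build the "<def_name>" section ('checks', 'actions' or 'posts'): merge the
+# default definitions with any overrides from youri_conf, then add per-distro
+# option blocks declared under each distro's 'youri' configuration.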
+def get_definitions(def_name, conf_name, conf, conf_default, distros)
+ res = {}
+ res[def_name] = {}
+ def_list = conf_default[conf_name][def_name].keys
+ def_list += get_conf(conf, [ conf_name, def_name ]) != nil ? \
+ conf[conf_name][def_name].keys : []
+ def_list.uniq.each{|d|
+ res[def_name][d] = class_hash(
+ get_conf(conf, [ conf_name, def_name, d ]),
+ get_conf(conf_default, [ conf_name, def_name, d ])
+ )
+ res[def_name][d]['options'].merge!(get_distros_conf(distros,
+ conf_name, [ def_name, d ]))
+ }
+ return res
+end
+
+%>
+home: <%= sched_home_dir %>
+
+<%-
+ repository = {
+ 'repository' => class_hash(get_conf(conf[conf_name],
+ ['repository']),
+ conf_default[conf_name]['repository']),
+ }
+ distros.keys.each{|distro|
+ repository['repository']['options'][distro] = {
+ 'arch' => distros[distro]['arch'].join(' '),
+ }
+ }
+-%>
+# repository declaration
+<%= hash_text(repository, 0) %>
+
+<%-
+ targets = {
+ 'targets' => get_distros_conf(distros, conf_name, [ 'targets' ]),
+ }
+-%>
+# targets definition
+<%= hash_text(targets, 0) %>
+
+<%-
+ checks = get_definitions('checks', conf_name, conf, conf_default, distros)
+-%>
+# checks definition
+<%= hash_text(checks, 0) -%>
+
+<%-
+ actions = get_definitions('actions', conf_name, conf, conf_default, distros)
+-%>
+# actions definitions
+<%= hash_text(actions, 0) -%>
+
+<%-
+ posts = get_definitions('posts', conf_name, conf, conf_default, distros)
+-%>
+
+# posts definitions
+<%= hash_text(posts, 0) -%>
+
+# vim:ft=yaml:et:sw=4
diff --git a/modules/catdap/manifests/init.pp b/modules/catdap/manifests/init.pp
index 018b6ed5..f7172208 100644
--- a/modules/catdap/manifests/init.pp
+++ b/modules/catdap/manifests/init.pp
@@ -1,42 +1,47 @@
class catdap {
- $catdap_location = "/var/www/identity"
- $catdap_vhost = "identity.$domain"
+ $upstream_git = "git://git.${::domain}/web/identity"
# TODO switch to a proper rpm packaging
- $rpm_requirement = ['perl-Catalyst-Runtime',"perl-FCGI", 'perl-Catalyst-Plugin-Authorization-Roles',
-"perl-Catalyst-Action-RenderView", "perl-Catalyst-Model-LDAP-FromAuthentication", "perl-Catalyst-P-A-Store-LDAP", "perl-Catalyst-Plugin-Authentication", "perl-Catalyst-Plugin-Captcha",
-"perl-Catalyst-Plugin-ConfigLoader", "perl-Catalyst-Plugin-I18N", "perl-Catalyst-Plugin-Session-Store-File", "perl-Catalyst-Plugin-Static-Simple",
-"perl-Catalyst-P-S-State-Cookie", "perl-Catalyst-P-S-Store-File", "perl-Catalyst-View-Email",
-"perl-Catalyst-View-TT", "perl-Config-General", "perl-Crypt-CBC", "perl-Data-UUID",
-"perl-Email-Valid", "perl-Moose", "perl-namespace-autoclean", "perl-Test-Simple",
-"perl-Crypt-Blowfish", "perl-Email-Date-Format", "perl-YAML-LibYAML",
-]
+ $rpm_requirement = ['perl-Catalyst-Runtime',
+ 'perl-FCGI',
+ 'perl-Catalyst-Plugin-Authorization-Roles',
+ 'perl-Catalyst-Action-RenderView',
+ 'perl-Catalyst-Model-LDAP-FromAuthentication',
+ 'perl-Catalyst-P-A-Store-LDAP',
+ 'perl-Catalyst-Plugin-Authentication',
+ 'perl-Catalyst-Plugin-Captcha',
+ 'perl-Catalyst-Plugin-ConfigLoader',
+ 'perl-Catalyst-Plugin-I18N',
+ 'perl-Catalyst-Plugin-Session-Store-File',
+ 'perl-Catalyst-Plugin-Static-Simple',
+ 'perl-Catalyst-P-S-State-Cookie',
+ 'perl-Catalyst-View-Email',
+ 'perl-Catalyst-View-TT',
+ 'perl-Config-General',
+ 'perl-Crypt-CBC',
+ 'perl-Data-UUID',
+ 'perl-Email-Valid',
+ 'perl-Moose',
+ 'perl-namespace-autoclean',
+ 'perl-Test-Simple',
+ 'perl-Crypt-Blowfish',
+ 'perl-Email-Date-Format',
+ 'perl-YAML-LibYAML',
+ 'perl-IO-Socket-INET6' ]
- package { $rpm_requirement:
- ensure => installed
- }
+ package { $rpm_requirement: }
- subversion::snapshot { $catdap_location:
- source => "svn://svn.mageia.org/soft/identity/CatDap/branches/live"
- }
+ $ldap_password = extlookup('catdap_ldap','x')
- $catdap_password = extlookup('catdap_password')
-
- file { "$catdap_location/catdap_local.yml":
- ensure => present,
- owner => root,
- group => apache,
- mode => 640,
- content => template("catdap/catdap_local.yml"),
- require => Subversion::Snapshot[$catdap_location]
+ catdap::snapshot { "identity.${::domain}":
+ location => '/var/www/identity',
+ git_location => $upstream_git,
+ git_branch => 'topic/production',
}
- apache::vhost_catalyst_app { $catdap_vhost:
- script => "$catdap_location/script/catdap_fastcgi.pl",
- location => $catdap_location,
- use_ssl => true,
+ catdap::snapshot { "identity-trunk.${::domain}":
+ location => '/var/www/identity-trunk',
+ git_location => $upstream_git,
}
-
- apache::vhost_redirect_ssl { $catdap_vhost: }
}
diff --git a/modules/catdap/manifests/snapshot.pp b/modules/catdap/manifests/snapshot.pp
new file mode 100644
index 00000000..35ca692e
--- /dev/null
+++ b/modules/catdap/manifests/snapshot.pp
@@ -0,0 +1,21 @@
+define catdap::snapshot($location, $git_location, $git_branch = 'master') {
+ file { "${location}/catdap_local.yml":
+ group => apache,
+ mode => '0640',
+ content => template('catdap/catdap_local.yml'),
+ require => Git::Snapshot[$location],
+ }
+
+ git::snapshot { $location:
+ source => $git_location,
+ branch => $git_branch,
+ }
+
+ apache::vhost::catalyst_app { $name:
+ script => "${location}/script/catdap_fastcgi.pl",
+ location => $location,
+ use_ssl => true,
+ }
+
+ apache::vhost::redirect_ssl { $name: }
+}
diff --git a/modules/catdap/templates/catdap_local.yml b/modules/catdap/templates/catdap_local.yml
index 50f43601..d982b40b 100644
--- a/modules/catdap/templates/catdap_local.yml
+++ b/modules/catdap/templates/catdap_local.yml
@@ -1,22 +1,20 @@
<%
-ldap_server = 'ldap.' + domain
+ldap_server = "ldap-master.#{domain}"
-ldap_password = catdap_password
-
-ldap_account = 'cn=catdap-valstar,ou=System Accounts,' + dc_suffix
+ldap_account = "cn=catdap-#{hostname},ou=System Accounts,#{dc_suffix}"
%>
organisation: Mageia
apptitle: Mageia Identity Management
-emailfrom: noreply@<%= domain %>
+emailfrom: noreply@<%= @domain %>
Model::Proxy:
- base: ou=People,<%= dc_suffix %>
+ base: ou=People,<%= @dc_suffix %>
dn: <%= ldap_account %>
- password: <%= ldap_password %>
+ password: <%= scope.lookupvar("catdap::ldap_password") %>
Model::User:
- base: <%= dc_suffix %>
+ base: <%= @dc_suffix %>
host: <%= ldap_server %>
start_tls: 1
@@ -27,7 +25,98 @@ authentication:
store:
ldap_server: <%= ldap_server %>
binddn: <%= ldap_account %>
- bindpw: <%= ldap_password %>
- user_basedn: ou=People,<%= dc_suffix %>
- role_basedn: <%= dc_suffix %>
+ bindpw: <%= scope.lookupvar("catdap::ldap_password") %>
+ user_basedn: ou=People,<%= @dc_suffix %>
+ role_basedn: <%= @dc_suffix %>
+
+register:
+ login_regex: ^[a-z][a-z0-9]*$
+ login_blacklist:
+ - abuse
+ - apache
+ - bcd
+ - hostmaster
+ - iurt
+ - listmaster
+ - MAILER-DAEMON
+ - mirror
+ - noc
+ - postmaster
+ - president
+ - schedbot
+ - secretary
+ - security
+ - signbot
+ - treasurer
+ - webmaster
+ - www
+
+ email_domain_blacklist:
+ - armyspy.com
+ - bitmessage.ch
+ - codehot.co.uk
+ - crazymailing.com
+ - dayrep.com
+ - group.mageia.org
+ - grr.la
+ - guerrillamail.biz
+ - guerrillamail.com
+ - guerrillamail.de
+ - guerrillamail.info
+ - guerrillamail.net
+ - guerrillamail.org
+ - guerrillamailblock.com
+ - jourrapide.com
+ - ml.mageia.org
+ - namecheap.com
+ - pokemail.net
+ - rhyta.com
+ - runbox.com
+ - sharklasers.com
+ - spam4.me
+ - vmani.com
+ - wowring.ru
+ - yopmail.com
+ - zasod.com
+Controller::User:
+ editable_attrs:
+ - cn
+ - sn
+ - givenName
+ - mobile
+ - mailForwardingAddress
+ - preferredLanguage
+ uneditable_attrs:
+ - uid
+ - uidNumber
+ - gidNumber
+ - homeDirectory
+ - mail
+ - sshPublicKey
+ - loginShell
+ skip_attrs:
+ - objectClass
+ - krb5Key
+ - sambaMungedDial
+ - sambaPasswordHistory
+ - userPassword
+ - sambaLMPassword
+ - sambaNTPassword
+ - sambaPwdMustChange
+ - sambaSID
+ - sambaPrimaryGroupSID
+ - sambaAcctFlags
+ - sambaPwdCanChange
+ - sambaPwdLastSet
+ - sambaKickOffTime
+ - sambaUserWorkstations
+ - sambaLogonTime
+ - krb5KeyVersionNumber
+ - krb5PasswordEnd
+ - krb5MaxLife
+ - krb5MaxRenew
+ - krb5KDCFlags
+ - shadowLastChange
+ - roomNumber
+ - secretary
diff --git a/modules/cgit/manifests/init.pp b/modules/cgit/manifests/init.pp
new file mode 100644
index 00000000..60dc9bad
--- /dev/null
+++ b/modules/cgit/manifests/init.pp
@@ -0,0 +1,27 @@
+class cgit {
+ package { 'cgit': }
+
+ file { '/etc/cgitrc':
+ content => template('cgit/cgitrc'),
+ notify => Service['apache'],
+ require => Package['cgit'],
+ }
+
+ apache::webapp_other { 'cgit':
+ webapp_file => 'cgit/webapp.conf',
+ }
+
+ mga_common::local_script { 'cgit.filter.commit-links.sh':
+ content => template('cgit/filter.commit-links.sh'),
+ }
+
+ apache::vhost::base { "gitweb.${::domain}":
+ content => template('cgit/vhost.conf')
+ }
+
+ apache::vhost::base { "ssl_gitweb.${::domain}":
+ use_ssl => true,
+ vhost => "gitweb.${::domain}",
+ content => template('cgit/vhost.conf')
+ }
+}
diff --git a/modules/cgit/templates/cgitrc b/modules/cgit/templates/cgitrc
new file mode 100644
index 00000000..1e1a399c
--- /dev/null
+++ b/modules/cgit/templates/cgitrc
@@ -0,0 +1,137 @@
+#
+# See cgitrc(5) or /usr/share/doc/cgit-*/cgitrc.5.html for details
+#
+
+# Enable caching of up to 1000 output entries
+cache-size=1000
+
+
+# Specify some default clone urls using macro expansion
+clone-url=git://git.mageia.org/$CGIT_REPO_URL ssh://git@git.mageia.org/$CGIT_REPO_URL
+
+# Specify the css url
+css=/cgit-data/cgit.css
+
+
+# Show owner on index page
+enable-index-owner=1
+
+
+# Allow http transport git clone
+enable-git-clone=1
+
+
+# Show extra links for each repository on the index page
+enable-index-links=1
+
+
+# Enable ASCII art commit history graph on the log pages
+enable-commit-graph=1
+
+
+# Show number of affected files per commit on the log pages
+enable-log-filecount=1
+
+
+# Show number of added/removed lines per commit on the log pages
+enable-log-linecount=1
+
+
+# Sort branches by date
+branch-sort=age
+
+
+# Add a cgit favicon
+#favicon=/favicon.ico
+
+
+# Use a custom logo
+logo=//nav.mageia.org/css/mageia-logo-nav-3.png
+
+# Try to avoid pagination on the main page (until we have too many software repos)
+max-repo-count=200
+
+# Enable statistics per week, month and quarter
+max-stats=quarter
+
+
+# Set the title and heading of the repository index page
+root-title=Mageia git Repositories
+
+
+# Set a subheading for the repository index page
+root-desc=A web frontend to the git repositories of the Mageia project
+
+
+# Include some more info about example.com on the index page
+#root-readme=/var/www/htdocs/about.html
+
+
+# Allow download of tar.gz, tar.bz2 and zip-files
+#snapshots=tar.gz tar.bz2 zip
+snapshots=all
+
+
+##
+## List of common mimetypes
+##
+
+mimetype.gif=image/gif
+mimetype.html=text/html
+mimetype.jpg=image/jpeg
+mimetype.jpeg=image/jpeg
+mimetype.pdf=application/pdf
+mimetype.png=image/png
+mimetype.svg=image/svg+xml
+
+
+# Highlight source code with python pygments-based highlighter
+source-filter=/usr/libexec/cgit/filters/syntax-highlighting.sh
+
+# Format markdown, restructuredtext, manpages, text files, and html files
+# through the right converters
+about-filter=/usr/libexec/cgit/filters/about-formatting.sh
+
+##
+## Search for these files in the root of the default branch of repositories
+## for coming up with the about page:
+##
+readme=:README.mga.md
+readme=:README.md
+readme=:README.rst
+readme=:README.html
+readme=:README.txt
+readme=:README
+readme=:INSTALL.md
+readme=:INSTALL.rst
+readme=:INSTALL.html
+readme=:INSTALL.txt
+readme=:INSTALL
+
+# Special Case mainly for initscripts git repo where we cannot write to master
+readme=distro/mga:README.md
+
+##
+## List of repositories.
+## PS: Any repositories listed when section is unset will not be
+## displayed under a section heading
+## PPS: This list could be kept in a different file (e.g. '/etc/cgitrepos')
+## and included like this:
+## include=/etc/cgitrepos
+##
+
+#repo.url=foo
+#repo.path=/var/lib/git/foo.git
+#repo.desc=the master foo repository
+#repo.owner=fooman@example.com
+#repo.readme=info/web/about.html
+
+
+commit-filter=/usr/local/bin/cgit.filter.commit-links.sh
+
+enable-git-config=1
+section-from-path=-1
+case-sensitive-sort=0
+remove-suffix=1
+scan-path=/git
+enable-http-clone=0
diff --git a/modules/cgit/templates/filter.commit-links.sh b/modules/cgit/templates/filter.commit-links.sh
new file mode 100755
index 00000000..f0f7ee14
--- /dev/null
+++ b/modules/cgit/templates/filter.commit-links.sh
@@ -0,0 +1,44 @@
+#!/bin/sh
+# This script can be used to generate links in commit messages.
+#
+# To use this script, refer to this file with either the commit-filter or the
+# repo.commit-filter options in cgitrc.
+#
+# The following environment variables can be used to retrieve the configuration
+# of the repository for which this script is called:
+# CGIT_REPO_URL ( = repo.url setting )
+# CGIT_REPO_NAME ( = repo.name setting )
+# CGIT_REPO_PATH ( = repo.path setting )
+# CGIT_REPO_OWNER ( = repo.owner setting )
+# CGIT_REPO_DEFBRANCH ( = repo.defbranch setting )
+# CGIT_REPO_SECTION ( = section setting )
+# CGIT_REPO_CLONE_URL ( = repo.clone-url setting )
+#
+
+regex=''
+
+# This expression generates links to commits referenced by their SHA1.
+regex=$regex'
+s|\b([0-9a-fA-F]{7,40})\b|<a href="./?id=\1">\1</a>|g'
+
+# This expression generates links to various common bugtrackers.
+# When editing this list, remember to edit the same list in
+# deployment/mgagit/templates/git-post-receive-hook
+regex=$regex'
+s|mga#([0-9]+)\b|<a href="https://bugs.mageia.org/\1">mga#\1</a>|g'
+regex=$regex'
+s|rhbz#([0-9]+)\b|<a href="https://bugzilla.redhat.com/show_bug.cgi?id=\1">rhbz#\1</a>|g'
+regex=$regex'
+s|fdo#([0-9]+)\b|<a href="https://bugs.freedesktop.org/show_bug.cgi?id=\1">fdo#\1</a>|g'
+regex=$regex'
+s|bko#([0-9]+)\b|<a href="https://bugs.kde.org/show_bug.cgi?id=\1">bko#\1</a>|g'
+regex=$regex'
+s|kde#([0-9]+)\b|<a href="https://bugs.kde.org/show_bug.cgi?id=\1">kde#\1</a>|g'
+regex=$regex'
+s|bgo#([0-9]+)\b|<a href="https://bugzilla.gnome.org/show_bug.cgi?id=\1">bgo#\1</a>|g'
+regex=$regex'
+s|gnome#([0-9]+)\b|<a href="https://bugzilla.gnome.org/show_bug.cgi?id=\1">gnome#\1</a>|g'
+regex=$regex'
+s|lp#([0-9]+)\b|<a href="https://launchpad.net/bugs/\1">lp#\1</a>|g'
+
+sed -re "$regex"
diff --git a/modules/cgit/templates/vhost.conf b/modules/cgit/templates/vhost.conf
new file mode 100644
index 00000000..5c1d99e7
--- /dev/null
+++ b/modules/cgit/templates/vhost.conf
@@ -0,0 +1,8 @@
+Alias /cgit-data /usr/share/cgit
+Alias /robots.txt /usr/share/cgit/robots.txt
+ScriptAliasMatch ^(.*) /var/www/cgi-bin/cgit$1
+
+<Directory /usr/share/cgit>
+ Order allow,deny
+ Allow from all
+</Directory>
diff --git a/modules/cgit/templates/webapp.conf b/modules/cgit/templates/webapp.conf
new file mode 100644
index 00000000..4e1d8289
--- /dev/null
+++ b/modules/cgit/templates/webapp.conf
@@ -0,0 +1,3 @@
+# Disable standard cgit configuration
+#Alias /cgit-data /usr/share/cgit
+#ScriptAlias /cgit /var/www/cgi-bin/cgit
diff --git a/modules/cron/manifests/init.pp b/modules/cron/manifests/init.pp
new file mode 100644
index 00000000..6dd0ea44
--- /dev/null
+++ b/modules/cron/manifests/init.pp
@@ -0,0 +1,7 @@
+class cron {
+ package { 'cronie': }
+
+ service { 'crond':
+ subscribe => Package['cronie'],
+ }
+}
diff --git a/modules/dashboard/manifests/init.pp b/modules/dashboard/manifests/init.pp
new file mode 100644
index 00000000..34ef41b3
--- /dev/null
+++ b/modules/dashboard/manifests/init.pp
@@ -0,0 +1,44 @@
+class dashboard {
+ $dashboard_login = 'dashboard'
+ $dashboard_home_dir = "/var/lib/${dashboard_login}"
+ $dashboard_dir = "${dashboard_home_dir}/dashboard"
+ $dashboard_bindir = "${dashboard_home_dir}/bin"
+ $dashboard_wwwdir = "/var/www/vhosts/dashboard.${::domain}"
+
+ user { $dashboard_login:
+ comment => 'dashboard system user',
+ home => $dashboard_home_dir,
+ }
+
+ git::snapshot { $dashboard_dir:
+ source => "git://git.${::domain}/web/generators/dashboard",
+ }
+
+ package { 'php-cli': }
+
+ file { $dashboard_wwwdir:
+ ensure => directory,
+ owner => $dashboard_login,
+ group => $dashboard_login,
+ }
+
+ file { $dashboard_bindir:
+ ensure => directory,
+ }
+
+ file { "${dashboard_bindir}/make_report":
+ mode => '0755',
+ content => template('dashboard/make_report'),
+ }
+
+ apache::vhost::base { "dashboard.${::domain}":
+ location => $dashboard_wwwdir,
+ }
+
+ cron { 'update dashboard':
+ command => "${dashboard_bindir}/make_report",
+ user => $dashboard_login,
+ hour => '*/2',
+ minute => '15',
+ }
+}
diff --git a/modules/dashboard/templates/make_report b/modules/dashboard/templates/make_report
new file mode 100644
index 00000000..5da59617
--- /dev/null
+++ b/modules/dashboard/templates/make_report
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+dashboard_dir='<%= @dashboard_dir %>'
+dashboard_wwwdir='<%= @dashboard_wwwdir %>'
+
+cd "$dashboard_dir"
+/usr/bin/php ./make_report.php > "$dashboard_wwwdir/index.html"
+
diff --git a/modules/django_application/files/custom_backend.py b/modules/django_application/files/custom_backend.py
new file mode 100644
index 00000000..5ab35385
--- /dev/null
+++ b/modules/django_application/files/custom_backend.py
@@ -0,0 +1,7 @@
+
+from django_auth_ldap.backend import LDAPBackend,_LDAPUser
+
+class ForceUidLDAPBackend(LDAPBackend):
+ def ldap_to_django_username(self, username):
+ # force the uid if someone gives an email address
+ return _LDAPUser(self, username=username).attrs['uid'][0]
diff --git a/modules/django_application/files/django_add_permission_to_group.py b/modules/django_application/files/django_add_permission_to_group.py
new file mode 100644
index 00000000..69ac7be5
--- /dev/null
+++ b/modules/django_application/files/django_add_permission_to_group.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+import sys
+group_name = sys.argv[1]
+permission = sys.argv[2]
+
+# as the codename is not unique, we also need the application name
+app = ''
+if len(sys.argv) > 3:
+ app = sys.argv[3]
+
+from django.contrib.auth.models import Group, Permission
+group = Group.objects.get(name=group_name)
+
+permissions = Permission.objects.filter(codename=permission)
+if app:
+ permissions = permissions.filter(content_type__app_label__exact=app)
+
+if len(permissions) > 1:
+ print "Error: result is not unique, please specify the application (one of):"
+ print ' '.join([p.content_type.app_label for p in permissions])
+ sys.exit(1)
+elif len(permissions) < 1:
+ print "Error, wrong codename"
+ sys.exit(1)
+
+group.permissions.add(permissions[0])
+group.save()
diff --git a/modules/django_application/files/django_create_group.py b/modules/django_application/files/django_create_group.py
new file mode 100644
index 00000000..b5052217
--- /dev/null
+++ b/modules/django_application/files/django_create_group.py
@@ -0,0 +1,10 @@
+#!/usr/bin/python
+import sys
+group_name = sys.argv[1]
+
+from django.contrib.auth.models import Group
+try:
+ group = Group.objects.get(name=group_name)
+except Group.DoesNotExist:
+ group = Group.objects.create(name=group_name)
+ group.save()
diff --git a/modules/django_application/manifests/add_permission_to_group.pp b/modules/django_application/manifests/add_permission_to_group.pp
new file mode 100644
index 00000000..6e0663ed
--- /dev/null
+++ b/modules/django_application/manifests/add_permission_to_group.pp
@@ -0,0 +1,11 @@
+define django_application::add_permission_to_group( $path,
+ $module,
+ $group,
+ $app='') {
+ exec { "/usr/local/bin/django_add_permission_to_group.py ${group} ${name} ${app}":
+ user => 'root',
+ environment => ["DJANGO_SETTINGS_MODULE=${module}.settings",
+ "PYTHONPATH=${path}" ],
+ require => Django_application::Script['django_add_permission_to_group.py']
+ }
+}
diff --git a/modules/django_application/manifests/create_group.pp b/modules/django_application/manifests/create_group.pp
new file mode 100644
index 00000000..1931205f
--- /dev/null
+++ b/modules/django_application/manifests/create_group.pp
@@ -0,0 +1,10 @@
+define django_application::create_group($path, $module) {
+ exec { "/usr/local/bin/django_create_group.py ${name}":
+ user => 'root',
+ environment => ["DJANGO_SETTINGS_MODULE=${module}.settings",
+ "PYTHONPATH=${path}" ],
+ require => Django_application::Script['django_create_group.py']
+ }
+}
+
+
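
For illustration, a minimal sketch of how the two defines above might be wired together from an application manifest; the application name, install path and group are hypothetical and not part of this change:

    # hypothetical usage sketch, names are placeholders
    django_application::create_group { 'packagers':
        path   => '/usr/share/example_app',   # assumed install path
        module => 'example_app',              # assumed Django settings module
    }

    django_application::add_permission_to_group { 'add_package':
        path    => '/usr/share/example_app',
        module  => 'example_app',
        group   => 'packagers',
        app     => 'example_app',
        require => Django_application::Create_group['packagers'],
    }
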
diff --git a/modules/django_application/manifests/init.pp b/modules/django_application/manifests/init.pp
new file mode 100644
index 00000000..f56f73ef
--- /dev/null
+++ b/modules/django_application/manifests/init.pp
@@ -0,0 +1,18 @@
+# this class holds the common stuff for all django applications;
+# as we cannot declare the same resource twice (e.g. python-psycopg2),
+# it is required to place these shared resources in a common class
+class django_application {
+ package {['python-django',
+ 'python-psycopg2',
+ 'python-django-auth-ldap']: }
+
+ file { '/usr/local/lib/custom_backend.py':
+ source => 'puppet:///modules/django_application/custom_backend.py',
+ notify => Service['apache']
+ }
+
+ django_application::script { ['django_create_group.py',
+ 'django_add_permission_to_group.py']: }
+
+}
diff --git a/modules/django_application/manifests/script.pp b/modules/django_application/manifests/script.pp
new file mode 100644
index 00000000..f414d864
--- /dev/null
+++ b/modules/django_application/manifests/script.pp
@@ -0,0 +1,9 @@
+define django_application::script() {
+ file { $name:
+ path => "/usr/local/bin/${name}",
+ mode => '0755',
+ source => "puppet:///modules/django_application/${name}",
+ }
+}
+
+
diff --git a/modules/draklive/files/clean-live.sh b/modules/draklive/files/clean-live.sh
new file mode 100755
index 00000000..cceb6a4a
--- /dev/null
+++ b/modules/draklive/files/clean-live.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# clean old draklive build sets
+DRAKLIVE_ROOT=/var/lib/draklive
+RM="rm -rf"
+
+# keep only chroot/build sets from previous day
+MAX_BUILD_AGE=1
+find $DRAKLIVE_ROOT/{chroot/*,build/*/*} -maxdepth 0 -not -name dist -mtime +$(expr $MAX_BUILD_AGE - 1) -exec $RM {} \;
+
+# keep dist (iso + lists) for all sets for 20 days
+MAX_DIST_AGE=20
+find $DRAKLIVE_ROOT/build/*/dist -maxdepth 0 -mtime +$(expr $MAX_DIST_AGE - 1) -exec $RM {} \;
+
+find $DRAKLIVE_ROOT/build -maxdepth 1 -links 2 -exec rmdir {} \;
diff --git a/modules/draklive/manifests/init.pp b/modules/draklive/manifests/init.pp
new file mode 100644
index 00000000..ade2527f
--- /dev/null
+++ b/modules/draklive/manifests/init.pp
@@ -0,0 +1,58 @@
+class draklive {
+ $login = 'draklive'
+ $home = '/home/draklive'
+ $config = "${home}/live-config"
+ $var_data = "${home}/var-data"
+ # TODO merge with bcd
+ $isomakers_group = 'mga-iso_makers'
+
+ include sudo
+
+ group { $login: }
+
+ user { $login:
+ home => $home,
+ comment => 'User for creating live ISOs',
+ }
+
+ package { 'drakiso': }
+
+ sudo::sudoers_config { 'draklive':
+ content => template('draklive/sudoers.draklive')
+ }
+
+ file { $var_data:
+ ensure => directory,
+ owner => $login,
+ group => $login,
+ mode => '0755',
+ }
+
+ file { '/var/lib/draklive':
+ ensure => symlink,
+ target => $var_data,
+ }
+
+ git::snapshot { $config:
+ source => "git://git.${::domain}/software/build-system/draklive-config",
+ user => $login,
+ }
+
+ cron { 'build live images':
+ command => "${config}/tools/build_live.sh",
+ user => $login,
+ hour => '4',
+ minute => '30',
+ }
+
+ file { '/usr/local/bin/clean-live.sh':
+ mode => '0755',
+ source => 'puppet:///modules/draklive/clean-live.sh',
+ }
+
+ cron { 'clean live build data':
+ command => '/usr/local/bin/clean-live.sh',
+ hour => '4',
+ minute => '20',
+ }
+}
diff --git a/modules/draklive/templates/sudoers.draklive b/modules/draklive/templates/sudoers.draklive
new file mode 100644
index 00000000..536e4e9f
--- /dev/null
+++ b/modules/draklive/templates/sudoers.draklive
@@ -0,0 +1,3 @@
+<%= @login %> ALL=(root) NOPASSWD: /usr/sbin/draklive
+<%= @login %> ALL=(root) NOPASSWD: /usr/bin/draklive2
+%<%= @isomakers_group %> ALL=(<%= @login %>) SETENV: NOPASSWD: ALL
diff --git a/modules/epoll/manifests/create_db.pp b/modules/epoll/manifests/create_db.pp
new file mode 100644
index 00000000..8ef9c0aa
--- /dev/null
+++ b/modules/epoll/manifests/create_db.pp
@@ -0,0 +1,7 @@
+class epoll::create_db () {
+ postgresql::remote_db_and_user { $epoll::var::db_name:
+ description => 'Epoll database',
+ password => $epoll::var::db_password,
+ }
+}
+# vim: sw=2
diff --git a/modules/epoll/manifests/init.pp b/modules/epoll/manifests/init.pp
index e981a952..fb86f23a 100644
--- a/modules/epoll/manifests/init.pp
+++ b/modules/epoll/manifests/init.pp
@@ -1,23 +1,20 @@
class epoll {
+ include epoll::var
- $vhost = "epoll.$domain"
+ package { 'Epoll': }
- package { 'Epoll':
- ensure => installed
+ apache::vhost::catalyst_app { $epoll::var::vhost:
+ script => '/usr/bin/epoll_fastcgi.pl',
+ use_ssl => true,
+ require => Package['Epoll']
}
-
- apache::vhost_catalyst_app { $vhost:
- script => "/usr/bin/epoll_fastcgi.pl"
- }
-
- $password = extlookup("epoll_password")
-
- file { "epoll.yml":
- path => "/etc/epoll.yml",
- ensure => "present",
- owner => root,
- group => apache,
- mode => 640,
- content => template("epoll/epoll.yml")
+
+ apache::vhost::redirect_ssl { $epoll::var::vhost: }
+
+ file { 'epoll.yml':
+ path => '/etc/epoll.yml',
+ group => 'apache',
+ mode => '0640',
+ content => template('epoll/epoll.yml')
}
}
diff --git a/modules/epoll/manifests/var.pp b/modules/epoll/manifests/var.pp
new file mode 100644
index 00000000..1ddc342a
--- /dev/null
+++ b/modules/epoll/manifests/var.pp
@@ -0,0 +1,35 @@
+# == Class: epoll::var
+#
+# epoll configuration
+#
+# === Parameters
+#
+# [*vhost*]
+# epoll vhost
+#
+# [*db_hostname*]
+# hostname of the database server
+#
+# [*db_name*]
+# name of the database
+#
+# [*db_user*]
+# user to connect to the database
+#
+# [*db_password*]
+# password to connect to the database
+#
+# [*password*]
+# password to create new polls
+#
+
+class epoll::var (
+ $vhost = "epoll.${::domain}",
+ $db_hostname = 'localhost',
+ $db_name = 'epoll',
+ $db_user = 'epoll',
+ $db_password,
+ $password
+) {
+}
+# vim: sw=2
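
For reference, a hedged sketch of how epoll::var could be declared on the node running epoll, pulling the secrets from extlookup as catdap does above; the database host and extlookup key names are illustrative assumptions:

    # illustrative declaration, assuming the secrets live in the extlookup data
    class { 'epoll::var':
        db_hostname => "pgsql.${::domain}",            # assumed database host
        db_password => extlookup('epoll_db','x'),      # assumed extlookup key
        password    => extlookup('epoll_password','x'),
    }
    include epoll
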
diff --git a/modules/epoll/templates/epoll.yml b/modules/epoll/templates/epoll.yml
index 74e44efd..d442a41e 100644
--- a/modules/epoll/templates/epoll.yml
+++ b/modules/epoll/templates/epoll.yml
@@ -2,10 +2,10 @@
name: Vote
# db: connection, see libpq documentation
# dbname=BASENAME;host=SERVER;user=USER;password=PASS
-db: dbname=epoll;host=localhost;user=epoll;password=<%= password %>
+db: dbname=<%= scope.lookupvar('epoll::var::db_name') %>;host=<%= scope.lookupvar('epoll::var::db_hostname') %>;user=<%= scope.lookupvar('epoll::var::db_user') %>;password=<%= scope.lookupvar('epoll::var::db_password') %>
# The smtp serveur to use, default is localhost
# smtp:
# This change the poll creation behavior, instead ask want confirmation by
# mail
# it ask for this password (in clear)
-# newpollpasswd:
+newpollpasswd: <%= scope.lookupvar('epoll::var::password') %>
diff --git a/modules/facter/lib/facter/dc_suffix.rb b/modules/facter/lib/facter/dc_suffix.rb
index a8526978..c480e3ac 100644
--- a/modules/facter/lib/facter/dc_suffix.rb
+++ b/modules/facter/lib/facter/dc_suffix.rb
@@ -2,9 +2,9 @@ Facter.add("dc_suffix") do
setcode do
begin
Facter.domain
- rescue
+ rescue
Facter.loadfacts()
end
dc_suffix = 'dc=' + Facter.value('domain').gsub('.',',dc=')
end
-end
+end
diff --git a/modules/facter/lib/facter/lib_dir.rb b/modules/facter/lib/facter/lib_dir.rb
index fe7d6a31..315d7594 100644
--- a/modules/facter/lib/facter/lib_dir.rb
+++ b/modules/facter/lib/facter/lib_dir.rb
@@ -2,9 +2,9 @@ Facter.add("lib_dir") do
setcode do
begin
Facter.architecture
- rescue
+ rescue
Facter.loadfacts()
end
- '/usr/lib' + ( Facter.value('architecture') == "x86_64" ? '64' : '') + '/'
+ '/usr/lib' + ( Facter.value('architecture') == "x86_64" ? '64' : '')
end
-end
+end
diff --git a/modules/facter/lib/facter/wildcard_sslcert.rb b/modules/facter/lib/facter/wildcard_sslcert.rb
new file mode 100644
index 00000000..093982d9
--- /dev/null
+++ b/modules/facter/lib/facter/wildcard_sslcert.rb
@@ -0,0 +1,16 @@
+Facter.add("wildcard_sslcert") do
+ setcode do
+ begin
+ Facter.domain
+ rescue
+ Facter.loadfacts()
+ end
+ sslfiles = '/etc/ssl/wildcard.' + Facter.value('domain')
+ if File.exist?(sslfiles + '.crt') and File.exist?(sslfiles + '.key') \
+ and File.exist?(sslfiles + '.pem')
+ 'true'
+ else
+ 'false'
+ end
+ end
+end
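
Facter 1.x facts are plain strings, so the fact above yields the string 'true' rather than a boolean; a hedged sketch of a consumer (the vhost name is illustrative):

    # illustrative consumer of the wildcard_sslcert fact; note the string comparison
    if $::wildcard_sslcert == 'true' {
        apache::vhost::base { "ssl_example.${::domain}":
            vhost   => "example.${::domain}",
            use_ssl => true,
        }
    }
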
diff --git a/modules/facter/spec/spec_helper.rb b/modules/facter/spec/spec_helper.rb
new file mode 100644
index 00000000..ec3fe615
--- /dev/null
+++ b/modules/facter/spec/spec_helper.rb
@@ -0,0 +1,34 @@
+# taken from facter source code
+# ASL 2.0
+dir = File.expand_path(File.dirname(__FILE__))
+
+SPECDIR = dir
+$LOAD_PATH.unshift("#{dir}/../lib")
+
+require 'mocha'
+require 'rspec'
+require 'facter'
+require 'fileutils'
+
+RSpec.configure do |config|
+ config.mock_with :mocha
+
+ config.before :each do
+ # Ensure that we don't accidentally cache facts and environment
+ # between test cases.
+ Facter::Util::Loader.any_instance.stubs(:load_all)
+ Facter.clear
+ Facter.clear_messages
+
+ # Store any environment variables away to be restored later
+ @old_env = {}
+ ENV.each_key {|k| @old_env[k] = ENV[k]}
+ end
+
+ config.after :each do
+ # Restore environment variables after execution of each test
+ @old_env.each_pair {|k, v| ENV[k] = v}
+ to_remove = ENV.keys.reject {|key| @old_env.include? key }
+ to_remove.each {|key| ENV.delete key }
+ end
+end
diff --git a/modules/facter/spec/unit/dc_suffix.rb b/modules/facter/spec/unit/dc_suffix.rb
new file mode 100644
index 00000000..4b7a4648
--- /dev/null
+++ b/modules/facter/spec/unit/dc_suffix.rb
@@ -0,0 +1,15 @@
+#!/usr/bin/env rspec
+
+require 'spec_helper'
+
+describe "Dc_suffix fact" do
+ it "should be based on tld domain" do
+ Facter.fact(:domain).stubs(:value).returns("test")
+ Facter.fact(:dc_suffix).value.should == "dc=test"
+ end
+
+ it "should be based on domain" do
+ Facter.fact(:domain).stubs(:value).returns("test.example.org")
+ Facter.fact(:dc_suffix).value.should == "dc=test,dc=example,dc=org"
+ end
+end
diff --git a/modules/facter/spec/unit/lib_dir.rb b/modules/facter/spec/unit/lib_dir.rb
new file mode 100644
index 00000000..50049f19
--- /dev/null
+++ b/modules/facter/spec/unit/lib_dir.rb
@@ -0,0 +1,23 @@
+#!/usr/bin/env rspec
+
+require 'spec_helper'
+
+describe "Lib_dir fact" do
+ it "should default to /usr/lib" do
+ Facter.fact(:architecture).stubs(:value).returns("bogus")
+ Facter.fact(:lib_dir).value.should == "/usr/lib"
+ end
+
+ archs = Hash.new
+ # TODO add arm 64 and others
+ archs = {
+ "i586" => "/usr/lib",
+ "x86_64" => "/usr/lib64",
+ }
+ archs.each do |arch, dir|
+ it "should be #{dir} on #{arch}" do
+ Facter.fact(:architecture).stubs(:value).returns(arch)
+ Facter.fact(:lib_dir).value.should == dir
+ end
+ end
+end
diff --git a/modules/git/files/apply_git_puppet_config.sh b/modules/git/files/apply_git_puppet_config.sh
new file mode 100644
index 00000000..1ed6fbf1
--- /dev/null
+++ b/modules/git/files/apply_git_puppet_config.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+while read line
+do
+ # --local is an option in newer git
+ git config --add $line
+done < config.puppet
diff --git a/modules/git/files/create_git_repo.sh b/modules/git/files/create_git_repo.sh
new file mode 100644
index 00000000..144d063b
--- /dev/null
+++ b/modules/git/files/create_git_repo.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+umask 0002
+# https://eagleas.livejournal.com/18907.html
+name="$1"
+mkdir -p $name
+cd $name
+git --bare init --shared=group
+chmod g+ws branches info objects refs
+( cd objects; chmod g+ws * )
+git config receive.denyNonFastForwards true
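
A hedged sketch of how this helper could be driven from a manifest; the repository path and owning user are hypothetical, only the script file resource exists in git::server below:

    # hypothetical exec wiring for create_git_repo.sh
    exec { 'create example repo':
        command => '/usr/local/bin/create_git_repo.sh /git/software/example',
        creates => '/git/software/example',
        user    => 'git',                 # assumed repository owner
        require => File['/usr/local/bin/create_git_repo.sh'],
    }
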
diff --git a/modules/git/files/update_git_svn.sh b/modules/git/files/update_git_svn.sh
new file mode 100644
index 00000000..b3802f81
--- /dev/null
+++ b/modules/git/files/update_git_svn.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+GIT_REP="$1"
+LOCKFILE="$GIT_REP/.git/update.cron.lock"
+
+cd "$GIT_REP"
+[ -f $LOCKFILE ] && exit 0
+trap "rm -f '$LOCKFILE'" EXIT
+
+touch "$LOCKFILE"
+
+/usr/bin/git svn fetch
+/usr/bin/git svn rebase
+exit 0
diff --git a/modules/git/manifests/client.pp b/modules/git/manifests/client.pp
new file mode 100644
index 00000000..2ba50721
--- /dev/null
+++ b/modules/git/manifests/client.pp
@@ -0,0 +1,3 @@
+class git::client {
+ include git::common
+}
diff --git a/modules/git/manifests/common.pp b/modules/git/manifests/common.pp
new file mode 100644
index 00000000..ed8ebbdf
--- /dev/null
+++ b/modules/git/manifests/common.pp
@@ -0,0 +1,3 @@
+class git::common {
+ package { 'git-core': }
+}
diff --git a/modules/git/manifests/init.pp b/modules/git/manifests/init.pp
new file mode 100644
index 00000000..dece14f0
--- /dev/null
+++ b/modules/git/manifests/init.pp
@@ -0,0 +1 @@
+class git { }
diff --git a/modules/git/manifests/mirror.pp b/modules/git/manifests/mirror.pp
new file mode 100644
index 00000000..f7364846
--- /dev/null
+++ b/modules/git/manifests/mirror.pp
@@ -0,0 +1,20 @@
+define git::mirror( $source,
+ $description,
+ $refresh = '*/5') {
+
+ include git::common
+ exec { "/usr/bin/git clone --mirror ${source} ${name}":
+ alias => "git mirror ${name}",
+ creates => $name,
+ before => File["${name}/description"],
+ }
+
+ file { "${name}/description":
+ content => $description,
+ }
+
+ cron { "update ${name}":
+ command => "cd ${name} ; /usr/bin/git fetch -q",
+ minute => $refresh
+ }
+}
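
For illustration, a possible git::mirror declaration; the source URL and target path are made up:

    # illustrative read-only mirror refreshed every 15 minutes
    git::mirror { '/git/mirrors/example.git':
        source      => 'git://example.org/example.git',
        description => 'read-only mirror of example.git',
        refresh     => '*/15',
    }
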
diff --git a/modules/git/manifests/server.pp b/modules/git/manifests/server.pp
new file mode 100644
index 00000000..3f07ed9c
--- /dev/null
+++ b/modules/git/manifests/server.pp
@@ -0,0 +1,37 @@
+class git::server {
+ include git::common
+
+ $git_base_path = '/git/'
+
+ xinetd::service { 'git':
+ content => template('git/xinetd')
+ }
+
+ file { '/usr/local/bin/create_git_repo.sh':
+ mode => '0755',
+ source => 'puppet:///modules/git/create_git_repo.sh',
+ }
+
+ file { '/usr/local/bin/apply_git_puppet_config.sh':
+ mode => '0755',
+ source => 'puppet:///modules/git/apply_git_puppet_config.sh',
+ }
+
+
+ # TODO
+ # define common syntax check, see svn
+ # https://stackoverflow.com/questions/3719883/git-hook-syntax-check
+ # proper policy: fast-forward-only
+ # (https://progit.org/book/ch7-4.html)
+ # no branches?
+ # no binary
+ # no big file
+ # no empty commit message
+ # no commit from root
+ # see https://www.itk.org/Wiki/Git/Hooks
+ # automated push to another git repo ( see https://noone.org/blog/English/Computer/VCS/Thoughts%20on%20Gitorious%20and%20GitHub%20plus%20a%20useful%20git%20hook.futile
+ #
+ # how do we handle commit permissions?
+ # mail sending
+ #
+}
diff --git a/modules/git/manifests/snapshot.pp b/modules/git/manifests/snapshot.pp
new file mode 100644
index 00000000..06473efe
--- /dev/null
+++ b/modules/git/manifests/snapshot.pp
@@ -0,0 +1,24 @@
+define git::snapshot( $source,
+ $refresh = '*/5',
+ $user = 'root',
+ $branch = 'master') {
+ include git::client
+ #TODO
+ # should handle branch -> clone -n + branch + checkout
+ # create a script
+ # Ideally, should be handled by vcsrepo
+ # https://github.com/bruce/puppet-vcsrepo
+ # once it is merged in puppet
+ exec { "/usr/bin/git clone -b ${branch} ${source} ${name}":
+ creates => $name,
+ user => $user
+ }
+
+ if ($refresh != '0') {
+ cron { "update ${name}":
+ command => "cd ${name} && /usr/bin/git pull -q && /usr/bin/git submodule --quiet update --init --recursive",
+ user => $user,
+ minute => $refresh
+ }
+ }
+}
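
A hedged usage sketch showing the non-default parameters; passing refresh => '0' hits the conditional above and skips the update cron job, so the checkout is done only once (path and branch are illustrative):

    # illustrative one-shot checkout on a non-master branch
    git::snapshot { '/var/www/example':
        source  => "git://git.${::domain}/web/example",
        branch  => 'topic/production',
        user    => 'apache',
        refresh => '0',
    }
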
diff --git a/modules/git/manifests/svn.pp b/modules/git/manifests/svn.pp
new file mode 100644
index 00000000..43df012b
--- /dev/null
+++ b/modules/git/manifests/svn.pp
@@ -0,0 +1,4 @@
+class git::svn {
+ include git::client
+ package { 'git-svn': }
+}
diff --git a/modules/git/manifests/svn_repository.pp b/modules/git/manifests/svn_repository.pp
new file mode 100644
index 00000000..ea215ce6
--- /dev/null
+++ b/modules/git/manifests/svn_repository.pp
@@ -0,0 +1,35 @@
+define git::svn_repository( $source,
+ $std_layout = true,
+ $refresh = '*/5') {
+ include git::svn
+ include git::server
+ # sets up an exec for the initial git-svn init and a cron job to keep the mirror updated
+ if $std_layout {
+ $options = '-s'
+ } else {
+ $options = ''
+ }
+
+ exec { "/usr/bin/git svn init ${options} ${source} ${name}":
+ alias => "git svn ${name}",
+ creates => $name,
+ }
+
+ file { '/usr/local/bin/update_git_svn.sh':
+ mode => '0755',
+ source => 'puppet:///modules/git/update_git_svn.sh',
+ }
+
+ cron { "update ${name}":
+ # done in two steps, so the fetch can fill the repo after init
+ command => "/usr/local/bin/update_git_svn.sh ${name}" ,
+ minute => $refresh
+ }
+
+ file { "${name}/.git/hooks/pre-receive":
+ mode => '0755',
+ content => template('git/pre-receive'),
+ require => Exec["git svn ${name}"]
+ }
+}
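
For illustration, a possible git::svn_repository declaration; the Subversion URL and target path are made up:

    # illustrative read-only git mirror of a Subversion repository
    git::svn_repository { '/git/mirrors/soft-example':
        source     => "svn://svn.${::domain}/svn/soft/example",
        std_layout => true,
        refresh    => '*/30',
    }
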
diff --git a/modules/git/templates/config.puppet b/modules/git/templates/config.puppet
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/modules/git/templates/config.puppet
diff --git a/modules/git/templates/post-receive b/modules/git/templates/post-receive
new file mode 100644
index 00000000..b4330e13
--- /dev/null
+++ b/modules/git/templates/post-receive
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# FIXME the contrib/hooks should be in /usr/share/git-core
+# but this may cause issues with automated requirements
+. /usr/share/doc/git-core/contrib/hooks/post-receive-email
+
diff --git a/modules/git/templates/pre-receive b/modules/git/templates/pre-receive
new file mode 100644
index 00000000..7eec7505
--- /dev/null
+++ b/modules/git/templates/pre-receive
@@ -0,0 +1,5 @@
+#!/bin/bash
+echo
+echo "This repository is readonly"
+echo
+false
diff --git a/modules/git/templates/xinetd b/modules/git/templates/xinetd
new file mode 100644
index 00000000..654ae2be
--- /dev/null
+++ b/modules/git/templates/xinetd
@@ -0,0 +1,14 @@
+service git
+{
+ disable = no
+ type = UNLISTED
+ port = 9418
+ socket_type = stream
+ server = <%= @lib_dir %>/git-core/git-daemon
+ wait = no
+ user = nobody
+ server_args = --inetd --verbose --export-all --base-path=<%= @git_base_path %>
+ log_on_failure += HOST
+ flags = IPv6
+}
+
diff --git a/modules/gitmirror/files/on-the-pull b/modules/gitmirror/files/on-the-pull
new file mode 100755
index 00000000..416b75a4
--- /dev/null
+++ b/modules/gitmirror/files/on-the-pull
@@ -0,0 +1,365 @@
+#!/usr/bin/python3
+
+import cgi
+import http.server
+import os
+import pwd
+import re
+import subprocess
+import sys
+from optparse import OptionParser
+from queue import Queue
+from threading import Thread
+
+
+GitUpdaterQueue = Queue(0)
+
+
+# NB The following class and bits for running git commands were "liberated"
+# from git_multimail.py
+
+class CommandError(Exception):
+ def __init__(self, cmd, retcode):
+ self.cmd = cmd
+ self.retcode = retcode
+ Exception.__init__(
+ self,
+ 'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,)
+ )
+
+
+# It is assumed in many places that the encoding is uniformly UTF-8,
+# so changing these constants is unsupported. But define them here
+# anyway, to make it easier to find (at least most of) the places
+# where the encoding is important.
+ENCODING = 'UTF-8'
+
+
+# The "git" program (this could be changed to include a full path):
+GIT_EXECUTABLE = 'git'
+
+
+# How "git" should be invoked (including global arguments), as a list
+# of words. This variable is usually initialized automatically by
+# read_git_output() via choose_git_command(), but if a value is set
+# here then it will be used unconditionally.
+GIT_CMD = None
+
+
+def choose_git_command():
+ """Decide how to invoke git, and record the choice in GIT_CMD."""
+
+ global GIT_CMD
+
+ if GIT_CMD is None:
+ try:
+ # Check to see whether the "-c" option is accepted (it was
+ # only added in Git 1.7.2). We don't actually use the
+ # output of "git --version", though if we needed more
+ # specific version information this would be the place to
+ # do it.
+ cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version']
+ read_output(cmd)
+ GIT_CMD = [GIT_EXECUTABLE, '-c', f'i18n.logoutputencoding={ENCODING}']
+ except CommandError:
+ GIT_CMD = [GIT_EXECUTABLE]
+
+
+def read_git_output(args, inp=None, keepends=False, **kw):
+ """Read the output of a Git command."""
+
+ if GIT_CMD is None:
+ choose_git_command()
+
+ return read_output(GIT_CMD + args, inp=inp, keepends=keepends, **kw)
+
+
+# NOTE: output is in bytes, not a string
+def read_output(cmd, inp=None, keepends=False, **kw):
+ if inp:
+ stdin = subprocess.PIPE
+ else:
+ stdin = None
+ p = subprocess.Popen(
+ cmd, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw
+ )
+ (out, err) = p.communicate(inp)
+ retcode = p.wait()
+ if retcode:
+ raise CommandError(cmd, retcode)
+ if not keepends:
+ out = out.rstrip(b'\n\r')
+ return out
+
+
+def run_git_command(args, **kw):
+ """Runs a git command, ignoring the output.
+ """
+
+ read_git_output(args, **kw)
+
+
+def run_command(args, **kw):
+ """Run an arbitrary command, ignoring the output."""
+
+ read_output(args, **kw)
+
+
+class GitUpdater(Thread):
+ def __init__(self, server, basedir, repoprefix, branch='master', cmd=''):
+ Thread.__init__(self)
+ self.server = server
+ self.basedir = basedir
+ self.repoprefix = repoprefix
+ self.branch = branch
+ self.cmd = cmd
+
+ def run(self):
+ while 42:
+ repo = GitUpdaterQueue.get()
+ if repo is None:
+ break
+ try:
+ print(f"Got update request for '{repo}'", file=sys.stderr)
+ clonefolder = os.path.join(self.basedir, repo)
+ if self.repoprefix:
+ if not repo.startswith(self.repoprefix):
+ print(f"Ignoring repo '{repo}' due to invalid prefix", file=sys.stderr)
+ GitUpdaterQueue.task_done()
+ continue
+ clonefolder = os.path.join(self.basedir, repo[len(self.repoprefix):])
+ command = []
+ treeish = ''
+ changed = True
+ if not os.path.exists(clonefolder):
+ cloneparent = os.path.dirname(clonefolder)
+ if not os.path.exists(cloneparent):
+ os.makedirs(cloneparent)
+ cloneurl = self.server + '/' + repo
+ command = ['clone']
+ if '--mirror' == self.branch:
+ command.append('--mirror')
+ command.append(cloneurl)
+ command.append(clonefolder)
+ print(f"Cloning repo '{repo}' ('{cloneurl}' -> '{clonefolder}')", file=sys.stderr)
+
+ run_git_command(command)
+ if not os.path.isdir(clonefolder):
+ raise Exception(f"Clone folder '{clonefolder}' is not a directory. Cloning failed or a file is in its place?")
+ os.chdir(clonefolder)
+ if '--mirror' != self.branch and 'master' != self.branch:
+ command = ['checkout', '-t', 'origin/' + self.branch]
+ run_git_command(command)
+ elif os.path.isdir(clonefolder):
+ os.chdir(clonefolder)
+ print(f"Updating existing repo '{repo}' ({clonefolder})", file=sys.stderr)
+ command = ['remote', 'update']
+ run_git_command(command)
+ if '--mirror' != self.branch:
+ sha1before = read_git_output(['rev-parse', 'refs/heads/' + self.branch])
+ sha1after = read_git_output(['rev-parse', 'refs/remotes/origin/' + self.branch])
+ if sha1before and sha1after:
+ if sha1before == sha1after:
+ changed = False
+ print(f"Repo '{repo}' update on branch '{self.branch}': No changes detected", file=sys.stderr)
+ else:
+ treeish = sha1before.decode(ENCODING) + '..' + sha1after.decode(ENCODING)
+ print(f"Repo '{repo}' update on branch '{self.branch}': Treeish '{treeish}'", file=sys.stderr)
+ else:
+ print(f"Repo '{repo}' update on branch '{self.branch}': Before or after sha1 could not be extracted.", file=sys.stderr)
+ command = ['update-ref', 'refs/heads/' + self.branch, 'refs/remotes/origin/' + self.branch]
+ run_git_command(command)
+ command = ['checkout', '-f', self.branch]
+ run_git_command(command)
+ else:
+ raise Exception(f"Clone folder '{clonefolder}' appears to be a file :s")
+
+ if changed and self.cmd:
+ # Update the info/web/last-modified file as used by cgit
+ os.chdir(clonefolder)
+ command = [self.cmd, repo]
+ if treeish:
+ command += [treeish]
+ run_command(command)
+
+ print(f"Update for '{repo}' complete.", file=sys.stderr)
+ except Exception as e:
+ print(f"Error processing repo '{repo}'", file=sys.stderr)
+ print(str(e), file=sys.stderr)
+
+ GitUpdaterQueue.task_done()
+ sys.stderr.flush()
+
+
+class TimeoutServer(http.server.HTTPServer):
+ def get_request(self):
+ result = self.socket.accept()
+ result[0].settimeout(10)
+ return result
+
+
+class PostHandler(http.server.BaseHTTPRequestHandler):
+ def do_POST(self):
+ ctype, pdict = cgi.parse_header(self.headers['content-type'])
+ repo = ""
+ try:
+ if ctype != 'x-git/repo':
+ self.send_response(415)
+ self.end_headers()
+ return
+
+ # chunked mode is a legitimate reason for there to be no content-length,
+ # but it's easier to just insist on it
+ length = int(self.headers['content-length']) if self.headers['content-length'] else 0
+ if length < 1:
+ self.send_response(411)
+ self.end_headers()
+ return
+ if length > 1024:
+ self.send_response(413)
+ self.end_headers()
+ return
+ repo = self.rfile.read(length).decode(ENCODING)
+
+ if re.match(r"^[-_/a-zA-Z0-9\+\.]+$", repo) is None:
+ self.send_response(400)
+ self.end_headers()
+ return
+
+ GitUpdaterQueue.put(repo)
+ self.send_response(202)
+ self.end_headers()
+
+ except Exception as e:
+ print("Error processing request", file=sys.stderr)
+ print(str(e), file=sys.stderr)
+ self.send_response(500)
+ self.end_headers()
+
+ sys.stderr.flush()
+
+
+def Demote(pidfile, uid, gid):
+ def result():
+ piddir = os.path.dirname(pidfile)
+ if not os.path.exists(piddir):
+ os.makedirs(piddir)
+ fd = open(pidfile, 'w')
+ fd.write(str(os.getpid()))
+ fd.close()
+
+ if uid and gid:
+ os.setgid(gid)
+ os.setuid(uid)
+ return result
+
+
+def daemonise(options, serverprefix, basefolder):
+ pw = None
+ uid = False
+ gid = False
+ if options.user:
+ pw = pwd.getpwnam(options.user)
+ uid = pw.pw_uid
+ gid = pw.pw_gid
+ else:
+ pw = pwd.getpwnam(os.getlogin())
+
+ user = pw.pw_name
+ dirname = pw.pw_dir
+ env = {
+ 'HOME': dirname,
+ 'LOGNAME': user,
+ 'PWD': dirname,
+ 'USER': user,
+ }
+ if os.getenv('PATH') is not None:
+ env['PATH'] = os.getenv('PATH')
+ if os.getenv('PYTHONPATH') is not None:
+ env['PYTHONPATH'] = os.getenv('PYTHONPATH')
+
+ args = [os.path.abspath(sys.argv[0])]
+ args.append('-a')
+ args.append(options.addr)
+ args.append('-p')
+ args.append(str(options.port))
+ args.append('-r')
+ args.append(options.repoprefix)
+ args.append('-b')
+ args.append(options.branch)
+ args.append('-c')
+ args.append(options.cmd)
+ args.append(serverprefix)
+ args.append(basefolder)
+
+ subprocess.Popen(
+ args, preexec_fn=Demote(options.pidfile, uid, gid), cwd=dirname, env=env
+ )
+ exit(0)
+
+
+def main():
+ usage = "usage: %prog [options] <serverprefix> <basefolder>"
+ description = """Listen for repository names being posted via a simple HTTP interface and clone/update them.
+POST data simply via curl:
+e.g. curl --header 'Content-Type: x-git/repo' --data 'my/repo/name' http://localhost:8000
+"""
+ parser = OptionParser(usage=usage, description=description)
+ parser.add_option("-a", "--addr",
+ type="string", dest="addr", default="0.0.0.0",
+ help="The interface address to bind to")
+ parser.add_option("-p", "--port",
+ type="int", dest="port", default=8000,
+ help="The port to bind to")
+ parser.add_option("-r", "--repo-prefix",
+ type="string", dest="repoprefix", default="",
+ help="Only handle repositories with the following prefix. This SHOULD contain a trailing slash if it's a folder but SHOULD NOT include a leading slash")
+ parser.add_option("-b", "--branch",
+ type="string", dest="branch", default="--mirror",
+ help="The branch to track on clone. If you pass '--mirror' (the default) as the branch name we will clone as a bare mirror")
+ parser.add_option("-c", "--cmd",
+ type="string", dest="cmd", default="",
+ help="Third party command to execute after updates. It will execute in the "
+ "folder of the repo and if we're not in mirror mode, a treeish will be "
+ "passed as the only argument containing the refs that changed otherwise "
+ "the command will be run without any arguments")
+ parser.add_option("-d", "--pid-file",
+ type="string", dest="pidfile", default="",
+ help="Daemonise and write pidfile")
+ parser.add_option("-u", "--user",
+ type="string", dest="user", default="",
+ help="Drop privileges to the given user (must be run as root)")
+
+ (options, args) = parser.parse_args()
+ if len(args) < 2:
+ parser.error("Both the <serverprefix> and <basefolder> arguments must be supplied.")
+ if len(args) > 2:
+ parser.print_usage()
+ exit(1)
+
+ serverprefix = args[0]
+ basefolder = args[1]
+
+ if options.pidfile:
+ daemonise(options, serverprefix, basefolder)
+
+ if options.user:
+ parser.error("You can only specify a user if you're also daemonising (with a pid file).")
+
+ print("Server started", file=sys.stderr)
+ sys.stderr.flush()
+ srvr = TimeoutServer((options.addr, options.port), PostHandler)
+ updater = GitUpdater(serverprefix, basefolder, options.repoprefix, options.branch, options.cmd)
+ updater.start()
+
+ try:
+ srvr.serve_forever()
+ except KeyboardInterrupt:
+ srvr.socket.close()
+ GitUpdaterQueue.put(None)
+ updater.join()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/modules/gitmirror/files/on-the-pull.init b/modules/gitmirror/files/on-the-pull.init
new file mode 100755
index 00000000..cc256a06
--- /dev/null
+++ b/modules/gitmirror/files/on-the-pull.init
@@ -0,0 +1,67 @@
+#! /bin/bash
+#
+# on-the-pull Keep git mirrors up-to-date via external triggers
+#
+# chkconfig: 2345 80 30
+# description: Keep git mirrors up-to-date via external triggers
+#
+### BEGIN INIT INFO
+# Provides: on-the-pull
+# Required-Start: $network
+# Required-Stop: $network
+# Default-Start: 2 3 4 5
+# Short-Description: Keep git mirrors up-to-date via external triggers
+# Description: Keep git mirrors up-to-date via external triggers
+### END INIT INFO
+
+# Source function library.
+. /etc/init.d/functions
+
+pidfile=/var/run/on-the-pull/on-the-pull.pid
+prog=/usr/local/bin/on-the-pull
+args="--pid-file=$pidfile --user=git --cmd=/usr/local/bin/gitmirror-sync-metadata git://git.mageia.org /git"
+
+
+start() {
+ gprintf "Starting On-The-Pull Git Mirror Daemon: "
+ daemon --check on-the-pull --pidfile $pidfile "$prog $args >>/var/log/on-the-pull.log 2>&1"
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/on-the-pull
+ return $RETVAL
+}
+
+stop() {
+ gprintf "Stopping On-The-Pull Git Mirror Daemon: "
+ killproc -p $pidfile on-the-pull
+ echo
+ rm -f /var/lock/subsys/on-the-pull
+}
+
+restart() {
+ stop
+ start
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status on-the-pull $pidfile
+ ;;
+ restart|reload)
+ restart
+ ;;
+ condrestart)
+ [ -f /var/lock/subsys/on-the-pull ] && restart || :
+ ;;
+ *)
+ gprintf "Usage: %s {start|stop|status|restart|condrestart}\n" "$(basename $0)"
+ exit 1
+esac
+
+exit 0
diff --git a/modules/gitmirror/files/rsync-metadata.sh b/modules/gitmirror/files/rsync-metadata.sh
new file mode 100755
index 00000000..03a0fe41
--- /dev/null
+++ b/modules/gitmirror/files/rsync-metadata.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+REPO="$1"
+GITROOT="/git"
+RSYNCROOT="rsync://duvel.mageia.org/git"
+
+if [ ! -d "$GITROOT/$REPO" ]; then
+ echo "No repository found $REPO" >&2
+ exit 1
+fi
+
+/usr/bin/rsync -a --include="description" --include="info" --include="info/web" --include="info/web/last-modified" --exclude="*" "$RSYNCROOT/$REPO/" "$GITROOT/$REPO/"
+/usr/bin/rsync -a "$RSYNCROOT/$REPO/config" "$GITROOT/$REPO/config.upstream"
+
+OWNER=$(git config --file "$GITROOT/$REPO/config.upstream" gitweb.owner)
+DESC=$(git config --file "$GITROOT/$REPO/config.upstream" gitweb.description)
+rm -f "$GITROOT/$REPO/config.upstream"
+
+CUROWNER=$(git config --file "$GITROOT/$REPO/config" gitweb.owner)
+if [ "$CUROWNER" != "$OWNER" ]; then
+ git config --file "$GITROOT/$REPO/config" gitweb.owner "$OWNER"
+fi
+
+CURDESC=$(git config --file "$GITROOT/$REPO/config" gitweb.description)
+if [ "$CURDESC" != "$DESC" ]; then
+ git config --file "$GITROOT/$REPO/config" gitweb.description "$DESC"
+fi
diff --git a/modules/gitmirror/manifests/init.pp b/modules/gitmirror/manifests/init.pp
new file mode 100644
index 00000000..c1dcd894
--- /dev/null
+++ b/modules/gitmirror/manifests/init.pp
@@ -0,0 +1,48 @@
+class gitmirror {
+
+ $git_dir = '/git'
+ $git_login = 'git'
+ $git_homedir = "/var/lib/${git_login}"
+ $git_rundir = '/var/run/on-the-pull'
+
+ group { $git_login:
+ ensure => present,
+ }
+
+ user { $git_login:
+ ensure => present,
+ home => $git_homedir,
+ }
+
+ file { $git_dir:
+ ensure => directory,
+ owner => $git_login,
+ group => $git_login,
+ mode => '0755',
+ }
+
+ file { $git_rundir:
+ ensure => directory,
+ mode => '0755',
+ }
+
+ mga_common::local_script { 'on-the-pull':
+ source => 'puppet:///modules/gitmirror/on-the-pull',
+ }
+
+ file { '/etc/init.d/on-the-pull':
+ source => 'puppet:///modules/gitmirror/on-the-pull.init',
+ mode => '0755',
+ }
+
+ service { 'on-the-pull':
+ require => [
+ Mga_common::Local_script["on-the-pull"],
+ File['/etc/init.d/on-the-pull'],
+ ],
+ }
+
+ mga_common::local_script { 'gitmirror-sync-metadata':
+ source => 'puppet:///modules/gitmirror/rsync-metadata.sh',
+ }
+}
diff --git a/modules/gitweb/manifests/init.pp b/modules/gitweb/manifests/init.pp
new file mode 100644
index 00000000..d7c07b22
--- /dev/null
+++ b/modules/gitweb/manifests/init.pp
@@ -0,0 +1,32 @@
+class gitweb {
+ package { 'gitweb': }
+ # TODO some rpms may be needed (like perl-FCGI)
+ # git >= 1.7.2 is needed for fastcgi support
+
+ # TODO fix git rpm to ship the css, the js, and other missing files
+
+ file { '/etc/gitweb.conf':
+ content => template('gitweb/gitweb.conf'),
+ notify => Service['apache'],
+ require => Package['gitweb'],
+ }
+
+ apache::webapp_other { 'gitweb':
+ webapp_file => 'gitweb/webapp.conf',
+ }
+
+ mga_common::local_script { 'gitweb.wrapper.sh':
+ content => template('gitweb/wrapper.sh'),
+ notify => Service['apache'],
+ }
+
+ $vhost = "gitweb.${::domain}"
+ apache::vhost::base { $vhost:
+ content => template('gitweb/vhost.conf')
+ }
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ content => template('gitweb/vhost.conf'),
+ }
+}
diff --git a/modules/gitweb/templates/gitweb.conf b/modules/gitweb/templates/gitweb.conf
new file mode 100644
index 00000000..688844a8
--- /dev/null
+++ b/modules/gitweb/templates/gitweb.conf
@@ -0,0 +1,123 @@
+# default config file (in perl syntax)
+
+# absolute fs-path which will be prepended to the project path
+our $projectroot = "/git";
+
+# target of the home link on top of all pages
+our $home_link = "/";
+
+# string of the home link on top of all pages
+#our $home_link_str = "projects";
+
+# name of your site or organization to appear in page titles
+# replace this with something more descriptive for clearer bookmarks
+our $site_name = "Mageia Git";
+
+# filename of html text to include at top of each page
+#our $site_header = "";
+# html text to include at home page
+#our $home_text = "indextext.html";
+# filename of html text to include at bottom of each page
+#our $site_footer = "";
+
+# URI of stylesheets
+#our @stylesheets = ("gitweb.css");
+# URI of a single stylesheet
+#our $stylesheet = undef;
+# URI of GIT logo (72x27 size)
+#our $logo = "git-logo.png";
+# URI of GIT favicon, assumed to be image/png type
+#our $favicon = "git-favicon.png";
+
+# URI and label (title) of GIT logo link
+#our $logo_url = "http://git.or.cz/";
+#our $logo_label = "git homepage";
+
+# source of projects list
+#our $projects_list = "";
+
+# default order of projects list
+# valid values are none, project, descr, owner, and age
+#our $default_projects_order = "project";
+
+# show repository only if this file exists
+# (only effective if this variable evaluates to true)
+#our $export_ok = "";
+
+# only allow viewing of repositories also shown on the overview page
+#our $strict_export = "";
+
+# list of git base URLs used for URL to where fetch project from,
+# i.e. full URL is "$git_base_url/$project"
+#our @git_base_url_list = grep { $_ ne '' } ("");
+
+# Enable the 'blame' blob view, showing the last commit that modified
+# each line in the file. This can be very CPU-intensive.
+
+# To enable system wide have in /etc/gitweb.conf
+# $feature{'blame'}{'default'} = [1];
+# To have project specific config enable override in /etc/gitweb.conf
+# $feature{'blame'}{'override'} = 1;
+# and in project config gitweb.blame = 0|1;
+
+# Enable the 'snapshot' link, providing a compressed tarball of any
+# tree. This can potentially generate high traffic if you have large
+# project.
+
+# To disable system wide have in /etc/gitweb.conf
+# $feature{'snapshot'}{'default'} = [undef];
+# To have project specific config enable override in /etc/gitweb.conf
+# $feature{'snapshot'}{'override'} = 1;
+# and in project config gitweb.snapshot = none|gzip|bzip2;
+
+# Enable text search, which will list the commits which match author,
+# committer or commit text to a given string. Enabled by default.
+# Project specific override is not supported.
+
+# Enable grep search, which will list the files in currently selected
+# tree containing the given string. Enabled by default. This can be
+# potentially CPU-intensive, of course.
+
+# To enable system wide have in /etc/gitweb.conf
+# $feature{'grep'}{'default'} = [1];
+# To have project specific config enable override in /etc/gitweb.conf
+# $feature{'grep'}{'override'} = 1;
+# and in project config gitweb.grep = 0|1;
+
+# Enable the pickaxe search, which will list the commits that modified
+# a given string in a file. This can be a practical and often faster
+# alternative to 'blame', but it is still potentially CPU-intensive.
+
+# To enable system wide have in /etc/gitweb.conf
+# $feature{'pickaxe'}{'default'} = [1];
+# To have project specific config enable override in /etc/gitweb.conf
+# $feature{'pickaxe'}{'override'} = 1;
+# and in project config gitweb.pickaxe = 0|1;
+
+# Make gitweb use an alternative format of the URLs which can be
+# more readable and natural-looking: project name is embedded
+# directly in the path and the query string contains other
+# auxiliary information. All gitweb installations recognize
+# URL in either format; this configures in which formats gitweb
+# generates links.
+
+# To enable system wide have in /etc/gitweb.conf
+# $feature{'pathinfo'}{'default'} = [1];
+# Project specific override is not supported.
+
+# Note that you will need to change the default location of CSS,
+# favicon, logo and possibly other files to an absolute URL. Also,
+# if gitweb.cgi serves as your indexfile, you will need to force
+# $my_uri to contain the script name in your /etc/gitweb.conf.
+
+# Make gitweb consider projects in project root subdirectories
+# to be forks of existing projects. Given project $projname.git,
+# projects matching $projname/*.git will not be shown in the main
+# projects list, instead a '+' mark will be added to $projname
+# there and a 'forks' view will be enabled for the project, listing
+# all the forks. If project list is taken from a file, forks have
+# to be listed after the main project.
+
+# To enable system wide have in /etc/gitweb.conf
+# $feature{'forks'}{'default'} = [1];
+# Project specific override is not supported.
diff --git a/modules/gitweb/templates/vhost.conf b/modules/gitweb/templates/vhost.conf
new file mode 100644
index 00000000..d558d591
--- /dev/null
+++ b/modules/gitweb/templates/vhost.conf
@@ -0,0 +1,3 @@
+Alias /static/ /usr/share/gitweb/static/
+Alias / /usr/local/bin/gitweb.wrapper.sh
+FastCgiServer /usr/local/bin/gitweb.wrapper.sh -processes 1 -idle-timeout 30 -socket /tmp/gitweb.socket
diff --git a/modules/gitweb/templates/webapp.conf b/modules/gitweb/templates/webapp.conf
new file mode 100644
index 00000000..a4d13624
--- /dev/null
+++ b/modules/gitweb/templates/webapp.conf
@@ -0,0 +1,8 @@
+# gitweb configuration
+# disabled
+#Alias /gitweb /usr/share/gitweb
+
+<Directory /usr/share/gitweb>
+ Order allow,deny
+ Allow from all
+</Directory>
diff --git a/modules/gitweb/templates/wrapper.sh b/modules/gitweb/templates/wrapper.sh
new file mode 100644
index 00000000..4303007b
--- /dev/null
+++ b/modules/gitweb/templates/wrapper.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+export FCGI_SOCKET_PATH=/tmp/gitweb.socket
+
+/usr/share/gitweb/gitweb.cgi --fastcgi
diff --git a/modules/gnupg/manifests/client.pp b/modules/gnupg/manifests/client.pp
new file mode 100644
index 00000000..301e569a
--- /dev/null
+++ b/modules/gnupg/manifests/client.pp
@@ -0,0 +1,17 @@
+class gnupg::client {
+if versioncmp($::lsbdistrelease, '7') < 0 {
+ package {['gnupg',
+ 'rng-utils']:
+ }
+} else {
+ package {['gnupg2',
+ 'rng-utils']:
+ }
+}
+
+ mga_common::local_script { 'create_gnupg_keys.sh':
+ content => template('gnupg/create_gnupg_keys.sh')
+ }
+}
+
+
diff --git a/modules/gnupg/manifests/init.pp b/modules/gnupg/manifests/init.pp
new file mode 100644
index 00000000..d6ae319d
--- /dev/null
+++ b/modules/gnupg/manifests/init.pp
@@ -0,0 +1 @@
+class gnupg { }
diff --git a/modules/gnupg/manifests/keys.pp b/modules/gnupg/manifests/keys.pp
new file mode 100644
index 00000000..b99ed393
--- /dev/null
+++ b/modules/gnupg/manifests/keys.pp
@@ -0,0 +1,38 @@
+ # Debian recommends SHA2 with a 4096-bit key
+ # https://wiki.debian.org/Keysigning
+ # as they are heavy users of gpg, we tend to follow them;
+ # however, for testing purposes, 4096 is too strong and
+ # drains the entropy pool of a VM
+define gnupg::keys($email,
+ $key_name,
+ $key_type = 'RSA',
+ $key_length = '4096',
+ $expire_date = '400d',
+ $login = 'signbot',
+ $batchdir = '/var/lib/signbot/batches',
+ $keydir = '/var/lib/signbot/keys') {
+
+ include gnupg::client
+ file { "${name}.batch":
+ path => "${batchdir}/${name}.batch",
+ content => template('gnupg/batch')
+ }
+
+ file { $keydir:
+ ensure => directory,
+ owner => $login,
+ mode => '0700',
+ }
+
+ file { $batchdir:
+ ensure => directory,
+ owner => $login,
+ }
+
+ exec { "/usr/local/bin/create_gnupg_keys.sh ${batchdir}/${name}.batch ${keydir} ${batchdir}/${name}.done":
+ user => $login,
+ creates => "${batchdir}/${name}.done",
+ require => [File[$keydir], File["${batchdir}/${name}.batch"], Package['rng-utils']],
+ }
+}
diff --git a/modules/gnupg/templates/batch b/modules/gnupg/templates/batch
new file mode 100644
index 00000000..d55bdd52
--- /dev/null
+++ b/modules/gnupg/templates/batch
@@ -0,0 +1,8 @@
+%echo Generating a standard key
+Key-Type: <%= @key_type %>
+Key-Length: <%= @key_length %>
+Name-Real: <%= @key_name %>
+Name-Email: <%= @email %>
+Expire-Date: <%= @expire_date %>
+%commit
+%echo done
diff --git a/modules/gnupg/templates/create_gnupg_keys.sh b/modules/gnupg/templates/create_gnupg_keys.sh
new file mode 100644
index 00000000..a2caba2d
--- /dev/null
+++ b/modules/gnupg/templates/create_gnupg_keys.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+BATCHFILE="$1"
+HOMEDIR="$2"
+LOCK="$3"
+
+test $# -eq 3 || exit 1
+
+if [ -e "$LOCK" ]
+then
+ echo "Lock file already exist." 1>&2
+ echo "Remove $LOCK if you want to regenerate key." 1>&2
+ exit 2
+fi
+
+touch "$LOCK"
+
+/sbin/rngd -f -r /dev/urandom &
+RAND=$!
+cd "$HOMEDIR"
+gpg --homedir "$HOMEDIR" --batch --gen-key "$BATCHFILE"
+EXIT=$?
+
+kill $RAND
+
+exit $EXIT
diff --git a/modules/icecream/manifests/client.pp b/modules/icecream/manifests/client.pp
new file mode 100644
index 00000000..5364d87d
--- /dev/null
+++ b/modules/icecream/manifests/client.pp
@@ -0,0 +1,6 @@
+define icecream::client($host = '') {
+ include icecream::client_common
+ file { '/etc/sysconfig/icecream':
+ content => template('icecream/sysconfig'),
+ }
+}
diff --git a/modules/icecream/manifests/client_common.pp b/modules/icecream/manifests/client_common.pp
new file mode 100644
index 00000000..b4ee4ac5
--- /dev/null
+++ b/modules/icecream/manifests/client_common.pp
@@ -0,0 +1,7 @@
+class icecream::client_common {
+ package { 'icecream': }
+
+ service { 'icecream':
+ subscribe => Package['icecream'],
+ }
+}
diff --git a/modules/icecream/manifests/init.pp b/modules/icecream/manifests/init.pp
new file mode 100644
index 00000000..01828f03
--- /dev/null
+++ b/modules/icecream/manifests/init.pp
@@ -0,0 +1 @@
+class icecream { }
diff --git a/modules/icecream/manifests/scheduler.pp b/modules/icecream/manifests/scheduler.pp
new file mode 100644
index 00000000..e3d876b8
--- /dev/null
+++ b/modules/icecream/manifests/scheduler.pp
@@ -0,0 +1,7 @@
+class icecream::scheduler {
+ package { 'icecream-scheduler': }
+
+ service { 'icecream-scheduler':
+ subscribe => Package['icecream-scheduler'],
+ }
+}
diff --git a/modules/icecream/templates/sysconfig b/modules/icecream/templates/sysconfig
new file mode 100644
index 00000000..8a5bc92c
--- /dev/null
+++ b/modules/icecream/templates/sysconfig
@@ -0,0 +1,89 @@
+#
+## Type: integer(0:19)
+## Path: Applications/icecream
+## Description: Icecream settings
+## ServiceRestart: icecream
+## Default: 5
+#
+# Nice level of running compilers
+#
+ICECREAM_NICE_LEVEL="5"
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: /var/log/iceccd
+#
+# icecream daemon log file
+#
+ICECREAM_LOG_FILE="/var/log/icecream.log"
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: no
+#
+# Start also the scheduler?
+#
+ICECREAM_RUN_SCHEDULER="no"
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: /var/log/icecc_scheduler
+#
+# icecream scheduler log file
+#
+ICECREAM_SCHEDULER_LOG_FILE="/var/log/scheduler.log"
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: ""
+#
+# Identification for the network the scheduler and daemon run on.
+# You can have several distinct icecream networks in the same LAN
+# for whatever reason.
+#
+ICECREAM_NETNAME=""
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: ""
+#
+# If the daemon can't find the scheduler by broadcast (e.g. because
+# of a firewall) you can specify it.
+#
+ICECREAM_SCHEDULER_HOST="<%= @host %>"
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: ""
+## Type: integer
+#
+# You can override the number of jobs to run in parallel here. By
+# default this depends on the number of (virtual) CPUs installed.
+#
+ICECREAM_MAX_JOBS=""
+
+#
+## Type: string
+## Path: Applications/icecream
+## Default: "/var/cache/icecream"
+#
+# This is the directory where the icecream daemon stores the environments
+# it compiles in. In a big network this can grow quite a bit, so use another
+# path if your /tmp is small - but the icecream user has to be able to write to it.
+#
+ICECREAM_BASEDIR="/var/cache/icecream"
+
+#
+## Type: string
+## Path: Applications/icecream
+# Default: ""
+#
+# Just set the environment var to enable DEBUG
+ICECREAM_DEBUG="1"
+ICECREAM_SCHEDULER_DEBUG="1"
diff --git a/modules/ii/manifests/init.pp b/modules/ii/manifests/init.pp
new file mode 100644
index 00000000..2947c75d
--- /dev/null
+++ b/modules/ii/manifests/init.pp
@@ -0,0 +1,38 @@
+class ii {
+ class base {
+ package {['ii',
+ 'perl-Proc-Daemon']: }
+
+ file { '/var/lib/ii/':
+ ensure => directory,
+ owner => 'nobody',
+ }
+ }
+
+ define bot( $server = 'irc.freenode.net',
+ $channel) {
+
+ $nick = $name
+
+ include ii::base
+ # a custom wrapper is needed since ii does not fork in the
+ # background, and bash cannot do it properly
+ mga_common::local_script { "ii_${nick}":
+ content => template('ii/ii_wrapper.pl'),
+ require => Class['ii::base'],
+ }
+
+ service { 'ii':
+ provider => base,
+ start => "/usr/local/bin/ii_${nick}",
+ require => Mga_common::Local_script["ii_${nick}"],
+ }
+
+ exec { "join channel ${nick}":
+ command => "echo '/j ${channel}' > /var/lib/ii/${nick}/${server}/in",
+ user => 'nobody',
+ creates => "/var/lib/ii/${nick}/${server}/${channel}/in",
+ require => Service['ii'],
+ }
+ }
+}
diff --git a/modules/ii/templates/ii_wrapper.pl b/modules/ii/templates/ii_wrapper.pl
new file mode 100644
index 00000000..68128314
--- /dev/null
+++ b/modules/ii/templates/ii_wrapper.pl
@@ -0,0 +1,15 @@
+#!/usr/bin/perl
+use warnings;
+use strict;
+use POSIX;
+use Proc::Daemon;
+my $nick = "<%= @nick %>";
+my $server = "<%= @server %>";
+
+
+Proc::Daemon::Init();
+my (undef, undef, $uid) = getpwnam("nobody");
+POSIX::setuid($uid);
+
+fork() || exec "ii -n $nick -i /var/lib/ii/$nick -s $server";
+wait();
diff --git a/modules/irkerd/manifests/init.pp b/modules/irkerd/manifests/init.pp
new file mode 100644
index 00000000..adffc452
--- /dev/null
+++ b/modules/irkerd/manifests/init.pp
@@ -0,0 +1,9 @@
+class irkerd {
+ package { 'irker':
+ ensure => installed,
+ }
+
+ service { 'irkerd':
+ ensure => running,
+ }
+}
diff --git a/modules/libvirtd/files/network_add.py b/modules/libvirtd/files/network_add.py
new file mode 100644
index 00000000..4ed63109
--- /dev/null
+++ b/modules/libvirtd/files/network_add.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python3
+import libvirt
+import os
+import IPy
+
+# bridge_name
+
+# forward -> nat/ route
+# forward-dev
+
+# network
+# => derive the gateway and the address range from it
+# and set up DHCP automatically
+
+# tftp_root
+
+# enable_pxelinux
+
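+# Example invocation (all values are illustrative placeholders):
+#   NAME=buildnet NETWORK=192.168.100.0/24 BRIDGE_NAME=virbr1 ./network_add.py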
+
+bridge_name = os.environ.get('BRIDGE_NAME', 'virbr0')
+forward = os.environ.get('FORWARD', 'nat')
+forward_dev = os.environ.get('FORWARD_DEV', 'eth0')
+
+network = os.environ.get('NETWORK', '192.168.122.0/24')
+
+tftp_root = os.environ.get('TFTP_ROOT', '')
+disable_pxelinux = os.environ.get('DISABLE_PXE', False)
+
+name = os.environ.get('NAME', 'default')
+
+
+ip = IPy.IP(network)
+gateway = ip[1]
+dhcp_start = ip[2]
+dhcp_end = ip[-2]
+
+netmask = ip.netmask()
+tftp_xml = ''
+pxe_xml = ''
+
+if tftp_root:
+ tftp_xml = "<tftp root='" + tftp_root + "' />"
+ if not disable_pxelinux:
+ pxe_xml = "<bootp file='pxelinux.0' />"
+
+network_xml = """
+<network>
+ <name>%(name)s</name>
+ <bridge name="%(bridge_name)s" />
+ <forward mode="%(forward)s" dev="%(forward_dev)s"/>
+ <ip address="%(gateway)s" netmask="%(netmask)s">
+ %(tftp_xml)s
+ <dhcp>
+ <range start="%(dhcp_start)s" end="%(dhcp_end)s" />
+ %(pxe_xml)s
+ </dhcp>
+ </ip>
+</network>""" % globals()
+
+c=libvirt.open("qemu:///system")
+c.networkDefineXML(network_xml)
diff --git a/modules/libvirtd/files/storage_add.py b/modules/libvirtd/files/storage_add.py
new file mode 100644
index 00000000..10369e36
--- /dev/null
+++ b/modules/libvirtd/files/storage_add.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python3
+import libvirt
+import sys
+
+name = sys.argv[1]
+path = sys.argv[2]
+
+storage_xml = """
+<pool type='dir'>
+ <name>%s</name>
+ <capacity>0</capacity>
+ <allocation>0</allocation>
+ <available>0</available>
+ <source>
+ </source>
+ <target>
+ <path>%s</path>
+ <permissions>
+ <mode>0700</mode>
+ <owner>-1</owner>
+ <group>-1</group>
+ </permissions>
+ </target>
+</pool>""" % ( name, path )
+
+c=libvirt.open("qemu:///system")
+c.storagePoolDefineXML(storage_xml,0)
diff --git a/modules/libvirtd/manifests/init.pp b/modules/libvirtd/manifests/init.pp
new file mode 100644
index 00000000..f0cbb887
--- /dev/null
+++ b/modules/libvirtd/manifests/init.pp
@@ -0,0 +1,109 @@
+class libvirtd {
+ class base {
+ # make sure to use a recent enough version
+ # dnsmasq-base -> for nat network
+ # netcat-openbsd -> for ssh remote access
+ # iptables -> for dhcp, the error message was quite puzzling
+ # python-* => needed for helper script
+ package {['libvirt-utils',
+ 'dnsmasq',
+ 'netcat-openbsd',
+ 'iptables',
+ 'python3-libvirt',
+ 'python3-IPy']:
+ }
+ service { 'libvirtd':
+ require => Package['libvirt-utils'],
+ }
+
+ #TODO remove once libvirt package is fixed to manage the directory
+ file { ['/etc/libvirt/storage',
+ '/etc/libvirt/storage/autostart']:
+ ensure => directory,
+ require => Package['libvirt-utils'],
+ }
+
+ file { '/usr/local/bin/storage_add.py':
+ mode => '0755',
+ source => 'puppet:///modules/libvirtd/storage_add.py',
+ }
+
+ file { '/usr/local/bin/network_add.py':
+ mode => '0755',
+ source => 'puppet:///modules/libvirtd/network_add.py',
+ }
+
+ }
+
+ class kvm inherits base {
+ # pull cyrus-sasl, should be checked
+ package { 'qemu': }
+ }
+
+ # see https://wiki.libvirt.org/page/SSHPolicyKitSetup
+ define group_access() {
+ # to pull polkit and create the directory
+ include libvirtd::base
+ file { "/etc/polkit-1/localauthority/50-local.d/50-${name}-libvirt-remote-access.pkla":
+ content => template('libvirtd/50-template-libvirt-remote-access.pkla'),
+ require => Package['libvirt-utils'],
+ }
+ # give access to /dev/kvm to people allowed to use libvirt
+ file { '/dev/kvm':
+ group => $name,
+ owner => 'root',
+ mode => '0660',
+ }
+ }
+
+ define storage($path, $autostart = true) {
+ include libvirtd::base
+
+ exec { "/usr/local/bin/storage_add.py ${name} ${path}":
+ creates => "/etc/libvirt/storage/${name}.xml",
+ require => [File['/usr/local/bin/storage_add.py'],
+ Package['python3-libvirt'] ]
+ }
+
+ #TODO use API of libvirt
+ file { "/etc/libvirt/storage/autostart/${name}.xml":
+ ensure => $autostart ? {
+ true => "/etc/libvirt/storage/${name}.xml",
+ false => absent
+ },
+ require => Package['libvirt-utils'],
+ }
+ }
+
+ define network( $bridge_name = 'virbr0',
+ $forward = 'nat',
+ $forward_dev = 'eth0',
+ $network = '192.168.122.0/24',
+ $tftp_root = '',
+ $disable_pxe = '',
+ $autostart = true,
+ $vm_type = 'qemu') {
+
+ exec { '/usr/local/bin/network_add.py':
+ environment => ["BRIDGE_NAME=${bridge_name}",
+ "FORWARD=${forward}",
+ "FORWARD_DEV=${forward_dev}",
+ "NETWORK=${network}",
+ "TFTP_ROOT=${tftp_root}",
+ "DISABLE_PXE=\"${disable_pxe}\""],
+
+ creates => "/etc/libvirt/${vm_type}/networks/${name}.xml",
+ require => [File['/usr/local/bin/network_add.py'],
+ Package['python3-IPy'], Package['python3-libvirt'] ]
+ }
+
+ #TODO use API of libvirt
+ file { "/etc/libvirt/${vm_type}/networks/autostart/${name}.xml":
+ ensure => $autostart ? {
+ true => "/etc/libvirt/${vm_type}/networks/${name}.xml",
+ false => absent
+ },
+ require => Package['libvirt-utils'],
+ }
+ }
+}
diff --git a/modules/libvirtd/templates/50-template-libvirt-remote-access.pkla b/modules/libvirtd/templates/50-template-libvirt-remote-access.pkla
new file mode 100644
index 00000000..8806e3cb
--- /dev/null
+++ b/modules/libvirtd/templates/50-template-libvirt-remote-access.pkla
@@ -0,0 +1,6 @@
+[Remote libvirt SSH access]
+Identity=unix-user:root;unix-group:<%= @name %>
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
diff --git a/modules/mediawiki/files/init_wiki.php b/modules/mediawiki/files/init_wiki.php
new file mode 100644
index 00000000..da1d46f5
--- /dev/null
+++ b/modules/mediawiki/files/init_wiki.php
@@ -0,0 +1,31 @@
+<?php
+$wiki_root = $argv[1];
+$mw_root = '/usr/share/mediawiki';
+
+if (!is_dir("$wiki_root/config")) {
+ exit(1);
+}
+
+// DefaultSettings.php complains if not defined
+define('MEDIAWIKI',1);
+
+require_once("$mw_root/includes/Defines.php");
+require_once("$mw_root/includes/AutoLoader.php");
+require_once("$mw_root/includes/GlobalFunctions.php");
+include("$wiki_root/LocalSettings.php");
+
+$dbclass = 'Database'.ucfirst($wgDBtype);
+$wgDatabase = new $dbclass($wgDBserver,
+ $wgDBuser,
+ $wgDBpassword, $wgDBname, 1);
+
+$wgDatabase->initial_setup($wgDBpassword, $wgDBname);
+$wgDatabase->setup_database();
+
+$dir = "$wiki_root/config";
+foreach (scandir($dir) as $item) {
+    // build the full path so the checks and unlink work regardless of cwd
+    if (!is_dir("$dir/$item") || is_link("$dir/$item"))
+        unlink("$dir/$item");
+}
+rmdir("$dir");
+?>
diff --git a/modules/mediawiki/files/robots.txt b/modules/mediawiki/files/robots.txt
new file mode 100644
index 00000000..a58c6199
--- /dev/null
+++ b/modules/mediawiki/files/robots.txt
@@ -0,0 +1,4 @@
+User-agent: *
+Disallow: /mw-*/index.php?
+Disallow: /*/Special:
+Crawl-delay: 30
diff --git a/modules/mediawiki/manifests/base.pp b/modules/mediawiki/manifests/base.pp
new file mode 100644
index 00000000..76c8625b
--- /dev/null
+++ b/modules/mediawiki/manifests/base.pp
@@ -0,0 +1,46 @@
+class mediawiki::base {
+ include apache::mod::php
+ $vhost = $mediawiki::config::vhost
+ $root = $mediawiki::config::root
+
+ package { ['mediawiki','mediawiki-ldapauthentication']: }
+
+ file { $mediawiki::config::root:
+ ensure => directory,
+ }
+
+ $wiki_root = $mediawiki::config::root
+ $robotsfile = "$wiki_root/robots.txt"
+ file { $robotsfile:
+ ensure => present,
+ mode => '0644',
+ owner => root,
+ group => root,
+ source => 'puppet:///modules/mediawiki/robots.txt',
+ }
+
+# file { '/usr/local/bin/init_wiki.php':
+# mode => '0755',
+# source => 'puppet:///modules/mediawiki/init_wiki.php',
+# }
+
+ $user = 'mediawiki'
+
+ postgresql::remote_user { $user:
+ password => $mediawiki::config::pgsql_password,
+ }
+
+ # TODO create the ldap user
+
+ if $vhost {
+ apache::vhost::redirect_ssl { $vhost: }
+
+ apache::vhost::base { "ssl_${vhost}":
+ location => $root,
+ use_ssl => true,
+ vhost => $vhost,
+ content => template('mediawiki/wiki_vhost.conf'),
+ }
+ }
+ # add index.php
+}
diff --git a/modules/mediawiki/manifests/config.pp b/modules/mediawiki/manifests/config.pp
new file mode 100644
index 00000000..0c54cdf6
--- /dev/null
+++ b/modules/mediawiki/manifests/config.pp
@@ -0,0 +1,9 @@
+# the class is just here to handle global configuration,
+# a smart variation of the approach described at
+# https://puppetlabs.com/blog/the-problem-with-separating-data-from-puppet-code/
+class mediawiki::config(
+ $pgsql_password,
+ $secretkey,
+ $ldap_password,
+ $vhost = "wiki.${::domain}",
+ $root = '/srv/wiki/') {}
diff --git a/modules/mediawiki/manifests/init.pp b/modules/mediawiki/manifests/init.pp
new file mode 100644
index 00000000..28e79fab
--- /dev/null
+++ b/modules/mediawiki/manifests/init.pp
@@ -0,0 +1 @@
+class mediawiki { }
diff --git a/modules/mediawiki/manifests/instance.pp b/modules/mediawiki/manifests/instance.pp
new file mode 100644
index 00000000..c6906449
--- /dev/null
+++ b/modules/mediawiki/manifests/instance.pp
@@ -0,0 +1,100 @@
+define mediawiki::instance( $title,
+ $wiki_settings = '',
+ $skinsdir = '/usr/share/mediawiki/skins') {
+
+ include mediawiki::base
+
+ $path = $name
+ $lang = $name
+ $wiki_root = "${mediawiki::base::root}/${path}"
+ $db_name = "mediawiki_${name}"
+ $db_user = $mediawiki::base::user
+ $db_password = $mediawiki::config::pgsql_password
+ $secret_key = $mediawiki::config::secretkey
+ $ldap_password = $mediawiki::config::ldap_password
+ $includedir = "/usr/share/mediawiki/includes"
+ $maintenancedir = "/usr/share/mediawiki/maintenance"
+ $vendordir = "/usr/share/mediawiki/vendor"
+ $resourcesdir = "/usr/share/mediawiki/resources"
+ $extensionsdir = "/usr/share/mediawiki/extensions"
+
+ file { $wiki_root:
+ ensure => directory
+ }
+
+ file { "${wiki_root}/skins":
+ ensure => link,
+ target => $skinsdir,
+ require => File[$wiki_root],
+ }
+ file { "${wiki_root}/includes":
+ ensure => link,
+ target => $includedir,
+ require => File[$wiki_root],
+ }
+
+ file { "${wiki_root}/maintenance":
+ ensure => link,
+ target => $maintenancedir,
+ require => File[$wiki_root],
+ }
+
+ file { "${wiki_root}/vendor":
+ ensure => link,
+ target => $vendordir,
+ require => File[$wiki_root],
+ }
+
+ file { "${wiki_root}/resources":
+ ensure => link,
+ target => $resourcesdir,
+ require => File[$wiki_root],
+ }
+
+ file { "${wiki_root}/extensions":
+ ensure => link,
+ target => $extensionsdir,
+ require => File[$wiki_root],
+ }
+
+ file { "${wiki_root}/cache":
+ ensure => directory,
+ owner => apache,
+ mode => '0755',
+ }
+
+ file { "${wiki_root}/tmp":
+ ensure => directory,
+ owner => apache,
+ mode => '0755',
+ }
+
+ exec { "wikicreate ${name}":
+ command => "mediawiki-create ${wiki_root}",
+ cwd => $mediawiki::base::root,
+ require => [File[$wiki_root],Package['mediawiki']],
+ creates => "${wiki_root}/index.php",
+ }
+
+# postgresql::remote_database { $db_name:
+# user => $db_user,
+# callback_notify => Exec["deploy_db ${name}"],
+# }
+#
+# exec { "deploy_db ${name}":
+# command => "php /usr/local/bin/init_wiki.php ${wiki_root}",
+# refreshonly => true,
+# onlyif => "/usr/bin/test -d ${wiki_root}/config",
+# }
+
+ file { "${wiki_root}/LocalSettings.php":
+ owner => 'apache',
+ mode => '0600',
+ content => template('mediawiki/LocalSettings.php'),
+ # if LocalSettings is created first, the wikicreate script
+ # does not create a config directory, and so it doesn't
+ # trigger the deploy_db exec
+ require => Exec["wikicreate ${name}"],
+ }
+}
+
diff --git a/modules/mediawiki/templates/LocalSettings.php b/modules/mediawiki/templates/LocalSettings.php
new file mode 100644
index 00000000..c340dfd9
--- /dev/null
+++ b/modules/mediawiki/templates/LocalSettings.php
@@ -0,0 +1,208 @@
+<?php
+
+# This file was created by puppet, so any change will be overwritten
+
+# See includes/DefaultSettings.php for all configurable settings
+# and their default values, but don't forget to make changes in _this_
+# file, not there.
+#
+# Further documentation for configuration settings may be found at:
+# https://www.mediawiki.org/wiki/Manual:Configuration_settings
+
+# Protect against web entry
+if ( !defined( 'MEDIAWIKI' ) ) {
+ exit;
+}
+
+## Installation path (should default to this value, but define for clarity)
+$IP = '/usr/share/mediawiki';
+
+## Include path necessary to load LDAP module
+$path = array( $IP, "$IP/includes", "$IP/languages" );
+set_include_path( implode( PATH_SEPARATOR, $path ) . PATH_SEPARATOR . get_include_path() );
+
+## Uncomment this to disable output compression
+# $wgDisableOutputCompression = true;
+
+$wgSitename = "<%= @title %>";
+# $wgMetaNamespace = ""; # Defaults to $wgSitename
+
+## The URL base path to the directory containing the wiki;
+## defaults for all runtime URL paths are based off of this.
+## For more information on customizing the URLs
+## (like /w/index.php/Page_title to /wiki/Page_title) please see:
+## https://www.mediawiki.org/wiki/Manual:Short_URL
+$wgScriptPath = "/<%= @path %>";
+
+## The protocol and server name to use in fully-qualified URLs
+$wgServer = "https://wiki.mageia.org";
+
+## The URL path to static resources (images, scripts, etc.)
+$wgResourceBasePath = $wgScriptPath;
+
+## The relative URL path to the skins directory
+$wgStylePath = "$wgScriptPath/skins";
+
+## The relative URL path to the logo. Make sure you change this from the default,
+## or else you'll overwrite your logo when you upgrade!
+$wgLogo = "$wgStylePath/common/images/wiki_mga.png";
+
+## UPO means: this is also a user preference option
+
+$wgEnableEmail = true;
+$wgEnableUserEmail = true; # UPO
+
+$wgEmergencyContact = "root@<%= @domain %>";
+$wgPasswordSender = "wiki_noreply@ml.<%= @domain %>";
+
+$wgEnotifUserTalk = true; # UPO
+$wgEnotifWatchlist = true; # UPO
+$wgEmailAuthentication = true;
+
+## Database settings
+$wgDBtype = "postgres";
+$wgDBserver = "pg.<%= @domain %>";
+$wgDBname = "<%= @db_name %>";
+$wgDBuser = "<%= @db_user %>";
+$wgDBpassword = "<%= @db_password %>";
+
+# Postgres specific settings
+$wgDBport = "5432";
+$wgDBmwschema = "mediawiki";
+$wgDBts2schema = "public";
+
+## Shared memory settings
+$wgMainCacheType = CACHE_NONE;
+$wgMemCachedServers = [];
+
+## To enable image uploads, make sure the 'images' directory
+## is writable, then set this to true:
+$wgEnableUploads = true;
+# use gd, as convert does not work for big images
+# see https://bugs.mageia.org/show_bug.cgi?id=3202
+$wgUseImageMagick = true;
+#$wgImageMagickConvertCommand = "/usr/bin/convert";
+
+# InstantCommons allows wiki to use images from https://commons.wikimedia.org
+$wgUseInstantCommons = false;
+
+## If you use ImageMagick (or any other shell command) on a
+## Linux server, this will need to be set to the name of an
+## available UTF-8 locale
+$wgShellLocale = "en_US.UTF-8";
+
+## Set $wgCacheDirectory to a writable directory on the web server
+## to make your wiki go slightly faster. The directory should not
+## be publicly accessible from the web.
+# This seems actually mandatory to get the Vector skin to work properly
+# https://serverfault.com/a/744059
+# FIXME: Dehardcode that path (maybe via ${wiki_root} if exposed?)
+$wgCacheDirectory = "/srv/wiki/<%= @path %>/cache";
+
+$wgUploadDirectory = "/srv/wiki/<%= @path %>/images";
+
+# This seems mandatory to get the Vector skin to work properly
+# https://phabricator.wikimedia.org/T119934
+# FIXME: Dehardcode that path (maybe via ${wiki_root} if exposed?)
+$wgTmpDirectory = "/srv/wiki/<%= @path %>/tmp";
+
+# Array of interwiki prefixes for current wiki.
+$wgLocalInterwikis = array( strtolower( $wgSitename ) );
+
+# Site language code, should be one of the list in ./languages/data/Names.php
+$wgLanguageCode = "<%= @lang %>";
+
+$wgSecretKey = "<%= @secret_key %>";
+
+# Changing this will log out all existing sessions.
+$wgAuthenticationTokenVersion = "1";
+
+# Site upgrade key. Must be set to a string (default provided) to turn on the
+# web installer while LocalSettings.php is in place
+# FIXME: This should be set to a secure value:
+# https://www.mediawiki.org/wiki/Manual:$wgUpgradeKey
+# $wgUpgradeKey = "";
+
+## For attaching licensing metadata to pages, and displaying an
+## appropriate copyright notice / icon. GNU Free Documentation
+## License and Creative Commons licenses are supported so far.
+$wgEnableCreativeCommonsRdf = true;
+# TODO add a proper page
+$wgRightsPage = ""; # Set to the title of a wiki page that describes your license/copyright
+$wgRightsUrl = "https://creativecommons.org/licenses/by-sa/3.0/";
+$wgRightsText = "Creative Commons - Attribution-ShareAlike 3.0 Unported";
+# TODO get the icon to host it on our server
+$wgRightsIcon = "https://licensebuttons.net/l/by-sa/3.0/88x31.png";
+
+# Path to the GNU diff3 utility. Used for conflict resolution.
+$wgDiff3 = "/usr/bin/diff3";
+
+## Default skin: you can change the default skin. Use the internal symbolic
+## names, ie 'vector', 'monobook':
+$wgDefaultSkin = 'vector';
+
+# Enabled skins.
+# The following skins were automatically enabled:
+wfLoadSkin( 'MonoBook' );
+wfLoadSkin( 'Vector' );
+
+
+# End of automatically generated settings.
+# Add more configuration options below.
+
+
+# Setting this to true will invalidate all cached pages whenever
+# LocalSettings.php is changed.
+$wgInvalidateCacheOnLocalSettingsChange = true;
+
+# FIXME: Obsoleted, to be replaced by $wgPasswordPolicy
+# https://www.mediawiki.org/wiki/Manual:$wgPasswordPolicy
+$wgMinimalPasswordLength = 1;
+
+# Give more details on errors
+$wgShowExceptionDetails = true;
+
+
+## LDAP setup
+
+require_once 'extensions/LdapAuthentication/LdapAuthentication.php';
+$wgAuth = new LdapAuthenticationPlugin();
+
+## uncomment to debug
+# $wgLDAPDebug = 10;
+# $wgDebugLogGroups["ldap"] = "/tmp/wiki_ldap.log";
+#
+$wgDebugLogFile = "/tmp/wiki.log";
+#
+
+$wgLDAPUseLocal = false;
+
+$wgLDAPDomainNames = array( 'ldap' );
+
+# TODO make it workable with more than one server
+$wgLDAPServerNames = array( 'ldap' => 'ldap.<%= @domain %>' );
+
+$wgLDAPSearchStrings = array( 'ldap' => 'uid=USER-NAME,ou=People,<%= @dc_suffix %>' );
+
+$wgLDAPEncryptionType = array( 'ldap' => 'tls' );
+
+$wgLDAPBaseDNs = array( 'ldap' => '<%= @dc_suffix %>' );
+$wgLDAPUserBaseDNs = array( 'ldap' => 'ou=People,<%= @dc_suffix %>' );
+$wgLDAPGroupBaseDNs = array ( 'ldap' => 'ou=Group,<%= @dc_suffix %>' );
+
+$wgLDAPProxyAgent = array( 'ldap' => 'cn=mediawiki-alamut,ou=System Accounts,<%= @dc_suffix %>' );
+
+$wgLDAPProxyAgentPassword = array( 'ldap' => '<%= @ldap_password %>' );
+
+$wgLDAPUseLDAPGroups = array( 'ldap' => true );
+$wgLDAPGroupNameAttribute = array( 'ldap' => 'cn' );
+$wgLDAPGroupUseFullDN = array( 'ldap' => true );
+$wgLDAPLowerCaseUsername = array( 'ldap' => true );
+$wgLDAPGroupObjectclass = array( 'ldap' => 'posixGroup' );
+$wgLDAPGroupAttribute = array( 'ldap' => 'member' );
+
+$wgLDAPLowerCaseUsername = array( 'ldap' => true );
+
+$wgLDAPPreferences = array( 'ldap' => array( 'email'=>'mail','realname'=>'cn','nickname'=>'uid','language'=>'preferredlanguage') );
+
+<%= @wiki_settings %>
diff --git a/modules/mediawiki/templates/wiki_vhost.conf b/modules/mediawiki/templates/wiki_vhost.conf
new file mode 100644
index 00000000..1ae3492d
--- /dev/null
+++ b/modules/mediawiki/templates/wiki_vhost.conf
@@ -0,0 +1,17 @@
+# heavily used by the wiki farm stuff
+<Directory <%= @root %>>
+Options +FollowSymLinks
+</Directory>
+
+<Directory <%= @root %>/images>
+ SetHandler default-handler
+</Directory>
+
+AliasMatch /.*/skins/(.*)$ /usr/share/mediawiki/skins/$1
+
+RewriteEngine On
+
+RewriteCond %{REQUEST_URI} ^/.*/index.php$
+RewriteCond %{QUERY_STRING} ^title=Special:UserLogin
+RewriteCond %{HTTPS} ^off$
+RewriteRule ^(.*)$ https://%{SERVER_NAME}/$1 [R]
diff --git a/modules/memcached/files/memcached.sysconfig b/modules/memcached/files/memcached.sysconfig
new file mode 100644
index 00000000..a29f2270
--- /dev/null
+++ b/modules/memcached/files/memcached.sysconfig
@@ -0,0 +1,23 @@
+# Specify the binary to use
+# MEMCACHED_DAEMON="memcached-replication"
+MEMCACHED_DAEMON="memcached"
+
+# TCP port to listen on
+TCP_PORT="11211"
+# UDP port to listen on, can be disabled by setting it to 0
+UDP_PORT="11211"
+# User to run under
+USER="memcached"
+# Max simultaneous connections
+MAXCONN="1024"
+# MB memory max to use for object storage
+CACHESIZE="64"
+# IP address to listen on. Set to "INADDR_ANY" or "" to listen on all interfaces
+IPADDR="127.0.0.1"
+# Number of threads to use to process incoming requests
+THREADS="4"
+# Unix socket path to listen on (disables network support)
+#UNIX_SOCKET="/var/run/memcached/memcached.sock"
+# Additional options
+OPTIONS=""
+
diff --git a/modules/memcached/manifests/init.pp b/modules/memcached/manifests/init.pp
new file mode 100644
index 00000000..50152871
--- /dev/null
+++ b/modules/memcached/manifests/init.pp
@@ -0,0 +1,13 @@
+class memcached {
+ package { 'memcached': }
+
+ service { 'memcached':
+ require => Package['memcached'],
+ }
+
+ file { '/etc/sysconfig/memcached':
+ require => Package['memcached'],
+ source => 'puppet:///modules/memcached/memcached.sysconfig',
+ notify => Service['memcached'],
+ }
+}
diff --git a/modules/mga-advisories/manifests/init.pp b/modules/mga-advisories/manifests/init.pp
new file mode 100644
index 00000000..1937bb62
--- /dev/null
+++ b/modules/mga-advisories/manifests/init.pp
@@ -0,0 +1,98 @@
+class mga-advisories(
+ $advisories_svn = "svn://svn.${::domain}/svn/advisories",
+ $vhost
+){
+ $mgaadv_login = 'mga-advisories'
+ $mgaadv_homedir = "/var/lib/${mgaadv_login}"
+ $vhostdir = "${mgaadv_homedir}/vhost"
+ $advisories_dir = "${mgaadv_homedir}/advisories"
+ $status_dir = "${mgaadv_homedir}/status"
+ $update_script = '/usr/local/bin/update_mga-advisories'
+ $move_script = '/root/tmp/mgatools-new/mga-move-pkg'
+ $move_wrapper_script = '/usr/local/bin/mga-adv-move-pkg'
+
+ group { $mgaadv_login:
+ ensure => present,
+ }
+
+ user { $mgaadv_login:
+ ensure => present,
+ home => $mgaadv_homedir,
+ managehome => true,
+ gid => $mgaadv_login,
+ }
+
+ package { 'mga-advisories':
+ ensure => installed,
+ }
+
+ file {'/etc/mga-advisories.conf':
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('mga-advisories/mga-advisories.conf'),
+ require => Package['mga-advisories'],
+ }
+
+ file { [ $vhostdir, $status_dir ]:
+ ensure => directory,
+ owner => $mgaadv_login,
+ group => $mgaadv_login,
+ mode => '0755',
+ }
+
+ $vhost_aliases = {
+ "/static" => '/usr/share/mga-advisories/static',
+ }
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ aliases => $vhost_aliases,
+ require => File[$vhostdir],
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ use_ssl => true,
+ vhost => $vhost,
+ aliases => $vhost_aliases,
+ location => $vhostdir,
+ require => File[$vhostdir],
+ }
+
+ subversion::snapshot { $advisories_dir:
+ source => $advisories_svn,
+ user => $mgaadv_login,
+ refresh => '0',
+ require => User[$mgaadv_login],
+ }
+
+ file { $update_script:
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0755',
+ content => template('mga-advisories/update_script'),
+ }
+
+ file { $move_wrapper_script:
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0755',
+ content => template('mga-advisories/adv-move-pkg'),
+ }
+
+ sudo::sudoers_config { 'mga-adv-move-pkg':
+ content => template('mga-advisories/sudoers.adv-move-pkg')
+ }
+
+ # Disabled for now... we may reinstate it once it's been a little more tested.
+ #cron { $update_script:
+ # command => $update_script,
+ # user => $mgaadv_login,
+ # hour => '*',
+ # minute => '10',
+ # require => Subversion::Snapshot[$advisories_dir],
+ #}
+}
+# vim: sw=2
diff --git a/modules/mga-advisories/templates/adv-move-pkg b/modules/mga-advisories/templates/adv-move-pkg
new file mode 100644
index 00000000..71e1880e
--- /dev/null
+++ b/modules/mga-advisories/templates/adv-move-pkg
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+if [ "$USER" != "<%= @mgaadv_login %>" ]; then
+ echo "This script must be run as the <%= @mgaadv_login %> user." >&2
+ exit 1
+fi
+
+exec sudo <%= @move_script %> "$@"
diff --git a/modules/mga-advisories/templates/mga-advisories.conf b/modules/mga-advisories/templates/mga-advisories.conf
new file mode 100644
index 00000000..4dab1543
--- /dev/null
+++ b/modules/mga-advisories/templates/mga-advisories.conf
@@ -0,0 +1,14 @@
+mode: site
+send_adv_mail: yes
+move_pkg_cmd: <%= @move_wrapper_script %>
+send_report_mail: yes
+out_dir: <%= @vhostdir %>
+advisories_dir: <%= @advisories_dir %>
+status_dir: <%= @status_dir %>
+adv_mail_to: updates-announce@ml.mageia.org
+adv_mail_from: Mageia Updates <buildsystem-daemon@mageia.org>
+report_mail_to: qa-reports@ml.mageia.org
+report_mail_from: Mageia Advisories <buildsystem-daemon@mageia.org>
+bugzilla_url: https://bugs.mageia.org/
+bugzilla_login: bot
+bugzilla_password: file:///var/lib/git/.gitzilla-password
diff --git a/modules/mga-advisories/templates/sudoers.adv-move-pkg b/modules/mga-advisories/templates/sudoers.adv-move-pkg
new file mode 100644
index 00000000..5d9618a9
--- /dev/null
+++ b/modules/mga-advisories/templates/sudoers.adv-move-pkg
@@ -0,0 +1 @@
+<%= @mgaadv_login %> ALL=(root) NOPASSWD:<%= @move_script %> *
diff --git a/modules/mga-advisories/templates/update_script b/modules/mga-advisories/templates/update_script
new file mode 100644
index 00000000..71d8d1d4
--- /dev/null
+++ b/modules/mga-advisories/templates/update_script
@@ -0,0 +1,16 @@
+#!/bin/sh
+set -e
+
+if [ "$UID" = "0" ]; then
+ echo "Re-running as '<%= @mgaadv_login %>' user." >&2
+ exec /bin/su -c <%= @update_script %> - <%= @mgaadv_login %>
+fi
+
+if [ "$USER" != "<%= @mgaadv_login %>" ]; then
+ echo "This script must be run as the <%= @mgaadv_login %> user." >&2
+ exit 1
+fi
+
+cd <%= @advisories_dir %>
+svn up
+exec /usr/bin/mgaadv process
diff --git a/modules/mga-mirrors/files/check_mirrors_status b/modules/mga-mirrors/files/check_mirrors_status
new file mode 100755
index 00000000..9c00ac8d
--- /dev/null
+++ b/modules/mga-mirrors/files/check_mirrors_status
@@ -0,0 +1,271 @@
+#!/usr/bin/ruby
+
+require 'date'
+require 'net/http'
+require 'optparse'
+require 'thread'
+require 'uri'
+
+def get_dates(base, archs_per_distro, optional=true)
+ r = {}
+ begin
+ r['base'] = get_timestamp(base)
+ rescue Net::OpenTimeout, Timeout::Error, ArgumentError, NoMethodError, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::ECONNRESET, IOError, OpenSSL::SSL::SSLError => e
+ end
+
+ archs_per_distro.each{|d, archs|
+ r[d] = {}
+ archs.each{|a|
+ begin
+ r[d][a] = get_date(base, d, a)
+ rescue Net::OpenTimeout, Timeout::Error, ArgumentError, NoMethodError, Errno::EHOSTUNREACH, Errno::ECONNREFUSED, Errno::ECONNRESET, IOError, OpenSSL::SSL::SSLError => e
+ if !optional then
+ STDERR.puts "Failed to fetch #{version_url(base, d, a)}"
+ raise
+ end
+ end
+ }
+ }
+ r
+end
+
+def get_mirrors
+ # TODO Get it from the DB
+ mirrors = []
+ url = nil
+ tier1 = false
+ fetch_url("https://mirrors.mageia.org/").each_line{|l|
+ if l =~ /rsync.mageia.org/ then
+ tier1 = true
+ next
+ end
+ if l=~ /<\/tr>/ && !url.nil? then
+ if tier1 then
+ mirrors.prepend url
+ tier1 = false
+ else
+ mirrors.append url
+ end
+ url = nil
+ next
+ end
+ next unless l =~ /https?:.*>http/
+ # No need to check mirrors available over both http and https twice
+ if !url.nil? && url =~ /https:/ && l =~ /https:\/\//
+ # Skip http:// if https:// already seen for current mirror
+ # If they are in the other order, the http one will just be replaced
+ next
+ end
+ url = l.sub(/<a href="(http[^"]*)".*\n/, '\1')
+ url += "/" unless url =~ /\/$/
+ }
+ mirrors
+end
+
+def fetch_url(url, redirect_limit = 3)
+ return if redirect_limit < 0
+ if url =~ /^\// then
+ open(url){|f|
+ return f.read
+ }
+ else
+ uri = URI.parse(url)
+ http = Net::HTTP.new(uri.host, uri.port)
+ http.open_timeout = 30
+ http.read_timeout = 30
+ if uri.scheme == 'https' then
+ http.use_ssl = true
+ end
+ # Ruby 1.8.7 doesn't set a default User-Agent which causes at
+ # least one mirror to return 403
+ response = http.get(uri.path, {'User-Agent' => 'check_mirrors'})
+ case response
+ when Net::HTTPSuccess then
+ return response.body
+ when Net::HTTPRedirection then
+ location = response['location']
+ # Make location absolute if it was not
+ if location =~ /:\/\// then
+ fetch_url(location, redirect_limit - 1)
+ else
+ uri.path = location
+ fetch_url(uri.to_s, redirect_limit - 1)
+ end
+ end
+ end
+end
+
+def timestamp_url(url)
+ "#{url}mageia_timestamp"
+end
+
+def get_timestamp(url)
+ ti = fetch_url(timestamp_url(url)).to_i
+ if ti == 0 then
+ return nil
+ end
+ return DateTime.strptime(ti.to_s, '%s')
+end
+
+def parse_version(version)
+ date = version.sub(/.* (........ ..:..)$/, '\1').rstrip
+ DateTime.strptime(date, '%Y%m%d %H:%M')
+end
+
+def version_url(url, distrib, arch)
+ "#{url}distrib/#{distrib}/#{arch}/VERSION"
+end
+
+def get_date(url, distrib, arch)
+ return parse_version(fetch_url(version_url(url, distrib, arch)))
+end
+
+def format_age(ref_time, time)
+ return " <td class='broken'>X</td>" unless ref_time and time
+
+ diff = ref_time - time
+ cls = 'broken'
+ if diff == 0 then
+ cls = 'ok'
+ elsif diff < 0.5 then
+ cls = 'almost'
+ elsif diff < 2 then
+ cls = 'bad'
+ end
+ if cls == 'ok' then
+ return " <td class='#{cls}'>&nbsp;</td>"
+ else
+ return " <td class='#{cls}'>#{time.strftime("%F %R")}</td>"
+ end
+end
+
+def print_output(archs_per_distro, mirrors, ref_times, times)
+ puts "<html><head><title>Mageia Mirror Status #{Time.now.utc.strftime("%F")}</title>
+<link rel=\"icon\" type=\"image/png\" href=\"//www.mageia.org/g/favicon.png\">
+<style>
+td.broken {background-color:#FF0033;}
+td.bad {background-color:#FF9933;}
+td.almost {background-color:#CCFF66;}
+td.ok {background-color:#00FF66;}
+
+td {text-align:center;}
+td.name {text-align:left;}
+
+td.sep {width:12px;}
+table.legend td {padding:4px;}
+
+th {background-color:#EEEEEE;}
+</style>
+</head>
+<body>"
+ puts "Last checked on #{Time.now.utc.strftime("%F %R %Z")}<br/>"
+ puts "<table class='legend'><tr><td class='ok'>Up to date</td><td class='almost'>Less than 12h old</td><td class='bad'>Less than 2 days old</td><td class='broken'>Old or broken</td></tr></table>"
+ puts "<table><thead>"
+ puts "<tr><td/>"
+ puts "<td/><th>Base directory</th>"
+ archs_per_distro.each{|d, archs|
+ nb_arches = archs.size
+ puts " <td/><th colspan='#{nb_arches}'>#{d}</th>"
+ }
+ puts "</tr>"
+ puts "<tr><td/><td/><td/>"
+ archs_per_distro.each{|d, archs|
+ puts " <td class='sep' />"
+ archs.each{|a|
+ puts " <th>#{a}</th>"
+ }
+ }
+ puts "</tr></thead>"
+ puts "<tbody>"
+ puts "<tr><td class='name'>Reference</td>"
+ puts " <td class='sep' />"
+ puts " <td>#{!ref_times['base'].nil? ? ref_times['base'].strftime("%F %R") : "?"}</td>"
+ archs_per_distro.each{|d, archs|
+ puts " <td class='sep' />"
+ archs.each{|a|
+ puts " <td>#{ref_times[d][a].strftime("%F %R")}</td>"
+ }
+ }
+ puts "</tr>"
+
+ mirrors.each{|u|
+ puts "<tr><td class='name'><a href='#{u}'>#{u}</a></td>"
+ puts " <td class='sep' />"
+ puts format_age(ref_times['base'], times[u]['base'])
+ archs_per_distro.each{|d, archs|
+ puts " <td class='sep' />"
+ archs.each{|a|
+ puts format_age(ref_times[d][a], times[u][d][a])
+ }
+ }
+ puts "</tr>"
+ }
+ puts "</tbody></table>"
+ puts "</body></html>"
+end
+
+
+
+# Defaults
+ref = 'http://repository.mageia.org/'
+archs_per_distro = {
+ 'cauldron' => ['i686', 'x86_64', 'armv7hl', 'aarch64'],
+ '9' => ['i586', 'x86_64', 'armv7hl', 'aarch64']
+}
+parallel = 8
+
+OptionParser.new {|opts|
+ opts.banner = "Usage: #{$0} [options]"
+ opts.on("--repository URL",
+ "Reference repository. Default: #{ref}") {
+ |url| ref = url
+ }
+ opts.on("--parallel n", Integer,
+ "Max number of parallel connections. Default: #{parallel}") {
+ |n| parallel = n
+ }
+ opts.on("--output file",
+ "Write output into given file. Default to STDOUT") {
+ |f| $stdout.reopen(f, "w")
+ }
+}.parse!
+
+# Get dates from the reference repository, and fail if some requested distros
+# or archs are missing
+ref_times = get_dates(ref, archs_per_distro, false)
+
+# Get the list of mirror URLs to check
+mirrors = get_mirrors
+
+workqueue = Queue.new
+times = {}
+
+# Create all the threads and have them loop on the work queue
+threads = (1..parallel).map{|n|
+ Thread.new {
+ loop do
+ u = workqueue.pop
+ break if u == :exit
+ times[u] = get_dates(u, archs_per_distro)
+ end
+ }
+}
+
+# Push all mirrors into the queue
+mirrors.each{|u|
+ workqueue << u
+}
+
+# Get all the threads to exit after all the work is done
+parallel.times{|i|
+ workqueue << :exit
+}
+
+# Wait for the threads to exit
+threads.each{|t|
+ t.join
+}
+
+# Generate output
+print_output(archs_per_distro, mirrors, ref_times, times)
+
diff --git a/modules/mga-mirrors/manifests/init.pp b/modules/mga-mirrors/manifests/init.pp
index f602a47e..4b8b5552 100644
--- a/modules/mga-mirrors/manifests/init.pp
+++ b/modules/mga-mirrors/manifests/init.pp
@@ -1,23 +1,54 @@
class mga-mirrors {
-
- $vhost = "mirrors.$domain"
- package { 'mga-mirrors':
- ensure => installed
+ $vhost = "mirrors.${::domain}"
+ $mirrors_dir = '/var/www/mirrors'
+
+ package { 'mga-mirrors': }
+
+ apache::vhost::catalyst_app { $vhost:
+ script => '/usr/bin/mga_mirrors_fastcgi.pl',
+ require => Package['mga-mirrors'],
+ aliases => {
+ '/status' => '/var/www/mirrors/status.html',
+ }
+ }
+
+ apache::vhost::catalyst_app { "ssl_${vhost}":
+ script => '/usr/bin/mga_mirrors_fastcgi.pl',
+ require => Package['mga-mirrors'],
+ vhost => $vhost,
+ use_ssl => true,
+ aliases => {
+ '/status' => '/var/www/mirrors/status.html',
+ },
+ }
+
+ $pgsql_password = extlookup('mga_mirror_pgsql','x')
+
+ postgresql::remote_db_and_user { 'mirrors':
+ password => $pgsql_password,
+ description => 'Mirrors database',
+ }
+
+ file { '/etc/mga-mirrors.ini':
+ group => 'apache',
+ mode => '0640',
+ content => template('mga-mirrors/mga-mirrors.ini'),
+ require => Package['mga-mirrors']
+ }
+
+ file { '/etc/cron.d/check_mga_mirrors':
+ content => template('mga-mirrors/cron-mga_mirrors'),
+ require => Package['mga-mirrors']
}
- apache::vhost_catalyst_app { $vhost:
- script => "/usr/bin/mga_mirrors_fastcgi.pl"
+ file { $mirrors_dir:
+ ensure => directory,
+ owner => 'nobody',
}
- $password = extlookup("mga_mirror_password")
-
- file { "mga-mirrors.ini":
- path => "/etc/mga-mirrors.ini",
- ensure => "present",
- owner => root,
- group => apache,
- mode => 640,
- content => template("mga-mirrors/mga-mirrors.ini")
+ file { '/usr/local/bin/check_mirrors_status':
+ mode => '0755',
+ source => 'puppet:///modules/mga-mirrors/check_mirrors_status',
}
}
diff --git a/modules/mga-mirrors/templates/cron-mga_mirrors b/modules/mga-mirrors/templates/cron-mga_mirrors
new file mode 100644
index 00000000..7236be04
--- /dev/null
+++ b/modules/mga-mirrors/templates/cron-mga_mirrors
@@ -0,0 +1,2 @@
+MAILTO=root
+*/20 * * * * nobody /usr/local/bin/check_mirrors_status --output /var/www/mirrors/status.html.tmp && mv -f /var/www/mirrors/status.html.tmp /var/www/mirrors/status.html
diff --git a/modules/mga-mirrors/templates/mga-mirrors.ini b/modules/mga-mirrors/templates/mga-mirrors.ini
index 973c65fd..b438edd1 100644
--- a/modules/mga-mirrors/templates/mga-mirrors.ini
+++ b/modules/mga-mirrors/templates/mga-mirrors.ini
@@ -1,4 +1,4 @@
[db]
-pgconn=host=pgsql.<%= domain %>;dbname=mirrors
+pgconn=host=pg.<%= @domain %>;dbname=mirrors
user=mirrors
-password=<%= password %>
+password=<%= @pgsql_password %>
diff --git a/modules/mga-treasurer/manifests/init.pp b/modules/mga-treasurer/manifests/init.pp
new file mode 100644
index 00000000..d092e982
--- /dev/null
+++ b/modules/mga-treasurer/manifests/init.pp
@@ -0,0 +1,91 @@
+class mga-treasurer(
+ $grisbi_git = "git://git.${::domain}/org/accounts",
+ $grisbi_filename = 'mageia-accounts.gsb',
+ $vhost,
+ $vhostdir
+){
+ $mgatres_login = 'mga-treasurer'
+ $mgatres_homedir = "/var/lib/${mgatres_login}"
+ $grisbi_dir = "${mgatres_homedir}/grisbi"
+ $grisbi_path = "${grisbi_dir}/${grisbi_filename}"
+
+ $update_script = '/usr/local/bin/update_mga-treasurer'
+
+ group { $mgatres_login:
+ ensure => present,
+ }
+
+ user { $mgatres_login:
+ ensure => present,
+ comment => 'mga-treasurer user',
+ home => $mgatres_homedir,
+ managehome => true,
+ gid => $mgatres_login,
+ }
+
+ package { 'mga-treasurer':
+ ensure => installed,
+ }
+
+ file {'/etc/mga-treasurer.conf':
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('mga-treasurer/mga-treasurer.conf'),
+ require => Package['mga-treasurer'],
+ }
+
+ file { $vhostdir:
+ ensure => directory,
+ owner => $mgatres_login,
+ group => $mgatres_login,
+ mode => '0755',
+ }
+
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ aliases => {
+ "/${grisbi_filename}" => $grisbi_path,
+ "/static" => '/usr/share/mga-treasurer/static',
+ },
+ content => template('mga-treasurer/vhost_mga-treasurer.conf'),
+ require => File[$vhostdir],
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ use_ssl => true,
+ vhost => $vhost,
+ location => $vhostdir,
+ aliases => {
+ "/${grisbi_filename}" => $grisbi_path,
+ "/static" => '/usr/share/mga-treasurer/static',
+ },
+ content => template('mga-treasurer/vhost_mga-treasurer.conf'),
+ require => File[$vhostdir],
+ }
+
+ file { $update_script:
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0755',
+ content => template('mga-treasurer/update_script'),
+ }
+
+ git::snapshot { $grisbi_dir:
+ source => $grisbi_git,
+ user => $mgatres_login,
+ refresh => '0',
+ require => User[$mgatres_login],
+ }
+
+ cron { $update_script:
+ command => $update_script,
+ user => $mgatres_login,
+ hour => '*/2',
+ minute => '10',
+ require => Git::Snapshot[$grisbi_dir],
+ }
+}
+# vim: sw=2
diff --git a/modules/mga-treasurer/templates/mga-treasurer.conf b/modules/mga-treasurer/templates/mga-treasurer.conf
new file mode 100644
index 00000000..75ac180f
--- /dev/null
+++ b/modules/mga-treasurer/templates/mga-treasurer.conf
@@ -0,0 +1,2 @@
+grisbi_file: <%= @grisbi_path %>
+out_dir: <%= @vhostdir %>
diff --git a/modules/mga-treasurer/templates/update_script b/modules/mga-treasurer/templates/update_script
new file mode 100644
index 00000000..30fab72d
--- /dev/null
+++ b/modules/mga-treasurer/templates/update_script
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -e
+
+cd <%= @grisbi_dir %>
+git pull
+exec /usr/bin/mktreasurer
diff --git a/modules/mga-treasurer/templates/vhost_mga-treasurer.conf b/modules/mga-treasurer/templates/vhost_mga-treasurer.conf
new file mode 100644
index 00000000..763cd87d
--- /dev/null
+++ b/modules/mga-treasurer/templates/vhost_mga-treasurer.conf
@@ -0,0 +1,3 @@
+<FilesMatch "\.json$">
+ Header set Access-Control-Allow-Origin "*"
+</FilesMatch>
diff --git a/modules/mga_common/lib/puppet/parser/functions/group_members.rb b/modules/mga_common/lib/puppet/parser/functions/group_members.rb
new file mode 100644
index 00000000..ea275be2
--- /dev/null
+++ b/modules/mga_common/lib/puppet/parser/functions/group_members.rb
@@ -0,0 +1,14 @@
+# group_members($group)
+# -> return an array with the logins of the group members
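+# e.g. group_members('wheel') might return ['alice', 'bob']
+#      (group and logins here are illustrative placeholders)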
+
+module Puppet::Parser::Functions
+ newfunction(:group_members, :type => :rvalue) do |args|
+ group = args[0]
+ `getent group`.each_line do |l|
+ if l =~ /^#{group}:/ then
+ return l.chomp.split(':')[3].split(',')
+ end
+ end
+ raise ArgumentError, "can't find group for #{group}"
+ end
+end
diff --git a/modules/mga_common/lib/puppet/parser/functions/hash_keys.rb b/modules/mga_common/lib/puppet/parser/functions/hash_keys.rb
new file mode 100644
index 00000000..3a926bee
--- /dev/null
+++ b/modules/mga_common/lib/puppet/parser/functions/hash_keys.rb
@@ -0,0 +1,10 @@
+module Puppet::Parser::Functions
+ newfunction(:hash_keys, :type => :rvalue) do |args|
+ unless args[0].is_a?(Hash)
+ Puppet.warning "hash_keys takes one argument, the input hash"
+ nil
+ else
+ args[0].keys
+ end
+ end
+end
diff --git a/modules/mga_common/lib/puppet/parser/functions/hash_merge.rb b/modules/mga_common/lib/puppet/parser/functions/hash_merge.rb
new file mode 100644
index 00000000..375bffa4
--- /dev/null
+++ b/modules/mga_common/lib/puppet/parser/functions/hash_merge.rb
@@ -0,0 +1,11 @@
+module Puppet::Parser::Functions
+ newfunction(:hash_merge, :type => :rvalue) do |args|
+ unless args[0].is_a?(Hash) and args[1].is_a?(Hash)
+ Puppet.warning "hash_merge takes two arguments"
+ nil
+ else
+ print "hash_merge\n"
+ args[0].merge(args[1])
+ end
+ end
+end
diff --git a/modules/mga_common/lib/puppet/parser/functions/str_join.rb b/modules/mga_common/lib/puppet/parser/functions/str_join.rb
new file mode 100644
index 00000000..c881c37d
--- /dev/null
+++ b/modules/mga_common/lib/puppet/parser/functions/str_join.rb
@@ -0,0 +1,11 @@
+# str_join($array, $sep)
+# -> return a string created by converting each element of the array to
+# a string, separated by $sep
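+# e.g. str_join(['a', 'b', 'c'], ',') returns "a,b,c"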
+
+module Puppet::Parser::Functions
+ newfunction(:str_join, :type => :rvalue) do |args|
+ array = args[0]
+ sep = args[1]
+ return array.join(sep)
+ end
+end
diff --git a/modules/mga_common/manifests/local_script.pp b/modules/mga_common/manifests/local_script.pp
new file mode 100644
index 00000000..3272786b
--- /dev/null
+++ b/modules/mga_common/manifests/local_script.pp
@@ -0,0 +1,22 @@
+define mga_common::local_script(
+ $content = undef,
+ $source = undef,
+ $owner = 'root',
+ $group = 'root',
+ $mode = '0755') {
+ $filename = "/usr/local/bin/${name}"
+ file { $filename:
+ owner => $owner,
+ group => $group,
+ mode => $mode,
+ }
+ if ($source == undef) {
+ File[$filename] {
+ content => $content,
+ }
+ } else {
+ File[$filename] {
+ source => $source,
+ }
+ }
+}
diff --git a/modules/mga_common/manifests/var/perl.pp b/modules/mga_common/manifests/var/perl.pp
new file mode 100644
index 00000000..47ff54be
--- /dev/null
+++ b/modules/mga_common/manifests/var/perl.pp
@@ -0,0 +1,3 @@
+class mga_common::var::perl(
+ $site_perl_dir = '/usr/lib/perl5/site_perl'
+) {}
diff --git a/modules/mgapeople/manifests/init.pp b/modules/mgapeople/manifests/init.pp
new file mode 100644
index 00000000..7c40ab9c
--- /dev/null
+++ b/modules/mgapeople/manifests/init.pp
@@ -0,0 +1,77 @@
+class mgapeople(
+ $site_name = "people.${::domain}",
+ $groupbase = 'ou=Group,dc=mageia,dc=org',
+ $maintdburl = undef,
+ $ldap_server,
+ $binddn,
+ $bindpw,
+ $vhost,
+ $vhostdir
+){
+ $mgapeople_login = 'mgapeople'
+ $bindpw_file = '/etc/mgapeople.ldapsecret'
+
+ group { $mgapeople_login:
+ ensure => present,
+ }
+
+ user { $mgapeople_login:
+ ensure => present,
+ comment => 'mgapeople user',
+ home => "/var/lib/${mgapeople_login}",
+ managehome => true,
+ gid => $mgapeople_login,
+ }
+
+ file { $bindpw_file:
+ ensure => present,
+ owner => $mgapeople_login,
+ group => $mgapeople_login,
+ mode => '0600',
+ content => $bindpw,
+ }
+
+ package { 'mgapeople':
+ ensure => installed,
+ }
+
+ file {'/etc/mgapeople.conf':
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ content => template('mgapeople/mgapeople.conf'),
+ require => Package['mgapeople'],
+ }
+
+ file { $vhostdir:
+ ensure => directory,
+ owner => $mgapeople_login,
+ group => $mgapeople_login,
+ mode => '0755',
+ }
+
+ $vhost_aliases = {
+ '/static' => '/usr/share/mgapeople/static',
+ }
+ apache::vhost::base { $vhost:
+ location => $vhostdir,
+ require => File[$vhostdir],
+ aliases => $vhost_aliases,
+ }
+ apache::vhost::base { "ssl_${vhost}":
+ vhost => $vhost,
+ use_ssl => true,
+ location => $vhostdir,
+ require => File[$vhostdir],
+ aliases => $vhost_aliases,
+ }
+
+ cron { '/usr/bin/mkpeople':
+ command => '/usr/bin/mkpeople',
+ user => $mgapeople_login,
+ hour => '*/2',
+ minute => '10',
+ }
+}
+# vim: sw=2
diff --git a/modules/mgapeople/templates/mgapeople.conf b/modules/mgapeople/templates/mgapeople.conf
new file mode 100644
index 00000000..5bc7b21b
--- /dev/null
+++ b/modules/mgapeople/templates/mgapeople.conf
@@ -0,0 +1,17 @@
+ldapserver: <%= @ldap_server %>
+binddn: <%= @binddn %>
+bindpwfile: <%= @bindpw_file %>
+groupbase: <%= @groupbase %>
+output_dir: <%= @vhostdir %>
+output_format:
+ - html
+ - txt
+tmpl_dir: /usr/share/mgapeople/tmpl
+<%- if @maintdburl -%>
+maintdburl: <%= @maintdburl %>
+<%- end -%>
+sitename: <%= @site_name %>
+staticdir_url: //people.mageia.org/static
+links_protocol: https://
+package_url: https://svnweb.mageia.org/packages/cauldron/
+package_url_suffix: /current/
diff --git a/modules/mgasoft/manifests/init.pp b/modules/mgasoft/manifests/init.pp
new file mode 100644
index 00000000..70431701
--- /dev/null
+++ b/modules/mgasoft/manifests/init.pp
@@ -0,0 +1,36 @@
+class mgasoft(
+ $anonsvn_soft = "svn://svn.${::domain}/svn/soft",
+ $pubinfodir = '/var/lib/mgasoft/infos',
+ $pubmirrordir = '/distrib/mirror/software',
+ $svn_soft_publish = 'file:///svn/soft_publish',
+ $mgasoft_login = 'mgasoft'
+) {
+ group { $mgasoft_login: }
+
+ user { $mgasoft_login:
+ managehome => true,
+ home => "/var/lib/${mgasoft_login}",
+ gid => $mgasoft_login,
+ require => Group[$mgasoft_login],
+ }
+
+ package { 'mgasoft-publish': }
+
+ file { '/etc/mgasoft.conf':
+ content => template('mgasoft/mgasoft.conf'),
+ }
+
+ subversion::snapshot { $pubinfodir:
+ source => $svn_soft_publish,
+ user => $mgasoft_login,
+ refresh => '0',
+ require => User[$mgasoft_login],
+ }
+
+ cron { "mgasoft-publish":
+ command => '/usr/bin/mgasoft-publish',
+ user => $mgasoft_login,
+ minute => '*/5',
+ require => User[$mgasoft_login],
+ }
+}
diff --git a/modules/mgasoft/templates/mgasoft.conf b/modules/mgasoft/templates/mgasoft.conf
new file mode 100644
index 00000000..81cce013
--- /dev/null
+++ b/modules/mgasoft/templates/mgasoft.conf
@@ -0,0 +1,5 @@
+svn_soft=svn+ssh://svn.mageia.org/svn/soft
+anonsvn_soft=<%= @anonsvn_soft %>
+svn_soft_publish=<%= @svn_soft_publish %>
+pubinfodir=<%= @pubinfodir %>
+pubmirrordir=<%= @pubmirrordir %>
diff --git a/modules/mirror/manifests/base.pp b/modules/mirror/manifests/base.pp
new file mode 100644
index 00000000..db48f808
--- /dev/null
+++ b/modules/mirror/manifests/base.pp
@@ -0,0 +1,15 @@
+class mirror::base {
+ $locksdir = '/home/mirror/locks'
+
+ file { $locksdir:
+ ensure => directory,
+ owner => 'mirror',
+ group => 'mirror',
+ }
+
+ group { 'mirror': }
+
+ user { 'mirror':
+ gid => 'mirror',
+ }
+}
diff --git a/modules/mirror/manifests/init.pp b/modules/mirror/manifests/init.pp
index 512b0463..bb89e1d0 100644
--- a/modules/mirror/manifests/init.pp
+++ b/modules/mirror/manifests/init.pp
@@ -1,40 +1 @@
-class mirror {
-
- file { "update_timestamp":
- path => "/home/mirror/bin/update_timestamp",
- ensure => present,
- owner => mirror,
- group => mirror,
- mode => 755,
- content => template("mirror/update_timestamp")
- }
-
- file { "/home/mirror/bin/":
- ensure => directory,
- owner => mirror,
- group => mirror,
- mode => 755
- }
-
- group {"mirror":
- ensure => present,
- }
-
- user {"mirror":
- ensure => present,
- comment => "System user use to run mirror scripts",
- managehome => true,
- gid => mirror,
- shell => "/bin/bash",
- }
-
-
- cron { mirror:
- user => mirror,
- hour => 10,
- minute => 14,
- command => "~mirror/bin/update_timestamp",
- require => File["update_timestamp"],
- }
-
-}
+class mirror { }
diff --git a/modules/mirror/manifests/mageia.pp b/modules/mirror/manifests/mageia.pp
new file mode 100644
index 00000000..c14a09bb
--- /dev/null
+++ b/modules/mirror/manifests/mageia.pp
@@ -0,0 +1,7 @@
+class mirror::mageia {
+ include mirror::base
+ mirrordir { 'mageia':
+ remoteurl => "rsync://rsync.${::domain}/mageia",
+ localdir => '/distrib/mageia',
+ }
+}
diff --git a/modules/mirror/manifests/main.pp b/modules/mirror/manifests/main.pp
new file mode 100644
index 00000000..f368038d
--- /dev/null
+++ b/modules/mirror/manifests/main.pp
@@ -0,0 +1,14 @@
+# For main Mageia mirror
+class mirror::main {
+ include mirror::base
+ mga_common::local_script { 'update_timestamp':
+ content => template('mirror/update_timestamp')
+ }
+
+ cron { 'mirror':
+ user => 'mirror',
+ minute => '*/10',
+ command => '/usr/local/bin/update_timestamp',
+ require => [Mga_common::Local_script['update_timestamp'], User['mirror']],
+ }
+}
diff --git a/modules/mirror/manifests/mdv2010spring.pp b/modules/mirror/manifests/mdv2010spring.pp
new file mode 100644
index 00000000..51a67284
--- /dev/null
+++ b/modules/mirror/manifests/mdv2010spring.pp
@@ -0,0 +1,7 @@
+class mirror::mdv2010spring {
+ include mirror::base
+ mirrordir { 'mdv2010.1':
+ remoteurl => 'rsync://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/2010.1',
+ localdir => '/distrib/mandriva/',
+ }
+}
diff --git a/modules/mirror/manifests/mirrordir.pp b/modules/mirror/manifests/mirrordir.pp
new file mode 100644
index 00000000..2100bc6c
--- /dev/null
+++ b/modules/mirror/manifests/mirrordir.pp
@@ -0,0 +1,23 @@
+define mirror::mirrordir ($remoteurl,
+ $localdir,
+ $rsync_options='-avH --delete') {
+ include mirror::base
+ $lockfile = "${mirror::base::locksdir}/${name}"
+
+ file { $localdir:
+ ensure => directory,
+ owner => 'mirror',
+ group => 'mirror',
+ }
+
+ mga_common::local_script { "mirror_${name}":
+ content => template('mirror/mirrordir'),
+ }
+
+ cron { "mirror_${name}":
+ user => mirror,
+ minute => '*/10',
+ command => "/usr/local/bin/mirror_${name}",
+ require => Mga_common::Local_script["mirror_${name}"],
+ }
+}
diff --git a/modules/mirror/templates/mirrordir b/modules/mirror/templates/mirrordir
new file mode 100644
index 00000000..9cf09650
--- /dev/null
+++ b/modules/mirror/templates/mirrordir
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+remoteurl="<%= @remoteurl%>"
+localdir="<%= @localdir %>"
+rsync_options="<%= @rsync_options %>"
+lockfile="<%= @lockfile %>"
+
+if [ -f "$lockfile" ]; then
+ # show error message when run from command line
+ [ -t 1 ] && cat $lockfile
+ exit
+fi
+echo "sync in progress since $(date)" > "$lockfile"
+/usr/bin/rsync $rsync_options "$remoteurl" "$localdir"
+rm -f "$lockfile"
diff --git a/modules/mirror/templates/update_timestamp b/modules/mirror/templates/update_timestamp
index a037d10d..1f7711c6 100644
--- a/modules/mirror/templates/update_timestamp
+++ b/modules/mirror/templates/update_timestamp
@@ -2,4 +2,4 @@
# $id$
-date +%s%n%c > /distrib/mirror/mageia_timestamp
+LC_ALL=C.UTF-8 date -u '+%s%n%c %Z' > /distrib/mirror/mageia_timestamp
diff --git a/modules/mirror_cleaner/files/orphans_cleaner.pl b/modules/mirror_cleaner/files/orphans_cleaner.pl
new file mode 100755
index 00000000..73e08912
--- /dev/null
+++ b/modules/mirror_cleaner/files/orphans_cleaner.pl
@@ -0,0 +1,76 @@
+#!/usr/bin/perl
+
+# this script looks at the list of RPMs and moves orphaned binary packages to a directory if they are too old
+# another script (or Puppet) should take care of cleaning that directory
+
+use strict;
+use RPM4;
+use File::stat;
+use File::Basename;
+use File::Copy;
+use File::Path qw(make_path);
+
+my @arches = ('i586','x86_64', 'aarch64');
+my @sections = ('core','nonfree','tainted');
+my @medias = ('backports', 'backports_testing', 'release', 'updates', 'updates_testing');
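+# only move an orphaned binary rpm once its ctime is older than this (14 days, in seconds)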
+my $move_delay = 60*60*24*14;
+
+my ($path, $dest_path) = @ARGV;
+
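+# query format: the binary rpm's file name plus the source rpm it was built from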
+my $qf = "%{NAME}-%{VERSION}-%{RELEASE}.%{ARCH}.rpm %{SOURCERPM}";
+
+my %hash ;
+my ($filename, $srpm, $dest_rpm);
+
+
+my ($source_hdlist, $binary_hdlist, $rpm_path, $srpm_path);
+
+foreach my $a ( @arches ) {
+ foreach my $s ( @sections ) {
+ foreach my $m ( @medias ) {
+
+ $rpm_path = "$path/$a/media/$s/$m";
+ $srpm_path = "$path/SRPMS/$s/$m";
+ $binary_hdlist = "$rpm_path/media_info/hdlist.cz";
+ $source_hdlist = "$srpm_path/media_info/hdlist.cz";
+
+ next if not -f $source_hdlist;
+ next if not -f $binary_hdlist;
+
+ next if stat($source_hdlist)->size() <= 64;
+ next if stat($binary_hdlist)->size() <= 64;
+
+ open(my $hdfh, "zcat '$binary_hdlist' 2>/dev/null |") or die "Can't open $binary_hdlist";
+ while (my $hdr = stream2header($hdfh)) {
+ ($filename, $srpm) = split(/ /,$hdr->queryformat($qf));
+ push(@{$hash{$srpm}}, $filename);
+ }
+ close($hdfh);
+
+
+ open($hdfh, "zcat '$source_hdlist' 2>/dev/null |") or die "Can't open $source_hdlist";
+ while (my $hdr = stream2header($hdfh)) {
+ $srpm = $hdr->queryformat("%{NAME}-%{VERSION}-%{RELEASE}.src.rpm");
+ delete $hash{$srpm};
+ }
+ close($hdfh);
+
+ foreach my $s ( keys %hash )
+ {
+ # Be safe, maybe hdlists were not in sync
+ next if -f "$srpm_path/$s";
+ foreach my $rpm ( @{$hash{$s}} ) {
+ $rpm = "$rpm_path/$rpm";
+ # sometimes, packages are removed without the hdlist being updated
+ next if not -f "$rpm";
+ if (time() > $move_delay + stat("$rpm")->ctime()) {
+ ( $dest_rpm = $rpm ) =~ s/$path/$dest_path/;
+ my $dir = dirname $dest_rpm;
+ make_path $dir if not -d $dir;
+ move($rpm, $dest_rpm)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/modules/mirror_cleaner/manifests/base.pp b/modules/mirror_cleaner/manifests/base.pp
new file mode 100644
index 00000000..8ef82856
--- /dev/null
+++ b/modules/mirror_cleaner/manifests/base.pp
@@ -0,0 +1,6 @@
+class mirror_cleaner::base {
+ file { '/usr/local/bin/orphans_cleaner.pl':
+ mode => '0755',
+ source => 'puppet:///modules/mirror_cleaner/orphans_cleaner.pl',
+ }
+}
diff --git a/modules/mirror_cleaner/manifests/init.pp b/modules/mirror_cleaner/manifests/init.pp
new file mode 100644
index 00000000..615b4ffe
--- /dev/null
+++ b/modules/mirror_cleaner/manifests/init.pp
@@ -0,0 +1 @@
+class mirror_cleaner { }
diff --git a/modules/mirror_cleaner/manifests/orphans.pp b/modules/mirror_cleaner/manifests/orphans.pp
new file mode 100644
index 00000000..90be9a8c
--- /dev/null
+++ b/modules/mirror_cleaner/manifests/orphans.pp
@@ -0,0 +1,27 @@
+define mirror_cleaner::orphans($base) {
+ include mirror_cleaner::base
+
+ $orphan_dir = '/distrib/archive/orphans'
+
+ file { $orphan_dir:
+ ensure => directory
+ }
+
+# Disable cleaning as the ruby version is smarter and this one tends to break things
+# It should probably be deleted
+#
+# cron { "clean orphans ${name}":
+# command => "/usr/local/bin/orphans_cleaner.pl ${base}/${name} ${orphan_dir}",
+# hour => 5,
+# minute => 30,
+# weekday => 1,
+# user => root,
+# }
+
+ tidy { $orphan_dir:
+ type => 'ctime',
+ age => '4w',
+ recurse => true,
+ matches => ['*.rpm'],
+ }
+}
diff --git a/modules/mirrorbrain/manifests/init.pp b/modules/mirrorbrain/manifests/init.pp
new file mode 100644
index 00000000..f7f74ead
--- /dev/null
+++ b/modules/mirrorbrain/manifests/init.pp
@@ -0,0 +1,154 @@
+class mirrorbrain {
+
+ $mb_user = 'mirrorbrain'
+ $mb_home = "/var/lib/${mb_user}"
+ $mb_repo = "${mb_home}/mirror"
+ $mb_vhost = "dl.${::domain}"
+
+ $mb_pgsql_pw = extlookup('mirrorbrain_pgsql','x')
+
+ group { $mb_user:
+ ensure => present
+ }
+
+ user { $mb_user:
+ ensure => present,
+ home => $mb_home
+ }
+
+ file { $mb_home:
+ ensure => directory,
+ owner => $mb_user,
+ group => $mb_user,
+ mode => '0751'
+ }
+
+ file { $mb_repo:
+ ensure => directory,
+ owner => $mb_user,
+ group => $mb_user,
+ mode => '0755'
+ }
+
+ package {['mirrorbrain',
+ 'mirrorbrain-scanner',
+ 'mirrorbrain-tools',
+ 'apache-mod_mirrorbrain',
+ 'apache-mod_dbd']: }
+
+
+ postgresql::remote_db_and_user { 'mirrorbrain':
+ description => 'Mirrorbrain database',
+ password => $mb_pgsql_pw,
+ }
+
+ file { '/etc/httpd/conf/geoip.conf':
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ content => template('mirrorbrain/geoip.conf')
+ }
+
+ file { '/etc/httpd/conf/modules.d/11-mirrorbrain.conf':
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ content => template('mirrorbrain/mod_mirrorbrain.conf')
+ }
+
+ file { '/etc/mirrorbrain.conf':
+ owner => 'root',
+ group => "$mb_user",
+ mode => '0640',
+ content => template('mirrorbrain/mirrorbrain.conf')
+ }
+
+ apache::vhost::base { "${mb_vhost}":
+ vhost => "${mb_vhost}",
+ location => "${mb_repo}"
+ }
+
+ apache::vhost::base { "ssl_${mb_vhost}":
+ vhost => "${mb_vhost}",
+ use_ssl => true,
+ location => "${mb_repo}"
+ }
+
+ apache::webapp_other { 'mirrorbrain':
+ webapp_file => 'mirrorbrain/webapp.conf',
+ }
+
+ # Update GeoIP db
+ cron { 'MirrorBrain: weekly GeoIP update':
+ command => 'sleep $(($RANDOM/1024)); /usr/bin/geoip-lite-update',
+ user => 'root',
+ minute => 30,
+ hour => 3,
+ weekday => 0
+ }
+
+ # distrib tree
+ # mga 1-4 are frozen, so only one manual run has been done
+ # distrib/5 still active
+ cron { 'MirrorBrain: Sync Mga 5 every 4 hours ':
+ command => "/usr/bin/null-rsync rsync.mageia.org::mageia/distrib/5 ${mb_repo}/distrib/",
+ user => "$mb_user",
+ minute => '15',
+ hour => '*/4',
+ }
+
+ # distrib/cauldron
+ cron { 'MirrorBrain: Sync Cauldron every 1 hours ':
+ command => "/usr/bin/null-rsync rsync.mageia.org::mageia/distrib/cauldron ${mb_repo}/distrib/",
+ user => "$mb_user",
+ minute => '0',
+ hour => '*/1',
+ }
+
+ # iso tree
+ cron { 'MirrorBrain: Sync iso tree every 1 day ':
+ command => "/usr/bin/null-rsync rsync.mageia.org::mageia/iso ${mb_repo}/",
+ user => "$mb_user",
+ hour => '2',
+ minute => '30',
+ }
+
+ # people tree
+ cron { 'MirrorBrain: Sync people tree every 1 day ':
+ command => "/usr/bin/null-rsync rsync.mageia.org::mageia/people ${mb_repo}/",
+ user => "$mb_user",
+ hour => '3',
+ minute => '45',
+ }
+
+ # software tree
+ cron { 'MirrorBrain: Sync software tree every 1 day ':
+ command => "/usr/bin/null-rsync rsync.mageia.org::mageia/software ${mb_repo}/",
+ user => "$mb_user",
+ hour => '4',
+ minute => '45',
+ }
+
+ # Mirror online check
+ cron { 'MirrorBrain: mirror online status check every 5 minute':
+ command => '/usr/bin/mirrorprobe',
+ user => "$mb_user",
+ minute => 5
+ }
+
+ # Mirror scanning
+ cron { 'MirrorBrain: mirror scanning every 30 minute':
+ command => '/usr/bin/mb scan --quiet --jobs 4 --all',
+ user => "$mb_user",
+ minute => 30
+ }
+
+ # Mirror database cleanup
+ cron { 'MirrorBrain: mirror database cleanup every 1 week':
+ command => '/usr/bin/mb db vacuum',
+ user => "$mb_user",
+ minute => 45,
+ hour => 5,
+ weekday => 1
+ }
+}
diff --git a/modules/mirrorbrain/templates/geoip.conf b/modules/mirrorbrain/templates/geoip.conf
new file mode 100644
index 00000000..1f71a67d
--- /dev/null
+++ b/modules/mirrorbrain/templates/geoip.conf
@@ -0,0 +1,5 @@
+<IfModule mod_geoip.c>
+ GeoIPEnable On
+ GeoIPDBFile /var/lib/GeoIP/GeoLiteCity.dat.updated
+ GeoIPOutput Env
+</IfModule>
diff --git a/modules/mirrorbrain/templates/mirrorbrain.conf b/modules/mirrorbrain/templates/mirrorbrain.conf
new file mode 100644
index 00000000..94bef340
--- /dev/null
+++ b/modules/mirrorbrain/templates/mirrorbrain.conf
@@ -0,0 +1,14 @@
+[general]
+instances = main
+
+[main]
+dbuser = mirrorbrain
+dbpass = <%= @mb_pgsql_pw %>
+dbdriver = postgresql
+dbhost = pgsql.<%= @domain %>
+# optional: dbport = ...
+dbname = mirrorbrain
+
+[mirrorprobe]
+# logfile = /var/log/mirrorbrain/mirrorprobe.log
+# loglevel = INFO
diff --git a/modules/mirrorbrain/templates/mod_mirrorbrain.conf b/modules/mirrorbrain/templates/mod_mirrorbrain.conf
new file mode 100644
index 00000000..9b67d7fe
--- /dev/null
+++ b/modules/mirrorbrain/templates/mod_mirrorbrain.conf
@@ -0,0 +1,3 @@
+LoadModule form_module modules/mod_form.so
+LoadModule mirrorbrain_module modules/mod_mirrorbrain.so
+
diff --git a/modules/mirrorbrain/templates/webapp.conf b/modules/mirrorbrain/templates/webapp.conf
new file mode 100644
index 00000000..9606be64
--- /dev/null
+++ b/modules/mirrorbrain/templates/webapp.conf
@@ -0,0 +1,16 @@
+<Directory /var/lib/mirrorbrain/mirror>
+ MirrorBrainEngine On
+ MirrorBrainDebug Off
+ FormGET On
+ MirrorBrainHandleHEADRequestLocally Off
+ MirrorBrainFallback na us https://mirrors.kernel.org/mageia/
+ MirrorBrainFallback eu fr http://ftp.free.fr/mirrors/mageia.org/
+ MirrorBrainFallback eu se https://ftp.acc.umu.se/mirror/mageia/
+ MirrorBrainMinSize 0
+ #MirrorBrainExcludeUserAgent rpm/4.4.2*
+ #MirrorBrainExcludeUserAgent *APT-HTTP*
+ #MirrorBrainExcludeMimeType application/pgp-keys
+ DirectoryIndex disabled
+ Options +FollowSymLinks +Indexes
+ Require all granted
+</Directory>
diff --git a/modules/mysql/manifests/init.pp b/modules/mysql/manifests/init.pp
new file mode 100644
index 00000000..1d180778
--- /dev/null
+++ b/modules/mysql/manifests/init.pp
@@ -0,0 +1,26 @@
+class mysql {
+ class server {
+ package {['mariadb',
+ 'mariadb-obsolete']: }
+
+ service { 'mysqld':
+ alias => mysql,
+ subscribe => Package['mariadb'],
+ }
+
+# file { "/etc/my.cnf":
+#
+# }
+ }
+
+ define database() {
+ exec { "mysqladmin create ${name}":
+ user => root,
+ # not sure if /dev/null is needed
+ unless => "mysqlshow ${name}"
+ }
+ }
+# define user($password) {
+#
+# }
+}
diff --git a/modules/ntp/manifests/init.pp b/modules/ntp/manifests/init.pp
index 3f9ecc14..f75310e7 100644
--- a/modules/ntp/manifests/init.pp
+++ b/modules/ntp/manifests/init.pp
@@ -1,22 +1,17 @@
class ntp {
+if versioncmp($::lsbdistrelease, '9') < 0 {
+ $ntppkg = 'ntp'
+} else {
+ $ntppkg = 'ntpsec'
+}
+ package { $ntppkg: }
- package { ntp:
- ensure => installed
+ service { 'ntpd':
+ subscribe => [Package[$ntppkg], File['/etc/ntp.conf']],
}
- service { ntpd:
- ensure => running,
- path => "/etc/init.d/ntpd",
- subscribe => [ Package["ntp"], File["ntp.conf"] ]
- }
-
- file { "ntp.conf":
- path => "/etc/ntp.conf",
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["ntp"],
- content => template("ntp/ntp.conf")
+ file { '/etc/ntp.conf':
+ require => Package[$ntppkg],
+ content => template('ntp/ntp.conf'),
}
}
diff --git a/modules/ntp/templates/ntp.conf b/modules/ntp/templates/ntp.conf
index 3f9582d7..72f233c0 100644
--- a/modules/ntp/templates/ntp.conf
+++ b/modules/ntp/templates/ntp.conf
@@ -25,6 +25,12 @@ driftfile /var/lib/ntp/drift
multicastclient # listen on default 224.0.1.1
broadcastdelay 0.008
+# https://www.kb.cert.org/vuls/id/348126
+restrict default nomodify notrap nopeer noquery
+restrict -6 default nomodify notrap nopeer noquery
+# https://isc.sans.edu/forums/diary/NTP+reflection+attack/17300
+disable monitor
+
#
# Keys file. If you want to diddle your server at run time, make a
# keys file (mode 600 for sure) and define the key number to be
diff --git a/modules/opendkim/Gemfile b/modules/opendkim/Gemfile
new file mode 100644
index 00000000..68ba397d
--- /dev/null
+++ b/modules/opendkim/Gemfile
@@ -0,0 +1,19 @@
+source 'https://rubygems.org'
+
+puppetversion = ENV.key?('PUPPET_VERSION') ? "= #{ENV['PUPPET_VERSION']}" : ['>= 3.3']
+gem 'puppet', puppetversion
+gem 'puppetlabs_spec_helper', '>= 0.1.0'
+gem 'facter', '>= 1.7.0'
+
+gem 'puppet-lint', '>= 0.3.2'
+gem 'rspec-puppet'
+gem "metadata-json-lint"
+gem 'beaker-rspec'
+gem "travis"
+gem "travis-lint"
+gem "puppet-blacksmith"
+gem "guard-rake"
+
+gem 'test-kitchen', '>= 1.4.0'
+gem 'kitchen-docker', '>= 2.1.0'
+gem 'kitchen-puppet', '>= 0.0.27'
diff --git a/modules/opendkim/LICENSE b/modules/opendkim/LICENSE
new file mode 100644
index 00000000..8f71f43f
--- /dev/null
+++ b/modules/opendkim/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/modules/opendkim/Modulefile b/modules/opendkim/Modulefile
new file mode 100644
index 00000000..7790c510
--- /dev/null
+++ b/modules/opendkim/Modulefile
@@ -0,0 +1,8 @@
+ name "bi4o4ek-opendkim"
+ version "0.0.7"
+ author "Vladimir Bykanov"
+ summary "Configures OpenDKIM"
+ license "Apache-2.0"
+ source "https://github.com/bi4o4ek/puppet-opendkim"
+ project_page "https://github.com/bi4o4ek/puppet-opendkim"
+
diff --git a/modules/opendkim/Puppetfile b/modules/opendkim/Puppetfile
new file mode 100644
index 00000000..177adf16
--- /dev/null
+++ b/modules/opendkim/Puppetfile
@@ -0,0 +1,7 @@
+#!/usr/bin/env ruby
+#^syntax detection
+
+forge "https://forgeapi.puppetlabs.com"
+
+# use dependencies defined in metadata.json
+metadata
diff --git a/modules/opendkim/README.md b/modules/opendkim/README.md
new file mode 100644
index 00000000..13c40bde
--- /dev/null
+++ b/modules/opendkim/README.md
@@ -0,0 +1,98 @@
+[![Build Status](https://travis-ci.org/bi4o4ek/puppet-opendkim.svg?branch=master)](https://travis-ci.org/bi4o4ek/puppet-opendkim)
+
+# opendkim
+
+#### Table of Contents
+
+1. [Overview](#overview)
+2. [Module Description](#module-description)
+3. [Setup - The basics of getting started with opendkim](#setup)
+ * [Beginning with opendkim](#beginning-with-opendkim)
+ * [Add domains for signing](#add-domains-for-signing)
+ * [Add allowed hosts](#add-allowed-hosts)
+4. [Usage - Configuration options and additional functionality](#usage)
+5. [Reference - An under-the-hood peek at what the module is doing and how](#reference)
+6. [Limitations - OS compatibility, etc.](#limitations)
+7. [Development - Guide for contributing to the module](#development)
+
+## Overview
+
+The opendkim module allows you to set up mail signing and manage DKIM services with minimal effort.
+
+## Module Description
+
+OpenDKIM is a widely-used DKIM service, and this module provides a simplified way of creating configurations to manage your infrastructure.
+This includes the ability to configure and manage a range of different domains, as well as a streamlined way to install and configure the OpenDKIM service.
+
+## Setup
+
+### What opendkim affects
+
+* configuration files and directories (created and written to)
+* package/service/configuration files for OpenDKIM
+* signing domains list
+* trusted hosts list
+
+### Beginning with opendkim
+
+To install OpenDKIM with the default parameters:
+
+ include opendkim
+
+### Add domains for signing
+
+ opendkim::domain{['example.com', 'example.org']:}
+
+
+### Add allowed hosts
+
+ opendkim::trusted{['10.0.0.0/8', '203.0.113.0/24']:}
+
+## Usage
+
+For example, suppose our mail relay running OpenDKIM has the internal IP 10.3.3.80 and the external IP 203.0.113.100,
+and that it signs all mail for the domains example.com and example.org.
+
+ # Postfix-relay
+ class{ 'postfix::server':
+ inet_interfaces => '10.3.3.80, localhost',
+ mynetworks => '10.0.0.0/8, 203.0.113.0/24',
+ smtpd_recipient_restrictions => 'permit_mynetworks, reject_unauth_destination',
+ smtpd_client_restrictions => 'permit_mynetworks, reject',
+ mydestination => '$myhostname',
+ myhostname => 'relay-site.example.com',
+ smtpd_banner => 'Hello',
+ extra_main_parameters => {
+ smtp_bind_address => '203.0.113.100',
+ smtpd_milters => 'inet:127.0.0.1:8891',
+ non_smtpd_milters => '$smtpd_milters',
+ milter_default_action => 'accept',
+ milter_protocol => '2',
+ },
+ }
+
+ # OpenDKIM
+ include opendkim
+ opendkim::domain{['example.com', 'example.org']:}
+ opendkim::trusted{['10.0.0.0/8', '203.0.113.0/24']:}
+
+After the Puppet run, copy the contents of /etc/opendkim/keys/example.com/relay-site.txt and paste them into the corresponding DNS zone as a TXT record (an illustrative record is shown below).
+Then repeat this step for example.org.
+
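+The generated .txt file already contains the record in BIND zone-file syntax; the published TXT record typically looks something like this (selector name and key are shortened and purely illustrative):
+
+    relay-site._domainkey.example.com. IN TXT ( "v=DKIM1; k=rsa; "
+        "p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC3x..." )
+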
+The Postfix Puppet module used in this example is [thias/postfix](https://forge.puppetlabs.com/thias/postfix) v0.3.3.
+
+## Reference
+
+Puppetlabs are working on automating this section.
+
+## Limitations
+
+This module is tested on:
+* CentOS 6
+* Ubuntu 12.04
+* Ubuntu 14.04
+
+## Development
+
+Fork the repository on GitHub and submit a pull request.
+
diff --git a/modules/opendkim/Rakefile b/modules/opendkim/Rakefile
new file mode 100644
index 00000000..312b2952
--- /dev/null
+++ b/modules/opendkim/Rakefile
@@ -0,0 +1,12 @@
+require 'rubygems'
+require 'puppetlabs_spec_helper/rake_tasks'
+require 'puppet-lint/tasks/puppet-lint'
+
+PuppetLint.configuration.fail_on_warnings = true
+PuppetLint.configuration.send('relative')
+PuppetLint.configuration.send('disable_80chars')
+PuppetLint.configuration.send('disable_class_inherits_from_params_class')
+PuppetLint.configuration.send('disable_documentation')
+PuppetLint.configuration.send('disable_single_quote_string_with_variables')
+PuppetLint.configuration.send('disable_only_variable_string')
+PuppetLint.configuration.ignore_paths = ["spec/**/*.pp", "pkg/**/*.pp"]
diff --git a/modules/opendkim/manifests/domain.pp b/modules/opendkim/manifests/domain.pp
new file mode 100644
index 00000000..c708ad08
--- /dev/null
+++ b/modules/opendkim/manifests/domain.pp
@@ -0,0 +1,46 @@
+define opendkim::domain (
+ $domain = $name,
+ $selector = $hostname,
+ $pathkeys = '/etc/opendkim/keys',
+ $keytable = 'KeyTable',
+ $signing_table = 'SigningTable',
+) {
+ # $opendkim::pathconf and $pathkeys must be given without a trailing '/',
+ # for example '/etc/opendkim/keys'
+
+ Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ] }
+
+ # Create directory for domain
+ file { "${pathkeys}/${domain}":
+ ensure => directory,
+ owner => $opendkim::owner,
+ group => $opendkim::group,
+ mode => '0755',
+ notify => Service[$opendkim::service_name],
+ require => Package[$opendkim::package_name],
+ }
+
+ # Generate dkim-keys
+ exec { "opendkim-genkey -D ${pathkeys}/${domain}/ -d ${domain} -s ${selector}":
+ unless => "/usr/bin/test -f ${pathkeys}/${domain}/${selector}.private && /usr/bin/test -f ${pathkeys}/${domain}/${selector}.txt",
+ user => $opendkim::owner,
+ notify => Service[$opendkim::service_name],
+ require => [ Package[$opendkim::package_name], File["${pathkeys}/${domain}"], ],
+ }
+
+ # Add line into KeyTable
+ file_line { "${opendkim::pathconf}/${keytable}_${domain}":
+ path => "${opendkim::pathconf}/${keytable}",
+ line => "${selector}._domainkey.${domain} ${domain}:${selector}:${pathkeys}/${domain}/${selector}.private",
+ notify => Service[$opendkim::service_name],
+ require => Package[$opendkim::package_name],
+ }
+
+ # Add line into SigningTable
+ file_line { "${opendkim::pathconf}/${signing_table}_${domain}":
+ path => "${opendkim::pathconf}/${signing_table}",
+ line => "*@${domain} ${selector}._domainkey.${domain}",
+ notify => Service[$opendkim::service_name],
+ require => Package[$opendkim::package_name],
+ }
+}
diff --git a/modules/opendkim/manifests/init.pp b/modules/opendkim/manifests/init.pp
new file mode 100644
index 00000000..6e45345a
--- /dev/null
+++ b/modules/opendkim/manifests/init.pp
@@ -0,0 +1,105 @@
+# == Class: opendkim
+#
+# === Examples
+#
+# class { 'opendkim':}
+#
+# === Authors
+#
+# Vladimir Bykanov <vladimir@bykanov.ru>
+#
+# === Copyright
+#
+# Copyright 2015 Vladimir Bykanov
+#
+class opendkim (
+ $autorestart = 'Yes',
+ $autorestart_rate = '10/1h',
+ $log_why = 'Yes',
+ $syslog = 'Yes',
+ $syslog_success = 'Yes',
+ $mode = 's',
+ $canonicalization = 'relaxed/simple',
+ $external_ignore_list = 'refile:/etc/opendkim/TrustedHosts',
+ $internal_hosts = 'refile:/etc/opendkim/TrustedHosts',
+ $keytable = 'refile:/etc/opendkim/KeyTable',
+ $signing_table = 'refile:/etc/opendkim/SigningTable',
+ $signature_algorithm = 'rsa-sha256',
+ $socket = 'inet:8891@localhost',
+ $pidfile = '/var/run/opendkim/opendkim.pid',
+ $umask = '022',
+ $userid = 'opendkim:opendkim',
+ $temporary_directory = '/var/tmp',
+ $package_name = 'opendkim',
+ $service_name = 'opendkim',
+ $pathconf = '/etc/opendkim',
+ $owner = 'opendkim',
+ $group = 'opendkim',
+) {
+
+ package { $package_name:
+ ensure => present,
+ }
+
+ case $::operatingsystem {
+ /^(Debian|Ubuntu)$/: {
+ package { 'opendkim-tools':
+ ensure => present,
+ }
+ # Debian/Ubuntu doesn't ship this directory in its package
+ file { $pathconf:
+ ensure => directory,
+ owner => 'root',
+ group => 'opendkim',
+ mode => '0755',
+ require => Package[$package_name],
+ }
+ file { "${pathconf}/keys":
+ ensure => directory,
+ owner => 'opendkim',
+ group => 'opendkim',
+ mode => '0750',
+ require => Package[$package_name],
+ }
+ file { "${pathconf}/KeyTable":
+ ensure => present,
+ owner => 'opendkim',
+ group => 'opendkim',
+ mode => '0640',
+ require => Package[$package_name],
+ }
+ file { "${pathconf}/SigningTable":
+ ensure => present,
+ owner => 'opendkim',
+ group => 'opendkim',
+ mode => '0640',
+ require => Package[$package_name],
+ }
+ file { "${pathconf}/TrustedHosts":
+ ensure => present,
+ owner => 'opendkim',
+ group => 'opendkim',
+ mode => '0644',
+ require => Package[$package_name],
+ }
+ }
+ default: {}
+ }
+
+ file {'/etc/opendkim.conf':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0644',
+ content => template('opendkim/opendkim.conf'),
+ notify => Service[$service_name],
+ require => Package[$package_name],
+ }
+
+ service { $service_name:
+ ensure => running,
+ enable => true,
+ require => Package[$package_name],
+ }
+}
+
diff --git a/modules/opendkim/manifests/trusted.pp b/modules/opendkim/manifests/trusted.pp
new file mode 100644
index 00000000..dcf0f8b8
--- /dev/null
+++ b/modules/opendkim/manifests/trusted.pp
@@ -0,0 +1,13 @@
+define opendkim::trusted (
+ $host = $name,
+ $trusted_hosts = 'TrustedHosts',
+
+) {
+ # Add line into TrustedHosts
+ file_line { "${opendkim::pathconf}/${trusted_hosts}_${host}":
+ path => "${opendkim::pathconf}/${trusted_hosts}",
+ line => $host,
+ notify => Service[$opendkim::service_name],
+ require => Package[$opendkim::package_name],
+ }
+}
diff --git a/modules/opendkim/metadata.json b/modules/opendkim/metadata.json
new file mode 100644
index 00000000..81b2f70d
--- /dev/null
+++ b/modules/opendkim/metadata.json
@@ -0,0 +1,60 @@
+{
+ "name": "bi4o4ek-opendkim",
+ "version": "0.0.7",
+ "author": "Vladimir Bykanov",
+ "summary": "Configures OpenDKIM",
+ "license": "Apache-2.0",
+ "source": "https://github.com/bi4o4ek/puppet-opendkim",
+ "project_page": "https://github.com/bi4o4ek/puppet-opendkim",
+ "issues_url": "https://github.com/bi4o4ek/puppet-opendkim/issues",
+ "operatingsystem_support": [
+ {
+ "operatingsystem": "RedHat",
+ "operatingsystemrelease": [
+ "5",
+ "6",
+ "7"
+ ]
+ },
+ {
+ "operatingsystem": "CentOS",
+ "operatingsystemrelease": [
+ "5",
+ "6",
+ "7"
+ ]
+ },
+ {
+ "operatingsystem": "Mageia",
+ "operatingsystemrelease": [
+ "7",
+ "8",
+ "9"
+ ]
+ }
+ ],
+ "dependencies": [
+ {
+ }
+ ],
+ "description": "UNKNOWN",
+ "types": [
+
+ ],
+ "checksums": {
+ "Gemfile": "19456e851851a3bd7aa6729108429dde",
+ "LICENSE": "fa818a259cbed7ce8bc2a22d35a464fc",
+ "Modulefile": "9a3b46c73c1ae7309fe2d35c5e6fa549",
+ "Puppetfile": "607001b25e4f9d020b2ce4444174a654",
+ "README.md": "0764cc9bb9de221c97bce2664ba99657",
+ "Rakefile": "a162d9397ed53fa8fa49c57609feedcb",
+ "manifests/domain.pp": "61f78cbd4376e58a7b26f1298f38804b",
+ "manifests/init.pp": "4987dcd9ebc88e7ea0de3b74c9af6d9c",
+ "manifests/trusted.pp": "bcc132622e2c2e39bcbc3116c7788c8b",
+ "spec/classes/init_spec.rb": "0451831b29191c21b2cdc045c94a2243",
+ "spec/classes/opendkim_spec.rb": "9f06a3f005344875a0fb5753ab43cb34",
+ "spec/spec_helper.rb": "0db89c9a486df193c0e40095422e19dc",
+ "templates/opendkim.conf": "047e76e4c2a0a15754101f2da32ab2fe",
+ "tests/init.pp": "8c9ab8c85cd89dae1ad97cbe949a7e6e"
+ }
+}
diff --git a/modules/opendkim/spec/classes/init_spec.rb b/modules/opendkim/spec/classes/init_spec.rb
new file mode 100644
index 00000000..5ce0a75d
--- /dev/null
+++ b/modules/opendkim/spec/classes/init_spec.rb
@@ -0,0 +1,7 @@
+require 'spec_helper'
+describe 'opendkim' do
+
+ context 'with defaults for all parameters' do
+ it { should contain_class('opendkim') }
+ end
+end
diff --git a/modules/opendkim/spec/classes/opendkim_spec.rb b/modules/opendkim/spec/classes/opendkim_spec.rb
new file mode 100644
index 00000000..1901c1c0
--- /dev/null
+++ b/modules/opendkim/spec/classes/opendkim_spec.rb
@@ -0,0 +1,13 @@
+require 'spec_helper'
+
+describe 'opendkim', :type => :class do
+
+ describe "Opendkim class with no parameters, basic test" do
+ let(:params) { { } }
+
+ it {
+ should contain_package('opendkim')
+ should contain_service('opendkim')
+ }
+ end
+end
diff --git a/modules/opendkim/spec/spec_helper.rb b/modules/opendkim/spec/spec_helper.rb
new file mode 100644
index 00000000..2c6f5664
--- /dev/null
+++ b/modules/opendkim/spec/spec_helper.rb
@@ -0,0 +1 @@
+require 'puppetlabs_spec_helper/module_spec_helper'
diff --git a/modules/opendkim/templates/opendkim.conf b/modules/opendkim/templates/opendkim.conf
new file mode 100644
index 00000000..5dc61aa6
--- /dev/null
+++ b/modules/opendkim/templates/opendkim.conf
@@ -0,0 +1,52 @@
+<%- if @autorestart -%>
+AutoRestart <%= @autorestart %>
+<%- end -%>
+<%- if @autorestart_rate -%>
+AutoRestartRate <%= @autorestart_rate %>
+<%- end -%>
+<%- if @log_why -%>
+LogWhy <%= @log_why %>
+<%- end -%>
+<%- if @syslog -%>
+Syslog <%= @syslog %>
+<%- end -%>
+<%- if @syslog_success -%>
+SyslogSuccess <%= @syslog_success %>
+<%- end -%>
+<%- if @mode -%>
+Mode <%= @mode %>
+<%- end -%>
+<%- if @canonicalization -%>
+Canonicalization <%= @canonicalization %>
+<%- end -%>
+<%- if @external_ignore_list -%>
+ExternalIgnoreList <%= @external_ignore_list %>
+<%- end -%>
+<%- if @internal_hosts -%>
+InternalHosts <%= @internal_hosts %>
+<%- end -%>
+<%- if @keytable -%>
+KeyTable <%= @keytable %>
+<%- end -%>
+<%- if @signing_table -%>
+SigningTable <%= @signing_table %>
+<%- end -%>
+<%- if @signature_algorithm -%>
+SignatureAlgorithm <%= @signature_algorithm %>
+<%- end -%>
+<%- if @socket -%>
+Socket <%= @socket %>
+<%- end -%>
+<%- if @pidfile -%>
+PidFile <%= @pidfile %>
+<%- end -%>
+<%- if @umask -%>
+UMask <%= @umask %>
+<%- end -%>
+<%- if @userid -%>
+UserID <%= @userid %>
+<%- end -%>
+<%- if @temporary_directory -%>
+TemporaryDirectory <%= @temporary_directory %>
+<%- end -%>
+
diff --git a/modules/opendkim/tests/init.pp b/modules/opendkim/tests/init.pp
new file mode 100644
index 00000000..ff3d3b06
--- /dev/null
+++ b/modules/opendkim/tests/init.pp
@@ -0,0 +1,15 @@
+# The baseline for module testing used by Puppet Labs is that each manifest
+# should have a corresponding test manifest that declares that class or defined
+# type.
+#
+# Tests are then run by using puppet apply --noop (to check for compilation
+# errors and view a log of events) or by fully applying the test in a virtual
+# environment (to compare the resulting system state to the desired state).
+#
+# Learn more about module testing here:
+# http://docs.puppetlabs.com/guides/tests_smoke.html
+#
+Class['epel'] -> Class['opendkim']
+
+include epel
+include opendkim
diff --git a/modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb b/modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb
new file mode 100644
index 00000000..0d620926
--- /dev/null
+++ b/modules/openldap/lib/puppet/parser/functions/get_ldap_servers.rb
@@ -0,0 +1,13 @@
+# return a list of all ldap servers declared
+module Puppet::Parser::Functions
+ newfunction(:get_ldap_servers, :type => :rvalue) do |args|
+ Puppet::Parser::Functions.autoloader.loadall
+ res = ["master"]
+
+ function_list_exported_ressources(['Openldap::Exported_slave']).each { |i|
+ res << "slave-#{i}"
+ }
+ res.map! { |x| "ldap-#{x}." + lookupvar("domain") }
+ return res
+ end
+end
diff --git a/modules/openldap/manifests/config.pp b/modules/openldap/manifests/config.pp
new file mode 100644
index 00000000..336f8a23
--- /dev/null
+++ b/modules/openldap/manifests/config.pp
@@ -0,0 +1,7 @@
+define openldap::config($content) {
+ file { $name:
+ require => Package['openldap-servers'],
+ content => $content,
+ notify => Exec["slaptest"],
+ }
+}
diff --git a/modules/openldap/manifests/exported_slave.pp b/modules/openldap/manifests/exported_slave.pp
new file mode 100644
index 00000000..5b9f6b87
--- /dev/null
+++ b/modules/openldap/manifests/exported_slave.pp
@@ -0,0 +1,3 @@
+# this define exists only to be exported by slaves
+# and later used by get_ldap_servers
+define openldap::exported_slave { }
diff --git a/modules/openldap/manifests/init.pp b/modules/openldap/manifests/init.pp
index 991aee40..34a214a2 100644
--- a/modules/openldap/manifests/init.pp
+++ b/modules/openldap/manifests/init.pp
@@ -1,71 +1,34 @@
class openldap {
- class base {
- package { 'openldap-servers':
- ensure => installed
- }
+ include openldap::var
- service { ldap:
- ensure => running,
- subscribe => [ Package['openldap-servers']],
- path => "/etc/init.d/ldap"
- }
+ package { 'openldap-servers': }
- file {"/etc/ssl/openldap/":
- ensure => directory,
- owner => root,
- group => root,
- mode => 755,
- }
-
- openssl::self_signed_cert{ 'ldap':
- directory => "/etc/ssl/openldap/"
- }
+ service { $openldap::var::service:
+ subscribe => Package['openldap-servers'],
+ require => Openssl::Self_signed_cert["ldap.${::domain}"],
}
- # /etc/
- # 11:57:48| blingme> misc: nothing special, just copy slapd.conf, mandriva-dit-access.conf across, slapcat one side, slapadd other side
-
- file { '/etc/openldap/slapd.conf':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["openldap-servers"],
- content => "",
- notify => [Service['ldap']]
+ exec { "slaptest":
+ refreshonly => true,
+ notify => Service[$openldap::var::service],
}
- file { '/etc/openldap/mandriva-dit-access.conf':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["openldap-servers"],
- content => "",
- notify => [Service['ldap']]
+ file { '/etc/ssl/openldap/':
+ ensure => directory,
}
- file { '/etc/sysconfig/ldap':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["openldap-servers"],
- content => "",
- notify => [Service['ldap']]
- }
-
- class master inherits base {
- file { '/etc/openldap/mandriva-dit-access.conf':
- content => template("openldap/mandriva-dit-access.conf"),
- }
-
- file { '/etc/openldap/slapd.conf':
- content => template("openldap/slapd.conf"),
- }
+ openssl::self_signed_cert{ "ldap.${::domain}":
+ directory => '/etc/ssl/openldap/',
+ }
- file { '/etc/sysconfig/ldap':
- content => template("openldap/ldap.sysconfig"),
- }
+ openldap::config {
+ '/etc/openldap/slapd.conf':
+ content => '';
+ '/etc/openldap/mandriva-dit-access.conf':
+ content => '';
+ '/etc/sysconfig/ldap':
+ content => '';
+ '/etc/sysconfig/slapd':
+ content => '';
}
}
diff --git a/modules/openldap/manifests/master.pp b/modules/openldap/manifests/master.pp
new file mode 100644
index 00000000..53122628
--- /dev/null
+++ b/modules/openldap/manifests/master.pp
@@ -0,0 +1,50 @@
+class openldap::master inherits openldap {
+ include openldap::var
+
+ Openldap::Config['/etc/openldap/mandriva-dit-access.conf'] {
+ content => template('openldap/mandriva-dit-access.conf'),
+ }
+
+ $ldap_test_password = extlookup('ldap_test_password','x')
+ $ldap_test_directory = '/var/lib/ldap/test'
+ file { $ldap_test_directory:
+ ensure => directory,
+ group => 'ldap',
+ owner => 'ldap',
+ require => Package['openldap-servers'],
+ before => Service[$openldap::var::service],
+ }
+
+ Openldap::Config['/etc/openldap/slapd.conf'] {
+ content => template('openldap/slapd.conf', 'openldap/slapd.test.conf'),
+ }
+
+ Openldap::Config['/etc/sysconfig/ldap'] {
+ content => template('openldap/ldap.sysconfig'),
+ }
+
+ Openldap::Config['/etc/sysconfig/slapd'] {
+ content => template('openldap/slapd.sysconfig'),
+ }
+
+ host { "ldap.${::domain}":
+ ip => '127.0.0.1',
+ }
+
+ if $::environment == 'test' {
+ # if we are in a test vm, we need to fill the directory
+ # with data
+ package { 'openldap-clients': }
+
+ mga_common::local_script { 'init_ldap.sh':
+ content => template('openldap/init_ldap.sh'),
+ require => Package['openldap-clients'],
+ }
+
+ exec { 'init_ldap.sh':
+ # chosen arbitrarily from among the possible files
+ creates => '/var/lib/ldap/objectClass.bdb',
+ require => Mga_common::Local_script['init_ldap.sh'],
+ }
+ }
+}
diff --git a/modules/openldap/manifests/slave.pp b/modules/openldap/manifests/slave.pp
new file mode 100644
index 00000000..ba0cfb9d
--- /dev/null
+++ b/modules/openldap/manifests/slave.pp
@@ -0,0 +1,23 @@
+class openldap::slave($rid) inherits openldap {
+
+ @@openldap::exported_slave { $rid: }
+
+ $sync_password = extlookup("ldap_syncuser-${::hostname}",'x')
+
+ # same access rights as master
+ Openldap::Config['/etc/openldap/mandriva-dit-access.conf'] {
+ content => template('openldap/mandriva-dit-access.conf'),
+ }
+
+ Openldap::Config['/etc/openldap/slapd.conf'] {
+ content => template('openldap/slapd.conf','openldap/slapd.syncrepl.conf'),
+ }
+
+ Openldap::Config['/etc/sysconfig/ldap'] {
+ content => template('openldap/ldap.sysconfig'),
+ }
+
+ Openldap::Config['/etc/sysconfig/slapd'] {
+ content => template('openldap/slapd-slave.sysconfig'),
+ }
+}
diff --git a/modules/openldap/manifests/slave_instance.pp b/modules/openldap/manifests/slave_instance.pp
new file mode 100644
index 00000000..fbf998c6
--- /dev/null
+++ b/modules/openldap/manifests/slave_instance.pp
@@ -0,0 +1,8 @@
+# TODO create the user for sync in ldap
+# this define is mainly syntactic sugar
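+# example usage (illustrative): openldap::slave_instance { 'backup-ldap': rid => '3' }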
+define openldap::slave_instance($rid) {
+ include openldap
+ class { 'openldap::slave':
+ rid => $rid,
+ }
+}
diff --git a/modules/openldap/manifests/var.pp b/modules/openldap/manifests/var.pp
new file mode 100644
index 00000000..d6947eb8
--- /dev/null
+++ b/modules/openldap/manifests/var.pp
@@ -0,0 +1,3 @@
+class openldap::var {
+ $service = 'slapd'
+}
diff --git a/modules/openldap/templates/init_ldap.sh b/modules/openldap/templates/init_ldap.sh
new file mode 100644
index 00000000..dfcaf236
--- /dev/null
+++ b/modules/openldap/templates/init_ldap.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+ldapadd -Y EXTERNAL -H ldapi:/// <<EOF
+dn: <%= dc_suffix %>
+dc: <%= dc_suffix.split(',')[0].split('=')[1] %>
+objectClass: domain
+objectClass: domainRelatedObject
+associatedDomain: <%= domain %>
+
+<% for g in ['People','Group','Hosts'] %>
+dn: ou=<%= g%>,<%= dc_suffix %>
+ou: <%= g %>
+objectClass: organizationalUnit
+<% end %>
+
+<%
+gid = 5000
+for g in ['packagers','web','sysadmin','packagers-committers','forum-developers'] %>
+dn: cn=mga-<%= g %>,ou=Group,<%= dc_suffix %>
+objectClass: groupOfNames
+objectClass: posixGroup
+cn: mga-<%= g %>
+gidNumber: <%= gid %>
+member: cn=manager,<%= dc_suffix %>
+<%-
+gid+=1
+end -%>
+
+
+<% # FIXME automatically get the list of servers
+for g in ['duvel','alamut'] %>
+dn: cn=<%= g%>.<%= domain %>,ou=Hosts,<%= dc_suffix %>
+objectClass: device
+objectClass: simpleSecurityObject
+cn: <%= g%>.<%= domain %>
+userPassword: x
+<% end %>
+
+
+EOF
diff --git a/modules/openldap/templates/mandriva-dit-access.conf b/modules/openldap/templates/mandriva-dit-access.conf
index a4d9661a..361d956b 100644
--- a/modules/openldap/templates/mandriva-dit-access.conf
+++ b/modules/openldap/templates/mandriva-dit-access.conf
@@ -1,184 +1,195 @@
# mandriva-dit-access.conf
-limits group="cn=LDAP Replicators,ou=System Groups,dc=mageia,dc=org"
+limits group="cn=LDAP Replicators,ou=System Groups,<%= dc_suffix %>"
limit size=unlimited
limit time=unlimited
-limits group="cn=LDAP Admins,ou=System Groups,dc=mageia,dc=org"
+limits group="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>"
limit size=unlimited
limit time=unlimited
-limits group="cn=Account Admins,ou=System Groups,dc=mageia,dc=org"
+limits group="cn=Account Admins,ou=System Groups,<%= dc_suffix %>"
limit size=unlimited
limit time=unlimited
# so we don't have to add these to every other acl down there
-access to dn.subtree="dc=mageia,dc=org"
- by group.exact="cn=LDAP Admins,ou=System Groups,dc=mageia,dc=org" write
- by group.exact="cn=LDAP Replicators,ou=System Groups,dc=mageia,dc=org" read
+access to dn.subtree="<%= dc_suffix %>"
+ by group.exact="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" write
+ by group.exact="cn=LDAP Replicators,ou=System Groups,<%= dc_suffix %>" read
by * break
# userPassword access
# Allow account registration to write userPassword of unprivileged users accounts
-access to dn.subtree="ou=People,dc=mageia,dc=org"
+access to dn.subtree="ou=People,<%= dc_suffix %>"
filter="(&(objectclass=inetOrgPerson)(!(objectclass=posixAccount)))"
- attrs=userPassword,pwdReset
- by group/groupOfNames/member.exact="cn=registrars,ou=system groups,dc=mageia,dc=org" +a
+ attrs=userPassword
+ by group/groupOfNames/member.exact="cn=registrars,ou=system groups,<%= dc_suffix %>" +w
by * +0 break
# shadowLastChange is here because it needs to be writable by the user because
# of pam_ldap, which will update this attr whenever the password is changed.
# And this is done with the user's credentials
-access to dn.subtree="dc=mageia,dc=org"
+access to dn.subtree="<%= dc_suffix %>"
attrs=shadowLastChange
by self write
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
-access to dn.subtree="dc=mageia,dc=org"
+access to dn.subtree="<%= dc_suffix %>"
attrs=userPassword
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by self write
by anonymous auth
by * none
# kerberos key access
# "by auth" just in case...
-access to dn.subtree="dc=mageia,dc=org"
+access to dn.subtree="<%= dc_suffix %>"
attrs=krb5Key
by self write
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by anonymous auth
by * none
# password policies
-access to dn.subtree="ou=Password Policies,dc=mageia,dc=org"
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+access to dn.subtree="ou=Password Policies,<%= dc_suffix %>"
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# samba password attributes
# by self not strictly necessary, because samba uses its own admin user to
# change the password on the user's behalf
# openldap also doesn't auth on these attributes, but maybe some day it will
-access to dn.subtree="dc=mageia,dc=org"
+access to dn.subtree="<%= dc_suffix %>"
attrs=sambaLMPassword,sambaNTPassword
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by anonymous auth
by self write
by * none
# password history attribute
-# pwdHistory is read-only, but ACL is simplier with it here
-access to dn.subtree="dc=mageia,dc=org"
+# pwdHistory is read-only, but ACL is simpler with it here
+access to dn.subtree="<%= dc_suffix %>"
attrs=sambaPasswordHistory,pwdHistory
by self read
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by * none
# pwdReset, so the admin can force an user to change a password
-access to dn.subtree="dc=mageia,dc=org"
+access to dn.subtree="<%= dc_suffix %>"
attrs=pwdReset,pwdAccountLockedTime
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by self read
# group owner can add/remove/edit members to groups
-access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),dc=mageia,dc=org$"
- attrs=member
+access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),<%= dc_suffix %>$"
+ attrs=member,owner
by dnattr=owner write
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
- by users +sx
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
+ by users +scrx
-access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),dc=mageia,dc=org$"
+access to dn.regex="^cn=[^,]+,ou=(System Groups|Group),<%= dc_suffix %>$"
attrs=cn,description,objectClass,gidNumber
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# registration - allow registrar group to create basic unprivileged accounts
-access to dn.subtree="ou=People,dc=mageia,dc=org"
+access to dn.subtree="ou=People,<%= dc_suffix %>"
attrs="objectClass"
val="inetOrgperson"
- by group/groupOfNames/member.exact="cn=registrars,ou=system groups,dc=mageia,dc=org" =asrx
+ by group/groupOfNames/member.exact="cn=registrars,ou=system groups,<%= dc_suffix %>" =asrx
by * +0 break
-access to dn.subtree="ou=People,dc=mageia,dc=org"
+access to dn.subtree="ou=People,<%= dc_suffix %>"
filter="(!(objectclass=posixAccount))"
attrs=cn,sn,gn,mail,entry,children,preferredLanguage
- by group/groupOfNames/member.exact="cn=registrars,ou=system groups,dc=mageia,dc=org" =asrx
+ by group/groupOfNames/member.exact="cn=registrars,ou=system groups,<%= dc_suffix %>" =asrx
+ by * +0 break
+
+# TODO maybe we should use a group instead of a user here
+access to dn.subtree="ou=People,<%= dc_suffix %>"
+ filter="(objectclass=posixAccount)"
+ attrs=homeDirectory,cn,uid,loginShell,gidNumber,uidNumber
+ by dn.one="ou=Hosts,<%= dc_suffix %>" read
by * +0 break
# let the user change some of his/her attributes
-access to dn.subtree="ou=People,dc=mageia,dc=org"
- attrs=carLicense,homePhone,homePostalAddress,mobile,pager,telephoneNumber,mail,preferredLanguage
+access to dn.subtree="ou=People,<%= dc_suffix %>"
+ attrs=cn,sn,givenName,carLicense,drink,homePhone,homePostalAddress,mobile,pager,telephoneNumber,mail,preferredLanguage,sshPublicKey
by self write
by users read
+access to dn.subtree="ou=People,<%= dc_suffix %>"
+ attrs=memberOf
+ by users read
+
+
# create new accounts
-access to dn.regex="^([^,]+,)?ou=(People|Group|Hosts),dc=mageia,dc=org$"
+access to dn.regex="^([^,]+,)?ou=(People|Group|Hosts),<%= dc_suffix %>$"
attrs=children,entry
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by * break
# access to existing entries
-access to dn.regex="^[^,]+,ou=(People|Hosts|Group),dc=mageia,dc=org$"
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+access to dn.regex="^[^,]+,ou=(People|Hosts|Group),<%= dc_suffix %>$"
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by * break
# sambaDomainName entry
-access to dn.regex="^(sambaDomainName=[^,]+,)?dc=mageia,dc=org$"
+access to dn.regex="^(sambaDomainName=[^,]+,)?<%= dc_suffix %>$"
attrs=children,entry,@sambaDomain,@sambaUnixIdPool
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# samba ID mapping
-access to dn.regex="^(sambaSID=[^,]+,)?ou=Idmap,dc=mageia,dc=org$"
+access to dn.regex="^(sambaSID=[^,]+,)?ou=Idmap,<%= dc_suffix %>$"
attrs=children,entry,@sambaIdmapEntry
- by group.exact="cn=Account Admins,ou=System Groups,dc=mageia,dc=org" write
- by group.exact="cn=IDMAP Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Account Admins,ou=System Groups,<%= dc_suffix %>" write
+ by group.exact="cn=IDMAP Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# global address book
# XXX - which class(es) to use?
-access to dn.regex="^(.*,)?ou=Address Book,dc=mageia,dc=org"
+access to dn.regex="^(.*,)?ou=Address Book,<%= dc_suffix %>"
attrs=children,entry,@inetOrgPerson,@evolutionPerson,@evolutionPersonList
- by group.exact="cn=Address Book Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Address Book Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# dhcp entries
# XXX - open up read access to anybody?
-access to dn.sub="ou=dhcp,dc=mageia,dc=org"
+access to dn.sub="ou=dhcp,<%= dc_suffix %>"
attrs=children,entry,@dhcpService,@dhcpServer,@dhcpSharedNetwork,@dhcpSubnet,@dhcpPool,@dhcpGroup,@dhcpHost,@dhcpClass,@dhcpSubClass,@dhcpOptions,@dhcpLeases,@dhcpLog
- by group.exact="cn=DHCP Admins,ou=System Groups,dc=mageia,dc=org" write
- by group.exact="cn=DHCP Readers,ou=System Groups,dc=mageia,dc=org" read
+ by group.exact="cn=DHCP Admins,ou=System Groups,<%= dc_suffix %>" write
+ by group.exact="cn=DHCP Readers,ou=System Groups,<%= dc_suffix %>" read
by * read
# sudoers
-access to dn.regex="^([^,]+,)?ou=sudoers,dc=mageia,dc=org$"
+access to dn.regex="^([^,]+,)?ou=sudoers,<%= dc_suffix %>$"
attrs=children,entry,@sudoRole
- by group.exact="cn=Sudo Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=Sudo Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# dns
-access to dn="ou=dns,dc=mageia,dc=org"
+access to dn="ou=dns,<%= dc_suffix %>"
attrs=entry,@extensibleObject
- by group.exact="cn=DNS Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=DNS Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
-access to dn.sub="ou=dns,dc=mageia,dc=org"
+access to dn.sub="ou=dns,<%= dc_suffix %>"
attrs=children,entry,@dNSZone
- by group.exact="cn=DNS Admins,ou=System Groups,dc=mageia,dc=org" write
- by group.exact="cn=DNS Readers,ou=System Groups,dc=mageia,dc=org" read
+ by group.exact="cn=DNS Admins,ou=System Groups,<%= dc_suffix %>" write
+ by group.exact="cn=DNS Readers,ou=System Groups,<%= dc_suffix %>" read
by * none
# MTA
# XXX - what else can we add here? Virtual Domains? With which schema?
-access to dn.one="ou=People,dc=mageia,dc=org"
+access to dn.one="ou=People,<%= dc_suffix %>"
attrs=@inetLocalMailRecipient,mail
- by group.exact="cn=MTA Admins,ou=System Groups,dc=mageia,dc=org" write
+ by group.exact="cn=MTA Admins,ou=System Groups,<%= dc_suffix %>" write
by users read
# KDE Configuration
-access to dn.sub="ou=KDEConfig,dc=mageia,dc=org"
- by group.exact="cn=KDEConfig Admins,ou=System Groups,dc=mageia,dc=org" write
+access to dn.sub="ou=KDEConfig,<%= dc_suffix %>"
+ by group.exact="cn=KDEConfig Admins,ou=System Groups,<%= dc_suffix %>" write
by * read
# last one
-access to dn.subtree="dc=mageia,dc=org" attrs=entry,uid,cn
+access to dn.subtree="<%= dc_suffix %>" attrs=entry,uid,cn
by users read
-
diff --git a/modules/openldap/templates/slapd-slave.sysconfig b/modules/openldap/templates/slapd-slave.sysconfig
new file mode 100644
index 00000000..9bff24ff
--- /dev/null
+++ b/modules/openldap/templates/slapd-slave.sysconfig
@@ -0,0 +1,38 @@
+# debug level for slapd
+SLAPDSYSLOGLEVEL="0"
+SLAPDSYSLOGLOCALUSER="local4"
+
+# SLAPD URL list
+SLAPDURLLIST="ldap:/// ldaps:/// ldapi:///"
+
+# Config file to use for slapd
+#SLAPDCONF=/etc/openldap/slapd.conf
+
+# Which user to run as
+#LDAPUSER=ldap
+#LDAPGROUP=ldap
+
+# Should file permissions on database files be fixed at startup. Default is yes
+# FIXPERMS=no
+
+# Whether database recovery should be run before starting slapd in start
+# (not strictly be necessary in 2.3). Default is no
+# AUTORECOVER=yes
+
+# At what intervals to run ldap-hot-db-backup from cron, which will
+# do hot database backups for all bdb/hdb databases, and archive
+# unnecessary transaction logs, one of hourly,daily,weekly,monthly,yearly
+# Default is daily
+# Slave does not need a backup
+RUN_DB_BACKUP=never
+
+# How many days to keep archived transaction logs for. This should be just
+# greater than the backup interval on these files. Default is 7
+# KEEP_ARCHIVES_DAYS=7
+
+# How many files slapd should be able to have open. By default, the process
+# will inherit the default per-process limit (usually 1024), which may
+# not be enough, so ulimit -n is run with the value in MAXFILES (which
+# defaults to 1024 as well). 4096 is the maximum OpenLDAP will use without
+# recompiling.
+# MAXFILES=4096
diff --git a/modules/openldap/templates/slapd.conf b/modules/openldap/templates/slapd.conf
index 7edab29b..d82fe088 100644
--- a/modules/openldap/templates/slapd.conf
+++ b/modules/openldap/templates/slapd.conf
@@ -11,7 +11,10 @@ include /usr/share/openldap/schema/rfc2307bis.schema
include /usr/share/openldap/schema/openldap.schema
#include /usr/share/openldap/schema/autofs.schema
include /usr/share/openldap/schema/samba.schema
-include /usr/share/openldap/schema/kolab.schema
+# removed as it causes an issue on 2010.0:
+# /usr/share/openldap/schema/kolab.schema:
+# line 175 objectclass: Duplicate objectClass: "1.3.6.1.4.1.5322.13.1.1"
+#include /usr/share/openldap/schema/kolab.schema
include /usr/share/openldap/schema/evolutionperson.schema
include /usr/share/openldap/schema/calendar.schema
include /usr/share/openldap/schema/sudo.schema
@@ -27,14 +30,23 @@ pidfile /var/run/ldap/slapd.pid
argsfile /var/run/ldap/slapd.args
modulepath <%= lib_dir %>/openldap
+<% if @hostname == 'duvel' then %>
+moduleload back_bdb.la
+<% else %>
+moduleload back_mdb.la
+<% end %>
moduleload back_monitor.la
moduleload syncprov.la
moduleload ppolicy.la
#moduleload refint.la
+moduleload memberof.la
+moduleload unique.la
+moduleload dynlist.la
+moduleload constraint.la
-TLSCertificateFile /etc/ssl/openldap/ldap.pem
-TLSCertificateKeyFile /etc/ssl/openldap/ldap.pem
-TLSCACertificateFile /etc/ssl/openldap/ldap.pem
+TLSCertificateFile /etc/ssl/openldap/ldap.<%= domain %>.pem
+TLSCertificateKeyFile /etc/ssl/openldap/ldap.<%= domain %>.pem
+TLSCACertificateFile /etc/ssl/openldap/ldap.<%= domain %>.pem
# Give ldapi connection some security
localSSF 56
@@ -46,20 +58,34 @@ security ssf=56
loglevel 256
+database monitor
+access to dn.subtree="cn=Monitor"
+ by group.exact="cn=LDAP Monitors,ou=System Groups,<%= dc_suffix %>" read
+ by group.exact="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" read
+ by * none
+
+<% if @hostname == 'duvel' then %>
database bdb
+<% else %>
+database mdb
+# mdb defaults to 10MB max DB, so we need to hardcode some better value :(
+maxsize 500000000
+<% end %>
suffix "<%= dc_suffix %>"
directory /var/lib/ldap
rootdn "cn=manager,<%= dc_suffix %>"
checkpoint 256 5
+<% if @hostname == 'duvel' then %>
# 32Mbytes, can hold about 10k posixAccount entries
dbconfig set_cachesize 0 33554432 1
dbconfig set_lg_bsize 2097152
cachesize 1000
idlcachesize 3000
+<% end %>
index objectClass eq
-index uidNumber,gidNumber,memberuid,member eq
+index uidNumber,gidNumber,memberuid,member,owner eq
index uid eq,subinitial
index cn,mail,surname,givenname eq,subinitial
index sambaSID eq,sub
@@ -72,6 +98,8 @@ index sudouser eq,sub
index entryCSN,entryUUID eq
index dhcpHWAddress,dhcpClassData eq
+overlay memberof
+
overlay syncprov
syncprov-checkpoint 100 10
syncprov-sessionlog 100
@@ -81,6 +109,15 @@ ppolicy_default "cn=default,ou=Password Policies,<%= dc_suffix %>"
ppolicy_hash_cleartext yes
ppolicy_use_lockout yes
+overlay unique
+unique_uri ldap:///?mail?sub?
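+# i.e. reject any add/modify that would duplicate an existing mail value
+# anywhere in the tree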
+
+overlay dynlist
+dynlist-attrset groupOfURLs memberURL member
+
+
+overlay constraint
+constraint_attribute sshPublicKey regex "^ssh-(rsa|dss|ed25519) [[:graph:]]+ [[:graph:]]+$"
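+# e.g. a value like "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5... user@example"
+# (type, base64 blob, comment) is accepted; a key missing the trailing
+# comment field, or using another type such as ecdsa, is rejected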
# uncomment if you want to automatically update group
# memberships when an user is removed from the tree
@@ -89,16 +126,13 @@ ppolicy_use_lockout yes
#refint_attributes member
#refint_nothing "uid=LDAP Admin,ou=System Accounts,dc=example,dc=com"
+<% if environment == "test" %>
authz-regexp "gidNumber=0\\\+uidNumber=0,cn=peercred,cn=external,cn=auth"
- "uid=Account Admin,ou=System Accounts,<%= dc_suffix %>"
+ "cn=manager,<%= dc_suffix %>"
authz-regexp ^uid=([^,]+),cn=[^,]+,cn=auth$ uid=$1,ou=People,<%= dc_suffix %>
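+# e.g. an authenticated SASL identity "uid=jdoe,cn=digest-md5,cn=auth" is
+# rewritten by the rule above to "uid=jdoe,ou=People,<%= dc_suffix %>"
+# (jdoe being a purely illustrative login)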
+<% end %>
include /etc/openldap/mandriva-dit-access.conf
-database monitor
-access to dn.subtree="cn=Monitor"
- by group.exact="cn=LDAP Monitors,ou=System Groups,<%= dc_suffix %>" read
- by group.exact="cn=LDAP Admins,ou=System Groups,<%= dc_suffix %>" read
- by * none
diff --git a/modules/openldap/templates/slapd.syncrepl.conf b/modules/openldap/templates/slapd.syncrepl.conf
new file mode 100644
index 00000000..2bfe7d50
--- /dev/null
+++ b/modules/openldap/templates/slapd.syncrepl.conf
@@ -0,0 +1,11 @@
+syncrepl rid=<%= rid %>
+ provider=ldaps://ldap-master.<%= domain %>:636
+ type=refreshAndPersist
+ searchbase="<%= dc_suffix %>"
+ schemachecking=off
+ bindmethod=simple
+ binddn="cn=syncuser-<%= hostname%>,ou=System Accounts,<%= dc_suffix %>"
+ credentials=<%= sync_password %>
+ tls_reqcert=never
+
+updateref ldaps://ldap-master.<%= domain %>:636
diff --git a/modules/openldap/templates/slapd.sysconfig b/modules/openldap/templates/slapd.sysconfig
new file mode 100644
index 00000000..e6ae2e05
--- /dev/null
+++ b/modules/openldap/templates/slapd.sysconfig
@@ -0,0 +1,37 @@
+# debug level for slapd
+SLAPDSYSLOGLEVEL="0"
+SLAPDSYSLOGLOCALUSER="local4"
+
+# SLAPD URL list
+SLAPDURLLIST="ldap:/// ldaps:/// ldapi:///"
+
+# Config file to use for slapd
+#SLAPDCONF=/etc/openldap/slapd.conf
+
+# Which user to run as
+#LDAPUSER=ldap
+#LDAPGROUP=ldap
+
+# Should file permissions on database files be fixed at startup. Default is yes
+# FIXPERMS=no
+
+# Whether database recovery should be run before starting slapd in start
+# (not strictly be necessary in 2.3). Default is no
+# AUTORECOVER=yes
+
+# At what intervals to run ldap-hot-db-backup from cron, which will
+# do hot database backups for all bdb/hdb databases, and archive
+# unnecessary transaction logs, one of hourly,daily,weekly,monthly,yearly
+# Default is daily
+# RUN_DB_BACKUP=daily
+
+# How many days to keep archived transaction logs for. This should be just
+# greater than the backup interval on these files. Default is 7
+# KEEP_ARCHIVES_DAYS=7
+
+# How many files slapd should be able to have open. By default, the process
+# will inherit the default per-process limit (usually 1024), which may
+# not be enough, so ulimit -n is run with the value in MAXFILES (which
+# defaults to 1024 as well). 4096 is the maximum OpenLDAP will use without
+# recompiling.
+# MAXFILES=4096
diff --git a/modules/openldap/templates/slapd.test.conf b/modules/openldap/templates/slapd.test.conf
new file mode 100644
index 00000000..8befa55a
--- /dev/null
+++ b/modules/openldap/templates/slapd.test.conf
@@ -0,0 +1,9 @@
+database bdb
+suffix "dc=test_ldap"
+directory /var/lib/ldap/test
+rootdn "cn=manager,dc=test_ldap"
+rootpw "<%= ldap_test_password %>"
+authz-regexp "gidNumber=0\\\+uidNumber=0,cn=peercred,cn=external,cn=auth"
+ "cn=manager,dc=test_ldap"
+# force ssl
+security ssf=56
diff --git a/modules/openssh/manifests/init.pp b/modules/openssh/manifests/init.pp
index e55660fd..bae0fa5c 100644
--- a/modules/openssh/manifests/init.pp
+++ b/modules/openssh/manifests/init.pp
@@ -1,25 +1 @@
-class openssh {
-
- # some trick to manage sftp server, who is arch dependent on mdv
- $path_to_sftp = "$lib_dir/ssh/"
-
- package { "openssh-server":
- ensure => installed
- }
-
- service { sshd:
- ensure => running,
- path => "/etc/init.d/sshd",
- subscribe => [ Package["openssh-server"], File["sshd_config"] ]
- }
-
- file { "sshd_config":
- path => "/etc/ssh/sshd_config",
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["openssh-server"],
- content => template("openssh/sshd_config")
- }
-}
+class openssh { }
diff --git a/modules/openssh/manifests/server.pp b/modules/openssh/manifests/server.pp
new file mode 100644
index 00000000..c45268d2
--- /dev/null
+++ b/modules/openssh/manifests/server.pp
@@ -0,0 +1,17 @@
+class openssh::server {
+ # some trick to manage the sftp server, which is arch-dependent on mdv
+ # TODO: the path changed on Mageia 6 to /usr/libexec/openssh/sftp-server
+ $path_to_sftp = "${::lib_dir}/ssh/"
+
+ package { 'openssh-server': }
+
+ service { 'sshd':
+ subscribe => Package['openssh-server'],
+ }
+
+ file { '/etc/ssh/sshd_config':
+ require => Package['openssh-server'],
+ content => template('openssh/sshd_config'),
+ notify => Service['sshd']
+ }
+}
diff --git a/modules/openssh/manifests/ssh_keys_from_ldap.pp b/modules/openssh/manifests/ssh_keys_from_ldap.pp
new file mode 100644
index 00000000..9ea6c139
--- /dev/null
+++ b/modules/openssh/manifests/ssh_keys_from_ldap.pp
@@ -0,0 +1,20 @@
+class openssh::ssh_keys_from_ldap inherits server {
+ package { 'python3-ldap': }
+
+ $ldap_pwfile = '/etc/ldap.secret'
+ $nslcd_conf_file = '/etc/nslcd.conf'
+ $ldap_servers = get_ldap_servers()
+ mga_common::local_script { 'ldap-sshkey2file.py':
+ content => template('openssh/ldap-sshkey2file.py'),
+ require => Package['python3-ldap']
+ }
+
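+ # every 10 minutes: refresh authorized_keys from LDAP and, when this host
+ # also runs gitolite (mgagit installed and /var/lib/git/.gitolite present),
+ # regenerate the gitolite keys too; the trailing "||:" keeps the command
+ # exiting 0, since the script returns non-zero when no key changed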
+ cron { 'sshkey2file':
+ command => '/bin/bash -c "/usr/local/bin/ldap-sshkey2file.py && ( [[ -f /usr/bin/mgagit && -d /var/lib/git/.gitolite ]] && /bin/su -c \'/usr/bin/mgagit glrun\' - git ) ||:"',
+ hour => '*',
+ minute => '*/10',
+ user => 'root',
+ environment => 'MAILTO=root',
+ require => Mga_common::Local_script['ldap-sshkey2file.py'],
+ }
+}
diff --git a/modules/openssh/templates/ldap-sshkey2file.py b/modules/openssh/templates/ldap-sshkey2file.py
new file mode 100755
index 00000000..934e2865
--- /dev/null
+++ b/modules/openssh/templates/ldap-sshkey2file.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python3
+
+import argparse
+import os
+import random
+import shutil
+import sys
+import tempfile
+import textwrap
+from typing import Iterable
+
+try:
+ import ldap
+except ImportError:
+ print("Please install python-ldap before running this program")
+ sys.exit(1)
+
+basedn = "<%= @dc_suffix %>"
+peopledn = f"ou=people,{basedn}"
+<%-
+ ldap_servers.map! { |l| "'ldaps://#{l}'" }
+-%>
+uris = [<%= ldap_servers.join(", ") %>]
+random.shuffle(uris)
+uri = " ".join(uris)
+timeout = 5
+binddn = f"cn=<%= @fqdn %>,ou=Hosts,{basedn}"
+ldap_secret_file = "<%= @ldap_pwfile %>"
+nslcd_conf_file = "<%= @nslcd_conf_file %>"
+# filter out disabled accounts also
+# too bad uidNumber doesn't support >= filters
+objfilter = "(&(objectClass=inetOrgPerson)(objectClass=ldapPublicKey)(objectClass=posixAccount)(sshPublicKey=*))"
+keypathprefix = "/home"
+
+parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=textwrap.dedent(f'''\
+ Will fetch all enabled user accounts under {peopledn}
+ with ssh keys in them and write each one to
+ {keypathprefix}/<login>/.ssh/authorized_keys
+
+ It will return failure when no keys are updated and success
+ when one or more keys have changed.
+
+ This script is intended to be run from cron as root.
+ '''))
+parser.add_argument('-n', '--dry-run', action='store_true')
+parser.add_argument('-v', '--verbose', action='store_true')
+args = parser.parse_args()
+
+
+def get_bindpw() -> str:
+ try:
+ return get_nslcd_bindpw(nslcd_conf_file)
+ except:
+ pass
+
+ try:
+ return get_ldap_secret(ldap_secret_file)
+ except:
+ pass
+
+ print("Error while reading password file, aborting")
+ sys.exit(1)
+
+
+def get_nslcd_bindpw(pwfile: str) -> str:
+ try:
+ with open(pwfile, 'r') as f:
+ pwfield = "bindpw"
+ for line in f:
+ ls = line.strip().split()
+ if len(ls) == 2 and ls[0] == pwfield:
+ return ls[1]
+ except IOError as e:
+ print("Error while reading nslcd file " + pwfile)
+ print(e)
+ raise
+
+ print("No " + pwfield + " field found in nslcd file " + pwfile)
+ raise Exception()
+
+
+def get_ldap_secret(pwfile: str) -> str:
+ try:
+ with open(pwfile, 'r') as f:
+ pw = f.readline().strip()
+ except IOError as e:
+ print("Error while reading password file " + pwfile)
+ print(e)
+ raise
+ return pw
+
+
+def write_keys(keys: Iterable[bytes], user: bytes, uid: int, gid: int) -> bool:
+ userdir = f"{keypathprefix}/{user.decode('utf-8')}"
+ keyfile = f"{userdir}/.ssh/authorized_keys"
+
+ fromldap = ""
+ for key in keys:
+ fromldap += key.decode("utf-8").strip() + "\n"
+
+ fromfile = ""
+ try:
+ with open(keyfile, 'r') as f:
+ fromfile = f.read()
+ except FileNotFoundError:
+ pass
+
+ if fromldap == fromfile:
+ return False
+
+ if args.dry_run:
+ print(f"Would write {keyfile}")
+ return True
+
+ if args.verbose:
+ print(f"Writing {keyfile}")
+
+ if not os.path.isdir(userdir):
+ shutil.copytree('/etc/skel', userdir)
+ os.chown(userdir, uid, gid)
+ for root, dirs, files in os.walk(userdir):
+ for d in dirs:
+ os.chown(os.path.join(root, d), uid, gid)
+ for f in files:
+ os.chown(os.path.join(root, f), uid, gid)
+
+ try:
+ os.makedirs(f"{userdir}/.ssh", 0o700)
+ except FileExistsError:
+ pass
+ os.chmod(f"{userdir}/.ssh", 0o700)
+ os.chown(f"{userdir}/.ssh", uid, gid)
+
+ with tempfile.NamedTemporaryFile(
+ prefix='ldap-sshkey2file-', mode='w', delete=False) as tmpfile:
+ tmpfile.write(fromldap)
+ os.chmod(tmpfile.name, 0o600)
+ os.chown(tmpfile.name, uid, gid)
+ shutil.move(tmpfile.name, keyfile)
+ # shutil.move does not preserve the owner/group, so reapply them here.
+ # Setting them on the temporary file beforehand is still done as good
+ # practice: it would make the replacement more atomic if it were preserved.
+ os.chown(keyfile, uid, gid)
+ os.chmod(keyfile, 0o600)
+ return True
+
+
+bindpw = get_bindpw()
+
+changed = False
+try:
+ ld = ldap.initialize(uri)
+ ld.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
+ if uri.startswith("ldap:/"):
+ ld.start_tls_s()
+ ld.bind_s(binddn, bindpw)
+ res = ld.search_s(peopledn, ldap.SCOPE_ONELEVEL, objfilter,
+ ['uid', 'sshPublicKey', 'uidNumber', 'gidNumber'])
+ try:
+ os.makedirs(keypathprefix, 0o701)
+ except FileExistsError:
+ pass
+
+ if args.verbose:
+ print("Found users:",
+ ", ".join(sorted([x[1]['uid'][0].decode('utf-8') for x in res])))
+
+ for result in res:
+ dn, entry = result
+ # skip possible system users
+ if 'uidNumber' not in entry or int(entry['uidNumber'][0]) < 500:
+ continue
+ if write_keys(entry['sshPublicKey'], entry['uid'][0],
+ int(entry['uidNumber'][0]), int(entry['gidNumber'][0])):
+ changed = True
+
+ ld.unbind_s()
+except Exception:
+ print("Error")
+ raise
+
+if changed:
+ if args.verbose:
+ print("SSH keys changed")
+ sys.exit(0)
+
+if args.verbose:
+ print("No changes in SSH keys")
+sys.exit(1)
+
+
+# vim:ts=4:sw=4:et:ai:si
diff --git a/modules/openssh/templates/sshd_config b/modules/openssh/templates/sshd_config
index cb40a961..56ddd725 100644
--- a/modules/openssh/templates/sshd_config
+++ b/modules/openssh/templates/sshd_config
@@ -18,11 +18,10 @@
# The default requires explicit activation of protocol 1
#Protocol 2
-# HostKey for protocol version 1
-HostKey /etc/ssh/ssh_host_key
# HostKeys for protocol version 2
HostKey /etc/ssh/ssh_host_rsa_key
-HostKey /etc/ssh/ssh_host_dsa_key
+HostKey /etc/ssh/ssh_host_ecdsa_key
+HostKey /etc/ssh/ssh_host_ed25519_key
# Lifetime and size of ephemeral version 1 server key
#KeyRegenerationInterval 1h
@@ -45,6 +44,7 @@ PermitRootLogin without-password
#PubkeyAuthentication yes
#AuthorizedKeysFile .ssh/authorized_keys
+
# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
#RhostsRSAAuthentication no
# similar for protocol version 2
@@ -56,11 +56,11 @@ PermitRootLogin without-password
#IgnoreRhosts yes
# To disable tunneled clear text passwords, change to no here!
-#PasswordAuthentication yes
+PasswordAuthentication no
#PermitEmptyPasswords no
# Change to no to disable s/key passwords
-#ChallengeResponseAuthentication yes
+ChallengeResponseAuthentication no
# Kerberos options
#KerberosAuthentication no
@@ -81,7 +81,7 @@ PermitRootLogin without-password
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
-#UsePAM no
+UsePAM no
# Accept locale-related environment variables
AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
@@ -89,7 +89,7 @@ AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
AcceptEnv LC_IDENTIFICATION LC_ALL
#AllowAgentForwarding yes
-#AllowTcpForwarding yes
+AllowTcpForwarding no
#GatewayPorts no
X11Forwarding yes
#X11DisplayOffset 10
@@ -98,7 +98,6 @@ X11Forwarding yes
#PrintLastLog yes
#TCPKeepAlive yes
#UseLogin no
-UsePrivilegeSeparation yes
#PermitUserEnvironment no
#Compression delayed
#ClientAliveInterval 0
@@ -113,10 +112,15 @@ UsePrivilegeSeparation yes
#Banner none
# override default of no subsystems
-Subsystem sftp <%= path_to_sftp %>/sftp-server
+Subsystem sftp /usr/libexec/openssh/sftp-server
# Example of overriding settings on a per-user basis
#Match User anoncvs
# X11Forwarding no
# AllowTcpForwarding no
# ForceCommand cvs server
+<% if @hostname == 'duvel' then %>
+# git command is already forced to "gitolite <username>" in /var/lib/git/.ssh/authorized_keys
+Match User *,!schedbot,!root,!git Group *,!mga-sysadmin,!mga-unrestricted_shell_access
+ ForceCommand /usr/local/bin/sv_membersh.pl -c "$SSH_ORIGINAL_COMMAND"
+<% end %>
diff --git a/modules/openssl/manifests/init.pp b/modules/openssl/manifests/init.pp
index fb1f9239..b8c4d91e 100644
--- a/modules/openssl/manifests/init.pp
+++ b/modules/openssl/manifests/init.pp
@@ -1,12 +1,40 @@
class openssl {
- define self_signed_cert($directory = '/etc/certs') {
- package { 'openssl':
- ensure => installed
+ class base {
+ package { 'openssl': }
+ }
+
+ define self_signed_cert($directory = '/etc/certs') {
+ include openssl::base
+
+ $pem_file = "${name}.pem"
+ exec { "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${pem_file} -out ${pem_file} -subj '/CN=${name}'":
+ cwd => $directory,
+ creates => "${directory}/${name}.pem",
+ require => Package['openssl']
}
- $pem_file = "$name.pem"
- exec { "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $pem_file -out $pem_file -subj '/CN=$name.$domain'":
- cwd => "$directory",
- creates => "$directory/$name.pem"
+ }
+
+ define self_signed_splitted_cert( $filename = '',
+ $directory = '/etc/certs',
+ $owner = 'root',
+ $group = 'root',
+ $mode = '0600') {
+ include openssl::base
+
+ $crt_file = "${filename}.crt"
+ $key_file = "${filename}.key"
+ exec { "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${key_file} -out ${crt_file} -subj '/CN=${name}'":
+ cwd => $directory,
+ creates => "${directory}/${key_file}",
+ require => Package['openssl'],
+ before => [File["${directory}/${key_file}"],
+ File["${directory}/${crt_file}"]]
}
- }
+
+ file { ["${directory}/${key_file}","${directory}/${crt_file}"]:
+ owner => $owner,
+ group => $group,
+ mode => $mode,
+ }
+ }
}
diff --git a/modules/pam/manifests/base.pp b/modules/pam/manifests/base.pp
new file mode 100644
index 00000000..e29c8555
--- /dev/null
+++ b/modules/pam/manifests/base.pp
@@ -0,0 +1,32 @@
+class pam::base {
+ include pam::multiple_ldap_access
+ package { ['nscd', 'nss-pam-ldapd']: }
+
+ # This needs configuration or it generates an error every hour.
+ # If it's ever enabled, make sure to restrict permissions on
+ # /var/db/passwd.db and /var/db/group.db at the same time.
+ package { 'nss_updatedb':
+ ensure => 'absent',
+ }
+
+ service { 'nscd':
+ require => Package['nscd'],
+ }
+
+ file {
+ '/etc/pam.d/system-auth':
+ content => template('pam/system-auth');
+ '/etc/nsswitch.conf':
+ content => template('pam/nsswitch.conf');
+ '/etc/ldap.conf':
+ content => template('pam/ldap.conf');
+ '/etc/openldap/ldap.conf':
+ content => template('pam/openldap.ldap.conf');
+ }
+
+ $ldap_password = extlookup("${::fqdn}_ldap_password",'x')
+ file { '/etc/ldap.secret':
+ mode => '0600',
+ content => $ldap_password
+ }
+}
diff --git a/modules/pam/manifests/init.pp b/modules/pam/manifests/init.pp
index 210526c9..180ad852 100644
--- a/modules/pam/manifests/init.pp
+++ b/modules/pam/manifests/init.pp
@@ -1,42 +1 @@
-class pam {
-
- class base {
- package { ["pam_ldap","nss_ldap"]:
- ensure => installed,
- }
-
- file { "system-auth":
- path => "/etc/pam.d/system-auth",
- owner => root,
- group => root,
- mode => 644,
- content => template("pam/system-auth")
- }
-
- file { "nsswitch.conf":
- path => "/etc/nsswitch.conf",
- owner => root,
- group => root,
- mode => 644,
- content => template("pam/nsswitch.conf")
- }
- file { "ldap.conf":
- path => "/etc/ldap.conf",
- owner => root,
- group => root,
- mode => 644,
- content => template("pam/ldap.conf")
- }
- }
-
- # for server where only admin can connect
- class admin_access inherits base {
- $access_class = "admin"
- # not sure if this line is needed anymore, wil check later
- }
-
- # for server where people can connect with ssh ( git, svn )
- class commiters_access inherits base {
- $access_class = "commiters"
- }
-}
+class pam { }
diff --git a/modules/pam/manifests/multiple_ldap_access.pp b/modules/pam/manifests/multiple_ldap_access.pp
new file mode 100644
index 00000000..1c5a391f
--- /dev/null
+++ b/modules/pam/manifests/multiple_ldap_access.pp
@@ -0,0 +1,15 @@
+class pam::multiple_ldap_access($access_classes, $restricted_shell = false) {
+ include stdlib
+
+ $default_access_classes = [ 'mga-sysadmin', 'mga-unrestricted_shell_access' ]
+ if empty($access_classes) {
+ $allowed_access_classes = $default_access_classes
+ } else {
+ $allowed_access_classes = concat($default_access_classes, $access_classes)
+ }
+
+ if $restricted_shell {
+ include restrictshell
+ }
+ include pam::base
+}
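+
+# a hypothetical declaration, e.g. for a host that should also let
+# packagers in with the restricted shell:
+#
+# class { 'pam::multiple_ldap_access':
+# access_classes => ['mga-packagers'],
+# restricted_shell => true,
+# }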
diff --git a/modules/pam/templates/ldap.conf b/modules/pam/templates/ldap.conf
index 0b3a19fc..235a6aac 100644
--- a/modules/pam/templates/ldap.conf
+++ b/modules/pam/templates/ldap.conf
@@ -1,7 +1,10 @@
+rootbinddn cn=<%= fqdn %>,ou=Hosts,<%= dc_suffix %>
-uri ldap://ldap.<%= domain %>
+uri ldaps://ldap.<%= domain %>
base <%= dc_suffix %>
-pam_lookup_policy no
+timelimit 4
+bind_timelimit 4
+pam_lookup_policy yes
pam_password exop
nss_base_passwd ou=People,<%= dc_suffix %>?one
nss_base_shadow ou=People,<%= dc_suffix %>?one
@@ -12,8 +15,10 @@ nss_map_attribute uniqueMember member
sudoers_base ou=sudoers,<%= dc_suffix %>
#sudoers_debug 2
-<% if access_class = 'commiters' %>
+<%-
+restricted_shell = scope.lookupvar('pam::multiple_ldap_access::restricted_shell')
+if restricted_shell
+-%>
# for restricted access
nss_override_attribute_value loginShell /usr/local/bin/sv_membersh.pl
<% end %>
-
diff --git a/modules/pam/templates/nsswitch.conf b/modules/pam/templates/nsswitch.conf
index f797885d..bfd042c1 100644
--- a/modules/pam/templates/nsswitch.conf
+++ b/modules/pam/templates/nsswitch.conf
@@ -1,7 +1,7 @@
passwd: files ldap [UNAVAIL=return]
shadow: files ldap [UNAVAIL=return]
group: files ldap [UNAVAIL=return]
-hosts: files mdns4_minimal [NOTFOUND=return] dns
+hosts: files dns
bootparams: files
ethers: files
netmasks: files
@@ -13,4 +13,3 @@ netgroup: files ldap
publickey: files
automount: files
aliases: files
-
diff --git a/modules/pam/templates/openldap.ldap.conf b/modules/pam/templates/openldap.ldap.conf
new file mode 100644
index 00000000..cd6ee640
--- /dev/null
+++ b/modules/pam/templates/openldap.ldap.conf
@@ -0,0 +1,25 @@
+#BASE dc=example, dc=com
+#HOST ldap.example.com ldap-master.example.com
+#URI ldap://ldap.example.com ldap://ldap-master.example.com:666
+
+#SIZELIMIT 12
+#TIMELIMIT 15
+#DEREF never
+
+# SSL/TLS configuration. With CA-signed certs, TLS_REQCERT should be
+# "demand", with the CA certificate accessible
+#TLS_REQCERT ([demand],never,allow,try)
+# We ship with allow by default as some LDAP clients (e.g. evolution) have
+# no interactive SSL configuration
+
+TLS_REQCERT allow
+
+# CA Certificate locations
+# Use the default self-signed cert generated by openldap-server postinstall
+# by default
+#TLS_CACERT /etc/pki/tls/certs/ldap.pem
+#TLS_CACERT /etc/ssl/openldap/ldap.<%= domain %>.pem
+
+# If you require support for certificates signed by all CAs (note the
+# risks this poses to pam_ldap when doing DNS-based suffix lookups, etc.)
+#TLS_CACERTDIR /etc/pki/tls/rootcerts
diff --git a/modules/pam/templates/system-auth b/modules/pam/templates/system-auth
index b02aec3a..37d1da7d 100644
--- a/modules/pam/templates/system-auth
+++ b/modules/pam/templates/system-auth
@@ -1,21 +1,22 @@
-auth required pam_env.so
+auth required pam_env.so
# this part is here if the module don't exist
# basically, the idea is to copy the exact detail of sufficient,
# and add abort=ignore
auth [abort=ignore success=done new_authtok_reqd=done default=ignore] pam_tcb.so shadow fork nullok prefix=$2a$ count=8
-auth sufficient pam_unix.so likeauth nullok
+auth sufficient pam_unix.so likeauth nullok try_first_pass
auth sufficient pam_ldap.so use_first_pass
-<% if access_class = 'admin' %>
-auth required pam_wheel.so group=mga-sysadmin
-<% end %>
-<% if access_class = 'commiters' %>
-auth required pam_wheel.so group=mga-commiters
-<% end %>
auth required pam_deny.so
account sufficient pam_localuser.so
-account sufficient pam_ldap.so
+# not sure if the following brings anything useful
+account required pam_ldap.so
+<%- allowed_access_classes = scope.lookupvar('pam::multiple_ldap_access::allowed_access_classes') -%>
+<%- if allowed_access_classes -%>
+<%- allowed_access_classes.each { |ldap_group| -%>
+account sufficient pam_succeed_if.so quiet user ingroup <%= ldap_group %>
+<%- } -%>
+<%- end -%>
account required pam_deny.so
@@ -32,4 +33,3 @@ session optional pam_mkhomedir.so
session required pam_limits.so
session required pam_unix.so
session optional pam_ldap.so
-
diff --git a/modules/phpbb/files/phpbb_apply_config.pl b/modules/phpbb/files/phpbb_apply_config.pl
new file mode 100644
index 00000000..a58df24e
--- /dev/null
+++ b/modules/phpbb/files/phpbb_apply_config.pl
@@ -0,0 +1,28 @@
+#!/usr/bin/perl
+use strict;
+use warnings;
+use Env qw(VALUE);
+use DBI;
+
+my $key = $ARGV[0];
+
+# DBI will use default value coming from env
+# see puppet manifests
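+# invoked from the phpbb::config define, roughly as:
+# VALUE='ldap' PGDATABASE=phpbb_en PGUSER=phpbb PGHOST=pgsql.example.org \
+# phpbb_apply_config.pl auth_method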
+my $dbh = DBI->connect("dbi:Pg:","","", {
+ AutoCommit => 0,
+ RaiseError => 1,
+});
+
+my $table = "phpbb_config";
+
+# FIXME add rollback if there is a problem
+# https://docstore.mik.ua/orelly/linux/dbi/ch06_03.htm
+my $update = $dbh->prepare("UPDATE $table SET config_value = ?, is_dynamic = ? WHERE config_name = ?");
+my $insert = $dbh->prepare("INSERT INTO $table ( config_value, is_dynamic, config_name ) VALUES ( ? , ? , ? )");
+
+my $res = $update->execute($VALUE, 1, $key) or die "cannot do update $?";
+if ($res == 0 ) {
+ $insert->execute($VALUE, 1, $key) or die "cannot do insert $?";
+}
+$dbh->commit();
+$dbh->disconnect();
diff --git a/modules/phpbb/files/robots.txt b/modules/phpbb/files/robots.txt
new file mode 100644
index 00000000..1c335a73
--- /dev/null
+++ b/modules/phpbb/files/robots.txt
@@ -0,0 +1,7 @@
+User-agent: *
+Disallow: /*/faq.php?
+Disallow: /*/memberlist.php?
+Disallow: /*/posting.php?
+Disallow: /*/search.php?
+Disallow: /*/ucp.php?
+Crawl-delay: 30
diff --git a/modules/phpbb/manifests/base.pp b/modules/phpbb/manifests/base.pp
new file mode 100644
index 00000000..9f676cb4
--- /dev/null
+++ b/modules/phpbb/manifests/base.pp
@@ -0,0 +1,57 @@
+class phpbb::base {
+ $db = 'phpbb'
+ $user = 'phpbb'
+ $forums_dir = '/var/www/forums/'
+
+ include apache::mod::php
+
+ package {['php-gd',
+ 'php-xml',
+ 'php-zlib',
+ 'php-ftp',
+ 'php-magickwand',
+ 'php-pgsql',
+ 'php-ldap']: }
+
+ package { 'perl-DBD-Pg': }
+
+ file { '/usr/local/bin/phpbb_apply_config.pl':
+ mode => '0755',
+ source => 'puppet:///modules/phpbb/phpbb_apply_config.pl',
+ }
+
+ $pgsql_password = extlookup('phpbb_pgsql','x')
+ postgresql::remote_user { $user:
+ password => $pgsql_password,
+ }
+
+ file { $forums_dir:
+ ensure => directory,
+ }
+
+ $robotsfile = "$forums_dir/robots.txt"
+ file { $robotsfile:
+ ensure => present,
+ mode => '0644',
+ owner => root,
+ group => root,
+ source => 'puppet:///modules/phpbb/robots.txt',
+ }
+
+ # TODO check that everything is locked down
+ apache::vhost::base { "forums.${::domain}":
+ content => template('phpbb/forums_vhost.conf'),
+ }
+
+ apache::vhost::base { "ssl_forums.${::domain}":
+ use_ssl => true,
+ vhost => "forums.${::domain}",
+ content => template('phpbb/forums_vhost.conf'),
+ }
+
+ file { '/etc/httpd/conf/vhosts.d/forums.d/':
+ ensure => directory,
+ }
+}
+
+
diff --git a/modules/phpbb/manifests/config.pp b/modules/phpbb/manifests/config.pp
new file mode 100644
index 00000000..553b0f74
--- /dev/null
+++ b/modules/phpbb/manifests/config.pp
@@ -0,0 +1,12 @@
+define phpbb::config($key, $value, $database) {
+ exec { "phpbb_apply ${name}":
+ command => "/usr/local/bin/phpbb_apply_config.pl ${key}",
+ user => 'root',
+ environment => ["PGDATABASE=${database}",
+ "PGUSER=${phpbb::base::user}",
+ "PGPASSWORD=${phpbb::base::pgsql_password}",
+ "PGHOST=pgsql.${::domain}",
+ "VALUE=${value}"],
+ require => File['/usr/local/bin/phpbb_apply_config.pl'],
+ }
+}
diff --git a/modules/phpbb/manifests/databases.pp b/modules/phpbb/manifests/databases.pp
new file mode 100644
index 00000000..dc255f75
--- /dev/null
+++ b/modules/phpbb/manifests/databases.pp
@@ -0,0 +1,3 @@
+define phpbb::databases() {
+ Phpbb::Locale_db <<| |>>
+}
diff --git a/modules/phpbb/manifests/init.pp b/modules/phpbb/manifests/init.pp
new file mode 100644
index 00000000..ccfa0ca2
--- /dev/null
+++ b/modules/phpbb/manifests/init.pp
@@ -0,0 +1 @@
+class phpbb { }
diff --git a/modules/phpbb/manifests/instance.pp b/modules/phpbb/manifests/instance.pp
new file mode 100644
index 00000000..e300d9e0
--- /dev/null
+++ b/modules/phpbb/manifests/instance.pp
@@ -0,0 +1,80 @@
+define phpbb::instance() {
+ include phpbb::base
+
+ $lang = $name
+ $database = "${phpbb::base::db}_${lang}"
+
+ $user = $phpbb::base::user
+ $pgsql_password = $phpbb::base::pgsql_password
+ $forums_dir = $phpbb::base::forums_dir
+
+ include git::client
+ exec { "git_clone ${lang}":
+ command =>"git clone git://git.${::domain}/web/forums/ ${lang}",
+ cwd => $forums_dir,
+ creates => "${forums_dir}/${lang}",
+ require => File[$forums_dir],
+ notify => Exec["rm_install ${lang}"],
+ }
+
+ # remove this or the forum will not work ('board disabled').
+ # It might be better to move this elsewhere; in any case, the
+ # removed directory is still available in git.
+ exec { "rm_install ${lang}":
+ command => "rm -Rf ${forums_dir}/${lang}/phpBB/install",
+ onlyif => "test -d ${forums_dir}/${lang}/phpBB/install",
+ }
+
+ # list found by reading ./install/install_install.php
+ # end of check_server_requirements ( 2 loops )
+
+ $writable_dirs = ['cache',
+ 'images/avatars/upload',
+ 'files',
+ 'store' ]
+
+ $dir_names = regsubst($writable_dirs,'^',"${forums_dir}/${lang}/phpBB/")
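+ # regsubst prepends the prefix to every element, turning e.g. 'cache'
+ # into "${forums_dir}/${lang}/phpBB/cache"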
+
+ file { $dir_names:
+ ensure => directory,
+ owner => 'apache',
+ require => Exec["git_clone ${lang}"],
+ }
+
+ file { "${forums_dir}/${lang}/phpBB/config.php":
+ content => template('phpbb/config.php'),
+ }
+
+ @@phpbb::locale_db { $database:
+ user => $user,
+ }
+
+ Phpbb::Config {
+ database => $database,
+ }
+
+ $ldap_password = extlookup( 'phpbb_ldap','x')
+
+ phpbb::config {
+ "ldap_user/${lang}":
+ key => 'ldap_user', value => "cn=phpbb-${::hostname},ou=System Accounts,${::dc_suffix}";
+ "ldap_server/${lang}":
+ key => 'ldap_server', value => "ldaps://ldap.${::domain} ldaps://ldap-slave-1.${::domain}";
+ "ldap_password/${lang}":
+ key => 'ldap_password', value => $ldap_password;
+ "ldap_base_dn/${lang}":
+ key => 'ldap_base_dn', value => "ou=People,${::dc_suffix}";
+ "auth_method/${lang}":
+ key => 'auth_method', value => 'ldap';
+ "ldap_mail/${lang}":
+ key => 'ldap_mail', value => 'mail';
+ "ldap_uid/${lang}":
+ key => 'ldap_uid', value => 'uid';
+ "cookie_domain/${lang}":
+ key => 'cookie_domain', value => "forums.${::domain}";
+ "server_name/${lang}":
+ key => 'server_name', value => "forums.${::domain}";
+ "default_lang/${lang}":
+ key => 'default_lang', value => $lang;
+ }
+}
diff --git a/modules/phpbb/manifests/locale_db.pp b/modules/phpbb/manifests/locale_db.pp
new file mode 100644
index 00000000..70116962
--- /dev/null
+++ b/modules/phpbb/manifests/locale_db.pp
@@ -0,0 +1,12 @@
+# FIXME: in Puppet > 3.0 the word 'tag' is reserved, so it has to be renamed
+define phpbb::locale_db($tag = 'default',
+ $user = $phpbb::base::user) {
+ postgresql::database { $name:
+ description => "${name} db for phpbb forum",
+ user => $user,
+ tag => $tag,
+# this breaks due to the way it is remotely declared;
+# it should only be an issue when bootstrapping again
+# require => Postgresql::User[$user]
+ }
+}
diff --git a/modules/phpbb/manifests/redirection_instance.pp b/modules/phpbb/manifests/redirection_instance.pp
new file mode 100644
index 00000000..332eac53
--- /dev/null
+++ b/modules/phpbb/manifests/redirection_instance.pp
@@ -0,0 +1,7 @@
+define phpbb::redirection_instance($url) {
+ $lang = $name
+ file { "/etc/httpd/conf/vhosts.d/forums.d/redirect_${name}.conf":
+ content => template('phpbb/forums_redirect.conf'),
+ notify => Exec['apachectl configtest'],
+ }
+}
diff --git a/modules/phpbb/templates/config.php b/modules/phpbb/templates/config.php
new file mode 100644
index 00000000..5d878235
--- /dev/null
+++ b/modules/phpbb/templates/config.php
@@ -0,0 +1,17 @@
+<?php
+// phpBB 3.0.x auto-generated configuration file
+// // Do not change anything in this file!
+$dbms = 'postgres';
+$dbhost = 'pg.<%= domain %>';
+$dbport = '';
+$dbname = '<%= database %>';
+$dbuser = '<%= user %>';
+$dbpasswd = '<%= pgsql_password %>';
+$table_prefix = 'phpbb_';
+$acm_type = 'apc';
+$load_extensions = '';
+
+@define('PHPBB_INSTALLED', true);
+// @define('DEBUG', true);
+// @define('DEBUG_EXTRA', true);
+?>
diff --git a/modules/phpbb/templates/forums_redirect.conf b/modules/phpbb/templates/forums_redirect.conf
new file mode 100644
index 00000000..24747b4c
--- /dev/null
+++ b/modules/phpbb/templates/forums_redirect.conf
@@ -0,0 +1,2 @@
+Redirect /<%= lang %> <%= url %>
+Redirect /<%= lang %>/ <%= url %>
diff --git a/modules/phpbb/templates/forums_vhost.conf b/modules/phpbb/templates/forums_vhost.conf
new file mode 100644
index 00000000..440dad1f
--- /dev/null
+++ b/modules/phpbb/templates/forums_vhost.conf
@@ -0,0 +1,62 @@
+ # TODO redirect based on language settings
+ # and the presence of the forum
+
+ # for locale redirection
+ Include conf/vhosts.d/forums.d/*.conf
+
+ # Prevent the forum site from being embedded in an iframe
+ Header set X-Frame-Options DENY
+
+
+ # using Redirect creates a loop, so we use mod_rewrite here
+ RewriteEngine On
+ RewriteRule ^/$ /en/ [R]
+ RewriteRule ^/(..)$ /$1/ [R]
+
+ Alias /robots.txt <%= forums_dir %>/robots.txt
+
+ AliasMatch ^/(..)/(.*) <%= forums_dir %>/$1/phpBB/$2
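+ # e.g. /en/index.php is served from <%= forums_dir %>/en/phpBB/index.php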
+
+ <Directory ~ "<%= forums_dir %>/.*/phpBB/">
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order Allow,Deny
+ Allow from all
+ </IfModule>
+ </Directory>
+
+<%-
+forbidden = ['install',
+ 'cache',
+ 'includes',
+ 'phpbb_seo/includes',
+ 'store',
+ 'images/avatars/upload',
+ 'files',
+ 'umil/error_files',
+ 'gym_sitemaps/acp',
+ 'gym_sitemaps/sources',
+ 'gym_sitemaps/cache',
+ 'gym_sitemaps/includes',
+ 'gym_sitemaps/display',
+ 'gym_sitemaps/modules',
+]
+for f in forbidden
+-%>
+ <Directory ~ "<%= forums_dir %>/.*/phpBB/<%= f %>/">
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all denied
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order Deny,Allow
+ Deny from all
+ </IfModule>
+ </Directory>
+
+<%- end -%>
diff --git a/modules/planet/manifests/init.pp b/modules/planet/manifests/init.pp
new file mode 100644
index 00000000..8aacd5cc
--- /dev/null
+++ b/modules/planet/manifests/init.pp
@@ -0,0 +1,57 @@
+class planet {
+
+ user { 'planet':
+ groups => 'apache',
+ comment => 'Planet Mageia',
+ home => '/var/lib/planet',
+ }
+
+ $vhost = "planet.${::domain}"
+ $location = "/var/www/vhosts/${vhost}"
+
+ include apache::mod::php
+
+ apache::vhost::base { $vhost:
+ location => $location,
+ content => template('planet/planet_vhosts.conf')
+ }
+
+ apache::vhost::base { "ssl_${vhost}":
+ use_ssl => true,
+ vhost => $vhost,
+ location => $location,
+ content => template('planet/planet_vhosts.conf')
+ }
+
+ mga_common::local_script { 'deploy_new-planet.sh':
+ content => template('planet/deploy_new-planet.sh')
+ }
+
+ file { $location:
+ ensure => directory,
+ }
+
+ file { "${location}/index.php":
+ content => template('planet/index.php')
+ }
+
+ package { ['php-iconv']: }
+
+ class files_backup inherits base {
+ file { '/var/lib/planet/backup':
+ ensure => directory,
+ }
+
+ mga_common::local_script { 'backup_planet-files.sh':
+ content => template('planet/backup_planet-files.sh')
+ }
+
+ cron { 'Backup files (planet)':
+ user => root,
+ hour => '23',
+ minute => '42',
+ command => '/usr/local/bin/backup_planet-files.sh',
+ require => Mga_common::Local_script['backup_planet-files.sh'],
+ }
+ }
+}
diff --git a/modules/planet/templates/backup_planet-files.sh b/modules/planet/templates/backup_planet-files.sh
new file mode 100755
index 00000000..8cab8d1e
--- /dev/null
+++ b/modules/planet/templates/backup_planet-files.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Initialization
+PATH_TO_FILE=${PATH_TO_FILE:-/var/lib/planet/backup}
+[ ! -f $PATH_TO_FILE/count ] && echo 0 > $PATH_TO_FILE/count
+COUNT=$(cat "$PATH_TO_FILE/count")
+# Backup each locale
+for locale in de en es fr it pl
+do
+ if [ ! -d $PATH_TO_FILE/$locale ]
+ then
+ /bin/mkdir $PATH_TO_FILE/$locale
+ fi
+ rsync -aHP --delete <%= location %>/$locale $PATH_TO_FILE/$locale/$locale-$COUNT
+done
+# Check count file to have a week of backup in the directory
+if [ $COUNT -ne 6 ]
+then
+ COUNT=$(expr $COUNT + 1)
+else
+ COUNT="0"
+fi
+echo $COUNT > $PATH_TO_FILE/count
diff --git a/modules/planet/templates/deploy_new-planet.sh b/modules/planet/templates/deploy_new-planet.sh
new file mode 100755
index 00000000..b3889d31
--- /dev/null
+++ b/modules/planet/templates/deploy_new-planet.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Initialization
+PATH_TO_FILE=${PATH_TO_FILE:-/var/lib/planet}
+PATH_TO_PLANET=${PATH_TO_PLANET:-<%= location %>}
+
+#Ask for new locale name
+echo -n "Locale name: "
+read locale
+
+# Display the answer and ask for confirmation
+echo -e -n "Do you confirm the entry: \"$locale\"? (y/n) "
+read answer
+if [ "$answer" == "y" ]
+then
+ FILE="$PATH_TO_PLANET/$locale/"
+ if test -d $FILE
+ then
+ echo "Aborted, $FILE already exist."
+ exit 2
+ else
+ # Deploy new planet with locale given
+ /bin/mkdir $FILE
+ /bin/chown planet:apache $FILE
+ # TODO: this URL returns 403 (2024-01)
+ /usr/bin/wget -O $PATH_TO_FILE"/moonmoon.tar.gz" https://damsweb.net/files/moonmoon_mageia.tar.gz
+ if [ $? -ne 0 ]
+ then
+ echo "Aborted, can't download GZIP file"
+ exit 2
+ fi
+ /bin/tar zxvf $PATH_TO_FILE/moonmoon.tar.gz -C $FILE
+ /bin/mkdir $FILE"cache"
+ /bin/chown -R planet:apache $FILE
+ /bin/chmod g+w $FILE"custom" $FILE"custom/people.opml" $FILE"admin/inc/pwd.inc.php" $FILE"cache"
+ echo -e "Info: a new Planet had been deployed.\nThe locale is: \"$locale\" - https://planet.<%= domain %>/$locale \n-- \nMail sent by the script '$0' on `hostname`" | /bin/mail -s "New planet Mageia deployed" mageia-webteam@<%= domain %> mageia-marketing@<%= domain %>
+ fi
+else
+ echo "Aborted, please try again."
+ exit 2
+fi
diff --git a/modules/planet/templates/index.php b/modules/planet/templates/index.php
new file mode 100644
index 00000000..6c08e763
--- /dev/null
+++ b/modules/planet/templates/index.php
@@ -0,0 +1,23 @@
+<html>
+<body>
+<h1>Planet Mageia</h1>
+<h3>Please choose one of the following locales:</h3>
+<ul>
+<?php
+function displayloc($path = ''){
+ return array_slice(scandir($path), 2);
+}
+
+foreach(displayloc('.') as $loc)
+ if(is_dir($loc) && $loc != "test" && $loc != "test-2")
+ {
+ echo '<li><a href="'.$loc.'">'.$loc.'</a></li>';
+ }
+?>
+</ul>
+<h3>How to be listed in Planet Mageia:</h3>
+<ul>
+<li>Just apply by sending us an RSS feed about Mageia, written in a single locale.</li>
+</ul>
+</body>
+</html>
diff --git a/modules/planet/templates/planet_vhosts.conf b/modules/planet/templates/planet_vhosts.conf
new file mode 100644
index 00000000..b3a07ab9
--- /dev/null
+++ b/modules/planet/templates/planet_vhosts.conf
@@ -0,0 +1,11 @@
+<Directory <%= location %> >
+ Order deny,allow
+ Allow from All
+ AllowOverride All
+ Options FollowSymlinks
+ Options +Indexes
+</Directory>
+# Add a permanent redirection for '/*' as '/en/' for english planet
+<IfModule mod_alias.c>
+ RedirectMatch permanent ^/?$ /en/
+</IfModule>
diff --git a/modules/postfix/manifests/init.pp b/modules/postfix/manifests/init.pp
index 855778da..8a4394df 100644
--- a/modules/postfix/manifests/init.pp
+++ b/modules/postfix/manifests/init.pp
@@ -1,63 +1,24 @@
class postfix {
+ package { postfix: }
- class base {
- package { postfix:
- ensure => installed
- }
- package { 'nail':
- ensure => installed
- }
- service { postfix:
- ensure => running,
- subscribe => [ Package['postfix']],
- path => "/etc/init.d/postfix"
- }
- }
-
- file { '/etc/postfix/main.cf':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["postfix"],
- content => "",
- notify => [Service['postfix']]
- }
-
-
- class simple_relay inherits base {
- file { '/etc/postfix/main.cf':
- content => template("postfix/simple_relay_main.cf"),
- }
+ service { 'postfix':
+ subscribe => Package['postfix'],
}
- class smtp_server inherits base {
- include postgrey
- file { '/etc/postfix/main.cf':
- content => template("postfix/main.cf"),
- }
-
- file { '/etc/postfix/transport_regexp':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("postfix/transport_regexp"),
- }
-
+ file { '/etc/postfix/main.cf':
+ require => Package['postfix'],
+ content => '',
+ notify => Service['postfix'],
}
- class primary_smtp inherits smtp_server {
- file { '/etc/postfix/master.cf':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("postfix/primary_master.cf"),
- }
+ file { '/etc/ssl/postfix/':
+ ensure => directory,
}
- class secondary_smtp inherits smtp_server {
+ openssl::self_signed_splitted_cert { "${::hostname}.${::domain}":
+ filename => 'postfix',
+ directory => '/etc/ssl/postfix/',
+ owner => 'postfix',
+ group => 'postfix'
}
-
}
diff --git a/modules/postfix/manifests/server.pp b/modules/postfix/manifests/server.pp
new file mode 100644
index 00000000..85ab261c
--- /dev/null
+++ b/modules/postfix/manifests/server.pp
@@ -0,0 +1,13 @@
+class postfix::server inherits postfix {
+ include postgrey
+ include amavis
+ include spamassassin
+
+ File['/etc/postfix/main.cf'] {
+ content => template('postfix/main.cf'),
+ }
+
+ file { '/etc/postfix/transport_regexp':
+ content => template('postfix/transport_regexp'),
+ }
+}
diff --git a/modules/postfix/manifests/server/primary.pp b/modules/postfix/manifests/server/primary.pp
new file mode 100644
index 00000000..c14a8606
--- /dev/null
+++ b/modules/postfix/manifests/server/primary.pp
@@ -0,0 +1,43 @@
+class postfix::server::primary inherits postfix::server {
+
+ # Adding DKIM server
+ include opendkim
+ opendkim::domain{['mageia.org', 'sucuk.mageia.org', 'duvel.mageia.org', 'forums.mageia.org', 'madb.mageia.org','rabbit.mageia.org', 'fiona.mageia.org','identity.mageia.org', 'group.mageia.org', 'neru.mageia.org']:}
+ opendkim::trusted{['127.0.0.0/8', '212.85.158.0/24']:}
+
+ package { ['postfix-ldap', 'sqlite3-tools', 'dovecot-plugins-sqlite','rspamd']: }
+
+ # council is here until we fully decide who has aliases in the com team,
+ # see https://bugs.mageia.org/show_bug.cgi?id=1345
+ # alumni is a special group for tracking previous members of
+ # the project, so they keep their aliases for a time
+ $aliases_group = ['mga-founders',
+ 'mga-packagers',
+ 'mga-sysadmin',
+ 'mga-council',
+ 'mga-alumni',
+ 'mga-i18n-committers']
+ $ldap_password = extlookup('postfix_ldap','x')
+ $ldap_servers = get_ldap_servers()
+
+ file {
+ '/etc/postfix/master.cf':
+ content => template('postfix/primary_master.cf');
+ '/etc/postfix/ldap_aliases.conf':
+ content => template('postfix/ldap_aliases.conf');
+ # TODO merge the file with the previous one, for common part (ldap, etc)
+ '/etc/postfix/group_aliases.conf':
+ content => template('postfix/group_aliases.conf');
+ # TODO make it conditional to the presence of sympa
+ '/etc/postfix/sympa_aliases':
+ content => template('postfix/sympa_aliases');
+ '/etc/postfix/virtual_aliases':
+ content => template('postfix/virtual_aliases');
+ }
+
+ exec { 'postmap /etc/postfix/virtual_aliases':
+ refreshonly => true,
+ subscribe => File['/etc/postfix/virtual_aliases'],
+ }
+}
diff --git a/modules/postfix/manifests/server/secondary.pp b/modules/postfix/manifests/server/secondary.pp
new file mode 100644
index 00000000..e4dd8721
--- /dev/null
+++ b/modules/postfix/manifests/server/secondary.pp
@@ -0,0 +1 @@
+class postfix::server::secondary inherits postfix::server { }
diff --git a/modules/postfix/manifests/simple_relay.pp b/modules/postfix/manifests/simple_relay.pp
new file mode 100644
index 00000000..8911f781
--- /dev/null
+++ b/modules/postfix/manifests/simple_relay.pp
@@ -0,0 +1,9 @@
+class postfix::simple_relay inherits postfix {
+ File['/etc/postfix/main.cf'] {
+ content => template('postfix/simple_relay_main.cf'),
+ }
+ file {
+ '/etc/postfix/sympa_aliases':
+ content => template('postfix/sympa_aliases');
+ }
+}
diff --git a/modules/postfix/templates/group_aliases.conf b/modules/postfix/templates/group_aliases.conf
new file mode 100644
index 00000000..eac16dab
--- /dev/null
+++ b/modules/postfix/templates/group_aliases.conf
@@ -0,0 +1,15 @@
+<%-
+ ldap = ldap_servers.map { |l| "ldaps://#{l}:636" }
+-%>
+server_host = <%= ldap.join(' ') %>
+search_base = <%= dc_suffix %>
+query_filter = (&(cn=mga-%u)(objectClass=groupOfNames))
+result_attribute = mail
+special_result_attribute = member
+bind = yes
+bind_dn = cn=postfix-<%= hostname %>,ou=System Accounts,<%= dc_suffix %>
+bind_pw = <%= ldap_password %>
+# postfix complains about the url otherwise:
+# warning: dict_ldap_open: URL scheme ldaps requires protocol version 3
+version = 3
+domain = group.<%= domain %>
diff --git a/modules/postfix/templates/ldap_aliases.conf b/modules/postfix/templates/ldap_aliases.conf
new file mode 100644
index 00000000..40d7da13
--- /dev/null
+++ b/modules/postfix/templates/ldap_aliases.conf
@@ -0,0 +1,20 @@
+<%-
+# TODO I am sure a more elegant way could be found
+query_string = ''
+aliases_group.each do |g|
+ query_string += '(memberOf=cn=' + g + ',ou=Group,' + dc_suffix + ')'
+end
+
+ldap = ldap_servers.map { |l| "ldaps://#{l}:636" }
+-%>
+server_host = <%= ldap.join(' ') %>
+search_base = <%= dc_suffix %>
+query_filter = (&(uid=%u)(|<%= query_string %>))
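+# with the groups listed in postfix::server::primary this expands to roughly
+# (&(uid=%u)(|(memberOf=cn=mga-founders,ou=Group,<dc_suffix>)(memberOf=cn=mga-packagers,ou=Group,<dc_suffix>)...))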
+result_attribute = mail
+bind = yes
+bind_dn = cn=postfix-<%= hostname %>,ou=System Accounts,<%= dc_suffix %>
+bind_pw = <%= ldap_password %>
+# postfix complains about the url otherwise:
+# warning: dict_ldap_open: URL scheme ldaps requires protocol version 3
+version = 3
+domain = <%= domain %>
diff --git a/modules/postfix/templates/main.cf b/modules/postfix/templates/main.cf
index 7b60f3a3..6b42a4de 100644
--- a/modules/postfix/templates/main.cf
+++ b/modules/postfix/templates/main.cf
@@ -11,79 +11,143 @@ sendmail_path = /usr/sbin/sendmail.postfix
setgid_group = postdrop
command_directory = /usr/sbin
manpage_directory = /usr/share/man
-daemon_directory = <%= lib_dir %>/postfix/
+daemon_directory = /usr/libexec/postfix
+meta_directory = /etc/postfix
+shlib_directory = /usr/lib64
+compatibility_level = 2
data_directory = /var/lib/postfix
newaliases_path = /usr/bin/newaliases
mailq_path = /usr/bin/mailq
queue_directory = /var/spool/postfix
mail_owner = postfix
+<% if all_tags.include?('postfix::simple_relay') || all_tags.include?('postfix::server::secondary') %>
+relayhost = sucuk.<%= domain %>
+<%- end -%>
# User configurable parameters
<% if all_tags.include?('postfix::simple_relay') %>
-inet_interfaces = localhost
+inet_interfaces = localhost, 127.0.0.1
<% else %>
inet_interfaces = all
<% end %>
inet_protocols = all
-mynetworks_style = host
+<% if @hostname == 'neru' then %>
+# We do not have a reverse on ipv6 :(
+smtp_address_preference = ipv4
+<%- end -%>
+
+# FIXME Do not hardcode this
+mynetworks = 212.85.158.144/28 [2a02:2178:2:7::]/64 127.0.0.0/16 163.172.148.228 [2001:bc8:4400:2800::4115]
myhostname = <%= fqdn %>
mydomain = <%= domain %>
-mydestination = <%= fqdn %>
-myorigin = $mydomain
-<%- if all_tags.include?('postfix::secondary_smtp') -%>
-relay_domains = <%= domain %>, ml.<%= domain %>
+<%- if all_tags.include?('postfix::server::secondary') -%>
+relay_domains = <%= domain %>,
+ ml.<%= domain %>,
+ group.<%= domain %>
<%- end -%>
mydestination = <%= fqdn %>
-<%- if all_tags.include?('postfix::primary_smtp') -%>
- <%= domain %>,
-<%- if classes.include?('sympa') -%>
+<%- if all_tags.include?('postfix::server::primary') -%>
ml.<%= domain %>
-<%- end -%>
+<%- end -%>
+
+<%- if all_tags.include?('postfix::server::primary') -%>
+
+virtual_mailbox_domains = <%= domain %>,
+ group.<%= domain %>
+
+# postfix complains if this is not set:
+# Mar 22 23:51:20 alamut postfix/virtual[22952]: fatal: bad string length 0 < 1: virtual_mailbox_base =
+virtual_mailbox_base = /var/lib/mail
+
+# local_recipient_maps is disabled, as we need to route all
+# non-local email to ryu as long as the Mageia mailing lists are hosted
+# there. Hence the use of fallback_transport, but this is only taken
+# into account if local_recipient_maps is empty
+local_recipient_maps =
+# route ml to ryu ( ml being mageia-*@mageia )
+fallback_transport_maps = regexp:/etc/postfix/transport_regexp
+
+# needed by sympa to handle bounce, according to the doc
+recipient_delimiter = +
-alias_maps = hash:/etc/aliases
- # uncomment if we want to enable ldap based alias
- # and create the file
- #ldap:/etc/postfix/ldap_aliases.conf
+
+alias_maps = hash:/etc/postfix/aliases
+
+virtual_alias_maps = ldap:/etc/postfix/ldap_aliases.conf
+ ldap:/etc/postfix/group_aliases.conf
+ hash:/etc/postfix/virtual_aliases
+<%- if classes.include?('sympa::server') -%>
+ regexp:/etc/postfix/sympa_aliases
+<%- end -%>
+<% else %>
+<%- if classes.include?('sympa::server') -%>
+virtual_alias_maps = regexp:/etc/postfix/sympa_aliases
+<%- end -%>
<%- end -%>
+<%- if all_tags.include?('postfix::server::primary') -%>
+# Adding DKIM Miler for primaryserver (sucuk)
+smtpd_milters = inet:127.0.0.1:8891
+non_smtpd_milters = $smtpd_milters
+milter_default_action = accept
+milter_protocol = 2
+
+# Adding Sender Rewriting Scheme
+sender_canonical_maps = socketmap:inet:localhost:10003:forward
+sender_canonical_classes = envelope_sender
+recipient_canonical_maps = socketmap:inet:localhost:10003:reverse
+recipient_canonical_classes= envelope_recipient,header_recipient
+<%- end -%>
-<%- if all_tags.include?('postfix::smtp_server') -%>
+<%- if all_tags.include?('postfix::server') -%>
transport_maps = regexp:/etc/postfix/transport_regexp
+content_filter = smtp-filter:[127.0.0.1]:10025
<%- end -%>
-<%- if classes.include?('sympa') -%>
+<%- if classes.include?('sympa::server') -%>
sympa_destination_recipient_limit = 1
sympabounce_destination_recipient_limit = 1
<%- end -%>
#delay_warning_time = 4h
-smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (Mandriva Linux)
+smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (<%= lsbdistid %>)
unknown_local_recipient_reject_code = 450
smtp-filter_destination_concurrency_limit = 2
lmtp-filter_destination_concurrency_limit = 2
+# enable opportunistic TLS when receiving
smtpd_use_tls = yes
-smtpd_tls_cert_file = /etc/pki/tls/certs/postfix.pem
-smtpd_tls_key_file = /etc/pki/tls/private/postfix.pem
+smtpd_tls_received_header = yes
+smtpd_tls_cert_file = /etc/ssl/postfix/postfix.crt
+smtpd_tls_key_file = /etc/ssl/postfix/postfix.key
smtpd_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt
+# enable opportunistic TLS when sending
+smtp_tls_security_level = may
+smtp_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt
-<%- if all_tags.include?('postfix::smtp_server') -%>
+<%- if all_tags.include?('postfix::server') -%>
smtpd_etrn_restrictions = reject
smtpd_helo_required = yes
-smtpd_data_restrictions = reject_unauth_pipelining
+smtpd_data_restrictions = permit_mynetworks
+ reject_unauth_pipelining
reject_multi_recipient_bounce
-smtpd_recipient_restrictions = reject_non_fqdn_recipient
- reject_non_fqdn_sender
+smtpd_recipient_restrictions = permit_mynetworks
# not done yet, not sure if we need to offer this kind of service
# permit_sasl_authenticated
- permit_mynetworks
- reject_unauth_destination
reject_non_fqdn_helo_hostname
+ reject_non_fqdn_recipient
+ reject_non_fqdn_sender
+ check_sender_access hash:/etc/postfix/access
+ reject_rhsbl_helo sbl.spamhaus.org
+ reject_rhsbl_reverse_client sbl.spamhaus.org
+ reject_rhsbl_sender sbl.spamhaus.org
+ reject_rbl_client sbl.spamhaus.org
+ reject_unauth_destination
reject_unknown_sender_domain
reject_unknown_client
<%- if classes.include?('postgrey') -%>
@@ -91,3 +155,5 @@ smtpd_recipient_restrictions = reject_non_fqdn_recipient
<%- end -%>
<%- end -%>
+# Needed for buggy clients
+always_add_missing_headers = yes
diff --git a/modules/postfix/templates/primary_master.cf b/modules/postfix/templates/primary_master.cf
index 299bbd6c..e05d33dc 100644
--- a/modules/postfix/templates/primary_master.cf
+++ b/modules/postfix/templates/primary_master.cf
@@ -116,7 +116,7 @@ cyrus-inet unix - - y - - lmtp
#mailman unix - n n - - pipe
# flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py
# ${nexthop} ${user}
-<% if classes.include?('sympa') %>
+<% if classes.include?('sympa::server') %>
sympa unix - n n - - pipe
flags=R user=sympa argv=/usr/sbin/queue ${recipient}
sympabounce unix - n n - - pipe
@@ -174,4 +174,3 @@ smtp-filter unix - - y - - smtp
-o max_use=20
#
##### END OF CONTENT FILTER CUSTOMIZATIONS #####
-
diff --git a/modules/postfix/templates/simple_relay_main.cf b/modules/postfix/templates/simple_relay_main.cf
index 5f8d44ca..e0c116a7 100644
--- a/modules/postfix/templates/simple_relay_main.cf
+++ b/modules/postfix/templates/simple_relay_main.cf
@@ -11,7 +11,7 @@ sendmail_path = /usr/sbin/sendmail.postfix
setgid_group = postdrop
command_directory = /usr/sbin
manpage_directory = /usr/share/man
-daemon_directory = <%= lib_dir %>/postfix/
+daemon_directory = /usr/libexec/postfix/
data_directory = /var/lib/postfix
newaliases_path = /usr/bin/newaliases
mailq_path = /usr/bin/mailq
@@ -20,11 +20,12 @@ mail_owner = postfix
# User configurable parameters
-inet_interfaces = localhost
+myhostname = <%= fqdn %>
+mydomain = <%= domain %>
inet_protocols = all
mynetworks_style = host
#delay_warning_time = 4h
-smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (Mandriva Linux)
+smtpd_banner = $myhostname ESMTP $mail_name ($mail_version) (Mageia Linux)
unknown_local_recipient_reject_code = 450
smtp-filter_destination_concurrency_limit = 2
lmtp-filter_destination_concurrency_limit = 2
@@ -32,3 +33,19 @@ smtpd_use_tls = yes
smtpd_tls_cert_file = /etc/pki/tls/certs/postfix.pem
smtpd_tls_key_file = /etc/pki/tls/private/postfix.pem
smtpd_tls_CAfile = /etc/pki/tls/certs/ca-bundle.crt
+
+<%- if classes.include?('sympa::server') -%>
+local_recipient_maps =
+fallback_transport_maps = regexp:/etc/postfix/transport_regexp
+transport_maps = regexp:/etc/postfix/transport_regexp
+mydestination = ml.<%= domain %>
+sympa_destination_recipient_limit = 1
+sympabounce_destination_recipient_limit = 1
+virtual_alias_maps = regexp:/etc/postfix/sympa_aliases
+# needed by sympa to handle bounces, according to the doc
+recipient_delimiter = +
+# This is ugly for a simple relay but we need ml.mageia.org to accept email :(
+inet_interfaces = all
+<%- else -%>
+inet_interfaces = localhost
+<%- end -%>
diff --git a/modules/postfix/templates/sympa_aliases b/modules/postfix/templates/sympa_aliases
new file mode 100644
index 00000000..436e7a28
--- /dev/null
+++ b/modules/postfix/templates/sympa_aliases
@@ -0,0 +1,8 @@
+# everything is handled with transports in postfix,
+# but according to https://www.sympa.org/faq/postfix, we also need this one
+<% escaped_domain = ( 'ml.' + domain ).gsub('.','\.') %>
+/^(.*)-owner\@<%= escaped_domain %>$/ $1+owner@ml.<%= domain %>
+# redirect the mail from the ml domain to sysadmin
+/^listmaster\@<%= escaped_domain %>$/ listmaster@<%= domain %>
+# errors are sent there, so that should also be redirected
+/^sympa-request\@<%= escaped_domain %>$/ listmaster@<%= domain %>
diff --git a/modules/postfix/templates/transport_regexp b/modules/postfix/templates/transport_regexp
index 5d005c7b..3eb5494f 100644
--- a/modules/postfix/templates/transport_regexp
+++ b/modules/postfix/templates/transport_regexp
@@ -1,8 +1,10 @@
<%
ml_domain = 'ml\.' + domain.gsub('.','\.')
%>
-<%- if classes.include?('sympa') -%>
+<%- if classes.include?('sympa::server') -%>
/^.*+owner\@<%= ml_domain %>$/ sympabounce:
+/^bounce+.*\@<%= ml_domain %>$/ sympabounce:
/^.*\@<%= ml_domain %>$/ sympa:
+<%- else -%>
+/^.*\@<%= ml_domain %>$/ smtp:sucuk.mageia.org
<%- end -%>
-
diff --git a/modules/postfix/templates/virtual_aliases b/modules/postfix/templates/virtual_aliases
new file mode 100644
index 00000000..861e79c6
--- /dev/null
+++ b/modules/postfix/templates/virtual_aliases
@@ -0,0 +1,33 @@
+# do not forget to add $domain or it will not work
+# do not hardcode the domain, or it will be harvested by bots
+
+treasurer@<%= domain %> treasurer@group.<%= domain %>
+president@<%= domain %> ennael@<%= domain %>
+secretary@<%= domain %> obgr_seneca@<%= domain %>
+
+contact@<%= domain %> council@group.<%= domain %>
+press@<%= domain %> council@group.<%= domain %>
+
+# later switch to a team alias
+root@<%= domain %> sysadmin@group.<%= domain %>
+
+security@<%= domain %> security@group.<%= domain %>
+
+# Temporary(?) alias until there is a real board-commits@ list
+board-commits@ml.<%= domain %> board-public@ml.<%= domain %>
+
+# TODO see https://www.ietf.org/rfc/rfc2142.txt
+<%
+['postmaster','hostmaster','abuse','noc','listmaster','MAILER-DAEMON'].each { |a|
+%>
+<%= a %>@<%= domain %> root@<%= domain %>
+<%
+}
+
+['webmaster','www'].each { |a|
+%>
+<%= a %>@<%= domain %> web@group.<%= domain %>
+<% } %>
+
+# TODO :
+# info, marketing, sales -> marketing ( once we do have a team )
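
For clarity, the two Ruby loops above expand to one alias line per list entry. Assuming a domain of example.org (illustrative only, the real value comes from the domain fact), the rendered table would contain lines such as:

postmaster@example.org root@example.org
hostmaster@example.org root@example.org
abuse@example.org root@example.org
webmaster@example.org web@group.example.org
www@example.org web@group.example.org
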
diff --git a/modules/postgresql/manifests/config.pp b/modules/postgresql/manifests/config.pp
new file mode 100644
index 00000000..a9f2ad7f
--- /dev/null
+++ b/modules/postgresql/manifests/config.pp
@@ -0,0 +1,10 @@
+define postgresql::config($content) {
+ file { $name:
+ owner => 'postgres',
+ group => 'postgres',
+ mode => '0600',
+ content => $content,
+ require => Package['postgresql-server'],
+ notify => Exec['service postgresql reload'],
+ }
+}
diff --git a/modules/postgresql/manifests/database.pp b/modules/postgresql/manifests/database.pp
new file mode 100644
index 00000000..34cee2a6
--- /dev/null
+++ b/modules/postgresql/manifests/database.pp
@@ -0,0 +1,20 @@
+# TODO convert it to a regular type ( so we can later change user and so on )
+define postgresql::database($description = '',
+ $user = 'postgres',
+ $callback_notify = '') {
+
+ exec { "createdb -O ${user} -U postgres ${name} '${description}' ":
+ user => 'root',
+ unless => "psql -A -t -U postgres -l | grep '^${name}|'",
+ require => Service['postgresql'],
+ }
+
+ # this is fetched by the manifest requesting the database creation,
+ # once the db has been created
+ # FIXME proper ordering ?
+ # FIXME In puppet >3.0 word 'tag' is reserved, so it has to be renamed
+ @@postgresql::database_callback { $name:
+ tag => $name,
+ callback_notify => $callback_notify,
+ }
+}
diff --git a/modules/postgresql/manifests/database_callback.pp b/modules/postgresql/manifests/database_callback.pp
new file mode 100644
index 00000000..0ab1771f
--- /dev/null
+++ b/modules/postgresql/manifests/database_callback.pp
@@ -0,0 +1,9 @@
+define postgresql::database_callback($callback_notify = '') {
+ # dummy declaration, so we can trigger the notify
+ if $callback_notify {
+ exec { "callback ${name}":
+ command => '/bin/true',
+ notify => $callback_notify,
+ }
+ }
+}
diff --git a/modules/postgresql/manifests/db_and_user.pp b/modules/postgresql/manifests/db_and_user.pp
new file mode 100644
index 00000000..2d59e1ca
--- /dev/null
+++ b/modules/postgresql/manifests/db_and_user.pp
@@ -0,0 +1,15 @@
+define postgresql::db_and_user( $password,
+ $description = '',
+ $callback_notify = '') {
+
+ postgresql::database { $name:
+ callback_notify => $callback_notify,
+ description => $description,
+ user => $name,
+ require => Postgresql::User[$name],
+ }
+
+ postgresql::user { $name:
+ password => $password
+ }
+}
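
A hedged usage sketch of the define above; the resource title, the extlookup key and the notified service are placeholders, not resources defined elsewhere in this repository:

# Illustrative only: title, password key and service name are made up.
postgresql::db_and_user { 'exampledb':
  description     => 'Example application database',
  password        => extlookup('exampledb_pgsql', 'x'),
  callback_notify => Service['example-app'],
}
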
diff --git a/modules/postgresql/manifests/hba_entry.pp b/modules/postgresql/manifests/hba_entry.pp
new file mode 100644
index 00000000..30fccda0
--- /dev/null
+++ b/modules/postgresql/manifests/hba_entry.pp
@@ -0,0 +1,40 @@
+# == Define: postgresql::hba_entry
+#
+# Set a new entry to pg_hba.conf file
+#
+# === Parameters
+#
+# See pgsql doc for more details about pg_hba.conf parameters :
+# https://www.postgresql.org/docs/9.1/static/auth-pg-hba-conf.html
+#
+# [*namevar*]
+# namevar is not used.
+#
+# [*type*]
+# can be local, host, hostssl, hostnossl
+#
+# [*database*]
+# database name
+#
+# [*user*]
+# user name
+#
+# [*address*]
+# host name or IP address range
+#
+# [*method*]
+# authentication method to use
+#
+define postgresql::hba_entry(
+ $type,
+ $database,
+ $user,
+ $address,
+ $method
+) {
+ include postgresql::var
+ Postgresql::Pg_hba <| title == $postgresql::var::hba_file |> {
+ conf_lines +> "${type} ${database} ${user} ${address} ${method}",
+ }
+}
+# vim: sw=2
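
Note that hba_entry does not write pg_hba.conf itself; it only appends one line to the conf_lines of the collected Postgresql::Pg_hba resource declared in postgresql::server (see the server.pp diff further down), in the same way as the allow_local_ipv4 entry there. A sketch of an additional entry, with placeholder database, user and network:

# Placeholder values; columns follow the pg_hba.conf order documented above.
postgresql::hba_entry { 'allow_example_net':
  type     => 'hostssl',
  database => 'exampledb',
  user     => 'example',
  address  => '192.0.2.0/24',
  method   => 'md5',
}
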
diff --git a/modules/postgresql/manifests/init.pp b/modules/postgresql/manifests/init.pp
index fb3ea06b..faec8b8c 100644
--- a/modules/postgresql/manifests/init.pp
+++ b/modules/postgresql/manifests/init.pp
@@ -1,60 +1 @@
-class postgresql {
-
- $pgsql_data = "/var/lib/pgsql/data/"
-
- package { 'postgresql9.0-server':
- alias => "postgresql-server",
- ensure => installed
- }
-
- service { postgresql:
- ensure => running,
- subscribe => Package["postgresql-server"],
- hasstatus => true,
- }
-
- exec { "service postgresql reload":
- refreshonly => true,
- subscribe => [ File["postgresql.conf"],
- File["pg_ident.conf"],
- File["pg_hba.conf"] ]
- }
-
- file { '/etc/pam.d/postgresql':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("postgresql/pam"),
- }
-
- file { "postgresql.conf":
- path => "$pgsql_data/postgresql.conf",
- ensure => present,
- owner => postgres,
- group => postgres,
- mode => 600,
- content => template("postgresql/postgresql.conf"),
- require => Package["postgresql-server"],
- }
-
- file { 'pg_hba.conf':
- path => "$pgsql_data/pg_hba.conf",
- ensure => present,
- owner => postgres,
- group => postgres,
- mode => 600,
- content => template("postgresql/pg_hba.conf"),
- require => Package["postgresql-server"],
- }
-
- file { 'pg_ident.conf':
- path => "$pgsql_data/pg_ident.conf",
- ensure => present,
- owner => postgres,
- group => postgres,
- mode => 600,
- content => template("postgresql/pg_ident.conf"),
- require => Package["postgresql-server"],
- }
-}
+class postgresql { }
diff --git a/modules/postgresql/manifests/pg_hba.pp b/modules/postgresql/manifests/pg_hba.pp
new file mode 100644
index 00000000..777eee47
--- /dev/null
+++ b/modules/postgresql/manifests/pg_hba.pp
@@ -0,0 +1,13 @@
+define postgresql::pg_hba(
+ $conf_lines = []
+) {
+ $db = list_exported_ressources('Postgresql::Db_and_user')
+
+ $forum_lang = list_exported_ressources('Phpbb::Locale_db')
+
+# (tmb) disable rewriting config as we are moving to mariadb
+# postgresql::config { $name:
+# content => template('postgresql/pg_hba.conf'),
+# }
+}
+# vim: sw=2
diff --git a/modules/postgresql/manifests/remote_database.pp b/modules/postgresql/manifests/remote_database.pp
new file mode 100644
index 00000000..15b54651
--- /dev/null
+++ b/modules/postgresql/manifests/remote_database.pp
@@ -0,0 +1,15 @@
+# FIXME: In puppet >3.0 word 'tag' is reserved, so it has to be renamed
+define postgresql::remote_database($description = '',
+ $user = 'postgresql',
+ $callback_notify = '',
+ $tag = 'default') {
+ @@postgresql::database { $name:
+ description => $description,
+ user => $user,
+ callback_notify => $callback_notify,
+ tag => $tag,
+ require => Postgresql::User[$user],
+ }
+
+ Postgresql::Database_callback <<| tag == $name |>>
+}
diff --git a/modules/postgresql/manifests/remote_db_and_user.pp b/modules/postgresql/manifests/remote_db_and_user.pp
new file mode 100644
index 00000000..07e3ea23
--- /dev/null
+++ b/modules/postgresql/manifests/remote_db_and_user.pp
@@ -0,0 +1,18 @@
+# FIXME: In puppet >3.0 word 'tag' is reserved, so it has to be renamed
+define postgresql::remote_db_and_user($password,
+ $description = '',
+ $tag = 'default',
+ $callback_notify = '') {
+
+ @@postgresql::db_and_user { $name:
+ callback_notify => $callback_notify,
+ tag => $tag,
+ description => $description,
+ password => $password,
+ }
+
+ # fetch the callback resources that should have been exported
+ # once the db was created, and trigger a notify on the object
+ # passed as callback_notify
+ Postgresql::Database_callback <<| tag == $name |>>
+}
diff --git a/modules/postgresql/manifests/remote_user.pp b/modules/postgresql/manifests/remote_user.pp
new file mode 100644
index 00000000..fb53df4c
--- /dev/null
+++ b/modules/postgresql/manifests/remote_user.pp
@@ -0,0 +1,10 @@
+# FIXME: In puppet >3.0 word 'tag' is reserved, so it has to be renamed
+define postgresql::remote_user( $password,
+ $tag = 'default') {
+ @@postgresql::user { $name:
+ tag => $tag,
+ password => $password,
+ }
+}
+
+
diff --git a/modules/postgresql/manifests/server.pp b/modules/postgresql/manifests/server.pp
new file mode 100644
index 00000000..8b92bb2b
--- /dev/null
+++ b/modules/postgresql/manifests/server.pp
@@ -0,0 +1,53 @@
+class postgresql::server {
+ include postgresql::var
+
+ # the missing requires is corrected in cooker;
+ # this should be removed
+ # once the fix is in a stable release
+ package { "postgresql${postgresql::var::pg_version}-plpgsql":
+ alias => 'postgresql-plpgsql',
+ }
+
+ package { "postgresql${postgresql::var::pg_version}-server":
+ alias => 'postgresql-server',
+ require => Package['postgresql-plpgsql'],
+ }
+
+ service { 'postgresql':
+ subscribe => Package['postgresql-server'],
+ }
+
+ exec { 'service postgresql reload':
+ refreshonly => true,
+ }
+
+ openssl::self_signed_splitted_cert { "pgsql.${::domain}":
+ filename => 'server',
+ directory => $postgresql::var::pgsql_data,
+ owner => 'postgres',
+ group => 'postgres',
+ require => Package['postgresql-server']
+ }
+
+
+ file { '/etc/pam.d/postgresql':
+ content => template('postgresql/pam'),
+ }
+
+ @postgresql::pg_hba { $postgresql::var::hba_file: }
+
+ postgresql::hba_entry { 'allow_local_ipv4':
+ type => 'host',
+ database => 'all',
+ user => 'all',
+ address => '127.0.0.1/32',
+ method => 'md5',
+ }
+
+ postgresql::config {
+ "${postgresql::var::pgsql_data}/pg_ident.conf":
+ content => template('postgresql/pg_ident.conf');
+ "${postgresql::var::pgsql_data}/postgresql.conf":
+ content => template('postgresql/postgresql.conf');
+ }
+}
diff --git a/modules/postgresql/manifests/tagged.pp b/modules/postgresql/manifests/tagged.pp
new file mode 100644
index 00000000..6a49e3ff
--- /dev/null
+++ b/modules/postgresql/manifests/tagged.pp
@@ -0,0 +1,8 @@
+# FIXME: In puppet >3.0 word 'tag' is reserved, so it has to be renamed
+define postgresql::tagged() {
+ # TODO add a system of tags so we can declare databases on more than one
+ # server
+ Postgresql::User <<| tag == $name |>>
+ Postgresql::Database <<| tag == $name |>>
+ Postgresql::Db_and_user <<| tag == $name |>>
+}
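
Taken together, remote_db_and_user and tagged implement the exported-resource round trip described in the comments above: an application node exports its request, the database server collects everything carrying its tag, and the callback resource travels back to notify the requester. A hedged two-node sketch; the node roles, the 'default' tag and the notified service are illustrative:

# On the application node (illustrative names):
postgresql::remote_db_and_user { 'exampleapp':
  description     => 'Example application database',
  password        => extlookup('exampleapp_pgsql', 'x'),
  tag             => 'default',
  callback_notify => Service['example-app'],
}

# On the database server: realise every user/database exported with this tag
postgresql::tagged { 'default': }
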
diff --git a/modules/postgresql/manifests/user.pp b/modules/postgresql/manifests/user.pp
new file mode 100644
index 00000000..5b73b243
--- /dev/null
+++ b/modules/postgresql/manifests/user.pp
@@ -0,0 +1,13 @@
+# TODO convert to a regular type, so we can later change password
+# without erasing the current user
+define postgresql::user($password) {
+ $sql = "CREATE ROLE ${name} ENCRYPTED PASSWORD '\${pass}' NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN;"
+
+ exec { "psql -U postgres -c \"${sql}\" ":
+ user => 'root',
+ # do not leak the password on the command line
+ environment => "pass=${password}",
+ unless => "psql -A -t -U postgres -c '\\du ${name}' | grep '${name}'",
+ require => Service['postgresql'],
+ }
+}
diff --git a/modules/postgresql/manifests/var.pp b/modules/postgresql/manifests/var.pp
new file mode 100644
index 00000000..b31c7ffe
--- /dev/null
+++ b/modules/postgresql/manifests/var.pp
@@ -0,0 +1,7 @@
+class postgresql::var {
+
+ $pgsql_data = '/var/lib/pgsql/data/'
+ $pg_version = '9.6'
+ $hba_file = "${pgsql_data}/pg_hba.conf"
+}
+# vim: sw=2
diff --git a/modules/postgresql/templates/pg_hba.conf b/modules/postgresql/templates/pg_hba.conf
index 4dd9906c..e4232a4e 100644
--- a/modules/postgresql/templates/pg_hba.conf
+++ b/modules/postgresql/templates/pg_hba.conf
@@ -75,31 +75,44 @@
# TYPE DATABASE USER CIDR-ADDRESS METHOD
-# This file is in mageia svn:
-# $Id$
+
+<%-
+ for line in @conf_lines
+-%>
+<%= line %>
+<%-
+ end
+-%>
# Nanar:
# This bypass global config for specific user/base
-host epoll epoll 127.0.0.1/32 md5
-host epoll epoll ::1/128 md5
-hostssl epoll epoll 212.85.158.146/32 md5
-hostssl epoll epoll 2a02:2178:2:7::2/128 md5
-
-host mirrors mirrors 127.0.0.1/32 md5
-host mirrors mirrors ::1/128 md5
-hostssl mirrors mirrors 212.85.158.146/32 md5
-hostssl mirrors mirrors 2a02:2178:2:7::2/128 md5
-
-host transifex transifex 127.0.0.1/32 md5
-host transifex transifex ::1/128 md5
-hostssl transifex transifex 212.85.158.146/32 md5
-hostssl transifex transifex 2a02:2178:2:7::2/128 md5
+<%
-host bugs bugs 127.0.0.1/32 md5
-host bugs bugs ::1/128 md5
-hostssl bugs bugs 212.85.158.146/32 md5
-hostssl bugs bugs 2a02:2178:2:7::2/128 md5
+# FIXME the IPv6 address is hardcoded, facter does not seem to support
+# fetching it
+for i in db
+%>
+host <%= i %> <%= i %> 127.0.0.1/32 md5
+host <%= i %> <%= i %> ::1/128 md5
+hostssl <%= i %> <%= i %> <%= ipaddress %>/32 md5
+hostssl <%= i %> <%= i %> 2a02:2178:2:7::2/128 md5
+<%
+end
+%>
+<%
+lang = ['en','de']
+for l in lang
+%>
+host phpbb_<%= l %> phpbb 127.0.0.1/32 md5
+host phpbb_<%= l %> phpbb ::1/128 md5
+hostssl phpbb_<%= l %> phpbb <%= ipaddress %>/32 md5
+hostssl phpbb_<%= l %> phpbb 2a02:2178:2:7::2/128 md5
+# temporary, for the forum on friteuse vm
+hostssl phpbb_<%= l %> phpbb 192.168.122.0/24 md5
+<%
+end
+%>
# When creating the database ( with bin/checkstup.pl ) bugzilla need to
# access to template1 ( https://bugzilla.mozilla.org/show_bug.cgi?id=542507 )
host template1 bugs 127.0.0.1/32 md5
@@ -107,17 +120,18 @@ host template1 bugs ::1/128 md5
hostssl template1 bugs 212.85.158.146/32 md5
hostssl template1 bugs 2a02:2178:2:7::2/128 md5
-host sympa sympa 127.0.0.1/32 md5
-host sympa sympa ::1/128 md5
-hostssl sympa sympa 212.85.158.146/32 md5
-hostssl sympa sympa 2a02:2178:2:7::2/128 md5
+# Allow youri-check on rabbit to access the results db
+hostssl youri_check youri 88.190.12.224/32 md5
+# Allow local access too
+hostssl youri_check youri 212.85.158.151/32 md5
+hostssl youri_check youri 2a02:2178:2:7::7/128 md5
# "local" is for Unix domain socket connections only
local all all ident map=local
# IPv4 local connections:
-host all all 127.0.0.1/32 pam
+host all all 127.0.0.1/32 md5
# IPv6 local connections:
-host all all ::1/128 pam
+host all all ::1/128 md5
-hostssl all all 0.0.0.0/0 pam
-hostssl all all ::0/0 pam
+hostssl all all 0.0.0.0/0 md5
+hostssl all all ::0/0 md5
diff --git a/modules/postgresql/templates/postgresql.conf b/modules/postgresql/templates/postgresql.conf
index 813c0910..c1e7c994 100644
--- a/modules/postgresql/templates/postgresql.conf
+++ b/modules/postgresql/templates/postgresql.conf
@@ -113,7 +113,7 @@ ssl = on
# - Memory -
-shared_buffers = 24MB # min 128kB
+shared_buffers = 2048MB # min 128kB
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
@@ -122,8 +122,8 @@ shared_buffers = 24MB # min 128kB
# per transaction slot, plus lock space (see max_locks_per_transaction).
# It is not advisable to set max_prepared_transactions nonzero unless you
# actively intend to use prepared transactions.
-#work_mem = 1MB # min 64kB
-#maintenance_work_mem = 16MB # min 1MB
+work_mem = 64MB # min 64kB
+maintenance_work_mem = 512MB # min 1MB
#max_stack_depth = 2MB # min 100kB
# - Kernel Resource Usage -
@@ -144,7 +144,7 @@ shared_buffers = 24MB # min 128kB
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
-#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round
+#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
# - Asynchronous Behavior -
@@ -235,7 +235,7 @@ shared_buffers = 24MB # min 128kB
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
-#effective_cache_size = 128MB
+effective_cache_size = 4096MB
# - Genetic Query Optimizer -
@@ -467,7 +467,7 @@ shared_buffers = 24MB # min 128kB
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
-#timezone = unknown # actually, defaults to TZ environment
+timezone = 'Europe/Paris' # actually, defaults to TZ environment
# setting
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
diff --git a/modules/postgrey/manifests/init.pp b/modules/postgrey/manifests/init.pp
index 8d55a77c..8a2c9c18 100644
--- a/modules/postgrey/manifests/init.pp
+++ b/modules/postgrey/manifests/init.pp
@@ -1,31 +1,19 @@
class postgrey {
- package { postgrey:
- ensure => installed
- }
-
- service { postgrey:
- ensure => running,
- path => "/etc/init.d/postgrey",
- subscribe => [ Package[postgrey]]
+ package { 'postgrey': }
+
+ service { 'postgrey':
+ subscribe => Package['postgrey'],
}
- file { "/etc/sysconfig/postgrey":
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("postgrey/postgrey.sysconfig"),
- notify => [ Service[postgrey] ],
- require => Package[postgrey],
+ File {
+ notify => Service['postgrey'],
+ require => Package['postgrey'],
}
- file { "/etc/postfix/postgrey_whitelist_clients.local":
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("postgrey/whitelist_clients.local"),
- require => Package[postgrey],
- notify => [ Service[postgrey]],
+ file {
+ '/etc/sysconfig/postgrey':
+ content => template('postgrey/postgrey.sysconfig');
+ '/etc/postfix/postgrey_whitelist_clients.local':
+ content => template('postgrey/whitelist_clients.local');
}
}
diff --git a/modules/postgrey/templates/postgrey.sysconfig b/modules/postgrey/templates/postgrey.sysconfig
index ec4e6947..f08b8f6f 100644
--- a/modules/postgrey/templates/postgrey.sysconfig
+++ b/modules/postgrey/templates/postgrey.sysconfig
@@ -1,12 +1,10 @@
# change default configuration option here
-# SOCKET=$(postconf -h queue_directory)/extern/postgrey/socket
-# OPTIONS="--unix=$SOCKET"
-# DBPATH=/var/lib/postgrey
-# OPTIONS="$OPTIONS --dbdir=$DBPATH"
+# default: unix socket
+SOCKET="--unix=/var/spool/postfix/extern/postgrey/socket"
-# to use an inet connection instead of a socket
-#OPTIONS="--inet=127.0.0.1:10031"
+# to use an inet socket instead
+#SOCKET="--inet=127.0.0.1:10031"
# enable whitelisting
OPTIONS="$OPTIONS --auto-whitelist-clients"
diff --git a/modules/postgrey/templates/whitelist_clients.local b/modules/postgrey/templates/whitelist_clients.local
index 9457cc82..8c87b88c 100644
--- a/modules/postgrey/templates/whitelist_clients.local
+++ b/modules/postgrey/templates/whitelist_clients.local
@@ -1,5 +1,2 @@
-# zarb
-ryu.zarb.org
-cthulhu.zarb.org
-
-
+# mageia
+<%= domain %>
diff --git a/modules/puppet/manifests/client.pp b/modules/puppet/manifests/client.pp
new file mode 100644
index 00000000..1168373b
--- /dev/null
+++ b/modules/puppet/manifests/client.pp
@@ -0,0 +1,15 @@
+class puppet::client inherits puppet {
+
+ cron { 'puppet':
+ ensure => present,
+ command => 'puppet agent --onetime --no-daemonize -l syslog >/dev/null 2>&1',
+ user => 'root',
+ minute => fqdn_rand( 60 ),
+ }
+
+ # we are using cron, so no need for the service
+ service { 'puppet':
+ enable => false,
+ hasstatus => true,
+ }
+}
diff --git a/modules/puppet/manifests/hiera.pp b/modules/puppet/manifests/hiera.pp
new file mode 100644
index 00000000..02900cd7
--- /dev/null
+++ b/modules/puppet/manifests/hiera.pp
@@ -0,0 +1,14 @@
+class puppet::hiera {
+ package { ['ruby-hiera']: }
+
+ # ease the use of the command line tool,
+ # which uses a different location for the config file
+ file { '/etc/hiera.yaml':
+ ensure => link,
+ target => '/etc/puppet/hiera.yaml',
+ }
+
+ file { '/etc/puppet/hiera.yaml':
+ content => template('puppet/hiera.yaml'),
+ }
+}
diff --git a/modules/puppet/manifests/init.pp b/modules/puppet/manifests/init.pp
index b23e9d6a..be72d17d 100644
--- a/modules/puppet/manifests/init.pp
+++ b/modules/puppet/manifests/init.pp
@@ -1,52 +1,11 @@
-
class puppet {
- class client {
- package { puppet:
- ensure => installed
- }
-
- service { puppet:
- ensure => running,
- subscribe => [ Package[puppet], File["/etc/puppet/puppet.conf"]]
- }
-
- file { "/etc/puppet/puppet.conf":
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("puppet/puppet.conf"),
- require => Package[puppet]
- }
- }
-
- class master inherits client {
- package { puppet-server:
- ensure => installed
- }
-
- service { puppetmaster:
- ensure => running,
- path => "/etc/init.d/puppetmaster",
- subscribe => [ Package[puppet-server], File["/etc/puppet/puppet.conf"]]
- }
+ include puppet::stored_config
- file { "extdata":
- path => "/etc/puppet/extdata",
- ensure => directory,
- owner => puppet,
- group => puppet,
- mode => 700,
- recurse => true
- }
+ package { 'puppet': }
- file { '/etc/puppet/tagmail.conf':
- ensure => present,
- owner => puppet,
- group => puppet,
- mode => 700,
- content => template("puppet/tagmail.conf"),
- }
-
+ # declared here so subclasses can override it
+ file { '/etc/puppet/puppet.conf':
+ require => Package[puppet],
+ content => template('puppet/puppet.conf','puppet/puppet.agent.conf'),
}
}
diff --git a/modules/puppet/manifests/master.pp b/modules/puppet/manifests/master.pp
new file mode 100644
index 00000000..55529466
--- /dev/null
+++ b/modules/puppet/manifests/master.pp
@@ -0,0 +1,54 @@
+class puppet::master inherits puppet {
+ include puppet::client
+ include puppet::queue
+ include puppet::stored_config
+ include puppet::hiera
+# do not enable until bug 4591 is solved
+# include puppet::thin
+
+ # rails and sqlite3 are used for stored config
+ package { ["ruby-${puppet::stored_config::database}"]: }
+
+ File['/etc/puppet/puppet.conf'] {
+ content => template('puppet/puppet.conf',
+ 'puppet/puppet.agent.conf',
+ 'puppet/puppet.master.conf'),
+ }
+
+
+ package { 'puppet-server': }
+
+ service { 'puppetmaster':
+# uncomment once thin is enabled
+# ensure => stopped,
+ subscribe => [Package['puppet-server'],
+ File['/etc/puppet/puppet.conf']],
+ }
+
+ file { '/etc/puppet/extdata':
+ ensure => directory,
+ owner => puppet,
+ group => puppet,
+ mode => '0700',
+ recurse => true,
+ }
+
+ file { '/etc/puppet/tagmail.conf':
+ content => template('puppet/tagmail.conf'),
+ }
+
+ tidy { '/var/lib/puppet/reports':
+ age => '4w',
+ matches => '*.yaml',
+ recurse => true,
+ type => 'mtime',
+ }
+
+ file { '/etc/puppet/autosign.conf':
+ ensure => $::environment ? {
+ 'test' => 'present',
+ default => 'absent',
+ },
+ content => '*',
+ }
+}
diff --git a/modules/puppet/manifests/queue.pp b/modules/puppet/manifests/queue.pp
new file mode 100644
index 00000000..770fc6df
--- /dev/null
+++ b/modules/puppet/manifests/queue.pp
@@ -0,0 +1,13 @@
+class puppet::queue {
+ include stompserver
+
+ package { 'ruby-stomp': }
+
+ service { 'puppetqd':
+ provider => base,
+ start => 'puppet queue',
+ require => [Package['puppet-server'],
+ Package['ruby-stomp'],
+ File['/etc/puppet/puppet.conf']],
+ }
+}
diff --git a/modules/puppet/manifests/stored_config.pp b/modules/puppet/manifests/stored_config.pp
new file mode 100644
index 00000000..51820d83
--- /dev/null
+++ b/modules/puppet/manifests/stored_config.pp
@@ -0,0 +1,26 @@
+class puppet::stored_config {
+# TODO uncomment when the following problems have been fixed
+# - how to bootstrap the installation of the infrastructure ( since we use
+# stored_config for postgresql::remote_db_and_user, we need to have a
+# sqlite3 database first and then declare the database, and then switch
+# to it )
+# - how do we decide when we get sqlite3 ( for small test servers ) and
+# when do we decide to get the real pgsql server ( for production setup )
+#
+# if ($::environment == 'production') {
+# # FIXME not really elegant, but we do not have much choice
+# # this makes servers non-bootstrappable for now
+# $pgsql_password = extlookup('puppet_pgsql','x')
+#
+# postgresql::remote_db_and_user { 'bugs':
+# description => 'Puppet database',
+# password => $pgsql_password,
+# }
+#
+# $database = 'pg'
+# } else {
+ $database = 'sqlite3'
+# }
+#
+ $db_config = template('puppet/db_config.erb')
+}
diff --git a/modules/puppet/manifests/thin.pp b/modules/puppet/manifests/thin.pp
new file mode 100644
index 00000000..1ca03a7e
--- /dev/null
+++ b/modules/puppet/manifests/thin.pp
@@ -0,0 +1,35 @@
+class puppet::thin {
+ package { 'ruby-thin': }
+
+ include apache::base
+ include apache::mod::ssl
+ include apache::mod::proxy
+
+ apache::vhost::other_app { 'puppet_proxy':
+ vhost_file => 'puppet/apache_proxy_vhost.conf',
+ }
+
+ apache::config { "${apache::base::conf_d}/puppet.conf":
+ content => 'Listen 8140',
+ }
+
+ $service_name = 'thin_puppet_master'
+ file { '/etc/puppet/thin.yml':
+ content => template('puppet/thin.yml'),
+ notify => Service[$service_name],
+ }
+
+ file { '/usr/local/share/puppet.config.ru':
+ content => template('puppet/config.ru'),
+ }
+
+ service { $service_name:
+ provider => base,
+ require => [ Package['ruby-thin'],
+ File['/etc/puppet/thin.yml'],
+ File['/usr/local/share/puppet.config.ru']],
+ start => 'thin -C /etc/puppet/thin.yml start',
+ stop => 'thin -C /etc/puppet/thin.yml stop',
+ restart => 'thin -C /etc/puppet/thin.yml restart',
+ }
+}
diff --git a/modules/puppet/templates/apache_proxy_vhost.conf b/modules/puppet/templates/apache_proxy_vhost.conf
new file mode 100644
index 00000000..89157fc2
--- /dev/null
+++ b/modules/puppet/templates/apache_proxy_vhost.conf
@@ -0,0 +1,42 @@
+ProxyRequests Off
+
+<Proxy balancer://puppet>
+# TODO dynamically adjust that with a variable
+ BalancerMember http://127.0.0.1:18140
+ BalancerMember http://127.0.0.1:18141
+ BalancerMember http://127.0.0.1:18142
+</Proxy>
+
+<VirtualHost *:8140>
+ SSLEngine on
+ ServerName puppet.<%= domain %>
+
+ ErrorLog /var/log/httpd/puppet_proxy.<%= domain %>.error.log
+ CustomLog /var/log/httpd/puppet_proxy.<%= domain %>.access.log
+
+ SSLCipherSuite SSLv2:-LOW:-EXPORT:RC4+RSA
+
+ SSLCertificateFile /var/lib/puppet/ssl/certs/puppet.<%= domain %>.pem
+ SSLCertificateKeyFile /var/lib/puppet/ssl/private_keys/puppet.<%= domain %>.pem
+ SSLCertificateChainFile /var/lib/puppet/ssl/ca/ca_crt.pem
+ SSLCACertificateFile /var/lib/puppet/ssl/ca/ca_crt.pem
+
+ SSLVerifyClient require
+ SSLVerifyDepth 1
+
+ SSLOptions +StdEnvVars
+
+ RequestHeader set X-Client-DN %{SSL_CLIENT_S_DN}e
+ RequestHeader set X-Client-Verify %{SSL_CLIENT_VERIFY}e
+
+ <Location />
+ SetHandler balancer-manager
+ Order allow,deny
+ Allow from all
+ </Location>
+
+ ProxyPass / balancer://puppet/
+ ProxyPassReverse / balancer://puppet/
+ ProxyPreserveHost on
+
+</VirtualHost>
diff --git a/modules/puppet/templates/config.ru b/modules/puppet/templates/config.ru
new file mode 100644
index 00000000..aba07857
--- /dev/null
+++ b/modules/puppet/templates/config.ru
@@ -0,0 +1,16 @@
+# a config.ru, for use with every rack-compatible webserver.
+# SSL needs to be handled outside this, though.
+
+# if puppet is not in your RUBYLIB:
+# $:.unshift('/opt/puppet/lib')
+
+$0 = '<%= service_name %>'
+
+# if you want debugging:
+# ARGV << "--debug"
+
+ARGV << "--rack"
+require 'puppet/application/master'
+# we're usually running inside a Rack::Builder.new {} block,
+# therefore we need to call run *here*.
+run Puppet::Application[:master].run
diff --git a/modules/puppet/templates/db_config.erb b/modules/puppet/templates/db_config.erb
new file mode 100644
index 00000000..337a5043
--- /dev/null
+++ b/modules/puppet/templates/db_config.erb
@@ -0,0 +1,10 @@
+<%- if database == 'sqlite3' -%>
+ dbadapter = sqlite3
+ dblocation = /var/lib/puppet/storeconfigs.db
+<%- else -%>
+ dbadapter = postgresql
+ dbuser = puppet
+ dbpassword = <%= pgsql_password %>
+ dbserver = pgsql.<%= domain %>
+ dbname = puppet
+<%- end -%>
diff --git a/modules/puppet/templates/hiera.yaml b/modules/puppet/templates/hiera.yaml
new file mode 100644
index 00000000..fcef4278
--- /dev/null
+++ b/modules/puppet/templates/hiera.yaml
@@ -0,0 +1,9 @@
+---
+:backends:
+ - yaml
+:yaml:
+ :datadir: /etc/puppet/hieradata
+:logger: console
+:hierarchy:
+ - "%{::environment}"
+ - common
diff --git a/modules/puppet/templates/puppet.agent.conf b/modules/puppet/templates/puppet.agent.conf
new file mode 100644
index 00000000..44dfedb7
--- /dev/null
+++ b/modules/puppet/templates/puppet.agent.conf
@@ -0,0 +1,27 @@
+[agent]
+ server = puppet.<%= domain %>
+
+ pluginsync = true
+
+ # unfortunately, ecosse and jonund sync at the same time, thus causing problems
+ # the proper fix is to use something other than sqlite for stored config, but this would
+ # take more time to deploy, so the quick fix is this one (misc, 04/07/2011)
+ splay = true
+
+ report = true
+
+ graph = true
+<% if environment %>
+ environment = <%= environment %>
+<% end %>
+ # The file in which puppetd stores a list of the classes
+ # associated with the retrieved configuration. Can be loaded in
+ # the separate ``puppet`` executable using the ``--loadclasses``
+ # option.
+ # The default value is '$confdir/classes.txt'.
+ classfile = $vardir/classes.txt
+
+ # Where puppetd caches the local configuration. An
+ # extension indicating the cache format is added automatically.
+ # The default value is '$confdir/localconfig'.
+ localconfig = $vardir/localconfig
diff --git a/modules/puppet/templates/puppet.conf b/modules/puppet/templates/puppet.conf
index fcb81a35..28e8c363 100644
--- a/modules/puppet/templates/puppet.conf
+++ b/modules/puppet/templates/puppet.conf
@@ -1,4 +1,8 @@
+<% db_config = scope.lookupvar('puppet::stored_config::db_config') %>
[main]
+ # listen on both ipv4 and ipv6
+ bindaddress = *
+
# The Puppet log directory.
# The default value is '$vardir/log'.
logdir = /var/log/puppet
@@ -11,24 +15,7 @@
# The default value is '$confdir/ssl'.
ssldir = $vardir/ssl
-[master]
- certname = puppetmaster.<%= domain %>
- reports = tagmail
-
-[agent]
- server = puppetmaster.<%= domain %>
-
- pluginsync = true
-
- report = true
- # The file in which puppetd stores a list of the classes
- # associated with the retrieved configuratiion. Can be loaded in
- # the separate ``puppet`` executable using the ``--loadclasses``
- # option.
- # The default value is '$confdir/classes.txt'.
- classfile = $vardir/classes.txt
-
- # Where puppetd caches the local configuration. An
- # extension indicating the cache format is added automatically.
- # The default value is '$confdir/localconfig'.
- localconfig = $vardir/localconfig
+ modulepath = $confdir/modules:$confdir/deployment:$confdir/external:/usr/share/puppet/modules
+ queue_type = stomp
+ queue_source = stomp://localhost:61613
+<%= db_config %>
diff --git a/modules/puppet/templates/puppet.master.conf b/modules/puppet/templates/puppet.master.conf
new file mode 100644
index 00000000..0180fc2a
--- /dev/null
+++ b/modules/puppet/templates/puppet.master.conf
@@ -0,0 +1,14 @@
+<% db_config = scope.lookupvar('puppet::stored_config::db_config') %>
+[master]
+ certname = puppet.<%= domain %>
+
+ # tagmail should be kept last, until this bug is fixed
+ # https://projects.puppetlabs.com/issues/5018
+ reports = store,socket,tagmail
+ reportfrom = root@<%= domain %>
+
+ # Never remove this:
+ # Store config is used to populate other configs
+ storeconfigs = true
+ async_storeconfigs = true
+<%= db_config %>
diff --git a/modules/puppet/templates/tagmail.conf b/modules/puppet/templates/tagmail.conf
index cf988123..96b034aa 100644
--- a/modules/puppet/templates/tagmail.conf
+++ b/modules/puppet/templates/tagmail.conf
@@ -1 +1 @@
-err: mageia-sysadm@<%= domain %>
+err: sysadmin-reports@ml.<%= domain %>
diff --git a/modules/puppet/templates/thin.yml b/modules/puppet/templates/thin.yml
new file mode 100644
index 00000000..8cf4231d
--- /dev/null
+++ b/modules/puppet/templates/thin.yml
@@ -0,0 +1,18 @@
+---
+daemonize: true
+require: []
+
+timeout: 30
+user: puppet
+group: puppet
+wait: 30
+log: /var/log/thin.log
+max_conns: 1024
+chdir: /etc/puppet
+address: 127.0.0.1
+servers: 3
+environment: production
+max_persistent_conns: 512
+pid: /var/run/puppet/puppetmaster.pid
+rackup: /usr/local/share/puppet.config.ru
+port: 18140
diff --git a/modules/report-socket/lib/puppet/reports/socket.rb b/modules/report-socket/lib/puppet/reports/socket.rb
new file mode 100644
index 00000000..b1af057d
--- /dev/null
+++ b/modules/report-socket/lib/puppet/reports/socket.rb
@@ -0,0 +1,33 @@
+require 'puppet'
+require 'yaml'
+
+unless Puppet.version >= '2.6.5'
+ fail "This report processor requires Puppet version 2.6.5 or later"
+end
+
+Puppet::Reports.register_report(:socket) do
+ configfile = File.join([File.dirname(Puppet.settings[:config]), "socket.yaml"])
+ # do not raise an error since this would show up in the puppet log
+ # raise(Puppet::ParseError, "Socket report config file #{configfile} not readable") unless
+ if File.exist?(configfile)
+
+ # TODO add support for using another user ?
+ config = YAML.load_file(configfile)
+ SOCKET_PATH = config[:socket_path]
+ else
+ SOCKET_PATH = nil
+ end
+
+ desc <<-DESC
+ Send notification of failed reports to a socket.
+ DESC
+
+ def process
+ if self.status == 'failed'
+ message = "Puppet run for #{self.host} #{self.status} at #{Time.now.asctime}."
+ if SOCKET_PATH and File.exist?(SOCKET_PATH)
+ Puppet::Util.execute("echo #{message} > #{SOCKET_PATH}" , "nobody", "nogroup")
+ end
+ end
+ end
+end
diff --git a/modules/restrictshell/manifests/allow.pp b/modules/restrictshell/manifests/allow.pp
new file mode 100644
index 00000000..cb1fd9a2
--- /dev/null
+++ b/modules/restrictshell/manifests/allow.pp
@@ -0,0 +1,7 @@
+define restrictshell::allow {
+ include shell
+ file { "/etc/membersh-conf.d/allow_${name}.pl":
+ mode => '0755',
+ content => "\$use_${name} = 1;\n",
+ }
+}
diff --git a/modules/restrictshell/manifests/allow_git.pp b/modules/restrictshell/manifests/allow_git.pp
new file mode 100644
index 00000000..ed12a577
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_git.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_git {
+ restrictshell::allow { 'git': }
+}
diff --git a/modules/restrictshell/manifests/allow_maintdb.pp b/modules/restrictshell/manifests/allow_maintdb.pp
new file mode 100644
index 00000000..e5123cf1
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_maintdb.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_maintdb {
+ restrictshell::allow{ 'maintdb': }
+}
diff --git a/modules/restrictshell/manifests/allow_pkgsubmit.pp b/modules/restrictshell/manifests/allow_pkgsubmit.pp
new file mode 100644
index 00000000..14c6357b
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_pkgsubmit.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_pkgsubmit {
+ restrictshell::allow { 'pkgsubmit': }
+}
diff --git a/modules/restrictshell/manifests/allow_rsync.pp b/modules/restrictshell/manifests/allow_rsync.pp
new file mode 100644
index 00000000..6049122a
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_rsync.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_rsync {
+ restrictshell::allow { 'rsync': }
+}
diff --git a/modules/restrictshell/manifests/allow_scp.pp b/modules/restrictshell/manifests/allow_scp.pp
new file mode 100644
index 00000000..3e6cb1fb
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_scp.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_scp {
+ restrictshell::allow{ 'scp': }
+}
diff --git a/modules/restrictshell/manifests/allow_sftp.pp b/modules/restrictshell/manifests/allow_sftp.pp
new file mode 100644
index 00000000..55c1f396
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_sftp.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_sftp {
+ restrictshell::allow { 'sftp': }
+}
diff --git a/modules/restrictshell/manifests/allow_svn.pp b/modules/restrictshell/manifests/allow_svn.pp
new file mode 100644
index 00000000..99b2c9fa
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_svn.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_svn {
+ restrictshell::allow{ 'svn': }
+}
diff --git a/modules/restrictshell/manifests/allow_upload_bin.pp b/modules/restrictshell/manifests/allow_upload_bin.pp
new file mode 100644
index 00000000..b55c41b3
--- /dev/null
+++ b/modules/restrictshell/manifests/allow_upload_bin.pp
@@ -0,0 +1,3 @@
+class restrictshell::allow_upload_bin {
+ restrictshell::allow { 'upload_bin': }
+}
diff --git a/modules/restrictshell/manifests/init.pp b/modules/restrictshell/manifests/init.pp
index c4569e94..c27f26dc 100644
--- a/modules/restrictshell/manifests/init.pp
+++ b/modules/restrictshell/manifests/init.pp
@@ -1,55 +1 @@
-class restrictshell {
- $allow_svn = "0"
- $allow_git = "0"
- $allow_rsync = "0"
- $allow_pkgsubmit = "0"
-
- $ldap_pwfile = "/etc/ldap.secret"
-
- class allow_svn_git_pkgsubmit {
- $allow_svn = "1"
- $allow_git = "1"
- $allow_pkgsubmit = "1"
- }
-
- file { '/usr/local/bin/sv_membersh.pl':
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template("restrictshell/sv_membersh.pl"),
- }
-
- file { '/etc/membersh-conf.pl':
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template("restrictshell/membersh-conf.pl"),
- }
-
- package { 'python-ldap':
- ensure => installed,
- }
-
- $pubkeys_directory = "/var/lib/pubkeys"
- file { $pubkeys_directory:
- ensure => directory,
- owner => root,
- group => root,
- mode => 755,
- }
-
- file { '/usr/local/bin/ldap-sshkey2file.py':
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template("restrictshell/ldap-sshkey2file.py"),
- requires => Package['python-ldap']
- }
-
-
-
-
-}
+class restrictshell { }
diff --git a/modules/restrictshell/manifests/shell.pp b/modules/restrictshell/manifests/shell.pp
new file mode 100644
index 00000000..3ef2a036
--- /dev/null
+++ b/modules/restrictshell/manifests/shell.pp
@@ -0,0 +1,14 @@
+class restrictshell::shell {
+ file { '/etc/membersh-conf.d':
+ ensure => directory,
+ }
+
+ mga_common::local_script { 'sv_membersh.pl':
+ content => template('restrictshell/sv_membersh.pl'),
+ }
+
+ file { '/etc/membersh-conf.pl':
+ mode => '0755',
+ content => template('restrictshell/membersh-conf.pl'),
+ }
+}
diff --git a/modules/restrictshell/templates/ldap-sshkey2file.py b/modules/restrictshell/templates/ldap-sshkey2file.py
deleted file mode 100755
index ec5afc8e..00000000
--- a/modules/restrictshell/templates/ldap-sshkey2file.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/python
-
-import sys
-import os
-import random
-
-try:
- import ldap
-except ImportError, e:
- print "Please install python-ldap before running this program"
- sys.exit(1)
-
-basedn="<%= dc_suffix %>"
-peopledn="ou=people,%s" % basedn
-uris=['ldap://ldap.<%= domain %>']
-random.shuffle(uris)
-uri = " ".join(uris)
-timeout=5
-binddn="cn=<%= fqdn %>,ou=Hosts," % basedn
-pwfile="<%= ldap_pwfile %>"
-# filter out disabled accounts also
-# too bad uidNumber doesn't support >= filters
-filter="(&(objectClass=inetOrgPerson)(objectClass=ldapPublicKey)(objectClass=posixAccount)(sshPublicKey=*)(!(shadowExpire=*)))"
-keypathprefix="<%= pubkeys_directory %>"
-
-def usage():
- print "%s" % sys.argv[0]
- print
- print "Will fetch all enabled user accounts under %s" % peopledn
- print "with ssh keys in them and write each one to"
- print "%s/<login>/authorized_keys" % keypathprefix
- print
- print "This script is intented to be run from cron as root"
- print
-
-def get_pw(pwfile):
- try:
- f = open(pwfile, 'r')
- except IOError, e:
- print "Error while reading password file, aborting"
- print e
- sys.exit(1)
- pw = f.readline().strip()
- f.close()
- return pw
-
-def write_keys(keys, user, uid, gid):
- try:
- os.makedirs("%s/%s" % (keypathprefix,user), 0700)
- except:
- pass
- keyfile = "%s/%s/authorized_keys" % (keypathprefix,user)
- f = open(keyfile, 'w')
- for key in keys:
- f.write(key.strip() + "\n")
- f.close()
- os.chmod(keyfile, 0600)
- os.chown(keyfile, uid, gid)
- os.chmod("%s/%s" % (keypathprefix,user), 0700)
- os.chown("%s/%s" % (keypathprefix,user), uid, gid)
-
-if len(sys.argv) != 1:
- usage()
- sys.exit(1)
-
-bindpw = get_pw(pwfile)
-
-try:
- ld = ldap.initialize(uri)
- ld.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
- ld.start_tls_s()
- ld.bind_s(binddn, bindpw)
- res = ld.search_s(peopledn, ldap.SCOPE_ONELEVEL, filter, ['uid','sshPublicKey','uidNumber','gidNumber'])
- try:
- os.makedirs(keypathprefix, 0701)
- except:
- pass
- for result in res:
- dn, entry = result
- # skip possible system users
- if int(entry['uidNumber'][0]) < 500:
- continue
- write_keys(entry['sshPublicKey'], entry['uid'][0], int(entry['uidNumber'][0]), int(entry['gidNumber'][0]))
- ld.unbind_s()
-except Exception, e:
- print "Error"
- raise
-
-sys.exit(0)
-
-
-# vim:ts=4:sw=4:et:ai:si
diff --git a/modules/restrictshell/templates/membersh-conf.pl b/modules/restrictshell/templates/membersh-conf.pl
index 0d9887e1..9e0c8bf5 100755
--- a/modules/restrictshell/templates/membersh-conf.pl
+++ b/modules/restrictshell/templates/membersh-conf.pl
@@ -1,16 +1,20 @@
-$use_svn = "<%= allow_svn %>";
+
+
$bin_svn = "/usr/bin/svnserve";
$regexp_svn = "^svnserve -t\$";
#@prepend_args_svn = ( '-r', '/svn' );
@prepend_args_svn = ();
-$use_git = "<%= allow_git %>";
-$bin_git = "/usr/bin/git-shell";
+$bin_git = "/usr/share/gitolite/gitolite-shell";
-$use_rsync = "<%= allow_rsync %>";
$bin_rsync = "/usr/bin/rsync";
$regexp_rsync = "^rsync --server";
$regexp_dir_rsync = "^/.*";
-$use_pkgsubmit = "<%= allow_pkgsubmit %>";
+$bin_sftp = "<%= @lib_dir %>/ssh/sftp-server";
+$regexp_sftp = "^(/usr/lib(64)?/ssh/sftp-server|/usr/lib/sftp-server|/usr/libexec/sftp-server|/usr/lib/openssh/sftp-server)";
+foreach my $f (glob("/etc/membersh-conf.d/allow_*pl")) {
+ do($f)
+}
+1;
diff --git a/modules/restrictshell/templates/sv_membersh.pl b/modules/restrictshell/templates/sv_membersh.pl
index 521587d0..0b07f23a 100644
--- a/modules/restrictshell/templates/sv_membersh.pl
+++ b/modules/restrictshell/templates/sv_membersh.pl
@@ -62,8 +62,16 @@ our $use_git = "0";
our $bin_git = "/usr/bin/git-shell";
our $use_pkgsubmit = "0";
-our $regexp_pkgsubmit = "^/usr/share/repsys/create-srpm ";
-our $bin_pkgsubmit = "/usr/share/repsys/create-srpm";
+our $regexp_pkgsubmit = "^/usr/share/repsys/create-srpm |^/usr/local/bin/submit_package ";
+our $bin_pkgsubmit = "/usr/local/bin/submit_package";
+
+our $use_maintdb = "0";
+our $regexp_maintdb = "^/usr/local/bin/wrapper.maintdb ";
+our $bin_maintdb = "/usr/local/bin/wrapper.maintdb";
+
+our $use_upload_bin = "0";
+our $regexp_upload_bin = "^/usr/local/bin/wrapper.upload-bin ";
+our $bin_upload_bin = "/usr/local/bin/wrapper.upload-bin";
# Open configuration file
if (-e "/etc/membersh-conf.pl") {
@@ -92,6 +100,10 @@ if (-e "/etc/membersh-conf.pl") {
# $regexp_dir_rsync = "^(/upload)|(/var/ftp)";
#
# $use_pkgsubmit = "1";
+#
+# $use_maintdb = "1";
+#
+# $use_upload_bin = "1";
if ($#ARGV == 1 and $ARGV[0] eq "-c") {
@@ -135,22 +147,37 @@ if ($#ARGV == 1 and $ARGV[0] eq "-c") {
push( @args, @args_user );
exec($bin_svn, @args) or die("Failed to exec $bin_svn: $!");
- } elsif ($use_git and $ARGV[1] =~ m:git-.+:) {
+ } elsif ($use_git and $ARGV[1] =~ m:^$bin_git\b:) {
- # Delegate filtering to git-shell
- exec($bin_git, @ARGV) or die("Failed to exec $bin_git: $!");
+ # Delegate filtering to gitolite-shell
+ my ($gitolite_bin, @rest) = split(' ', $ARGV[1]);
+ exec($bin_git, @rest) or die("Failed to exec $bin_git: $!");
} elsif ($use_pkgsubmit and
$ARGV[1] =~ m:$regexp_pkgsubmit:) {
my ($createsrpm, @rest) = split(' ', $ARGV[1]);
exec($bin_pkgsubmit, @rest) or die("Failed to exec $bin_pkgsubmit: $!");
+ } elsif ($use_maintdb and
+ $ARGV[1] =~ m:$regexp_maintdb:) {
+ my ($maintdb, @rest) = split(' ', $ARGV[1]);
+ exec($bin_maintdb, @rest) or die("Failed to exec $bin_maintdb: $!");
+ } elsif ($use_upload_bin and
+ $ARGV[1] =~ m:$regexp_upload_bin:) {
+ my ($upload_bin, @rest) = split(' ', $ARGV[1]);
+ exec($bin_upload_bin, @rest) or die("Failed to exec $bin_upload_bin: $!");
}
}
unless (-e "/etc/membersh-errormsg") {
- print STDERR "You tried to execute: @ARGV[1..$#ARGV]\n";
+ if (@ARGV) {
+ print STDERR "You tried to execute: @ARGV[1..$#ARGV]\n";
+ } else {
+ print STDERR "You tried to run a interactive shell.\n"
+ }
print STDERR "Sorry, you are not allowed to execute that command.\n";
+ print STDERR "You are member of the following groups :\n";
+ print STDERR qx(groups);
} else {
open(ERRORMSG, "< /etc/membersh-errormsg");
while (<ERRORMSG>) {
diff --git a/modules/rsnapshot/manifests/init.pp b/modules/rsnapshot/manifests/init.pp
new file mode 100644
index 00000000..5d145172
--- /dev/null
+++ b/modules/rsnapshot/manifests/init.pp
@@ -0,0 +1,74 @@
+class rsnapshot {
+ class base($confdir = '/data/backups/conf') {
+ package { ['rsnapshot']: }
+
+ file { $confdir:
+ ensure => directory,
+ owner => root,
+ group => root,
+ mode => '0700',
+ }
+
+ @rsnapshot::cron_file { 'hourly': }
+ @rsnapshot::cron_file { 'daily': }
+ @rsnapshot::cron_file { 'weekly': }
+ @rsnapshot::cron_file { 'monthly': }
+ }
+
+ define cron_file($rsnapshot_conf = []) {
+ $filepath = "/tmp/cron.${name}_rsnapshot-backups"
+ $rsnapshot_arg = $name
+ file { $filepath:
+ ensure => present,
+ content => template('rsnapshot/cron_file'),
+ owner => root,
+ group => root,
+ mode => '0755',
+ }
+ }
+
+ # - 'backup' is an array of "source destination" to backup
+ # - 'backup_script' is an array of "script destination"
+ # - ${x}_interval is the number of hourly, daily, weekly, monthly
+ # backups that should be kept. If you don't want hourly, daily,
+ # weekly or monthly backups, set ${x}_interval to '0'
+ define backup(
+ $snapshot_root = '/data/backups',
+ $one_fs = '1',
+ $backup = [],
+ $backup_script = [],
+ $hourly_interval = '0',
+ $daily_interval = '6',
+ $weekly_interval = '4',
+ $monthly_interval = '3'
+ ) {
+ $conffile = "${rsnapshot::base::confdir}/${name}.conf"
+ file { $conffile:
+ owner => root,
+ group => root,
+ mode => '0700',
+ content => template('rsnapshot/rsnapshot.conf'),
+ }
+
+ if ($hourly_interval != '0') {
+ Rsnapshot::Cron_file <| title == 'hourly' |> {
+ rsnapshot_conf +> $conffile,
+ }
+ }
+ if ($daily_interval != '0') {
+ Rsnapshot::Cron_file <| title == 'daily' |> {
+ rsnapshot_conf +> $conffile,
+ }
+ }
+ if ($weekly_interval != '0') {
+ Rsnapshot::Cron_file <| title == 'weekly' |> {
+ rsnapshot_conf +> $conffile,
+ }
+ }
+ if ($monthly_interval != '0') {
+ Rsnapshot::Cron_file <| title == 'monthly' |> {
+ rsnapshot_conf +> $conffile,
+ }
+ }
+ }
+}
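
A hedged usage sketch of the backup define above; the backup host and source paths are placeholders. Each entry of the backup array is a "source destination" pair, and only the cron files whose interval is non-zero pick up the generated configuration:

# Illustrative only: host and paths are placeholders.
include rsnapshot::base

rsnapshot::backup { 'example_host':
  backup          => ['root@host.example.org:/etc/ example_host/',
                      'root@host.example.org:/var/lib/ example_host/'],
  hourly_interval => '0',  # skip hourly snapshots, keep the other defaults
}
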
diff --git a/modules/rsnapshot/templates/cron_file b/modules/rsnapshot/templates/cron_file
new file mode 100644
index 00000000..43ca9e1b
--- /dev/null
+++ b/modules/rsnapshot/templates/cron_file
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+<%- for conf in @rsnapshot_conf -%>
+/usr/bin/rsnapshot -c <%= conf %> <%= rsnapshot_arg %>
+<%- end -%>
diff --git a/modules/rsnapshot/templates/rsnapshot.conf b/modules/rsnapshot/templates/rsnapshot.conf
new file mode 100644
index 00000000..4eeee4d0
--- /dev/null
+++ b/modules/rsnapshot/templates/rsnapshot.conf
@@ -0,0 +1,209 @@
+#################################################
+# rsnapshot.conf - rsnapshot configuration file #
+#################################################
+# #
+# PLEASE BE AWARE OF THE FOLLOWING RULES: #
+# #
+# This file requires tabs between elements #
+# #
+# Directories require a trailing slash: #
+# right: /home/ #
+# wrong: /home #
+# #
+#################################################
+
+#######################
+# CONFIG FILE VERSION #
+#######################
+
+config_version 1.2
+
+###########################
+# SNAPSHOT ROOT DIRECTORY #
+###########################
+
+# All snapshots will be stored under this root directory.
+#
+snapshot_root <%= @snapshot_root %>
+
+# If no_create_root is enabled, rsnapshot will not automatically create the
+# snapshot_root directory. This is particularly useful if you are backing
+# up to removable media, such as a FireWire or USB drive.
+#
+#no_create_root 1
+
+#################################
+# EXTERNAL PROGRAM DEPENDENCIES #
+#################################
+
+# LINUX USERS: Be sure to uncomment "cmd_cp". This gives you extra features.
+# EVERYONE ELSE: Leave "cmd_cp" commented out for compatibility.
+#
+# See the README file or the man page for more details.
+#
+cmd_cp /bin/cp
+
+# uncomment this to use the rm program instead of the built-in perl routine.
+#
+cmd_rm /bin/rm
+
+# rsync must be enabled for anything to work. This is the only command that
+# must be enabled.
+#
+cmd_rsync /usr/bin/rsync
+
+# Uncomment this to enable remote ssh backups over rsync.
+#
+cmd_ssh /usr/bin/ssh
+
+# Comment this out to disable syslog support.
+#
+cmd_logger /bin/logger
+
+# Uncomment this to specify the path to "du" for disk usage checks.
+# If you have an older version of "du", you may also want to check the
+# "du_args" parameter below.
+#
+cmd_du /usr/bin/du
+
+# Uncomment this to specify the path to rsnapshot-diff.
+#
+cmd_rsnapshot_diff /usr/bin/rsnapshot-diff
+
+# Specify the path to a script (and any optional arguments) to run right
+# before rsnapshot syncs files
+#
+#cmd_preexec /path/to/preexec/script
+
+# Specify the path to a script (and any optional arguments) to run right
+# after rsnapshot syncs files
+#
+#cmd_postexec /path/to/postexec/script
+
+#########################################
+# BACKUP INTERVALS #
+# Must be unique and in ascending order #
+# i.e. hourly, daily, weekly, etc. #
+#########################################
+
+<%- if @hourly_interval != '0' -%>
+interval hourly <%= @hourly_interval %>
+<%- end -%>
+<%- if @daily_interval != '0' -%>
+interval daily <%= @daily_interval %>
+<%- end -%>
+<%- if @weekly_interval != '0' -%>
+interval weekly <%= @weekly_interval %>
+<%- end -%>
+<%- if @monthly_interval != '0' -%>
+interval monthly <%= @monthly_interval %>
+<%- end -%>
+
+############################################
+# GLOBAL OPTIONS #
+# All are optional, with sensible defaults #
+############################################
+
+# Verbose level, 1 through 5.
+# 1 Quiet Print fatal errors only
+# 2 Default Print errors and warnings only
+# 3 Verbose Show equivalent shell commands being executed
+# 4 Extra Verbose Show extra verbose information
+# 5 Debug mode Everything
+#
+verbose 2
+
+# Same as "verbose" above, but controls the amount of data sent to the
+# logfile, if one is being used. The default is 3.
+#
+loglevel 3
+
+# If you enable this, data will be written to the file you specify. The
+# amount of data written is controlled by the "loglevel" parameter.
+#
+logfile /var/log/rsnapshot
+
+# If enabled, rsnapshot will write a lockfile to prevent two instances
+# from running simultaneously (and messing up the snapshot_root).
+# If you enable this, make sure the lockfile directory is not world
+# writable. Otherwise anyone can prevent the program from running.
+#
+lockfile /var/run/rsnapshot.pid
+
+# Default rsync args. All rsync commands have at least these options set.
+#
+#rsync_short_args -a
+#rsync_long_args --delete --numeric-ids --relative --delete-excluded
+
+# ssh has no args passed by default, but you can specify some here.
+#
+#ssh_args -p 22
+
+# Default arguments for the "du" program (for disk space reporting).
+# The GNU version of "du" is preferred. See the man page for more details.
+# If your version of "du" doesn't support the -h flag, try -k flag instead.
+#
+#du_args -csh
+
+# If this is enabled, rsync won't span filesystem partitions within a
+# backup point. This essentially passes the -x option to rsync.
+# The default is 0 (off).
+#
+one_fs <%= @one_fs %>
+
+# The include and exclude parameters, if enabled, simply get passed directly
+# to rsync. If you have multiple include/exclude patterns, put each one on a
+# separate line. Please look up the --include and --exclude options in the
+# rsync man page for more details on how to specify file name patterns.
+#
+#include ???
+#include ???
+#exclude ???
+#exclude ???
+
+# The include_file and exclude_file parameters, if enabled, simply get
+# passed directly to rsync. Please look up the --include-from and
+# --exclude-from options in the rsync man page for more details.
+#
+#include_file /path/to/include/file
+#exclude_file /path/to/exclude/file
+
+# If your version of rsync supports --link-dest, consider enabling this.
+# This is the best way to support special files (FIFOs, etc) cross-platform.
+# The default is 0 (off).
+#
+link_dest 1
+
+# When sync_first is enabled, it changes the default behaviour of rsnapshot.
+# Normally, when rsnapshot is called with its lowest interval
+# (i.e.: "rsnapshot hourly"), it will sync files AND rotate the lowest
+# intervals. With sync_first enabled, "rsnapshot sync" handles the file sync,
+# and all interval calls simply rotate files. See the man page for more
+# details. The default is 0 (off).
+#
+#sync_first 0
+
+# If enabled, rsnapshot will move the oldest directory for each interval
+# to [interval_name].delete, then it will remove the lockfile and delete
+# that directory just before it exits. The default is 0 (off).
+#
+#use_lazy_deletes 0
+
+# Number of rsync re-tries. If you experience any network problems or
+# network card issues that tend to cause ssh to crap-out with
+# "Corrupted MAC on input" errors, for example, set this to a non-zero
+# value to have the rsync operation re-tried
+#
+#rsync_numtries 0
+
+###############################
+### BACKUP POINTS / SCRIPTS ###
+###############################
+
+<%- for b in @backup -%>
+<%= b.split().unshift("backup").join("\t") %>
+<%- end -%>
+
+<%- for bs in @backup_script -%>
+<%= bs.split().unshift("backup_script").join("\t") %>
+<%- end -%>
diff --git a/modules/rsyncd/manifests/init.pp b/modules/rsyncd/manifests/init.pp
index 148cc426..5cc9e2fd 100644
--- a/modules/rsyncd/manifests/init.pp
+++ b/modules/rsyncd/manifests/init.pp
@@ -1,32 +1,12 @@
-class rsyncd {
+class rsyncd($rsyncd_conf = 'rsyncd/rsyncd.conf') {
- package { xinetd:
- ensure => installed
+ xinetd::service { 'rsync':
+ content => template('rsyncd/xinetd')
}
- service { xinetd:
- ensure => running,
- path => "/etc/init.d/xinetd",
- subscribe => [ Package["xinetd"], File["rsync"] ]
- }
-
- file { "rsync":
- path => "/etc/xinetd.d/rsync",
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["xinetd"],
- content => template("rsyncd/xinetd")
- }
-
- file { "rsyncd.conf":
- path => "/etc/rsyncd.conf",
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- require => Package["rsync"],
- content => template("rsyncd/rsyncd.conf")
+ file { 'rsyncd.conf':
+ path => '/etc/rsyncd.conf',
+ require => Package['rsync'],
+ content => template($rsyncd_conf)
}
}
diff --git a/modules/rsyncd/templates/rsyncd.conf b/modules/rsyncd/templates/rsyncd.conf
index e5cfa6d2..11dbc6a4 100644
--- a/modules/rsyncd/templates/rsyncd.conf
+++ b/modules/rsyncd/templates/rsyncd.conf
@@ -1,15 +1,7 @@
# $Id$
uid = nobody
-gid = nogroup
+gid = nogroup
-[mageia]
- path = /distrib/mirror/
- comment = Mageia Mirror Tree
- hosts allow = \
- distrib-coffee.ipsl.jussieu.fr \
- distribipsl.aero.jussieu.fr \
- ibiblio.org \
- 152.46.7.122 \
- 152.19.134.16 \
+# default empty rsyncd.conf
diff --git a/modules/rsyncd/templates/xinetd b/modules/rsyncd/templates/xinetd
index 46a3fd33..b477e413 100644
--- a/modules/rsyncd/templates/xinetd
+++ b/modules/rsyncd/templates/xinetd
@@ -1,4 +1,3 @@
-# $Id: xinetd 319 2009-02-28 17:05:16Z guillomovitch $
service rsync
{
disable = no
@@ -8,5 +7,9 @@ service rsync
server = /usr/bin/rsync
server_args = --daemon
log_on_failure += USERID
+ flags = IPv6
+ # some mirrors do not seem to use locks when downloading from
+ # us and try to download the same stuff 15 times in a row
+ per_source = 4
}
diff --git a/modules/serial_console/manifests/init.pp b/modules/serial_console/manifests/init.pp
new file mode 100644
index 00000000..b6716954
--- /dev/null
+++ b/modules/serial_console/manifests/init.pp
@@ -0,0 +1 @@
+class serial_console {}
diff --git a/modules/serial_console/manifests/serial_console.pp b/modules/serial_console/manifests/serial_console.pp
new file mode 100644
index 00000000..dd68c84c
--- /dev/null
+++ b/modules/serial_console/manifests/serial_console.pp
@@ -0,0 +1,8 @@
+# name: ttyS0
+define serial_console::serial_console() {
+ service { "serial-getty@${name}":
+ provider => systemd,
+ ensure => running,
+ enable => true,
+ }
+}
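The define above only wraps a systemd service resource. A minimal usage sketch, assuming the 'ttyS0' port name suggested by the comment:

    # Hypothetical usage; 'ttyS0' is the port name suggested by the comment above.
    serial_console::serial_console { 'ttyS0': }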
diff --git a/modules/shorewall/manifests/init.pp b/modules/shorewall/manifests/init.pp
index 7c8e1f55..daea6b2c 100644
--- a/modules/shorewall/manifests/init.pp
+++ b/modules/shorewall/manifests/init.pp
@@ -2,101 +2,101 @@ class shorewall {
include concat::setup
define shorewallfile () {
- $filename = "/tmp/shorewall/${name}"
- $header = "puppet:///modules/shorewall/headers/${name}"
- $footer = "puppet:///modules/shorewall/footers/${name}"
- concat{$filename:
- owner => root,
- group => root,
- mode => 600,
- }
+ $filename = "/tmp/shorewall/${name}"
+ $header = "puppet:///modules/shorewall/headers/${name}"
+ $footer = "puppet:///modules/shorewall/footers/${name}"
+ concat{$filename:
+ owner => root,
+ group => root,
+ mode => '0600',
+ }
- concat::fragment{"${name}_header":
- target => $filename,
- order => 1,
- source => $header,
- }
+ concat::fragment{"${name}_header":
+ target => $filename,
+ order => 1,
+ source => $header,
+ }
- concat::fragment{"${name}_footer":
- target => $filename,
- order => 99,
- source => $footer,
- }
+ concat::fragment{"${name}_footer":
+ target => $filename,
+ order => 99,
+ source => $footer,
+ }
}
### Rules
shorewallfile{ rules: }
define rule_line($order = 50) {
- $filename = "/tmp/shorewall/rules"
- $line = "${name}\n"
- concat::fragment{"newline_${name}":
- target => $filename,
- order => $order,
- content => $line,
- }
+ $filename = "/tmp/shorewall/rules"
+ $line = "${name}\n"
+ concat::fragment{"newline_${name}":
+ target => $filename,
+ order => $order,
+ content => $line,
+ }
}
class allow_ssh_in {
- rule_line { "ACCEPT all all tcp 22":
- order => 5,
- }
+ rule_line { "ACCEPT all all tcp 22":
+ order => 5,
+ }
}
class allow_dns_in {
- rule_line { "ACCEPT net fw tcp 53": }
- rule_line { "ACCEPT net fw udp 53": }
+ rule_line { "ACCEPT net fw tcp 53": }
+ rule_line { "ACCEPT net fw udp 53": }
}
class allow_smtp_in {
- rule_line { "ACCEPT net fw tcp 25": }
+ rule_line { "ACCEPT net fw tcp 25": }
}
class allow_www_in {
- rule_line { "ACCEPT net fw tcp 80": }
+ rule_line { "ACCEPT net fw tcp 80": }
}
### Zones
shorewallfile{ zones: }
define zone_line($order = 50) {
- $filename = "/tmp/shorewall/zones"
- $line = "${name}\n"
- concat::fragment{"newline_${name}":
- target => $filename,
- order => $order,
- content => $line,
- }
+ $filename = "/tmp/shorewall/zones"
+ $line = "${name}\n"
+ concat::fragment{"newline_${name}":
+ target => $filename,
+ order => $order,
+ content => $line,
+ }
}
class default_zones {
- zone_line { "net ipv4":
- order => 2,
- }
- zone_line { "fw firewall":
- order => 3,
- }
+ zone_line { "net ipv4":
+ order => 2,
+ }
+ zone_line { "fw firewall":
+ order => 3,
+ }
}
### Policy
shorewallfile{ policy: }
define policy_line($order = 50) {
- $filename = "/tmp/shorewall/policy"
- $line = "${name}\n"
- concat::fragment{"newline_${name}":
- target => $filename,
- order => $order,
- content => $line,
- }
+ $filename = "/tmp/shorewall/policy"
+ $line = "${name}\n"
+ concat::fragment{"newline_${name}":
+ target => $filename,
+ order => $order,
+ content => $line,
+ }
}
class default_policy {
- policy_line{ "fw net ACCEPT":
- order => 2,
- }
- policy_line{ "net all DROP info":
- order => 3,
- }
- policy_line{ "all all REJECT info":
- order => 4,
- }
+ policy_line{ "fw net ACCEPT":
+ order => 2,
+ }
+ policy_line{ "net all DROP info":
+ order => 3,
+ }
+ policy_line{ "all all REJECT info":
+ order => 4,
+ }
}
class default_firewall {
- include default_zones
- include default_policy
- include allow_ssh_in
+ include default_zones
+ include default_policy
+ include allow_ssh_in
}
}
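The class above builds each shorewall file as a concat resource whose header sits at order 1 and footer at order 99, with rule/zone/policy lines in between (order 50 by default). A hedged node-level sketch, assuming the nested classes and defines are reachable under the shorewall:: namespace:

    # Hypothetical node definition; opens HTTPS in addition to the defaults.
    include shorewall
    include shorewall::default_firewall
    include shorewall::allow_www_in
    shorewall::rule_line { 'ACCEPT net fw tcp 443':
      order => 10,
    }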
diff --git a/modules/spamassassin/manifests/init.pp b/modules/spamassassin/manifests/init.pp
new file mode 100644
index 00000000..f0955513
--- /dev/null
+++ b/modules/spamassassin/manifests/init.pp
@@ -0,0 +1,18 @@
+class spamassassin {
+ # it should also require make; bug fixed in cooker
+ package { 'spamassassin-sa-compile':
+ notify => Exec['sa-compile'],
+ }
+
+ package { 'spamassassin': }
+
+ file { '/etc/mail/spamassassin/local.cf':
+ require => Package['spamassassin'],
+ content => template('spamassassin/local.cf')
+ }
+
+ exec { 'sa-compile':
+ refreshonly => true,
+ require => [Package['spamassassin-sa-compile'],Package['spamassassin']]
+ }
+}
diff --git a/modules/spamassassin/templates/local.cf b/modules/spamassassin/templates/local.cf
new file mode 100644
index 00000000..0862cb87
--- /dev/null
+++ b/modules/spamassassin/templates/local.cf
@@ -0,0 +1,95 @@
+# This is the right place to customize your installation of SpamAssassin.
+#
+# See 'perldoc Mail::SpamAssassin::Conf' for details of what can be
+# tweaked.
+#
+# Only a small subset of options are listed below
+#
+###########################################################################
+
+# Add *****SPAM***** to the Subject header of spam e-mails
+#
+# rewrite_header Subject *****SPAM*****
+
+
+# Save spam messages as a message/rfc822 MIME attachment instead of
+# modifying the original message (0: off, 2: use text/plain instead)
+#
+# report_safe 1
+
+
+# Set which networks or hosts are considered 'trusted' by your mail
+# server (i.e. not spammers)
+#
+# trusted_networks 212.17.35.
+
+
+# Set file-locking method (flock is not safe over NFS, but is faster)
+#
+# lock_method flock
+
+
+# Set the threshold at which a message is considered spam (default: 5.0)
+#
+# required_score 5.0
+
+
+# Use Bayesian classifier (default: 1)
+#
+# use_bayes 1
+
+
+# Bayesian classifier auto-learning (default: 1)
+#
+# bayes_auto_learn 1
+
+
+# Set headers which may provide inappropriate cues to the Bayesian
+# classifier
+#
+# bayes_ignore_header X-Bogosity
+# bayes_ignore_header X-Spam-Flag
+# bayes_ignore_header X-Spam-Status
+
+
+# Some shortcircuiting, if the plugin is enabled
+#
+ifplugin Mail::SpamAssassin::Plugin::Shortcircuit
+#
+# default: strongly-whitelisted mails are *really* whitelisted now, if the
+# shortcircuiting plugin is active, causing early exit to save CPU load.
+# Uncomment to turn this on
+#
+# shortcircuit USER_IN_WHITELIST on
+# shortcircuit USER_IN_DEF_WHITELIST on
+# shortcircuit USER_IN_ALL_SPAM_TO on
+# shortcircuit SUBJECT_IN_WHITELIST on
+
+# the opposite; blacklisted mails can also save CPU
+#
+# shortcircuit USER_IN_BLACKLIST on
+# shortcircuit USER_IN_BLACKLIST_TO on
+# shortcircuit SUBJECT_IN_BLACKLIST on
+
+# if you have taken the time to correctly specify your "trusted_networks",
+# this is another good way to save CPU
+#
+# shortcircuit ALL_TRUSTED on
+
+# and a well-trained bayes DB can save running rules, too
+#
+# shortcircuit BAYES_99 spam
+# shortcircuit BAYES_00 ham
+
+endif # Mail::SpamAssassin::Plugin::Shortcircuit
+
+required_hits 5
+rewrite_header Subject [SPAM]
+report_safe 0
+ifplugin Mail::SpamAssassin::Plugin::AWL
+auto_whitelist_path /var/spool/spamassassin/auto-whitelist
+auto_whitelist_file_mode 0666
+endif # Mail::SpamAssassin::Plugin::AWL
+
+loadplugin Mail::SpamAssassin::Plugin::Rule2XSBody
+
diff --git a/modules/spec-tree-reports/manifests/init.pp b/modules/spec-tree-reports/manifests/init.pp
new file mode 100644
index 00000000..dc78ea72
--- /dev/null
+++ b/modules/spec-tree-reports/manifests/init.pp
@@ -0,0 +1,50 @@
+# spec-rpm-mismatch is a report that compares the versions of RPMs available
+# in the repository versus the versions created by the latest spec files and
+# shows those that don't match.
+
+class spec-tree-reports(
+ $report = '/var/www/bs/spec-rpm-mismatch.html',
+ $srpms = 'file:///distrib/bootstrap/distrib/{version}/SRPMS/{media}/{section}/',
+ $release = "mga${buildsystem::var::distros::distros['cauldron']['version']}",
+) {
+ $user = 'spec-tree-reports'
+ $home = "/var/lib/${user}"
+ $hour = 6
+ $minute = 39
+
+ user { $user:
+ comment => 'spec-tree report generator',
+ home => $home,
+ }
+
+ file { $home:
+ ensure => directory,
+ owner => $user,
+ mode => '0755',
+ }
+
+ package { 'spec-tree':
+ ensure => installed,
+ }
+
+ file { "${report}":
+ ensure => present,
+ owner => $user,
+ mode => '0644',
+ replace => false,
+ content => '*',
+ }
+
+ mga_common::local_script { 'generate-spec-rpm-mismatch-report':
+ content => template('spec-tree-reports/generate-spec-rpm-mismatch-report'),
+ }
+
+ cron { "rpm_mismatch_report":
+ command => "/usr/local/bin/generate-spec-rpm-mismatch-report |& systemd-cat -t generate-spec-rpm-mismatch-report",
+ hour => $hour,
+ minute => $minute,
+ user => $user,
+ environment => "MAILTO=root",
+ require => User[$user],
+ }
+}
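The class is parameterised, so the report location, SRPM source and release can be overridden at declaration time. A minimal sketch, with an assumed alternative report path and the other parameters left at their defaults:

    # Hypothetical declaration; the path is an assumed example value.
    class { 'spec-tree-reports':
      report => '/var/www/bs/reports/spec-rpm-mismatch.html',
    }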
diff --git a/modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report b/modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report
new file mode 100644
index 00000000..4bc2db65
--- /dev/null
+++ b/modules/spec-tree-reports/templates/generate-spec-rpm-mismatch-report
@@ -0,0 +1,10 @@
+#!/bin/bash
+# GENERATED BY PUPPET--DO NOT EDIT
+set -e
+trap 'test "$?" -ne 0 && echo Error in script' EXIT
+
+cd "$HOME"
+test -e errors.log && mv -f errors.log errors.log.1
+/usr/share/doc/spec-tree/examples/generate-mismatch-report --srpm_source <%= scope.function_shellquote([scope.lookupvar('srpms')]) -%> --release <%= scope.function_shellquote([scope.lookupvar('release')]) %>
+cp report.html <%= scope.function_shellquote([scope.lookupvar('report')]) %>
+rm -f report.html
diff --git a/modules/ssh/manifests/init.pp b/modules/ssh/manifests/init.pp
deleted file mode 100644
index 08570add..00000000
--- a/modules/ssh/manifests/init.pp
+++ /dev/null
@@ -1,336 +0,0 @@
-# =========
-# ssh::auth
-# =========
-#
-# The latest official release and documentation for ssh::auth can always
-# be found at http://reductivelabs.com/trac/puppet/wiki/Recipes/ModuleSSHAuth .
-#
-# Version: 0.3.2
-# Release date: 2009-12-29
-
-class ssh::auth {
-
-$keymaster_storage = "/var/lib/keys"
-
-Exec { path => "/usr/bin:/usr/sbin:/bin:/sbin" }
-Notify { withpath => false }
-
-
-##########################################################################
-
-
-# ssh::auth::key
-
-# Declare keys. The approach here is just to define a bunch of
-# virtual resources, representing key files on the keymaster, client,
-# and server. The virtual keys are then realized by
-# ssh::auth::{keymaster,client,server}, respectively. The reason for
-# doing things that way is that it makes ssh::auth::key into a "one
-# stop shop" where users can declare their keys with all of their
-# parameters, whether those parameters apply to the keymaster, server,
-# or client. The real work of creating, installing, and removing keys
-# is done in the private definitions called by the virtual resources:
-# ssh_auth_key_{master,server,client}.
-
-define key ($ensure = "present", $filename = "", $force = false, $group = "puppet", $home = "", $keytype = "rsa", $length = 2048, $maxdays = "", $mindate = "", $options = "", $user = "") {
-
- ssh_auth_key_namecheck { "${title}-title": parm => "title", value => $title }
-
- # apply defaults
- $_filename = $filename ? { "" => "id_${keytype}", default => $filename }
- $_length = $keytype ? { "rsa" => $length, "dsa" => 1024 }
- $_user = $user ? {
- "" => regsubst($title, '^([^@]*)@?.*$', '\1'),
- default => $user,
- }
- $_home = $home ? { "" => "/home/$_user", default => $home }
-
- ssh_auth_key_namecheck { "${title}-filename": parm => "filename", value => $_filename }
-
- @ssh_auth_key_master { $title:
- ensure => $ensure,
- force => $force,
- keytype => $keytype,
- length => $_length,
- maxdays => $maxdays,
- mindate => $mindate,
- }
- @ssh_auth_key_client { $title:
- ensure => $ensure,
- filename => $_filename,
- group => $group,
- home => $_home,
- user => $_user,
- }
- @ssh_auth_key_server { $title:
- ensure => $ensure,
- group => $group,
- home => $_home,
- options => $options,
- user => $_user,
- }
-}
-
-
-##########################################################################
-
-
-# ssh::auth::keymaster
-#
-# Keymaster host:
-# Create key storage; create, regenerate, and remove key pairs
-
-class keymaster {
-
- # Set up key storage
-
- file { $ssh::auth::keymaster_storage:
- ensure => directory,
- owner => puppet,
- group => puppet,
- mode => 644,
- }
-
- # Realize all virtual master keys
- Ssh_auth_key_master <| |>
-
-} # class keymaster
-
-
-##########################################################################
-
-
-# ssh::auth::client
-#
-# Install generated key pairs onto clients
-
-define client ($ensure = "", $filename = "", $group = "", $home = "", $user = "") {
-
- # Realize the virtual client keys.
- # Override the defaults set in ssh::auth::key, as needed.
- if $ensure { Ssh_auth_key_client <| title == $title |> { ensure => $ensure } }
- if $filename { Ssh_auth_key_client <| title == $title |> { filename => $filename } }
- if $group { Ssh_auth_key_client <| title == $title |> { group => $group } }
-
- if $user { Ssh_auth_key_client <| title == $title |> { user => $user, home => "/home/$user" } }
- if $home { Ssh_auth_key_client <| title == $title |> { home => $home } }
-
- realize Ssh_auth_key_client[$title]
-
-} # define client
-
-
-##########################################################################
-
-
-# ssh::auth::server
-#
-# Install public keys onto clients
-
-define server ($ensure = "", $group = "", $home = "", $options = "", $user = "") {
-
- # Realize the virtual server keys.
- # Override the defaults set in ssh::auth::key, as needed.
- if $ensure { Ssh_auth_key_server <| title == $title |> { ensure => $ensure } }
- if $group { Ssh_auth_key_server <| title == $title |> { group => $group } }
- if $options { Ssh_auth_key_server <| title == $title |> { options => $options } }
-
- if $user { Ssh_auth_key_server <| title == $title |> { user => $user, home => "/home/$user" } }
- if $home { Ssh_auth_key_server <| title == $title |> { home => $home } }
-
- realize Ssh_auth_key_server[$title]
-
-} # define server
-
-} # class ssh::auth
-
-
-##########################################################################
-
-
-# ssh_auth_key_master
-#
-# Create/regenerate/remove a key pair on the keymaster.
-# This definition is private, i.e. it is not intended to be called directly by users.
-# ssh::auth::key calls it to create virtual keys, which are realized in ssh::auth::keymaster.
-
-define ssh_auth_key_master ($ensure, $force, $keytype, $length, $maxdays, $mindate) {
-
- Exec { path => "/usr/bin:/usr/sbin:/bin:/sbin" }
- File {
- owner => puppet,
- group => puppet,
- mode => 600,
- }
-
- $keydir = "${ssh::auth::keymaster_storage}/${title}"
- $keyfile = "${keydir}/key"
-
- file {
- "$keydir":
- ensure => directory,
- mode => 644;
- "$keyfile":
- ensure => $ensure;
- "${keyfile}.pub":
- ensure => $ensure,
- mode => 644;
- }
-
- if $ensure == "present" {
-
- # Remove the existing key pair, if
- # * $force is true, or
- # * $maxdays or $mindate criteria aren't met, or
- # * $keytype or $length have changed
-
- $keycontent = file("${keyfile}.pub", "/dev/null")
- if $keycontent {
-
- if $force {
- $reason = "force=true"
- }
- if !$reason and $mindate and generate("/usr/bin/find", $keyfile, "!", "-newermt", "${mindate}") {
- $reason = "created before ${mindate}"
- }
- if !$reason and $maxdays and generate("/usr/bin/find", $keyfile, "-mtime", "+${maxdays}") {
- $reason = "older than ${maxdays} days"
- }
- if !$reason and $keycontent =~ /^ssh-... [^ ]+ (...) (\d+)$/ {
- if $keytype != $1 { $reason = "keytype changed: $1 -> $keytype" }
- else { if $length != $2 { $reason = "length changed: $2 -> $length" } }
- }
- if $reason {
- exec { "Revoke previous key ${title}: ${reason}":
- command => "rm $keyfile ${keyfile}.pub",
- before => Exec["Create key $title: $keytype, $length bits"],
- }
- }
- }
-
- # Create the key pair.
- # We "repurpose" the comment field in public keys on the keymaster to
- # store data about the key, i.e. $keytype and $length. This avoids
- # having to rerun ssh-keygen -l on every key at every run to determine
- # the key length.
- exec { "Create key $title: $keytype, $length bits":
- command => "ssh-keygen -t ${keytype} -b ${length} -f ${keyfile} -C \"${keytype} ${length}\" -N \"\"",
- user => "puppet",
- group => "puppet",
- creates => $keyfile,
- require => File[$keydir],
- before => File[$keyfile, "${keyfile}.pub"],
- }
-
- } # if $ensure == "present"
-
-} # define ssh_auth_key_master
-
-
-##########################################################################
-
-
-# ssh_auth_key_client
-#
-# Install a key pair into a user's account.
-# This definition is private, i.e. it is not intended to be called directly by users.
-
-define ssh_auth_key_client ($ensure, $filename, $group, $home, $user) {
-
- File {
- owner => $user,
- group => $group,
- mode => 600,
- require => [ User[$user], File[$home]],
- }
-
- $key_src_file = "${ssh::auth::keymaster_storage}/${title}/key" # on the keymaster
- $key_tgt_file = "${home}/.ssh/${filename}" # on the client
-
- $key_src_content_pub = file("${key_src_file}.pub", "/dev/null")
- if $ensure == "absent" or $key_src_content_pub =~ /^(ssh-...) ([^ ]+)/ {
- $keytype = $1
- $modulus = $2
- file {
- $key_tgt_file:
- ensure => $ensure,
- content => file($key_src_file, "/dev/null");
- "${key_tgt_file}.pub":
- ensure => $ensure,
- content => "$keytype $modulus $title\n",
- mode => 644;
- }
- } else {
- notify { "Private key file $key_src_file for key $title not found on keymaster; skipping ensure => present": }
- }
-
-} # define ssh_auth_key_client
-
-
-##########################################################################
-
-
-# ssh_auth_key_server
-#
-# Install a public key into a server user's authorized_keys(5) file.
-# This definition is private, i.e. it is not intended to be called directly by users.
-
-define ssh_auth_key_server ($ensure, $group, $home, $options, $user) {
-
- # on the keymaster:
- $key_src_dir = "${ssh::auth::keymaster_storage}/${title}"
- $key_src_file = "${key_src_dir}/key.pub"
- # on the server:
- $key_tgt_file = "${home}/.ssh/authorized_keys"
-
- File {
- owner => $user,
- group => $group,
- require => User[$user],
- mode => 600,
- }
- Ssh_authorized_key {
- user => $user,
- target => $key_tgt_file,
- }
-
- if $ensure == "absent" {
- ssh_authorized_key { $title: ensure => "absent" }
- }
- else {
- $key_src_content = file($key_src_file, "/dev/null")
- if ! $key_src_content {
- notify { "Public key file $key_src_file for key $title not found on keymaster; skipping ensure => present": }
- } else { if $ensure == "present" and $key_src_content !~ /^(ssh-...) ([^ ]*)/ {
- err("Can't parse public key file $key_src_file")
- notify { "Can't parse public key file $key_src_file for key $title on the keymaster: skipping ensure => $ensure": }
- } else {
- $keytype = $1
- $modulus = $2
- ssh_authorized_key { $title:
- ensure => "present",
- type => $keytype,
- key => $modulus,
- options => $options ? { "" => undef, default => $options },
- }
- }} # if ... else ... else
- } # if ... else
-
-} # define ssh_auth_key_server
-
-
-##########################################################################
-
-
-# ssh_auth_key_namecheck
-#
-# Check a name (e.g. key title or filename) for the allowed form
-
-define ssh_auth_key_namecheck ($parm, $value) {
- if $value !~ /^[A-Za-z0-9]/ {
- fail("ssh::auth::key: $parm '$value' not allowed: must begin with a letter or digit")
- }
- if $value !~ /^[A-Za-z0-9_.:@-]+$/ {
- fail("ssh::auth::key: $parm '$value' not allowed: may only contain the characters A-Za-z0-9_.:@-")
- }
-} # define namecheck
diff --git a/modules/ssmtp/manifests/init.pp b/modules/ssmtp/manifests/init.pp
new file mode 100644
index 00000000..fa4b94d2
--- /dev/null
+++ b/modules/ssmtp/manifests/init.pp
@@ -0,0 +1,7 @@
+class ssmtp {
+ package { 'ssmtp': }
+
+ file { '/etc/ssmtp/ssmtp.conf':
+ content => template('ssmtp/ssmtp.conf')
+ }
+}
diff --git a/modules/ssmtp/templates/ssmtp.conf b/modules/ssmtp/templates/ssmtp.conf
new file mode 100644
index 00000000..d7a9125f
--- /dev/null
+++ b/modules/ssmtp/templates/ssmtp.conf
@@ -0,0 +1,9 @@
+root=mageia-sysadm@<%= @domain %>
+
+mailhub=mx.<%= @domain %>
+
+rewriteDomain=
+
+# The full hostname
+hostname=<%= @fqdn %>
+
diff --git a/modules/stompserver/manifests/init.pp b/modules/stompserver/manifests/init.pp
new file mode 100644
index 00000000..9c7e1770
--- /dev/null
+++ b/modules/stompserver/manifests/init.pp
@@ -0,0 +1,7 @@
+class stompserver {
+ package { 'stompserver': }
+
+ service { 'stompserver':
+ require => Package['stompserver'],
+ }
+}
diff --git a/modules/stored_config/lib/puppet/parser/functions/get_fact.rb b/modules/stored_config/lib/puppet/parser/functions/get_fact.rb
new file mode 100644
index 00000000..8acdb2d5
--- /dev/null
+++ b/modules/stored_config/lib/puppet/parser/functions/get_fact.rb
@@ -0,0 +1,19 @@
+require 'puppet/rails'
+
+# get_fact($node,$fact)
+# -> return the fact, from stored config
+
+module Puppet::Parser::Functions
+ newfunction(:get_fact, :type => :rvalue) do |args|
+ node = args[0]
+ fact = args[1]
+ # TODO use
+ # Puppet::Node::Facts.indirection.find(Puppet[:certname])
+ Puppet::Rails.connect()
+ return Puppet::Rails::FactValue.find( :first,
+ :joins => [ :host, :fact_name ],
+ :conditions => { :fact_names => {:name => fact },
+ :hosts => {:name => node }}
+ ).value
+ end
+end
diff --git a/modules/stored_config/lib/puppet/parser/functions/get_param_values.rb b/modules/stored_config/lib/puppet/parser/functions/get_param_values.rb
new file mode 100644
index 00000000..ee0c3440
--- /dev/null
+++ b/modules/stored_config/lib/puppet/parser/functions/get_param_values.rb
@@ -0,0 +1,25 @@
+require 'puppet/rails'
+
+# function :
+# get_param_values($name, $type, $param_name)
+# -> return the value corresponding to $param_name for the $name object of type $type
+
+module Puppet::Parser::Functions
+ newfunction(:get_param_values, :type => :rvalue) do |args|
+ resource_name = args[0]
+ exported_type = args[1]
+ param_name = args[2]
+ Puppet::Rails.connect()
+ # TODO use find_each
+ # TODO fail more gracefully when nothing match
+ # using a default value, maybe ?
+ return Puppet::Rails::ParamValue.find(:first,
+ :joins => [ :resource, :param_name ],
+ :conditions => { :param_names => {:name => param_name },
+ :resources => { :exported => true,
+ :restype => exported_type,
+ :title => resource_name,
+ } }
+ ).value
+ end
+end
diff --git a/modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb b/modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb
new file mode 100644
index 00000000..4c7459a8
--- /dev/null
+++ b/modules/stored_config/lib/puppet/parser/functions/list_exported_ressources.rb
@@ -0,0 +1,17 @@
+require 'puppet/rails'
+
+# function :
+# list_exported_ressources($resource)
+# -> return a array of title
+
+module Puppet::Parser::Functions
+ newfunction(:list_exported_ressources, :type => :rvalue) do |args|
+ exported_type = args[0]
+ #TODO manage tags
+ Puppet::Rails.connect()
+ # TODO use find_each
+ return Puppet::Rails::Resource.find(:all,
+ :conditions => { :exported => true,
+ :restype => exported_type }).map { |r| r.title }
+ end
+end
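These stored_config parser functions are meant to be called from manifests to query the stored-configuration database. A hedged sketch of how they could be used, where the node name, fact and exported resource type are assumed example values:

    # Hypothetical manifest usage of the stored_config functions.
    $mirror_hosts = list_exported_ressources('Nagios_host')
    $alpha_ip     = get_fact('alpha.example.org', 'ipaddress')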
diff --git a/modules/subversion/manifests/client.pp b/modules/subversion/manifests/client.pp
new file mode 100644
index 00000000..083a58da
--- /dev/null
+++ b/modules/subversion/manifests/client.pp
@@ -0,0 +1,13 @@
+class subversion::client {
+ # without this package, svn spams the log with
+ # Oct 26 13:30:01 valstar svn: No worthy mechs found
+ # see
+ # https://mail-index.netbsd.org/pkgsrc-users/2008/11/23/msg008706.html
+ #
+ $sasl2_package = $::architecture ? {
+ x86_64 => 'lib64sasl2-plug-anonymous',
+ default => 'libsasl2-plug-anonymous'
+ }
+
+ package { ['subversion', $sasl2_package]: }
+}
diff --git a/modules/subversion/manifests/hook.pp b/modules/subversion/manifests/hook.pp
new file mode 100644
index 00000000..a29ae22d
--- /dev/null
+++ b/modules/subversion/manifests/hook.pp
@@ -0,0 +1,9 @@
+define subversion::hook($content, $type) {
+ $array = split($name,'\|')
+ $repo = $array[0]
+ $script = $array[1]
+ file { "${repo}/hooks/${type}.d/${script}":
+ content => $content,
+ mode => '0755',
+ }
+}
diff --git a/modules/subversion/manifests/hook/post_commit.pp b/modules/subversion/manifests/hook/post_commit.pp
new file mode 100644
index 00000000..90d939cd
--- /dev/null
+++ b/modules/subversion/manifests/hook/post_commit.pp
@@ -0,0 +1,6 @@
+define subversion::hook::post_commit($content) {
+ hook { $name:
+ content => $content,
+ type => 'post-commit',
+ }
+}
diff --git a/modules/subversion/manifests/hook/pre_commit.pp b/modules/subversion/manifests/hook/pre_commit.pp
new file mode 100644
index 00000000..fa44b168
--- /dev/null
+++ b/modules/subversion/manifests/hook/pre_commit.pp
@@ -0,0 +1,6 @@
+define subversion::hook::pre_commit($content) {
+ hook { $name:
+ content => $content,
+ type => 'pre-commit',
+ }
+}
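The hook defines encode '<repository path>|<script name>' in their title, which subversion::hook splits on '|' to build the target path under hooks/<type>.d/. A minimal sketch with an assumed repository path and script content:

    # Hypothetical post-commit hook; '/svn/soft' is an assumed repository path.
    subversion::hook::post_commit { '/svn/soft|log_commit':
      content => "#!/bin/sh\nlogger commit-received\n",
    }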
diff --git a/modules/subversion/manifests/init.pp b/modules/subversion/manifests/init.pp
index 638fa1ec..9f009b5e 100644
--- a/modules/subversion/manifests/init.pp
+++ b/modules/subversion/manifests/init.pp
@@ -2,49 +2,62 @@
# https://github.com/reductivelabs/puppet-vcsrepo
# but not integrated in puppet directly for the moment
class subversion {
+ class server {
+ include subversion::tools
+ package { 'subversion-server': }
- class server {
- package { ["subversion-server", "subversion-tools"]:
- ensure => installed,
+ $svn_base_path = '/svn/'
+
+ xinetd::service { 'svnserve':
+ content => template('subversion/xinetd')
}
- package { ["perl-SVN-Notify-Config", "perl-SVN-Notify-Mirror"]:
- ensure => installed,
+ file { $svn_base_path:
+ ensure => directory,
}
-
- $local_dir = "/usr/local/share/subversion/"
- $local_dirs = ["$local_dir/pre-commit.d", "$local_dir/post-commit.d"]
+
+ package { ['perl-SVN-Notify-Config', 'perl-SVN-Notify-Mirror']: }
+
+ $local_dir = '/usr/local/share/subversion/'
+ $local_dirs = ["${local_dir}/pre-commit.d", "${local_dir}/post-commit.d"]
file { [$local_dir,$local_dirs]:
- owner => root,
- group => root,
- mode => 755,
- ensure => directory,
+ ensure => directory,
}
- # workaround the lack of umask command in puppet < 2.7
- file { "/usr/local/bin/create_svn_repo.sh":
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template('subversion/create_svn_repo.sh')
- }
+ # workaround the lack of umask command in puppet < 2.7
+ mga_common::local_script { 'create_svn_repo.sh':
+ content => template('subversion/create_svn_repo.sh')
+ }
- file { "$local_dir/pre-commit.d/no_root_commit":
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template('subversion/no_root_commit')
+ file { "${local_dir}/pre-commit.d/no_binary":
+ mode => '0755',
+ content => template('subversion/no_binary')
}
- file { "$local_dir/pre-commit.d/no_empty_message":
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template('subversion/no_empty_message')
+ file { "${local_dir}/pre-commit.d/no_root_commit":
+ mode => '0755',
+ content => template('subversion/no_root_commit')
+ }
+
+ file { "${local_dir}/pre-commit.d/no_empty_message":
+ mode => '0755',
+ content => template('subversion/no_empty_message')
+ }
+
+ file { "${local_dir}/pre-commit.d/single_word_commit":
+ mode => '0755',
+ content => template('subversion/single_word_commit')
+ }
+
+ file { "${local_dir}/pre-revprop-change":
+ mode => '0755',
+ content => template('subversion/pre-revprop-change')
+ }
+
+ file { "${local_dir}/pre-commit.d/converted_to_git":
+ mode => '0755',
+ content => template('subversion/converted_to_git')
}
# TODO : add check for
@@ -57,166 +70,46 @@ class subversion {
# - openldap , like named
define syntax_check($regexp_ext,$check_cmd) {
- file { "$local_dir/pre-commit.d/$name":
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template('subversion/syntax_check.sh')
+ file { "${subversion::server::local_dir}/pre-commit.d/${name}":
+ mode => '0755',
+ content => template('subversion/syntax_check.sh')
}
}
- syntax_check{"check_perl":
- regexp_ext => "\.p[lm]$",
- check_cmd => "perl -c"
- }
-
- syntax_check{"check_puppet":
- regexp_ext => "\.pp$",
- check_cmd => "puppet --color=false --confdir=/tmp --vardir=/tmp --parseonly"
- }
-
- syntax_check{"check_ruby":
- regexp_ext => "\.rb$",
- check_cmd => "ruby -c"
+ syntax_check{'check_perl':
+ regexp_ext => '\.p[lm]$',
+ check_cmd => 'perl -c'
}
- syntax_check{"check_puppet_templates":
- regexp_ext => "modules/.*/templates/.*$",
- check_cmd => "erb -x -T - | ruby -c"
+ syntax_check{'check_puppet':
+ regexp_ext => '\.pp$',
+ check_cmd => 'puppet parser validate -'
}
- }
-
- # FIXME ugly
- define pre_commit_link($directory) {
- file { "pre_commit_link-${name}":
- path => "$directory/$name",
- ensure => "/usr/local/share/subversion/pre-commit.d/$name",
- owner => root,
- group => root,
- mode => 755,
- }
- }
-
- # TODO
- # deploy a cronjob to make a backup file ( ie, dump in some directory )
-
- # documentation :
- # group : group that have commit access on the svn
- # public : boolean if the svn is readable by anybody or not
- # commit_mail : array of people who will receive mail after each commit
- # syntax_check : array of pre-commit script with syntax check to add
- # extract_dir : hash of directory to update upon commit ( with svn update ),
- # initial checkout is not handled, nor the permission
- # TODO, handle the tags ( see svn::notify::mirror )
-
- define repository ($group = "svn",
- $public = true,
- $commit_mail = [],
- $syntax_check = [],
- $extract_dir = []) {
- # check permissions
- # http://svnbook.red-bean.com/nightly/fr/svn.serverconfig.multimethod.html
- # $name ==> directory of the repo
- include subversion::server
- # TODO set umask -> requires puppet 2.7.0
- # unfortunatly, umask is required
- # http://projects.puppetlabs.com/issues/4424
- exec { "/usr/local/bin/create_svn_repo.sh $name":
- user => root,
- group => $group,
- creates => "$name/hooks",
- require => Package['subversion-tools'],
- }
-
- file { "$name":
- group => $group,
- owner => root,
- mode => $public ? {
- true => 644,
- false => 640
- },
- ensure => directory
- }
-
- file { ["$name/hooks/pre-commit","$name/hooks/post-commit"]:
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template("subversion/hook_commit.sh"),
- require => Exec["/usr/local/bin/create_svn_repo.sh $name"],
- }
-
- file { ["$name/hooks/post-commit.d", "$name/hooks/pre-commit.d"]:
- ensure => directory,
- owner => root,
- group => root,
- mode => 755,
- require => File["$name/hooks/pre-commit"],
- }
-
- if $commit_mail {
- file { "$name/hooks/post-commit.d/send_mail":
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template("subversion/hook_sendmail.pl"),
- require => [Package['perl-SVN-Notify-Config']],
- }
+ syntax_check{'check_ruby':
+ regexp_ext => '\.rb$',
+ check_cmd => 'ruby -c'
}
- if $extract_dir {
- file { "$name/hooks/post-commit.d/extract_dir":
- ensure => present,
- owner => root,
- group => root,
- mode => 755,
- content => template("subversion/hook_extract.pl"),
- require => [Package['perl-SVN-Notify-Mirror']],
- }
+ syntax_check{'check_puppet_templates':
+ regexp_ext => 'modules/.*/templates/.*$',
+ check_cmd => 'erb -P -x -T - | ruby -c'
}
- pre_commit_link { ['no_empty_message','no_root_commit', $syntax_check]:
- directory => "$name/hooks/pre-commit.d/"
- }
- }
-
-
- class client {
- package { subversion:
- ensure => installed,
- }
- # svn spam log with
- # Oct 26 13:30:01 valstar svn: No worthy mechs found
- # without it, source http://mail-index.netbsd.org/pkgsrc-users/2008/11/23/msg008706.html
- #
- $sasl2_package = $architecture ? {
- x86_64 => "lib64sasl2-plug-anonymous",
- default => "libsasl2-plug-anonymous"
- }
-
- package {"$sasl2_package":
- ensure => "installed"
+ syntax_check{'check_po':
+ regexp_ext => '\.po$',
+ check_cmd => 'msgfmt -c -'
}
- }
-
- define snapshot($source, $refresh = '*/5', $user = 'root') {
- include subversion::client
-
- exec { "/usr/bin/svn co $source $name":
- creates => $name,
- user => $user,
+ syntax_check{'check_php':
+ regexp_ext => '\.php$',
+ check_cmd => 'php -d display_errors=1 -d error_reporting="E_ALL|E_STRICT" -l'
}
- cron { "update $name":
- command => "cd $name && /usr/bin/svn update -q",
- user => $user,
- minute => $refresh
- }
+ # needed for check_php
+ package { 'php-cli': }
}
+ # TODO
+ # deploy a cronjob to make a backup file (i.e. dump it in some directory)
}
diff --git a/modules/subversion/manifests/mirror.pp b/modules/subversion/manifests/mirror.pp
new file mode 100644
index 00000000..2285ecb2
--- /dev/null
+++ b/modules/subversion/manifests/mirror.pp
@@ -0,0 +1,6 @@
+class subversion::mirror {
+ include subversion::tools
+ mga_common::local_script { 'create_svn_mirror.sh':
+ content => template('subversion/create_svn_mirror.sh')
+ }
+}
diff --git a/modules/subversion/manifests/mirror_repository.pp b/modules/subversion/manifests/mirror_repository.pp
new file mode 100644
index 00000000..1e0fabd3
--- /dev/null
+++ b/modules/subversion/manifests/mirror_repository.pp
@@ -0,0 +1,15 @@
+define subversion::mirror_repository( $source,
+ $refresh = '*/5') {
+ include subversion::mirror
+
+ exec { "/usr/local/bin/create_svn_mirror.sh ${name} ${source}":
+ creates => $name,
+ require => Package['subversion-tools']
+ }
+
+ cron { "update ${name}":
+ command => "/usr/bin/svnsync synchronize -q file://${name}",
+ minute => $refresh,
+ require => Exec["/usr/local/bin/create_svn_mirror.sh ${name} ${source}"],
+ }
+}
diff --git a/modules/subversion/manifests/pre_commit_link.pp b/modules/subversion/manifests/pre_commit_link.pp
new file mode 100644
index 00000000..fa3c2b2c
--- /dev/null
+++ b/modules/subversion/manifests/pre_commit_link.pp
@@ -0,0 +1,8 @@
+define subversion::pre_commit_link() {
+ $scriptname = regsubst($name,'^.*/', '')
+ file { $name:
+ ensure => 'link',
+ target => "/usr/local/share/subversion/pre-commit.d/${scriptname}",
+ mode => '0755',
+ }
+}
diff --git a/modules/subversion/manifests/repository.pp b/modules/subversion/manifests/repository.pp
new file mode 100644
index 00000000..b223e6ae
--- /dev/null
+++ b/modules/subversion/manifests/repository.pp
@@ -0,0 +1,132 @@
+# documentation :
+# group : group that has commit access on the svn repository
+# public : whether the repository is readable by anybody
+# commit_mail : array of people who will receive mail after each commit
+# irker_conf : hash containing irker config values. See man irkerhook
+# for possible values in irker.conf.
+# irkerhook_path : path to the irkerhook.py script
+# i18n_mail : address also notified when .pot files are committed
+# no_binary : do not accept files with common binary extensions
+# on this repository
+# restricted_to_user : restrict commits to a single user
+# syntax_check : array of pre-commit syntax-check scripts to add
+# extract_dir : hash of directories to update upon commit (with svn update);
+# the initial checkout is not handled, nor are permissions
+# TODO: handle the tags (see svn::notify::mirror)
+
+define subversion::repository($group = 'svn',
+ $public = true,
+ $commit_mail = '',
+ $irker_conf = undef,
+ $irkerhook_path = '/usr/lib/irker/irkerhook.py',
+ $i18n_mail = '',
+ $no_binary = false,
+ $restricted_to_user = false,
+ $syntax_check = '',
+ $extract_dir = '') {
+ # check permissions
+ # https://svnbook.red-bean.com/nightly/fr/svn.serverconfig.multimethod.html
+ # $name ==> directory of the repo
+ include subversion::server
+ # TODO set umask -> requires puppet 2.7.0
+ # unfortunately, umask is required
+ # https://projects.puppetlabs.com/issues/4424
+ exec { "/usr/local/bin/create_svn_repo.sh ${name}":
+ user => 'root',
+ group => $group,
+ creates => "${name}/hooks",
+ require => Package['subversion-tools'],
+ }
+
+ file { $name:
+ ensure => directory,
+ group => $group,
+ owner => 'root',
+ mode => $public ? {
+ true => '0644',
+ false => '0640',
+ },
+ }
+
+ file { ["${name}/hooks/pre-commit","${name}/hooks/post-commit"]:
+ mode => '0755',
+ content => template('subversion/hook_commit.sh'),
+ require => Exec["/usr/local/bin/create_svn_repo.sh ${name}"],
+ }
+
+ file { ["${name}/hooks/post-commit.d", "${name}/hooks/pre-commit.d"]:
+ ensure => directory,
+ require => File["${name}/hooks/pre-commit"],
+ }
+
+ file { "${name}/hooks/pre-revprop-change":
+ ensure => "${subversion::server::local_dir}/pre-revprop-change",
+ mode => '0755',
+ require => File["${name}/hooks/pre-commit"],
+ }
+
+ if $restricted_to_user {
+ subversion::hook::pre_commit { "${name}|restricted_to_user":
+ content => template('subversion/restricted_to_user'),
+ }
+ } else {
+ file { "${name}/hooks/pre-commit.d/restricted_to_user":
+ ensure => absent,
+ }
+ }
+
+ if $commit_mail {
+ subversion::hook::post_commit { "${name}|send_mail":
+ content => template('subversion/hook_sendmail.pl'),
+ require => Package['perl-SVN-Notify-Config'],
+ }
+ } else {
+ file { "${name}/hooks/post-commit.d/send_mail":
+ ensure => absent,
+ }
+ }
+
+
+ if $irker_conf {
+ subversion::hook::post_commit { "${name}|irker":
+ content => template('subversion/hook_irker'),
+ }
+ file { "${name}/irker.conf":
+ content => template('subversion/irker.conf'),
+ }
+ } else {
+ file { "${name}/hooks/post-commit.d/irker":
+ ensure => absent,
+ }
+ }
+
+
+ if $no_binary {
+ pre_commit_link { "${name}/hooks/pre-commit.d/no_binary": }
+ } else {
+ file { "${name}/hooks/pre-commit.d/no_binary":
+ ensure => absent,
+ }
+ }
+
+ if $extract_dir {
+ subversion::hook::post_commit {"${name}|extract_dir":
+ content => template('subversion/hook_extract.pl'),
+ require => [Package['perl-SVN-Notify-Mirror']],
+ }
+ } else {
+ file { "${name}/hooks/post-commit.d/extract_dir":
+ ensure => absent,
+ }
+ }
+
+ pre_commit_link { "${name}/hooks/pre-commit.d/no_empty_message": }
+
+ pre_commit_link { "${name}/hooks/pre-commit.d/no_root_commit": }
+
+ pre_commit_link { "${name}/hooks/pre-commit.d/converted_to_git": }
+
+ if $syntax_check {
+ $syntax_check_array = regsubst($syntax_check,'^',"${name}/hooks/pre-commit.d/")
+ pre_commit_link { $syntax_check_array: }
+ }
+}
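A hedged example of declaring a repository with the parameters documented above; the path, group and mail address are assumed example values, while the syntax_check entries match scripts deployed by subversion::server:

    # Hypothetical repository declaration.
    subversion::repository { '/svn/soft':
      group        => 'mga-developers',
      commit_mail  => ['commits@example.org'],
      no_binary    => true,
      syntax_check => ['check_perl', 'check_puppet'],
    }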
diff --git a/modules/subversion/manifests/snapshot.pp b/modules/subversion/manifests/snapshot.pp
new file mode 100644
index 00000000..00e66dde
--- /dev/null
+++ b/modules/subversion/manifests/snapshot.pp
@@ -0,0 +1,21 @@
+define subversion::snapshot($source,
+ $refresh = '*/5',
+ $user = 'root') {
+
+ include subversion::client
+
+ exec { "/usr/bin/svn co ${source} ${name}":
+ creates => $name,
+ user => $user,
+ require => Package['subversion'],
+ }
+
+ if ($refresh != '0') {
+ cron { "update ${name}":
+ command => "cd ${name} && /usr/bin/svn update -q",
+ user => $user,
+ minute => $refresh,
+ require => Exec["/usr/bin/svn co ${source} ${name}"],
+ }
+ }
+}
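A minimal sketch of a working copy kept up to date by the generated cron job (every 5 minutes by default); the target path and source URL are assumed example values:

    # Hypothetical snapshot of a repository into a local directory.
    subversion::snapshot { '/var/lib/checkouts/website':
      source => 'svn://svn.example.org/svn/website',
    }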
diff --git a/modules/subversion/manifests/tools.pp b/modules/subversion/manifests/tools.pp
new file mode 100644
index 00000000..39d86373
--- /dev/null
+++ b/modules/subversion/manifests/tools.pp
@@ -0,0 +1,3 @@
+class subversion::tools {
+ package { 'subversion-tools': }
+}
diff --git a/modules/subversion/templates/converted_to_git b/modules/subversion/templates/converted_to_git
new file mode 100644
index 00000000..8f137506
--- /dev/null
+++ b/modules/subversion/templates/converted_to_git
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+REPOS="$1"
+TXN="$2"
+
+if [ ! -f "$REPOS/conf/git.conf" ]; then
+ exit 0
+fi
+
+REGEX=$(cat "$REPOS/conf/git.conf" | grep -v "^#" | grep -v "^ *$" | xargs | sed 's/ /|/g')
+
+if (svnlook dirs-changed -t $TXN "$REPOS" | grep -qE "^($REGEX)"); then
+ echo "The subversion path you have attempted to commit to has been converted to git." >&2
+ echo "Please see: https://wiki.mageia.org/en/Git_Migration" >&2
+ exit 1
+fi
diff --git a/modules/subversion/templates/create_svn_mirror.sh b/modules/subversion/templates/create_svn_mirror.sh
new file mode 100644
index 00000000..ab0ada1b
--- /dev/null
+++ b/modules/subversion/templates/create_svn_mirror.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+umask 0002
+LOCAL_REPOS=$1
+REMOTE_REPOS=$2
+svnadmin create $LOCAL_REPOS
+# needed, as svnsync complains otherwise:
+# svnsync: Repository has not been enabled to accept revision propchanges;
+# ask the administrator to create a pre-revprop-change hook
+ln -s /bin/true $LOCAL_REPOS/hooks/pre-revprop-change
+svnsync init file://$1 $2
+# do not sync now,
+# let cron do it, or puppet will complain (especially for long syncs)
+#svnsync synchronize file://$1
diff --git a/modules/subversion/templates/hook_commit.sh b/modules/subversion/templates/hook_commit.sh
index 0fdfc3e5..2b1b6ff3 100644
--- a/modules/subversion/templates/hook_commit.sh
+++ b/modules/subversion/templates/hook_commit.sh
@@ -1,5 +1,20 @@
#!/bin/sh
-for script in $0.d/*; do
+
+REP="$1"
+TXN="$2"
+
+author=$(svnlook author -t "$TXN" "$REP")
+
+# This is here only for as long as we use hook_sendmail.pl
+# We will be able to remove it when updating to a better mail-sending hook
+
+if [ "$author" = 'schedbot' ]; then
+ LIST=`ls -1 $0.d/* | grep -v send_mail`
+else
+ LIST=`ls -1 $0.d/*`
+fi
+
+for script in $LIST; do
if [ ! -x "$script" ]; then
continue
fi
@@ -10,4 +25,3 @@ for script in $0.d/*; do
$script $@ || exit 1
done
-
diff --git a/modules/subversion/templates/hook_irker b/modules/subversion/templates/hook_irker
new file mode 100644
index 00000000..8fd7a874
--- /dev/null
+++ b/modules/subversion/templates/hook_irker
@@ -0,0 +1,4 @@
+#!/bin/sh
+REPO=$1
+REV=$2
+<%= irkerhook_path %> --repository=$REPO $REV
diff --git a/modules/subversion/templates/hook_sendmail.pl b/modules/subversion/templates/hook_sendmail.pl
index 1fdc381f..cf3be6a4 100644
--- a/modules/subversion/templates/hook_sendmail.pl
+++ b/modules/subversion/templates/hook_sendmail.pl
@@ -6,8 +6,27 @@
handler: Alternative
alternative: HTML::ColorDiff
with-diff: 1
+ max_diff_length: 20000
+ ticket_map:
+ '(\bmga#(\d+)\b)': 'https://bugs.mageia.org/show_bug.cgi?id=%s'
+ revision-url: "https://svnweb.mageia.org/packages/?revision=%s&view=revision"
+ subject_cx: 1
+ from: subversion_noreply@ml.<%= @domain %>
to:
<%- commit_mail.each do |mail| -%>
- <%= mail %>
<%- end -%>
- from: root@<%= domain %>
+<%- if i18n_mail != '' -%>
+'.*\.pot$':
+ PATH: "/usr/bin:/usr/local/bin"
+ handler: Alternative
+ alternative: HTML::ColorDiff
+ with-diff: 1
+ max_diff_length: 20000
+ ticket_map:
+ '(\bmga#(\d+)\b)': 'https://bugs.mageia.org/show_bug.cgi?id=%s'
+ revision-url: "https://svnweb.mageia.org/packages/?revision=%s&view=revision"
+ subject_cx: 1
+ from: subversion_noreply@ml.<%= @domain %>
+ to: <%= i18n_mail %>
+<%- end -%>
diff --git a/modules/subversion/templates/irker.conf b/modules/subversion/templates/irker.conf
new file mode 100644
index 00000000..d037a120
--- /dev/null
+++ b/modules/subversion/templates/irker.conf
@@ -0,0 +1,7 @@
+<%-
+ content = ''
+ @irker_conf.keys.sort.each {|key|
+ content += key + ' = ' + @irker_conf[key] + "\n"
+ }
+-%>
+<%= content %>
diff --git a/modules/subversion/templates/no_binary b/modules/subversion/templates/no_binary
new file mode 100644
index 00000000..284642e5
--- /dev/null
+++ b/modules/subversion/templates/no_binary
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+REP="$1"
+TXN="$2"
+
+# Filter some binary files based on common filename extensions.
+# It does not fully prevent committing binary files; this script is only
+# here to avoid simple mistakes
+if svnlook changed -t "$TXN" "$REP" | grep -qi '\.\(gz\|bz2\|xz\|lzma\|Z\|7z\|tar\|tgz\|zip\|jpg\|gif\|png\|ogg\|mp3\|wav\|rar\|pdf\)$'
+then
+ echo 'no binary files allowed on this repository' >&2
+ exit 1
+fi
+
diff --git a/modules/subversion/templates/pre-revprop-change b/modules/subversion/templates/pre-revprop-change
new file mode 100644
index 00000000..e9b18150
--- /dev/null
+++ b/modules/subversion/templates/pre-revprop-change
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# script taken from svn example hooks
+
+REPOS="$1"
+REV="$2"
+USER="$3"
+PROPNAME="$4"
+ACTION="$5"
+
+if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
+
+echo "Changing revision properties other than svn:log is prohibited" >&2
+exit 1
+
diff --git a/modules/subversion/templates/restricted_to_user b/modules/subversion/templates/restricted_to_user
new file mode 100644
index 00000000..98297627
--- /dev/null
+++ b/modules/subversion/templates/restricted_to_user
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+REP="$1"
+TXN="$2"
+
+author=$(svnlook author -t "$TXN" "$REP")
+
+if [ "$author" != '<%= restricted_to_user %>' ]; then
+ echo "this repository is restricted to user <%= restricted_to_user %>" >&2
+ exit 1
+fi
+
diff --git a/modules/subversion/templates/single_word_commit b/modules/subversion/templates/single_word_commit
new file mode 100644
index 00000000..1b0ff8a5
--- /dev/null
+++ b/modules/subversion/templates/single_word_commit
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+REP="$1"
+TXN="$2"
+
+LOG=$(svnlook log -t "$TXN" "$REP")
+
+if ! echo "$LOG" | grep -qvP '^\s*\b\S+\b\s*$'; then
+ echo "one word commit message not allowed" >&2
+ exit 1
+fi
+
diff --git a/modules/subversion/templates/syntax_check.sh b/modules/subversion/templates/syntax_check.sh
index 74d7bf4a..3960cdab 100644
--- a/modules/subversion/templates/syntax_check.sh
+++ b/modules/subversion/templates/syntax_check.sh
@@ -2,6 +2,7 @@
REPOS="$1"
TXN="$2"
+export PATH="/bin/:/sbin/:/usr/bin/:/usr/sbin/:/usr/local/bin:/usr/local/sbin/"
changed=`svnlook changed -t "$TXN" "$REPOS"`
files=`echo $changed | awk '{print $2}'`
@@ -11,7 +12,7 @@ then
if [ $? -ne 0 ]
then
echo "Syntax error in $files." 1>&2
- echo "Check it with <%= check_cmd %>"
+ echo "Check it with <%= check_cmd %>" 1>&2
exit 1
fi
fi
diff --git a/modules/subversion/templates/xinetd b/modules/subversion/templates/xinetd
new file mode 100644
index 00000000..0919ae60
--- /dev/null
+++ b/modules/subversion/templates/xinetd
@@ -0,0 +1,14 @@
+# default: off
+# description: svnserve is the server part of Subversion.
+service svnserve
+{
+ disable = no
+ port = 3690
+ socket_type = stream
+ protocol = tcp
+ wait = no
+ user = svn
+ server = /usr/bin/svnserve
+ server_args = -i -r <%= svn_base_path %>
+ flags = IPv6
+}
diff --git a/modules/sudo/manifests/init.pp b/modules/sudo/manifests/init.pp
index 93ebc249..7d1277ce 100644
--- a/modules/sudo/manifests/init.pp
+++ b/modules/sudo/manifests/init.pp
@@ -1,20 +1,13 @@
class sudo {
- package { sudo:
- ensure => installed;
- }
+ package { 'sudo': }
- file { "/etc/sudoers.d":
+ file { '/etc/sudoers.d':
ensure => directory,
- mode => 711,
- owner => root,
- group => root,
+ mode => '0711',
}
- file { "/etc/sudoers":
- ensure => present,
- owner => root,
- group => root,
- mode => 440,
- content => template("sudo/sudoers")
+ file { '/etc/sudoers':
+ mode => '0440',
+ content => template('sudo/sudoers'),
}
}
diff --git a/modules/sudo/manifests/sudoers_config.pp b/modules/sudo/manifests/sudoers_config.pp
new file mode 100644
index 00000000..fdc38e9b
--- /dev/null
+++ b/modules/sudo/manifests/sudoers_config.pp
@@ -0,0 +1,6 @@
+define sudo::sudoers_config($content) {
+ file { "/etc/sudoers.d/${name}":
+ mode => '0440',
+ content => $content,
+ }
+}
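A hedged sketch of a sudoers drop-in managed with the define above; the group and command are assumed example values, not an existing rule:

    # Hypothetical drop-in created as /etc/sudoers.d/restart_httpd, mode 0440.
    sudo::sudoers_config { 'restart_httpd':
      content => "%mga-sysadmin ALL=(root) NOPASSWD: /usr/bin/systemctl restart httpd\n",
    }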
diff --git a/modules/sudo/templates/sudoers b/modules/sudo/templates/sudoers
index 80f4bfd7..5ac87f78 100644
--- a/modules/sudo/templates/sudoers
+++ b/modules/sudo/templates/sudoers
@@ -1 +1,14 @@
+Defaults env_reset
+Defaults env_keep = "COLORS DISPLAY HOSTNAME HISTSIZE LS_COLORS"
+Defaults env_keep += "MAIL PS1 PS2 USERNAME LANG LC_ADDRESS LC_CTYPE"
+Defaults env_keep += "LC_COLLATE LC_IDENTIFICATION LC_MEASUREMENT LC_MESSAGES"
+Defaults env_keep += "LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER LC_TELEPHONE"
+Defaults env_keep += "LC_TIME LC_ALL LANGUAGE LINGUAS _XKB_CHARSET XAUTHORITY"
+
+Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin
+
+## Allow root to run any commands anywhere
+root ALL=(ALL) ALL
+
+## Read drop-in files from /etc/sudoers.d (the # here does not mean a comment)
#includedir /etc/sudoers.d
diff --git a/modules/sympa/files/scenari/forbidden b/modules/sympa/files/scenari/forbidden
new file mode 100644
index 00000000..6c0ac7a8
--- /dev/null
+++ b/modules/sympa/files/scenari/forbidden
@@ -0,0 +1,2 @@
+title.gettext nobody
+true() smtp,md5,smime -> reject
diff --git a/modules/sympa/files/scenari/open_web_only_notify b/modules/sympa/files/scenari/open_web_only_notify
new file mode 100644
index 00000000..621e425c
--- /dev/null
+++ b/modules/sympa/files/scenari/open_web_only_notify
@@ -0,0 +1,5 @@
+title.gettext anyone on the web, notification is sent to list owner
+
+# do not notify if it is just an update
+is_subscriber([listname],[sender]) smtp,smime,md5 -> do_it
+true() md5 -> do_it,notify
diff --git a/modules/sympa/files/topics.conf b/modules/sympa/files/topics.conf
new file mode 100644
index 00000000..92e1809c
--- /dev/null
+++ b/modules/sympa/files/topics.conf
@@ -0,0 +1,32 @@
+bugsquad
+title Bug triaging
+
+sysadmin
+title System administration, infrastructure
+
+i18n
+title Internationalization and translation
+
+developers
+title Development
+
+qa
+title Quality Assurance
+
+governance
+title Board, Council and other governance groups
+
+forums
+title Forums
+
+doc
+title Documentation
+
+local
+title Local Community
+
+atelier
+title Atelier (Artwork, Web, Marketing, Communication)
+
+users
+title Users discussions
diff --git a/modules/sympa/manifests/datasource/ldap_group.pp b/modules/sympa/manifests/datasource/ldap_group.pp
new file mode 100644
index 00000000..6060bec4
--- /dev/null
+++ b/modules/sympa/manifests/datasource/ldap_group.pp
@@ -0,0 +1,5 @@
+define sympa::datasource::ldap_group {
+ file { "/etc/sympa/data_sources/${name}.incl":
+ content => template('sympa/data_sources/ldap_group.incl')
+ }
+}
diff --git a/modules/sympa/manifests/init.pp b/modules/sympa/manifests/init.pp
index 3a68ddcd..7f6fcfe6 100644
--- a/modules/sympa/manifests/init.pp
+++ b/modules/sympa/manifests/init.pp
@@ -1,40 +1 @@
-class sympa {
-
- $package_list = ['sympa', 'sympa-www']
-
- package { $package_list:
- ensure => installed;
- }
-
- $password = extlookup("sympa_password")
- $ldappass = extlookup("sympa_ldap")
-
- file { '/etc/sympa/sympa.conf':
- ensure => present,
- # should be cleaner to have it root owned, but puppet do not support acl
- # and in any case, config will be reset if it change
- owner => sympa,
- group => apache,
- mode => 640,
- content => template("sympa/sympa.conf")
- }
-
- file { '/etc/sympa/auth.conf':
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("sympa/auth.conf")
- }
-
-
- include apache::mod_fcgid
- apache::webapp_other{"sympa":
- webapp_file => "sympa/webapp_sympa.conf",
- }
-
- apache::vhost_other_app { "ml.$domain":
- vhost_file => "sympa/vhost_ml.conf",
- }
-}
-
+class sympa { }
diff --git a/modules/sympa/manifests/list.pp b/modules/sympa/manifests/list.pp
new file mode 100644
index 00000000..205d2719
--- /dev/null
+++ b/modules/sympa/manifests/list.pp
@@ -0,0 +1,57 @@
+define sympa::list( $subject,
+ $language = 'en',
+ $topics = false,
+ $reply_to = false,
+ $sender_subscriber = false,
+ $sender_email = false,
+ $sender_ldap_group = false,
+ $subscriber_ldap_group = false,
+ $public_archive = true,
+ $subscription_open = false,
+ $critical = false) {
+
+ include sympa::variable
+ $ldap_password = extlookup('sympa_ldap','x')
+ $custom_subject = $name
+
+ $xml_file = "/etc/sympa/lists_xml/${name}.xml"
+
+ file { $xml_file:
+ content => template('sympa/list.xml'),
+ require => Package[sympa],
+ }
+
+ exec { "sympa.pl --create_list --robot=${sympa::variable::vhost} --input_file=${xml_file}":
+ require => File[$xml_file],
+ creates => "/var/lib/sympa/expl/${name}",
+ before => File["/var/lib/sympa/expl/${name}/config"],
+ }
+
+ file { "/var/lib/sympa/expl/${name}/config":
+ owner => 'sympa',
+ group => 'sympa',
+ mode => '0750',
+ content => template('sympa/config'),
+ notify => Service['sympa'],
+ }
+
+ sympa::scenario::sender_restricted { $name:
+ ldap_group => $sender_ldap_group,
+ email => $sender_email,
+ allow_subscriber => $sender_subscriber,
+ }
+
+ if $subscriber_ldap_group {
+ if ! defined(Sympa::Search_filter::Ldap[$subscriber_ldap_group]) {
+ sympa::search_filter::ldap { $subscriber_ldap_group: }
+ }
+ }
+
+ if $sender_ldap_group {
+ if ! defined(Sympa::Search_filter::Ldap[$sender_ldap_group]) {
+ sympa::search_filter::ldap { $sender_ldap_group: }
+ }
+ }
+}
+
+
diff --git a/modules/sympa/manifests/list/announce.pp b/modules/sympa/manifests/list/announce.pp
new file mode 100644
index 00000000..2dd1c647
--- /dev/null
+++ b/modules/sympa/manifests/list/announce.pp
@@ -0,0 +1,21 @@
+# list where announcements are sent by $email or $ldap_group only
+# reply_to is set to $reply_to
+define sympa::list::announce($subject,
+ $reply_to,
+ $sender_email = false,
+ $sender_ldap_group = false,
+ $subscriber_ldap_group = false,
+ $language = 'en',
+ $topics = false,
+ $critical = false) {
+ list { $name:
+ subject => $subject,
+ language => $language,
+ topics => $topics,
+ reply_to => $reply_to,
+ sender_email => $sender_email,
+ sender_ldap_group => $sender_ldap_group,
+ subscriber_ldap_group => $subscriber_ldap_group,
+ critical => $critical
+ }
+}
diff --git a/modules/sympa/manifests/list/private.pp b/modules/sympa/manifests/list/private.pp
new file mode 100644
index 00000000..c8d9b38e
--- /dev/null
+++ b/modules/sympa/manifests/list/private.pp
@@ -0,0 +1,16 @@
+# list with a private archive, restricted to members of $ldap_group
+define sympa::list::private($subject,
+ $subscriber_ldap_group,
+ $sender_email = false,
+ $language ='en',
+ $topics = false) {
+ list { $name:
+ subject => $subject,
+ language => $language,
+ topics => $topics,
+ subscriber_ldap_group => $subscriber_ldap_group,
+ sender_ldap_group => $subscriber_ldap_group,
+ sender_email => $sender_email,
+ public_archive => false,
+ }
+}
diff --git a/modules/sympa/manifests/list/public.pp b/modules/sympa/manifests/list/public.pp
new file mode 100644
index 00000000..7b97534a
--- /dev/null
+++ b/modules/sympa/manifests/list/public.pp
@@ -0,0 +1,16 @@
+# public discussion list
+# reply_to is set to the list
+define sympa::list::public($subject,
+ $language = 'en',
+ $topics = false,
+ $sender_email = false) {
+ include sympa::variable
+ list { $name:
+ subject => $subject,
+ language => $language,
+ topics => $topics,
+ sender_email => $sender_email,
+ sender_subscriber => true,
+ reply_to => "${name}@${sympa::variable::vhost}",
+ }
+}
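A minimal sketch of a public list built on the wrapper above; the list name is an assumed example, and the topic matches an entry from topics.conf earlier in this changeset:

    # Hypothetical public discussion list.
    sympa::list::public { 'dev':
      subject => 'Development discussions',
      topics  => 'developers',
    }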
diff --git a/modules/sympa/manifests/list/public_restricted.pp b/modules/sympa/manifests/list/public_restricted.pp
new file mode 100644
index 00000000..5c316368
--- /dev/null
+++ b/modules/sympa/manifests/list/public_restricted.pp
@@ -0,0 +1,17 @@
+# list where only people from the ldap_group can post, and where
+# they are subscribed by default, but anybody else can subscribe
+# to read and receive messages
+define sympa::list::public_restricted($subject,
+ $subscriber_ldap_group,
+ $language = 'en',
+ $topics = false) {
+ list { $name:
+ subject => $subject,
+ topics => $topics,
+ language => $language,
+ subscriber_ldap_group => $subscriber_ldap_group,
+ sender_ldap_group => $subscriber_ldap_group,
+ subscription_open => true,
+ reply_to => "${name}@${sympa::variable::vhost}",
+ }
+}
diff --git a/modules/sympa/manifests/scenario/sender_restricted.pp b/modules/sympa/manifests/scenario/sender_restricted.pp
new file mode 100644
index 00000000..c69d3669
--- /dev/null
+++ b/modules/sympa/manifests/scenario/sender_restricted.pp
@@ -0,0 +1,9 @@
+define sympa::scenario::sender_restricted(
+ $email = false,
+ $ldap_group = false,
+ $allow_subscriber = false
+) {
+ file { "/etc/sympa/scenari/send.restricted_${name}":
+ content => template('sympa/scenari/sender.restricted')
+ }
+}
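
This define is normally instantiated through sympa::list (see the top of this diff); note that the sender.restricted template iterates over @email, so $email is expected to be an array when set. A standalone sketch with hypothetical values:

    sympa::scenario::sender_restricted { 'announce':
      email      => ['announce-sender@example.org'],
      ldap_group => 'mga-sysadmin',
    }
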
diff --git a/modules/sympa/manifests/search_filter/ldap.pp b/modules/sympa/manifests/search_filter/ldap.pp
new file mode 100644
index 00000000..5cbc84f8
--- /dev/null
+++ b/modules/sympa/manifests/search_filter/ldap.pp
@@ -0,0 +1,5 @@
+define sympa::search_filter::ldap {
+ file { "/etc/sympa/search_filters/${name}.ldap":
+ content => template('sympa/search_filters/group.ldap')
+ }
+}
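
This define is usually declared automatically from sympa::list through the defined() guards shown at the top of this diff, but it can also be declared directly; a hypothetical example, which would render /etc/sympa/search_filters/mga-i18n-committers.ldap from the group.ldap template:

    sympa::search_filter::ldap { 'mga-i18n-committers': }
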
diff --git a/modules/sympa/manifests/server.pp b/modules/sympa/manifests/server.pp
new file mode 100644
index 00000000..bcdda789
--- /dev/null
+++ b/modules/sympa/manifests/server.pp
@@ -0,0 +1,103 @@
+class sympa::server(
+ $authentication_info_url = 'https://wiki.mageia.org/en/Mageia.org_user_account'
+ ) {
+ include sympa::variable
+ # perl-CGI-Fast is needed for fast cgi
+ # perl-Socket6 is required by perl-IO-Socket-SSL
+ # (optional requirement)
+ package {['sympa',
+ 'sympa-www',
+ 'perl-CGI-Fast',
+ 'perl-Socket6']: }
+
+ # the sympa service starts 5 different scripts; I am not
+ # sure that puppet will handle this correctly
+ service { 'sympa':
+ subscribe => [ Package['sympa'], File['/etc/sympa/sympa.conf']]
+ }
+
+ service { 'sympa-outgoing':
+ ensure => running,
+ require => Service['sympa']
+ }
+
+ $pgsql_password = extlookup('sympa_pgsql','x')
+ $ldap_password = extlookup('sympa_ldap','x')
+
+ postgresql::remote_db_and_user { 'sympa':
+ password => $pgsql_password,
+ description => 'Sympa database',
+ }
+
+ File {
+ require => Package['sympa'],
+ }
+
+ $vhost = $sympa::variable::vhost
+ file { '/etc/sympa/sympa.conf':
+ # it would be cleaner to have it root-owned, but puppet does not support ACLs,
+ # and in any case the config will be reset if it changes
+ owner => 'sympa',
+ group => 'apache',
+ mode => '0640',
+ content => template('sympa/sympa.conf'),
+ }
+
+ file { '/etc/sympa/auth.conf':
+ content => template('sympa/auth.conf'),
+ notify => Service['httpd'],
+ }
+
+
+ include apache::mod::fcgid
+ apache::webapp_other { 'sympa':
+ webapp_file => 'sympa/webapp_sympa.conf',
+ }
+
+ apache::vhost::redirect_ssl { $sympa::variable::vhost: }
+
+ apache::vhost::base { $sympa::variable::vhost:
+ use_ssl => true,
+ content => template('sympa/vhost_ml.conf'),
+ }
+
+# git::snapshot { '/etc/sympa/web_tt2':
+# source => "git://git.${::domain}/web/templates/sympa",
+# }
+
+ file { ['/etc/sympa/lists_xml/',
+ '/etc/sympa/scenari/',
+ '/etc/sympa/data_sources/',
+ '/etc/sympa/search_filters/']:
+ ensure => directory,
+ purge => true,
+ recurse => true,
+ force => true,
+ }
+
+ file {
+ '/etc/sympa/scenari/subscribe.open_web_only_notify':
+ source => 'puppet:///modules/sympa/scenari/open_web_only_notify';
+ '/etc/sympa/scenari/unsubscribe.open_web_only_notify':
+ source => 'puppet:///modules/sympa/scenari/open_web_only_notify';
+ '/etc/sympa/scenari/create_list.forbidden':
+ source => 'puppet:///modules/sympa/scenari/forbidden';
+ '/etc/sympa/topics.conf':
+ source => 'puppet:///modules/sympa/topics.conf';
+ }
+
+ # add each group that could be used in a sympa mailing list either as
+ # - owner
+ # - editor (moderation)
+ sympa::datasource::ldap_group { 'mga-sysadmin': }
+ sympa::datasource::ldap_group { 'mga-ml_moderators': }
+
+
+ # directory that will hold the list data
+ # I am not sure of the name (misc, 09/12/10)
+ file { '/var/lib/sympa/expl/':
+ ensure => directory,
+ owner => 'sympa',
+ }
+
+}
diff --git a/modules/sympa/manifests/variable.pp b/modules/sympa/manifests/variable.pp
new file mode 100644
index 00000000..26f60294
--- /dev/null
+++ b/modules/sympa/manifests/variable.pp
@@ -0,0 +1,3 @@
+class sympa::variable {
+ $vhost = "ml.${::domain}"
+}
diff --git a/modules/sympa/templates/auth.conf b/modules/sympa/templates/auth.conf
index 220118b5..854fdf9c 100644
--- a/modules/sympa/templates/auth.conf
+++ b/modules/sympa/templates/auth.conf
@@ -1,13 +1,15 @@
ldap
- host ldap.<%= domain %>:389
+ host ldap.<%= domain %>
timeout 30
suffix <%= dc_suffix %>
get_dn_by_uid_filter (uid=[sender])
- get_dn_by_email (|(mail=[sender])(mailalternateaddress=[sender]))
+ get_dn_by_email_filter (|(mail=[sender])(mailalternateaddress=[sender]))
email_attribute mail
scope sub
- use_ssl 1
-
-user_table
- regexp .*
+ use_tls ldaps
+ ssl_version tlsv1_2
+ ca_verify none
+ bind_dn cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %>
+ bind_password <%= scope.lookupvar("sympa::server::ldap_password") %>
+ authentication_info_url <%= authentication_info_url %>
diff --git a/modules/sympa/templates/config b/modules/sympa/templates/config
new file mode 100644
index 00000000..4262f3ca
--- /dev/null
+++ b/modules/sympa/templates/config
@@ -0,0 +1,103 @@
+
+archive
+period month
+mail_access owner
+<%- if public_archive and not @critical -%>
+web_access public
+<%- else -%>
+web_access private
+<%- end -%>
+
+visibility noconceal
+
+digest 1,4 13:26
+
+<% if subscriber_ldap_group and not subscription_open %>
+# TODO check scenari
+subscribe closed
+
+unsubscribe closed
+<% else %>
+subscribe open_web_only_notify
+
+unsubscribe open_web_only_notify
+<% end %>
+
+editor
+email listmaster@<%= domain %>
+reception nomail
+gecos Moderator team
+visibility conceal
+
+editor_include
+reception nomail
+source mga-ml_moderators
+visibility conceal
+
+subject <%= subject %>
+
+custom_subject <%= custom_subject %>
+
+<%- if @critical -%>
+info conceal
+
+subscribe auth owner
+
+unsubscribe auth_notify
+
+invite owner
+<% end %>
+
+lang <%= language %>
+
+owner
+gecos Sysadmin team
+reception nomail
+email postmaster@<%= domain %>
+visibility noconceal
+profile normal
+
+owner_include
+profile normal
+visibility conceal
+source mga-sysadmin
+reception nomail
+
+
+<%- if @reply_to -%>
+reply_to_header
+value other_email
+other_email <%= reply_to %>
+apply forced
+<%- end -%>
+
+
+review owner
+
+<% if topics %>
+topics <%= topics %>
+<% end %>
+
+send restricted_<%= @name %>
+
+<% if subscriber_ldap_group %>
+include_ldap_query
+ timeout 10
+ scope one
+ select first
+ ssl_version tlsv1_2
+ ca_verify none
+ use_tls ldaps
+ attrs mail
+ ssl_ciphers ALL
+ passwd <%= scope.lookupvar("sympa::server::ldap_password") %>
+ user cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %>
+ suffix ou=People,<%= dc_suffix %>
+ filter (memberOf=cn=<%= subscriber_ldap_group %>,ou=Group,<%= dc_suffix %>)
+ host ldap.<%= domain %>
+
+<% end %>
+
+process_archive on
+
+status open
diff --git a/modules/sympa/templates/data_sources/ldap_group.incl b/modules/sympa/templates/data_sources/ldap_group.incl
new file mode 100644
index 00000000..609a7e42
--- /dev/null
+++ b/modules/sympa/templates/data_sources/ldap_group.incl
@@ -0,0 +1,17 @@
+include_ldap_2level_query
+ host ldap.<%= domain %>
+ use_tls ldaps
+ ssl_version tlsv1_2
+ ca_verify none
+ user cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %>
+ passwd <%= scope.lookupvar("sympa::server::ldap_password") %>
+ suffix1 ou=Group,<%= dc_suffix %>
+ scope1 one
+ filter1 (&(objectClass=groupOfNames)(cn=<%= name %>))
+ attrs1 member
+ select1 all
+ suffix2 [attrs1]
+ scope2 base
+ filter2 (objectClass=inetOrgPerson)
+ attrs2 mail
+ select2 first
diff --git a/modules/sympa/templates/list.xml b/modules/sympa/templates/list.xml
new file mode 100644
index 00000000..74e4f07f
--- /dev/null
+++ b/modules/sympa/templates/list.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" ?>
+<list>
+ <listname><%= name %></listname>
+ <type>discussion_list</type>
+ <subject><%= subject %></subject>
+ <description/>
+ <status>open</status>
+ <language><%= language %></language>
+ <owner_include multiple="1">
+ <source>mga-sysadmin</source>
+ </owner_include>
+ <editor_include multiple="1">
+ <source>mga-ml_moderators</source>
+ </editor_include>
+ <topic><%= topics %></topic>
+</list>
diff --git a/modules/sympa/templates/scenari/sender.restricted b/modules/sympa/templates/scenari/sender.restricted
new file mode 100644
index 00000000..66139e6c
--- /dev/null
+++ b/modules/sympa/templates/scenari/sender.restricted
@@ -0,0 +1,17 @@
+title.gettext restricted list
+
+<%- if @ldap_group -%>
+search(<%= @ldap_group %>.ldap) smtp,md5,smime -> do_it
+<%- end -%>
+<%- if @email -%>
+ <%- for e in @email -%>
+equal([sender], '<%= e %>') smtp,md5,smime -> do_it
+ <%- end -%>
+<%- end -%>
+<%- if allow_subscriber -%>
+equal([sender], 'sysadmin@group.mageia.org') smtp,smime,md5 -> do_it
+match([sender], /@mageia\.org$/) smtp,smime,md5 -> do_it
+is_subscriber([listname],[sender]) smtp,smime,md5 -> do_it
+true() smime,md5 -> do_it
+<%- end -%>
+true() smtp,md5,smime -> reject(reason='send_subscriber')
diff --git a/modules/sympa/templates/search_filters/group.ldap b/modules/sympa/templates/search_filters/group.ldap
new file mode 100644
index 00000000..884e0db1
--- /dev/null
+++ b/modules/sympa/templates/search_filters/group.ldap
@@ -0,0 +1,9 @@
+host ldap.<%= domain %>:636
+bind_dn cn=sympa-<%= hostname %>,ou=System Accounts,<%= dc_suffix %>
+bind_password <%= scope.lookupvar("sympa::server::ldap_password") %>
+use_tls ldaps
+ssl_version tlsv1_2
+ca_verify none
+suffix ou=People,<%= dc_suffix %>
+filter (&(mail=[sender])(memberOf=cn=<%= name %>,ou=Group,<%= dc_suffix %>))
+scope sub
diff --git a/modules/sympa/templates/sympa.conf b/modules/sympa/templates/sympa.conf
index a031da03..edfaba15 100644
--- a/modules/sympa/templates/sympa.conf
+++ b/modules/sympa/templates/sympa.conf
@@ -1,293 +1,627 @@
-###\\\\ Directories and file location ////###
+###\\\\ Service description ////###
-## Directory containing mailing lists subdirectories
-home /var/lib/sympa
+## Primary mail domain name
+domain <%= vhost %>
-## Directory for configuration files ; it also contains scenari/ and templates/ directories
-etc /etc/sympa
+## Email addresses of listmasters
+## Email addresses of the listmasters (users authorized to perform global
+## server commands). Some error reports may also be sent to these addresses.
+## Listmasters can be defined for each virtual host, however, the default
+## listmasters will have privileges to manage all virtual hosts.
+listmaster listmaster@<%= vhost %>
-## File containing Sympa PID while running.
-## Sympa also locks this file to ensure that it is not running more than once. Caution : user sympa need to write access without special privilegee.
-pidfile /var/run/sympa/sympa.pid
+## Default language
+## This is the default language used by Sympa. One of the supported languages
+## should be chosen.
+lang en-US
-pidfile_distribute /var/run/sympa/sympa-distribute.pid
-
-pidfile_creation /var/run/sympa/sympa-creation.pid
-
-pidfile_bulk /var/run/sympa/bulk.pid
-
-## Umask used for file creation by Sympa
-umask 027
-
-## Directory containing available NLS catalogues (Message internationalization)
-localedir /usr/share/locale
-
-## The main spool containing various specialized spools
-## All spool are created at runtime by sympa.pl
-spool /var/spool/sympa
-
-## Incoming spool
-queue /var/spool/sympa/msg
-
-## Bounce incoming spool
-queuebounce /var/spool/sympa/bounce
-
-## Automatic list creation spool
-queueautomatic /var/spool/sympa/automatic
-
-##
-queuedigest /var/spool/sympa/digest
-
-##
-queuemod /var/spool/sympa/moderation
-
-##
-queuetopic /var/spool/sympa/topic
-
-##
-queueauth /var/spool/sympa/auth
-
-##
-queueoutgoing /var/spool/sympa/outgoing
-
-##
-queuetask /var/spool/sympa/task
-
-##
-queuesubscribe /var/spool/sympa/subscribe
-
-## URL to a virtual host.
-http_host http://domain.tld
-
-## The directory where Sympa stores static contents (CSS, members pictures, documentation) directly delivered by Apache
-static_content_path /var/lib/sympa/static_content
-
-## The URL mapped with the static_content_path directory defined above
-static_content_url /static-sympa
-
-###\\\\ Syslog ////###
-
-## The syslog facility for sympa
-## Do not forget to edit syslog.conf
-syslog mail
+## Supported languages
+## All supported languages for the user interface. Languages whose locale
+## information is not installed are ignored.
+supported_lang en_US
+
+## Title of service
+## The name of your mailing list service. It will appear in the header of web
+## interface and subjects of several service messages.
+title Mageia Mailing lists service
+
+## Display name of Sympa
+## This parameter is used for display name in the "From:" header field for the
+## messages sent by Sympa itself.
+gecos SYMPA
+
+## Support of legacy character set
+## If set to "on", enables support of legacy character set according to
+## charset.conf(5) configuration file.
+## In some language environments, legacy encoding (character set) can be
+## preferred for e-mail messages: for example iso-2022-jp in Japanese
+## language.
+legacy_character_support_feature off
+
+###\\\\ Database related ////###
+
+## Type of the database
+## Possible types are "MySQL", "PostgreSQL", "Oracle", "Sybase" and "SQLite".
+db_type PostgreSQL
+
+## Hostname of the database server
+## With PostgreSQL, you can also use the path to Unix Socket Directory, e.g.
+## "/var/run/postgresql" for connection with Unix domain socket.
+db_host pg.<%= domain %>
+
+## Port of the database server
+db_port 5432/tcp
-## Communication mode with syslogd is either unix (via Unix sockets) or inet (use of UDP)
-log_socket_type unix
+## Name of the database
+## With SQLite, this must be the full path to database file. With Oracle
+## Database, this must be Oracle SID.
+db_name sympa
+
+## User for the database connection
+db_user sympa
+
+## Password for the database connection
+## Whether you use a password or not, you must protect the SQL server (it is
+## not a public internet service, is it?)
+db_passwd <%= scope.lookupvar("sympa::server::pgsql_password") %>
+
+## Environment variables setting for database
+## With Oracle Database, this is useful for defining ORACLE_HOME and NLS_LANG.
+# db_env NLS_LANG=American_America.AL32UTF8;ORACLE_HOME=/u01/app/oracle/product/11.2.0/server
+
+## Database private extension to subscriber table
+## Adds more fields to "subscriber_table" table. Sympa recognizes fields
+## defined with this parameter. You will then be able to use them from within
+## templates and scenarios:
+## * for scenarios: [subscriber->field]
+## * for templates: [% subscriber.field %]
+## These fields will also appear in the list members review page and will be
+## editable by the list owner. This parameter is a comma-separated list.
+## You need to extend the database format with these fields
+# db_additional_subscriber_fields billing_delay,subscription_expiration
-## Log intensity
-## 0 : normal, 2,3,4 for debug
-log_level 0
+## Database private extension to user table
+## Adds more fields to "user_table" table. Sympa recognizes fields defined
+## with this parameter. You will then be able to use them from within
+## templates: [% subscriber.field %]
+## This parameter is a comma-separated list.
+## You need to extend the database format with these fields
+# db_additional_user_fields age,address
-log_smtp off
+###\\\\ System log ////###
-## Number of months that elapse before a log is expired.
-logs_expiration_period 3
+## System log facility for Sympa
+## Do not forget to configure syslog server.
+syslog mail
-###\\\\ General definition ////###
+## Communication mode with syslog server
+log_socket_type unix
-## Main robot hostname
-domain ml.<%= domain %>
+## Log verbosity
+## Sets the verbosity of logs.
+## 0: Only main operations are logged
+## 3: Almost everything is logged.
+log_level 0
-## Listmasters email list comma separated
-## Sympa will associate listmaster privileges to these email addresses (mail and web interfaces). Some error reports may also be sent to these addresses.
-listmaster listmaster@ml.<%= domain %>
+###\\\\ Receiving ////###
-## Local part of sympa email adresse
-## Effective address will be \[EMAIL\]@\[HOST\]
-email sympa
+## Default maximum number of list members
+## Default limit for the number of subscribers per list (0 means no limit).
+default_max_list_members 0
-## Who is able to create lists
-## This parameter is a scenario, check sympa documentation about scenarios if you want to define one
-create_list public_listmaster
+## Maximum size of messages
+## Incoming messages smaller than this size are allowed to be distributed by
+## Sympa.
+max_size 5242880
-edit_list owner
+## Reject mail sent from automated services to list
+## Rejects messages that seem to be from automated services, based on a few
+## header fields ("Content-Identifier:", "Auto-Submitted:").
+## Sympa also can be configured to reject messages based on the "From:" header
+## field value (see "loop_prevention_regex").
+reject_mail_from_automates_feature off
-###\\\\ Tuning ////###
+## Priority for command messages
+## Priority applied to messages sent to Sympa command address.
+sympa_priority 1
-## Use of binary version of the list config structure on disk: none | binary_file
-## Set this parameter to "binary_file" if you manage a big amount of lists (1000+) ; it should make the web interface startup faster
-cache_list_config none
+## Priority for messages bound for list owners
+## Priority for processing of messages bound for "LIST-request" address, i.e.
+## owners of the list
+request_priority 0
-## Sympa commands priority
-sympa_priority 1
+## Priority for non-VERP bounces
+## Priority for processing of messages bound for "LIST-owner" address, i.e.
+## non-delivery reports (bounces).
+owner_priority 9
## Default priority for list messages
-default_list_priority 5
-
-## Default timeout between two scheduled synchronizations of list members with data sources.
-default_ttl 3600
-
-## Default timeout between two action-triggered synchronizations of list members with data sources.
-default_distribution_ttl 300
-
-## Default priority for a packet to be sent by bulk.
-sympa_packet_priority 5
-
-request_priority 0
-
-owner_priority 9
-
-## The minimum number of packets in database before the bulk forks to increase sending rate
-##
-bulk_fork_threshold 1
-
-## The max number of bulks that will run on the same server.
-##
-bulk_max_count 3
-
-## the number of seconds a slave bulk will remain running without processing a message before it spontaneously dies.
-##
-bulk_lazytime 600
-
-## The number of seconds a master bulk waits between two packets number checks.
-## Keep it small if you expect brutal increases in the message sending load.
-bulk_wait_to_fork 10
-
-## the number of seconds a bulk sleeps between starting a new loop if it didn't find a message to send.
+## Priority for processing of messages posted to list addresses.
+default_list_priority 5
+
+###\\\\ Sending related ////###
+
+## Header fields to be removed from incoming messages
+## Use it, for example, to ensure some privacy for your users in case that
+## "anonymous_sender" mode is inappropriate.
+## The removal of these header fields is applied before Sympa adds its own
+## header fields ("rfc2369_header_fields" and "custom_header").
+# was remove_headers ARRAY(0x4116e50)
+remove_headers X-Sympa-To,X-Family-To,Return-Receipt-To,Precedence,X-Sequence,Disposition-Notification-To
+
+## RFC 2369 header fields
+## Specify which RFC 2369 mailing list header fields to be added.
+## "List-Id:" header field defined in RFC 2919 is always added. Sympa also
+## adds "Archived-At:" header field defined in RFC 5064.
+# was rfc2369_header_fields ARRAY(0x4116c88)
+rfc2369_header_fields help,subscribe,unsubscribe,post,owner,archive
+
+## Default priority for a packet
+## The default priority set to a packet to be sent by the bulk.
+sympa_packet_priority 5
+
+## Fork threshold of bulk daemon
+## The minimum number of packets before bulk daemon forks the new worker to
+## increase sending rate.
+bulk_fork_threshold 1
+
+## Maximum number of bulk workers
+bulk_max_count 3
+
+## Idle timeout of bulk workers
+## The number of seconds a bulk worker will remain running without processing
+## a message before it spontaneously exits.
+bulk_lazytime 600
+
+## Sleep time of bulk workers
+## The number of seconds a bulk worker sleeps between starting a new loop if
+## it didn't find a message to send.
## Keep it small if you want your server to be reactive.
-bulk_sleep 1
-
-## Secret used by Sympa to make MD5 fingerprint in web cookies secure
-## Should not be changed ! May invalid all user password
-#cookie 123456789
-
-## If set to "on", enables support of legacy characters
-##
-legacy_character_support_feature off
-
-## The default maximum size (in bytes) for messages (can be re-defined for each list)
-max_size 5242880
-
-## comma separated list of operations for which blacklist filter is applied
-## Setting this parameter to "none" will hide the blacklist feature
-use_blacklist send,create_list
-
-## Specify which rfc2369 mailing list headers to add
-rfc2369_header_fields help,subscribe,unsubscribe,post,owner,archive
-
-## Specify header fields to be removed before message distribution
-remove_headers X-Sympa-To,X-Family-To,Return-Receipt-To,Precedence,X-Sequence,Disposition-Notification-To
-
-bounce_warn_rate 30
+bulk_sleep 1
-bounce_halt_rate 50
-
-###\\\\ Internationalization ////###
-
-## Default lang (ca | cs | de | el | es | et_EE | en_US | fr | fi | hu | it | ja_JP | ko | nl | nb_NO | oc | pl | pt_BR | ru | sv | tr | vi | zh_CN | zh_TW)
-## This is the default language used by Sympa
-lang en_US
-
-## Supported languages
-## This is the set of language that will be proposed to your users for the Sympa GUI. Don't select a language if you don't have the proper locale packages installed.
-supported_lang ca,cs,de,el,es,et_EE,en_US,fr,fi,hu,it,ja_JP,ko,nl,nb_NO,oc,pl,pt_BR,ru,sv,tr,vi,zh_CN,zh_TW
+## Interval between checks of packet numbers
+## Number of seconds a master bulk daemon waits between two packets number
+## checks.
+## Keep it small if you expect brutal increases in the message sending load.
+bulk_wait_to_fork 10
+
+## Path to sendmail
+## Absolute path to sendmail command line utility (e.g.: a binary named
+## "sendmail" is distributed with Postfix).
+## Sympa expects this binary to be sendmail compatible (exim, Postfix, qmail
+## and so on provide it). Sympa also bundles "sympa_smtpc" program which may
+## be a replacement to sendmail binary.
+sendmail /usr/sbin/sendmail
+
+## Log invocation of sendmail
+## This can be overwritten by "-m" option for sympa.pl.
+log_smtp off
+
+## Maximum number of sendmail processes
+## Maximum number of simultaneous child processes spawned by Sympa. This is
+## the main load control parameter.
+## The proposed value is quite low, but you can raise it up to 100, 200 or
+## even 300 on powerful systems.
+maxsmtp 40
+
+## Maximum number of recipients per call to sendmail
+## This grouping factor makes it possible for the sendmail processes to
+## optimize the number of SMTP sessions for message distribution. If needed,
+## you can limit the number of recipients for a particular domain. Check the
+## "nrcpt_by_domain.conf" configuration file.
+nrcpt 25
+
+## Maximum number of different mail domains per call to sendmail
+avg 10
+
+###\\\\ Privileges ////###
-###\\\\ Errors management ////###
+## Who is able to create lists
+## Defines who can create lists (or request list creation) by creating new
+## lists or by renaming or copying existing lists.
+create_list forbidden
+
+## Use blacklist
+## List of operations separated by comma for which blacklist filter is
+## applied. Setting this parameter to "none" will hide the blacklist feature.
+use_blacklist send,create_list
+
+## List of required domains for list owner addresses
+## Restrict list ownership to addresses in the specified domains. This can be
+## used to reserve list ownership to a group of trusted users from a set of
+## domains associated with an organization, while allowing editors and
+## subscribers from the Internet at large.
+# owner_domain domain1.tld domain2.tld
+
+## Minimum number of list owners that must match owner_domain restriction
+## Minimum number of list owners that must satisfy the owner_domain
+## restriction. The default of zero (0) means *all* list owners must match.
+## Setting to 1 requires only one list owner to match owner_domain; all other
+## owners can be from any domain. This setting can be used to ensure that
+## there is always at least one known contact point for a mailing list.
+owner_domain_min 0
+
+###\\\\ Archives ////###
+
+## Store distributed messages into archive
+## If enabled, distributed messages via lists will be archived. Otherwise
+## archiving is disabled.
+## Note that even if this parameter is disabled, past archives will not be
+## removed and will remain accessible according to each list's access
+## settings.
+process_archive on
+
+## Path to MHonArc mail-to-HTML converter
+## This is required for HTML mail archiving.
+mhonarc /usr/bin/mhonarc
+
+# There is a need to protect the Sympa website against spambots
+spam_protection javascript
+
+# The same as spam_protection, but restricted to the web archive.
+web_archive_spam_protection cookie
+
+###\\\\ Bounce management and tracking ////###
+
+## Default bounce warn rate
+## The list owner receives a warning whenever a message is distributed and the
+## number (percentage) of bounces exceeds this value.
+bounce_warn_rate 30
+
+## Default bounce halt rate
+## NOT USED YET. If bounce rate reaches the halt_rate, messages for the list
+## will be halted, i.e. they are retained for subsequent moderation.
+bounce_halt_rate 50
+
+## Remove bouncing new subscribers
+## If set to unique, the welcome message is sent using a unique return path in
+## order to remove the subscriber immediately in the case of a bounce.
+welcome_return_path owner
+
+## Remove subscribers bouncing remind message
+## Same as welcome_return_path, but applied to remind messages.
+remind_return_path owner
+
+## Task for expiration of old bounces
+## This task resets bouncing information for addresses not bouncing in the
+## last 10 days after the latest message distribution.
+expire_bounce_task daily
+
+###\\\\ Automatic lists ////###
+
+## Definition of automatic list families
+## Defines the families the automatic lists are based on. It is a character
+## string structured as follows:
+## * each family is separated from the others by a semicolon (;)
+## * inside a family definition, each field is separated from the others by a
+## colon (:)
+## * each field has the structure: "<field name>=<field value>"
+## Basically, each time Sympa uses the automatic lists families, the values
+## defined in this parameter will be available in the family object.
+## * for scenarios: [family->name]
+## * for templates: [% family.name %]
+# automatic_list_families name=family_one:prefix=f1:display=My automatic lists:prefix_separator=+:classes separator=-:family_owners_list=alist@domain.tld;name=family_two:prefix=f2:display=My other automatic lists:prefix_separator=+:classes separator=-:family_owners_list=anotherlist@domain.tld;
+
+## Parsed files for families
+## comma-separated list of files that will be parsed by Sympa when
+## instantiating a family (no space allowed in file names)
+parsed_family_files message.footer,message.header,message.footer.mime,message.header.mime,info
+
+###\\\\ Tag based spam filtering ////###
+
+## Header field to tag spams
+## If a spam filter (like spamassassin or j-chkmail) adds a header field to
+## tag spam, this is the name of that header field (example: X-Spam-Status)
+antispam_tag_header_name X-Spam-Status
+
+## Regular expression to check header field to tag spams
+## Regular expression applied to this header field to verify that a message
+## is spam (example: Yes)
+antispam_tag_header_spam_regexp ^\s*Yes
+
+## Regular expression to determine spam or ham.
+## Regular expression applied to this header field to verify that a message
+## is NOT spam (example: No)
+antispam_tag_header_ham_regexp ^\s*No
+
+## Name of header field to inform
+## Messages are supposed to be filtered by a spam filter that adds one or more
+## headers to messages. This parameter is used to select a special scenario in
+## order to decide the message spam status: ham, spam or unsure. This
+## parameter replaces antispam_tag_header_name, antispam_tag_header_spam_regexp
+## and antispam_tag_header_ham_regexp.
+spam_status x-spam-status
+
+###\\\\ Directories ////###
+
+## List home
+## Base directory of list configurations.
+home /var/lib/sympa/expl
+
+## Directory for configuration files
+## Base directory of global configuration (except "sympa.conf").
+etc /etc/sympa
+
+## Base directory of spools
+## Base directory of all spools which are created at runtime. This directory
+## must be writable by Sympa user.
+spool /var/spool/sympa
+
+## Directory for message incoming spool
+## This spool is used both by "queue" program and "sympa_msg.pl" daemon.
+queue /var/spool/sympa/msg
+
+## Directory for moderation spool
+queuemod /var/spool/sympa/moderation
+
+## Directory for digest spool
+queuedigest /var/spool/sympa/digest
+
+## Directory for held message spool
+## This parameter is named as such for historical reasons.
+queueauth /var/spool/sympa/auth
+
+## Directory for archive spool
+## This parameter is named as such for historical reasons.
+queueoutgoing /var/spool/sympa/outgoing
+
+## Directory for held request spool
+## This parameter is named as such for historical reasons.
+queuesubscribe /var/spool/sympa/subscribe
+
+## Directory for topic spool
+queuetopic /var/spool/sympa/topic
+
+## Directory for bounce incoming spool
+## This spool is used both by "bouncequeue" program and "bounced.pl" daemon.
+queuebounce /var/spool/sympa/bounce
+
+## Directory for task spool
+queuetask /var/spool/sympa/task
+
+## Directory for automatic list creation spool
+## This spool is used both by "familyqueue" program and "sympa_automatic.pl"
+## daemon.
+queueautomatic /var/spool/sympa/automatic
+
+## Directory for message outgoing spool
+## This parameter is named as such for historical reasons.
+queuebulk /var/spool/sympa/bulk
+
+## Directory to cache formatted messages
+## Base directory path of directories where HTML view of messages are cached.
+viewmail_dir /var/spool/sympa/viewmail
+
+## Directory for storing bounces
+## The directory where bounced.pl daemon will store the last bouncing message
+## for each user. A message is stored in the file: <bounce_path>/<list
+## name>@<mail domain name>/<email address>, or, if tracking is enabled:
+## <bounce_path>/<list name>@<mail domain name>/<email address>_<envelope ID>.
+## Users can access these messages using the web interface, in the bounce
+## management page.
+## Don't confuse with "queuebounce" parameter which defines the spool where
+## incoming error reports are stored and picked by bounced.pl daemon.
+bounce_path /var/lib/sympa/bounce
+
+## Directory for storing archives
+## Where to store HTML archives. This parameter is used by the "archived.pl"
+## daemon. It is a good idea to install the archive outside the web document
+## hierarchy so that accesses bypassing WWSympa's access control are
+## prevented.
+arc_path /var/lib/sympa/arc
+
+###\\\\ Miscellaneous ////###
+
+## Local part of Sympa email address
+## Local part (the part preceding the "@" sign) of the address by which mail
+## interface of Sympa accepts mail commands.
+## If you change the default value, you must modify the mail aliases too.
+email sympa
+
+## Custom robot parameter
+## Used to define a custom parameter for your server. Do not forget the
+## semicolon between the parameter name and the parameter value.
+## You will be able to access the custom parameter value in web templates by
+## variable "conf.custom_robot_parameter.<param_name>"
+# custom_robot_parameter param_name ; param_value
+
+## Use of binary cache of list configuration
+## binary_file: Sympa processes will maintain a binary version of the list
+## configuration, "config.bin" file on local disk. If you manage a big amount
+## of lists (1000+), it should make the web interface startup faster.
+## You can recreate cache by running "sympa.pl --reload_list_config".
+cache_list_config none
+
+## Max age of logs in database
+## Number of months that elapse before a log is expired
+logs_expiration_period 3
+
+## Umask
+## Default mask for file creation (see umask(2)). Note that it will be
+## interpreted as an octal value.
+umask 027
+
+## Secret string for generating unique keys
+## This allows generated authentication keys to differ from one site to another.
+## It is also used for encryption of user passwords stored in the database.
+## The presence of this string is one reason why access to "sympa.conf" needs
+## to be restricted to the "sympa" user.
+## Note that changing this parameter will break all HTTP cookies stored in
+## users' browsers, as well as all user passwords and lists X509 private keys.
+## To prevent a catastrophe, Sympa refuses to start if this "cookie" parameter
+## was changed.
+# cookie 123456789
+
+###\\\\ Web interface parameters ////###
+
+## URL prefix of web interface
+## This is used to construct URLs of web interface.
+wwsympa_url https://<%= vhost %>/l
+
+## URL prefix of WWSympa behind proxy
+#http_host http://domain.tld
+
+## URL for static contents
+## The HTTP server has to map it to the "static_content_path" directory.
+static_content_url /static-sympa
+css_url /static-sympa/css
+pictures_url /static-sympa/pictures
+
+## Directory for static contents
+static_content_path /var/lib/sympa/static_content
+css_path /var/lib/sympa/static_content/css
+pictures_path /var/lib/sympa/static_content/pictures
+
+## System log facility for web interface
+## System log facility for WWSympa, archived.pl and bounced.pl. The default
+## is to use the value of the "syslog" parameter.
+log_facility LOCAL1
+
+###\\\\ Web interface parameters: Appearances ////###
+
+## Type of main web page
+## "lists" for the page of list of lists. "home" for home page.
+default_home lists
+
+## Default index organization of web archive
+## thrd: Threaded index.
+## mail: Chronological index.
+archive_default_index thrd
+
+## Size of review page
+## Default number of lines of the array displaying users in the review page
+review_page_size 25
+
+## Size of viewlogs page
+## Default number of lines of the array displaying the log entries in the logs
+## page.
+viewlogs_page_size 25
+
+###\\\\ Web interface parameters: Miscellaneous ////###
+
+## HTTP cookies validity domain
+## If beginning with a dot ("."), the cookie is available within the specified
+## Internet domain. Otherwise, for the specified host. The only reason for
+## replacing the default value would be where WWSympa's authentication process
+## is shared with an application running on another host.
+cookie_domain <%= vhost %>
+
+## HTTP cookies lifetime
+## This is the default value when not set explicitly by users. "0" means the
+## cookie may be retained during browser session.
+cookie_expire 0
+
+## Average interval to refresh HTTP session ID.
+cookie_refresh 60
+
+## Use HTML editor
+## If set to "on", users will be able to post messages in HTML using a
+## javascript WYSIWYG editor.
+use_html_editor 0
+
+## URL of HTML editor
+## URL path to the javascript file making the WYSIWYG HTML editor available.
+## Relative path under <static_content_url> or absolute path.
+## Example is for TinyMCE 4 installed under <static_content_path>/js/tinymce/.
+# html_editor_url js/tinymce/tinymce.min.js
+
+## HTML editor initialization
+## Javascript excerpt that enables and configures the WYSIWYG HTML editor.
+# html_editor_init tinymce.init({selector:"#body",language:lang.split(/[^a-zA-Z]+/).join("_")});
+
+## Count limit of wrong password submission
+## If this limit is reached, the account is locked until the user renews their
+## password. The default value is chosen in order to block bots trying to log
+## in using a brute force strategy. This value should never be reached by real
+## users, who will probably use the renew password service before they
+## perform so many tries.
+max_wrong_password 19
+
+## Password case
+## "insensitive" or "sensitive".
+## If set to "insensitive", WWSympa's password check will be insensitive. This
+## only concerns passwords stored in the Sympa database, not the ones in LDAP.
+## Should not be changed! It may invalidate all user passwords.
+password_case insensitive
+
+###\\\\ S/MIME and TLS ////###
-## Bouncing email rate for warn list owner
-#bounce_warn_rate 20
+## Password used to crypt lists private keys
+## If not defined, Sympa assumes that list private keys are not encrypted.
+# key_passwd your_password
-## Bouncing email rate for halt the list (not implemented)
-## Not yet used in current version, Default is 50
-#bounce_halt_rate 50
+## Directory containing user certificates
+ssl_cert_dir /var/lib/sympa/X509-user-certs
-## Task name for expiration of old bounces
-#expire_bounce_task daily
+###\\\\ Data sources setup ////###
-## Welcome message return-path
-## If set to unique, new subcriber is removed if welcome message bounce
-#welcome_return_path unique
+## Default of SQL fetch timeout
+## Default timeout while performing a fetch with include_sql_query.
+default_sql_fetch_timeout 300
-###\\\\ MTA related ////###
+###\\\\ DKIM ////###
-## Path to the MTA (sendmail, postfix, exim or qmail)
-## should point to a sendmail-compatible binary (eg: a binary named "sendmail" is distributed with Postfix)
-sendmail /usr/sbin/sendmail
+## Enable DKIM
+## If set to "on", Sympa may verify DKIM signatures of incoming messages and/
+## or insert DKIM signature to outgoing messages.
+dkim_feature off
-## Maximum number of recipients per call to Sendmail. The nrcpt_by_domain.conf file allows a different tuning per destination domain.
-nrcpt 25
+## Which service messages to be signed
+## Inserts a DKIM signature to service messages in context of robot, list or
+## both
+dkim_add_signature_to robot,list
-## Max. number of different domains per call to Sendmail
-avg 10
+## The "d=" tag as defined in rfc 4871
+## The DKIM "d=" tag is the domain of the signing entity. The default is the
+## virtual host domain name.
+dkim_signer_domain <%= vhost %>
-## Max. number of Sendmail processes (launched by Sympa) running simultaneously
-## Proposed value is quite low, you can rise it up to 100, 200 or even 300 with powerfull systems.
-maxsmtp 40
+## Rewrite header for DKIM signed messages and DMARC rejecting domains
+dmarc_protection_mode dkim_signature,dmarc_reject
-###\\\\ Plugin ////###
+###\\\\ Antivirus plug-in ////###
## Path to the antivirus scanner engine
-## supported antivirus : McAfee/uvscan, Fsecure/fsav, Sophos, AVP and Trend Micro/VirusWall
-#antivirus_path /usr/local/uvscan/uvscan
-
-## Antivirus pluggin command argument
-#antivirus_args --secure --summary --dat /usr/local/uvscan
+## Supported antivirus: Clam AntiVirus/clamscan & clamdscan, McAfee/uvscan,
+## Fsecure/fsav, Sophos, AVP and Trend Micro/VirusWall
+# antivirus_path /usr/local/bin/clamscan
-###\\\\ S/MIME pluggin ////###
+## Antivirus plugin command line arguments
+# antivirus_args --no-summary --database /usr/local/share/clamav
-## Path to OpenSSL
-## Sympa knowns S/MIME if openssl is installed
-#openssl /usr/bin/ssl
+###\\\\ Password validation ////###
-## The directory path use by OpenSSL for trusted CA certificates
-#capath /etc/sympa/ssl.crt
+## Password validation
+## The password validation techniques to be used against user passwords that
+## are added to mailing lists. Options come from Data::Password
+## (https://search.cpan.org/~razinf/Data-Password-1.07/Password.pm#VARIABLES)
+# password_validation MINLEN=8,GROUPS=3,DICTIONARY=4,DICTIONARIES=/pentest/dictionaries
-## This parameter sets the all-in-one file where you can assemble the Certificates of Certification Authorities (CA)
-#cafile /usr/local/apache/conf/ssl.crt/ca-bundle.crt
-
-## User CERTs directory
-ssl_cert_dir /var/lib/sympa/X509-user-certs
-
-crl_dir /var/lib/sympa/crl
-
-## Password used to crypt lists private keys
-#key_passwd your_password
-
-###\\\\ Database ////###
-
-## Database type (mysql | Pg | Oracle | Sybase | SQLite)
-## be carefull to the case
-db_type Pg
-
-## Name of the database
-## with SQLite, the name of the DB corresponds to the DB file
-db_name sympa
-
-## The host hosting your sympa database
-db_host pgsql.<%= domain %>
-
-## The database port
-db_port 5432/tcp
-
-## Database user for connexion
-db_user sympa
-
-## Database password (associated to the db_user)
-## What ever you use a password or not, you must protect the SQL server (is it a not a public internet service ?)
-db_passwd <%= password %>
-
-## Database private extention to user table
-## You need to extend the database format with these fields
-#db_additional_user_fields age,address
-
-## Database private extention to subscriber table
-## You need to extend the database format with these fields
-#db_additional_subscriber_fields billing_delay,subscription_expiration
+###\\\\ Authentication with LDAP ////###
-###\\\\ Web interface ////###
+## Use canonical email address for LDAP authentication
+## When using LDAP authentication, if the identifier provided by the user is a
+## valid email address and this parameter is set to false, the provided email
+## address will be used to authenticate the user. Otherwise, the first email
+## address returned by the LDAP server will be used.
+ldap_force_canonical_email 1
-## Sympa's main page URL
-wwsympa_url http://ml.<%= domain %>/
+###\\\\ Obsoleted parameters ////###
-## If a spam filter (like spamassassin or j-chkmail) add a smtp headers to tag spams, name of this header (example X-Spam-Status)
-antispam_tag_header_name X-Spam-Status
+## Default timeout between two scheduled synchronizations of list members with
+## data sources.
+default_ttl 3600
-## The regexp applied on this header to verify message is a spam (example \s*Yes)
-antispam_tag_header_spam_regexp ^\s*Yes
+## Default timeout between two action-triggered synchronizations of list
+## members with data sources.
+default_distribution_ttl 300
-## The regexp applied on this header to verify message is NOT a spam (example \s*No)
-antispam_tag_header_ham_regexp ^\s*No
+edit_list owner
-# Disable alias management, already managed in postfix
-sendmail_aliases none
+## Enable FastCGI
+## Is FastCGI module for HTTP server installed. This module provide much
+## faster web interface.
+use_fast_cgi 1
+# Upgrade from 6.2.40 to 6.2.42
+# 22 May 2019 at 21:22:06
+shared_feature on
diff --git a/modules/sympa/templates/vhost_ml.conf b/modules/sympa/templates/vhost_ml.conf
index bd98b175..11aa7ae5 100644
--- a/modules/sympa/templates/vhost_ml.conf
+++ b/modules/sympa/templates/vhost_ml.conf
@@ -1,10 +1,20 @@
-<VirtualHost *:80>
- ServerName ml.<%= domain %>
-<%-
-path_cgi_directory = "/usr/lib" + ( architecture == "x86_64" ? '64' : '') + "/sympa/cgi"
--%>
- DocumentRoot <%= path_cgi_directory %>
- <Location />
- Allow from all
- </Location>
-</VirtualHost>
+ RewriteEngine On
+ RewriteRule ^/?$ /l/home [R]
+ RewriteRule ^/l$ /l/
+ RewriteRule ^/l/(.*)$ /wwsympa-wrapper.fcgi/$1
+
+ DocumentRoot <%= lib_dir + "/sympa/cgi" %>
+
+ Alias /static-sympa /var/lib/sympa/static_content
+
+ <Directory /var/lib/sympa/static_content>
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order allow,deny
+ Allow from all
+ </IfModule>
+ </Directory>
diff --git a/modules/sympa/templates/webapp_sympa.conf b/modules/sympa/templates/webapp_sympa.conf
index 84debe38..1a508199 100644
--- a/modules/sympa/templates/webapp_sympa.conf
+++ b/modules/sympa/templates/webapp_sympa.conf
@@ -1,11 +1,16 @@
-<%-
-path_cgi_directory = "/usr/lib" + ( architecture == "x86_64" ? '64' : '') + "/sympa/cgi"
--%>
-<Directory <%= path_cgi_directory %> >
- Options ExecCGI
- AddHandler fastcgi-script .fcgi
+<Directory <%= lib_dir + "/sympa/cgi" %> >
+ SetHandler fcgid-script
+ Options +ExecCGI
+ AddHandler cgi-script .fcgi
DirectoryIndex wwsympa-wrapper.fcgi
- Order allow,deny
- Allow from all
+ <IfModule mod_authz_core.c>
+ # Apache 2.4
+ Require all granted
+ </IfModule>
+ <IfModule !mod_authz_core.c>
+ # Apache 2.2
+ Order allow,deny
+ Allow from all
+ </IfModule>
</Directory>
diff --git a/modules/testvm/manifests/init.pp b/modules/testvm/manifests/init.pp
index 93376e45..d8ca9564 100644
--- a/modules/testvm/manifests/init.pp
+++ b/modules/testvm/manifests/init.pp
@@ -1,33 +1,40 @@
class testvm
{
- $testvm_login = "testvm"
- $testvmdir = "/home/testvm"
+ $testvm_login = 'testvm'
+ $testvmdir = '/home/testvm'
- group {"$testvm_login":
- ensure => present,
+ group {"${testvm_login}":
+ ensure => present,
}
- user {"$testvm_login":
- ensure => present,
- comment => "System user used to run test VMs",
- managehome => true,
- gid => $vmtest_login,
- shell => "/bin/bash",
+ user {"${testvm_login}":
+ ensure => present,
+ comment => "System user used to run test VMs",
+ managehome => true,
+ gid => $testvm_login,
+ shell => '/bin/bash',
}
- file { "$testvmdir/bin/_vm":
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- source => "puppet:///modules/testvm/_vm",
+ file { "${testvmdir}/bin":
+ ensure => directory,
+ require => User[$testvm_login],
}
- file { "$testvmdir/bin/vm-jonund":
- ensure => present,
- owner => root,
- group => $testvm_login,
- mode => 750,
- source => "puppet:///modules/testvm/vm-jonund",
+ file { "${testvmdir}/bin/_vm":
+ ensure => present,
+ owner => root,
+ group => root,
+ mode => '0644',
+ source => "puppet:///modules/testvm/_vm",
+ require => File["${testvmdir}/bin"],
+ }
+
+ file { "${testvmdir}/bin/vm-jonund":
+ ensure => present,
+ owner => root,
+ group => $testvm_login,
+ mode => '0750',
+ source => "puppet:///modules/testvm/vm-jonund",
+ require => File["${testvmdir}/bin"],
}
}
diff --git a/modules/timezone/manifests/init.pp b/modules/timezone/manifests/init.pp
index 0f33093a..67682f49 100644
--- a/modules/timezone/manifests/init.pp
+++ b/modules/timezone/manifests/init.pp
@@ -1,8 +1 @@
-
-class timezone {
- define timezone() {
- file { "/etc/localtime":
- ensure => "/usr/share/zoneinfo/$name"
- }
- }
-}
+class timezone {}
diff --git a/modules/timezone/manifests/timezone.pp b/modules/timezone/manifests/timezone.pp
new file mode 100644
index 00000000..8f3298a2
--- /dev/null
+++ b/modules/timezone/manifests/timezone.pp
@@ -0,0 +1,6 @@
+define timezone::timezone() {
+ file { '/etc/localtime':
+ ensure => link,
+ target => "/usr/share/zoneinfo/${name}"
+ }
+}
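
A node would then pick its zone by resource title, for example (the zone name is only an example):

    timezone::timezone { 'Europe/Paris': }

This makes /etc/localtime a symlink to /usr/share/zoneinfo/Europe/Paris.
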
diff --git a/modules/transifex/manifests/init.pp b/modules/transifex/manifests/init.pp
index 32069430..282b3f9a 100644
--- a/modules/transifex/manifests/init.pp
+++ b/modules/transifex/manifests/init.pp
@@ -1,28 +1,89 @@
class transifex {
- package { 'transifex':
- ensure => installed
- }
-
- $password = extlookup("transifex_password")
- file { "20-engines.conf":
- path => "/etc/transifex/20-engines.conf",
- ensure => present,
- owner => root,
- group => apache,
- mode => 640,
- content => template("transifex/20-engines.conf")
- }
-
- file { "30-site.conf":
- path => "/etc/transifex/30-site.conf",
- ensure => present,
- owner => root,
- group => root,
- mode => 644,
- content => template("transifex/30-site.conf")
- }
-
-# apache::vhost_django_app { "transifex.$domain":
-# module => "transifex"
-# }
+ include django_application
+
+ package { 'transifex': }
+
+ $pgsql_password = extlookup('transifex_pgsql','x')
+ $ldap_password = extlookup('transifex_ldap','x')
+
+ $templates_dir = '/var/lib/transifex/templates'
+
+ postgresql::remote_db_and_user { 'transifex':
+ description => 'Transifex database',
+ password => $pgsql_password,
+ }
+
+ define config() {
+ $filename = $name
+
+ file { "/etc/transifex/${filename}":
+ group => 'apache',
+ mode => '0640',
+ require => Package['transifex'],
+ notify => Service['apache'],
+ content => template("transifex/${filename}"),
+ }
+ }
+
+ config { ['20-engines.conf',
+ '30-site.conf',
+ '40-apps.conf',
+ '45-ldap.conf',
+ '50-project.conf']: }
+
+ git::snapshot { $templates_dir:
+ source => "git://git.${::domain}/web/templates/transifex"
+ }
+
+ apache::vhost::django_app { "transifex.${::domain}":
+ module => 'transifex',
+ use_ssl => true,
+ module_path => ['/usr/share/transifex','/usr/share','/usr/local/lib/'],
+ aliases => { '/site_media/static/admin/' => '/usr/lib/python2.6/site-packages/django/contrib/admin/media/', },
+ }
+
+ # tx needs write access there when running under apache
+ file { '/var/lib/transifex/scratchdir/storage_files':
+ ensure => directory,
+ owner => 'apache',
+ group => 'apache',
+ require => Package['transifex'],
+ }
+
+ apache::vhost::redirect_ssl { "transifex.${::domain}": }
+
+ # the groups are mapped from ldap, since AUTH_LDAP_FIND_GROUP_PERMS is set to yes,
+ # but the groups need to exist in django first
+ django_application::create_group { ['mga-i18n','mga-i18n-committers']:
+ module => 'transifex',
+ path => '/usr/share/transifex:/usr/share',
+ }
+
+ define committers_permission($app='')
+ {
+ # using django_application::add_permission_to_group may cause problems
+ # if we install a 2nd django application with the same permission name (as it needs
+ # to be unique)
+ django_application::add_permission_to_group { $name:
+ app => $app,
+ group => 'mga-i18n-committers',
+ module => 'transifex',
+ path => '/usr/share/transifex:/usr/share',
+ require => Django_application::Create_group['mga-i18n-committers'],
+ }
+ }
+
+ committers_permission {['add_project',
+ 'change_project',
+ 'delete_project']: }
+
+ committers_permission {['add_release',
+ 'change_release',
+ 'delete_release']: }
+
+ committers_permission {['add_resource',
+ 'change_resource',
+ 'delete_resource']:
+ app => 'resources',
+ }
}
diff --git a/modules/transifex/templates/20-engines.conf b/modules/transifex/templates/20-engines.conf
index 1906a438..620a9556 100644
--- a/modules/transifex/templates/20-engines.conf
+++ b/modules/transifex/templates/20-engines.conf
@@ -3,14 +3,14 @@
## Database configuration
-# http://docs.djangoproject.com/en/dev/ref/settings/#database-engine
+# https://docs.djangoproject.com/en/dev/ref/settings/#database-engine
DATABASE_ENGINE = 'postgresql_psycopg2'
# Use file path for sqlite3
DATABASE_NAME = 'transifex'
# The following are not used for sqlite3
DATABASE_USER = 'transifex'
-DATABASE_PASSWORD = '<%= password %>'
-DATABASE_HOST = 'pgsql.<%= domain %>' # Set to empty string for local socket
+DATABASE_PASSWORD = '<%= @pgsql_password %>'
+DATABASE_HOST = 'pgsql.<%= @domain %>' # Set to empty string for local socket
DATABASE_PORT = '' # Set to empty string for default
## Caching (optional)
diff --git a/modules/transifex/templates/30-site.conf b/modules/transifex/templates/30-site.conf
index 4d4e9e4c..3c386354 100644
--- a/modules/transifex/templates/30-site.conf
+++ b/modules/transifex/templates/30-site.conf
@@ -1,7 +1,7 @@
# Sites
SITE_ID = 1
# Your site's domain. This is used only in this file.
-SITE_DOMAIN = '<%= domain %>'
+SITE_DOMAIN = '<%= @domain %>'
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
diff --git a/modules/transifex/templates/40-apps.conf b/modules/transifex/templates/40-apps.conf
new file mode 100644
index 00000000..dd92fb1c
--- /dev/null
+++ b/modules/transifex/templates/40-apps.conf
@@ -0,0 +1,58 @@
+# Enable actionlog application
+ACTIONLOG_ENABLED = True
+
+# Notifications
+# Enable notifications (requires working email settings)
+# TODO: Make notifications not crash the app if email sending doesn't work.
+# To enable notices you also need to enable the context processor and
+# application below.
+ENABLE_NOTICES = True
+
+# If True it requires a `./manage.py emit_notices` from the command line to
+# send the notifications/emails.
+NOTIFICATION_QUEUE_ALL = True
+
+# Tagging
+FORCE_LOWERCASE_TAGS = True
+
+# Registration - OpenID (Currently not used)
+# Requires respective middleware and application
+ENABLE_OPENID=False
+
+# Useful to work with another authentication backends
+# When True the registration system (django-profile) is disabled
+ENABLE_SIMPLEAUTH=True
+
+# Enable/Disable django-contact app URL.
+ENABLE_CONTACT_FORM = True
+
+# Django-profile
+AUTH_PROFILE_MODULE = 'txcommon.profile'
+DEFAULT_AVATAR_WIDTH = 96
+AVATAR_WEBSEARCH = False
+GOOGLE_MAPS_API_KEY = "ABQIAAAAfLle-Q79W6zCD3xcdCPsABQCULP4XOMyhPd8d_NrQQEO8sT8XBRbfo_kvrGWYPqQ7PnWFWJbDj4bQQ"
+REQUIRE_EMAIL_CONFIRMATION = False
+
+ugettext = lambda s: s
+LOGIN_URL = '/%s%s' % ('accounts/', 'login/')
+
+# Default timeout duration in days
+# How many days should the user stay logged in if he selects "Stay signed in"?
+LOGIN_DAYS = 21
+
+# URL used to access the Django Admin Panel
+# Ex. http://domain.com/admin/
+DJANGO_ADMIN_PANEL_URL = 'admin'
+
+# The directory where the vcs app will checkout stuff and play around.
+# Warning: On production systems this should be a place outside of the source
+# and with enough disk space. Eg. /var/lib/transifex.
+# WARNING: Kept only for migration purposes. It will be removed in 1.1.
+SCRATCH_DIR = os.path.join('/var/lib/transifex', 'scratchdir')
+
+AJAX_LOOKUP_CHANNELS = {
+ # the simplest case, pass a DICT with the model and field to search against :
+ 'users' : ('txcommon.lookups', 'UsersLookup'),
+ 'projects' : ('projects.lookups', 'ProjectsLookup'),
+ 'resources' : ('resources.lookups', 'ResourcesLookup'),
+}
diff --git a/modules/transifex/templates/45-ldap.conf b/modules/transifex/templates/45-ldap.conf
new file mode 100644
index 00000000..2532edf5
--- /dev/null
+++ b/modules/transifex/templates/45-ldap.conf
@@ -0,0 +1,48 @@
+AUTHENTICATION_BACKENDS = (
+ 'custom_backend.ForceUidLDAPBackend',
+ 'django.contrib.auth.backends.ModelBackend',
+)
+
+# Use LDAP group membership to calculate group permissions.
+AUTH_LDAP_FIND_GROUP_PERMS = True
+
+AUTH_LDAP_START_TLS = True
+
+# Cache group memberships for an hour to minimize LDAP traffic
+AUTH_LDAP_CACHE_GROUPS = True
+AUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600
+
+import ldap
+from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
+
+
+# Baseline configuration.
+AUTH_LDAP_SERVER_URI = "ldap://ldap.<%= @domain %> ldap://ldap-slave-1.<%= @domain %>"
+
+AUTH_LDAP_BIND_DN = "cn=transifex-<%= @hostname %>,ou=System Accounts,<%= @dc_suffix %>"
+AUTH_LDAP_BIND_PASSWORD = "<%= @ldap_password %>"
+
+AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=People,<%= @dc_suffix %> ",
+ ldap.SCOPE_SUBTREE, "(|(uid=%(user)s)(mail=%(user)s))")
+
+# Set up the basic group parameters.
+AUTH_LDAP_GROUP_SEARCH = LDAPSearch("ou=Group,<%= @dc_suffix %>",
+ ldap.SCOPE_SUBTREE, "(objectClass=groupOfNames)"
+)
+AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr="cn")
+
+# Only users in this group can log in.
+#AUTH_LDAP_REQUIRE_GROUP = "cn=enabled,ou=groups,dc=example,dc=com"
+
+# Populate the Django user from the LDAP directory.
+AUTH_LDAP_USER_ATTR_MAP = {
+ "first_name": "givenName",
+ "last_name": "sn",
+ "email": "mail"
+}
+
+AUTH_LDAP_USER_FLAGS_BY_GROUP = {
+ "is_active": "cn=mga-i18n,ou=Group,<%= @dc_suffix %>",
+ "is_staff": "cn=mga-i18n-committers,ou=Group,<%= @dc_suffix %>",
+ "is_superuser": "cn=mga-sysadmin,ou=Group,<%= @dc_suffix %>"
+}
diff --git a/modules/transifex/templates/50-project.conf b/modules/transifex/templates/50-project.conf
new file mode 100644
index 00000000..013741b2
--- /dev/null
+++ b/modules/transifex/templates/50-project.conf
@@ -0,0 +1,85 @@
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+ 'django.template.loaders.filesystem.load_template_source',
+ 'django.template.loaders.app_directories.load_template_source',
+# 'django.template.loaders.eggs.load_template_source',
+)
+
+TEMPLATE_CONTEXT_PROCESSORS = [
+ "django.core.context_processors.auth",
+ "django.core.context_processors.debug",
+ "django.core.context_processors.i18n",
+ "django.core.context_processors.media",
+ "django.core.context_processors.request",
+ "notification.context_processors.notification",
+ "staticfiles.context_processors.static_url",
+]
+
+TEMPLATE_CONTEXT_PROCESSORS += (
+ 'userprofile.context_processors.css_classes',
+ 'txcommon.context_processors.site_section',
+ 'txcommon.context_processors.bidi',
+)
+
+MIDDLEWARE_CLASSES = [
+ # Enable GZIP compression
+ 'django.middleware.gzip.GZipMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ # Enable protection against Cross Site Request Forgeries
+ # FIXME: Enable CSRF!
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.middleware.locale.LocaleMiddleware',
+ 'django.middleware.doc.XViewMiddleware',
+ 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
+ 'django_sorting.middleware.SortingMiddleware',
+# 'django.middleware.transaction.TransactionMiddleware',
+ 'pagination.middleware.PaginationMiddleware',
+]
+
+ROOT_URLCONF = 'urls'
+
+TEMPLATE_DIRS = [
+ '<%= @templates_dir %>',
+ os.path.join(TX_ROOT, 'templates'),
+]
+
+INSTALLED_APPS = [
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.comments',
+ 'django.contrib.contenttypes',
+ 'django.contrib.flatpages',
+ 'django.contrib.markup',
+ 'django.contrib.sessions',
+ 'django.contrib.sites',
+ 'django.contrib.admindocs',
+ 'notification',
+ 'django_filters',
+ 'django_sorting',
+ 'south',
+ 'tagging',
+ 'pagination',
+ 'piston',
+ 'contact_form',
+ 'ajax_select',
+ 'threadedcomments',
+ 'staticfiles',
+ 'authority',
+ # Transifex specific apps:
+ 'transifex.txcommon',
+ # It comes here due to https://trac.transifex.org/ticket/596
+ 'userprofile',
+ 'transifex.languages',
+ 'transifex.projects',
+ 'transifex.releases',
+ 'transifex.actionlog',
+ 'transifex.txpermissions',
+ 'transifex.teams',
+ 'transifex.resources',
+ 'transifex.storage',
+ # Must come at the end
+ 'django_addons',
+]
+
+COMMENTS_APP = 'threadedcomments'
diff --git a/modules/viewvc/files/robots.txt b/modules/viewvc/files/robots.txt
new file mode 100644
index 00000000..dbb13834
--- /dev/null
+++ b/modules/viewvc/files/robots.txt
@@ -0,0 +1,29 @@
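+# Consecutive User-agent lines form a single group: the Disallow rules that
+# follow apply to every crawler listed in that group.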
+User-agent: Googlebot
+User-agent: Baiduspider
+User-agent: bingbot
+User-agent: YandexBot
+User-agent: Mail.RU_Bot
+User-agent: MJ12bot
+User-agent: ClaudeBot
+User-agent: Amazonbot
+User-agent: PetalBot
+User-agent: Bytespider
+User-agent: facebookexternalhit
+Disallow: /*/tags/
+Disallow: *?view=annotate*
+Disallow: *?annotate=*
+Disallow: *?view=diff*
+Disallow: *?r1=*
+Disallow: *sortby=*
+Disallow: *sortdir=*
+Disallow: *?revision=*&view=markup&*
+Disallow: *pathrev=*
+Disallow: *?*&view=log*
+Disallow: *view=log&*
+Disallow: *diff_format=*
+User-agent: AhrefsBot
+Disallow: /
+User-agent: Sogou web spider
+Disallow: /
+User-agent: *
+Crawl-delay: 30
diff --git a/modules/viewvc/files/setcookieredirect.html b/modules/viewvc/files/setcookieredirect.html
new file mode 100644
index 00000000..fe98b9dc
--- /dev/null
+++ b/modules/viewvc/files/setcookieredirect.html
@@ -0,0 +1,28 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>User check</title>
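+ <!-- Served as /_check by the viewvc vhost (see modules/viewvc/manifests/init.pp):
+ sets a session cookie, then redirects back to the sanitized ?to= path -->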
+ <script type="text/javascript" defer>
+ const randomValue = "6436"; // Chosen by fair dice roll. Guaranteed to be random.
+ document.cookie = `session=${randomValue}; path=/; expires=${new Date(Date.now() + 24*3600*1000).toUTCString()}`;
+ const params = new Proxy(new URLSearchParams(window.location.search), {
+ get: (searchParams, prop) => searchParams.get(prop),
+ });
+ let path = params.to;
+ // Sanitize redirect path to avoid malicious arbitrary redirects
+ if (/^\/[-a-zA-Z0-9~_.?&=/+]*$/.test(decodeURIComponent(path))) {
+ const current = new URL(window.location.href);
+ window.location.href = encodeURI(current.origin + decodeURIComponent(path));
+ } else {
+ window.onload = function() {
+ document.getElementById('error').innerHTML = 'Error! Bad redirect location!';
+ }
+ }
+ </script>
+ </head>
+ <body>
+ Redirecting back...
+ <br>
+ <p id="error"><!-- space for error message --></p>
+ </body>
+</html>
diff --git a/modules/viewvc/manifests/init.pp b/modules/viewvc/manifests/init.pp
new file mode 100644
index 00000000..bd676f29
--- /dev/null
+++ b/modules/viewvc/manifests/init.pp
@@ -0,0 +1,74 @@
+class viewvc {
+ include apache::mod::fcgid
+ include viewvc::var
+ package {['viewvc',
+ 'python2-svn',
+ 'python-flup']: }
+
+ # http_expiration_time = 600
+ # svn_roots = admin: svn://svn.mageia.org/svn/adm/
+
+ file { '/etc/viewvc/viewvc.conf':
+ content => template($viewvc::var::tmpl_viewvc_conf),
+ notify => Service['apache'],
+ require => Package['viewvc'],
+ }
+
+ apache::webapp_other { 'viewvc':
+ webapp_file => 'viewvc/webapp.conf',
+ }
+
+ mga_common::local_script { 'kill_viewvc':
+ content => template('viewvc/kill_viewvc.sh'),
+ }
+
+ cron { 'kill_viewvc':
+ command => '/usr/local/bin/kill_viewvc',
+ hour => '*',
+ minute => '*/5',
+ user => 'apache',
+ environment => 'MAILTO=root',
+ }
+
+ $viewvc_docroot = '/usr/share/viewvc/templates/docroot'
+ $robotsfile = "$viewvc_docroot/robots.txt"
+ file { $robotsfile:
+ ensure => present,
+ mode => '0644',
+ owner => root,
+ group => root,
+ source => 'puppet:///modules/viewvc/robots.txt',
+ }
+
+ file { "$viewvc_docroot/setcookieredirect.html":
+ ensure => present,
+ mode => '0644',
+ owner => root,
+ group => root,
+ source => 'puppet:///modules/viewvc/setcookieredirect.html',
+ }
+
+ $vhost_aliases = {
+ '/viewvc' => $viewvc_docroot,
+ '/robots.txt' => $robotsfile,
+ '/_check' => "$viewvc_docroot/setcookieredirect.html",
+ }
+
+ $script_aliases = {
+ '/' => '/usr/share/viewvc/bin/wsgi/viewvc.fcgi/',
+ }
+
+ $process = 4
+
+ apache::vhost::base { $viewvc::var::hostname:
+ aliases => $vhost_aliases,
+ content => template('apache/vhost_fcgid_norobot.conf'),
+ }
+
+ apache::vhost::base { "ssl_${viewvc::var::hostname}":
+ vhost => $viewvc::var::hostname,
+ use_ssl => true,
+ aliases => $vhost_aliases,
+ content => template('apache/vhost_fcgid_norobot.conf'),
+ }
+}
diff --git a/modules/viewvc/manifests/var.pp b/modules/viewvc/manifests/var.pp
new file mode 100644
index 00000000..9027d808
--- /dev/null
+++ b/modules/viewvc/manifests/var.pp
@@ -0,0 +1,9 @@
+# $hostname:
+# vhost used by viewvc
+# $tmpl_viewvc_conf:
+# path to the /etc/viewvc/viewvc.conf template file
+class viewvc::var(
+ $hostname = "svnweb.${::domain}",
+ $tmpl_viewvc_conf = 'viewvc/viewvc.conf'
+) {
+}
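+# Hypothetical usage sketch (value for illustration only): declare the class
+# with an explicit parameter before including viewvc to change the vhost, e.g.
+#   class { 'viewvc::var':
+#       hostname => "svn.${::domain}",
+#   }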
diff --git a/modules/viewvc/templates/kill_viewvc.sh b/modules/viewvc/templates/kill_viewvc.sh
new file mode 100755
index 00000000..7283a10c
--- /dev/null
+++ b/modules/viewvc/templates/kill_viewvc.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Kill viewvc if the process gets too large
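+# Invoked every 5 minutes from cron as the apache user (see modules/viewvc/manifests/init.pp)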
+max_memory=1000000 # size in KiB
+
+for process in $(pgrep viewvc.fcgi)
+do
+ process_mem=$(pmap "$process" | grep total | sed 's/ \+total \+\([[:digit:]]\+\)K/\1/')
+ if [ -n "$process_mem" ] && [ "$process_mem" -gt "$max_memory" ]
+ then
+ kill -15 "$process"
+ fi
+done
diff --git a/modules/viewvc/templates/viewvc.conf b/modules/viewvc/templates/viewvc.conf
new file mode 100644
index 00000000..dec74771
--- /dev/null
+++ b/modules/viewvc/templates/viewvc.conf
@@ -0,0 +1,1002 @@
+##---------------------------------------------------------------------------
+##
+## Configuration file for ViewVC
+##
+## Information on ViewVC is located at the following web site:
+## http://viewvc.org/
+##
+##---------------------------------------------------------------------------
+
+## THE FORMAT OF THIS CONFIGURATION FILE
+##
+## This file is delineated by sections, specified in [brackets]. Within
+## each section are a number of configuration settings. These settings
+## take the form of: name = value. Values may be continued on the
+## following line by indenting the continued line.
+##
+## WARNING: Indentation *always* means continuation. Name=value lines
+## should always start in column zero.
+##
+## Comments should always start in column zero, and are identified
+## with "#". By default each of the configuration items is
+## commented out, with the default value of the option shown.
+## You'll need to remove the '#' that precedes configuration
+## options whose values you wish to modify.
+##
+## Certain configuration settings may have multiple values. These should
+## be separated by a comma. The settings where this is allowed are noted
+## below. Any other setting that requires special syntax is noted at that
+## setting.
+##
+##
+## SOME TERMINOLOGY USED HEREIN
+##
+## "root" - This is a CVS or Subversion repository. For Subversion, the
+## meaning is pretty clear, as the virtual, versioned directory tree
+## stored inside a Subversion repository looks nothing like the actual
+## tree visible with shell utilities that holds the repository. For
+## CVS, this is more confusing, because CVS's repository layout mimics
+## (actually, defines) the layout of the stuff housed in the repository.
+## But a CVS repository can be identified by the presence of a CVSROOT
+## subdirectory in its root directory.
+##
+## "module" - A module is a top-level subdirectory of a root, usually
+## associated with the concept of a single "project" among many housed
+## within a single repository.
+##
+##
+## BASIC VIEWVC CONFIGURATION HINTS
+##
+## While ViewVC has quite a few configuration options, you generally
+## only need to change a small subset of them to get your ViewVC
+## installation working properly. Here are some options that we
+## recommend you pay attention to. Of course, don't try to change the
+## options here -- do so in the relevant section of the configuration
+## file below.
+##
+## For correct operation, you will probably need to change the following
+## configuration variables:
+##
+## cvs_roots (for CVS)
+## svn_roots (for Subversion)
+## root_parents (for CVS or Subversion)
+## default_root
+## root_as_url_component
+## rcs_dir
+## mime_types_files
+## the many options in the [utilities] section
+##
+## It is usually desirable to change the following variables:
+##
+## address
+## forbidden
+##
+## To optimize delivery of ViewVC static files:
+##
+## docroot
+##
+## To customize the display of ViewVC for your site:
+##
+## template_dir
+## the [templates] override section
+##
+
+##---------------------------------------------------------------------------
+[general]
+
+## cvs_roots: Specifies each of the CVS roots on your system and
+## assigns names to them. Each root should be given by a "name: path"
+## value. Multiple roots should be separated by commas and can be
+## placed on separate lines.
+##
+## Example:
+## cvs_roots = cvsroot: /opt/cvs/repos1,
+## anotherroot: /usr/local/cvs/repos2
+##
+#cvs_roots =
+
+## svn_roots: Specifies each of the Subversion roots (repositories) on
+## your system and assigns names to them. Each root should be given by
+## a "name: path" value. Multiple roots should be separated by commas
+## and can be placed on separate lines.
+##
+## Example:
+## svn_roots = svnrepos: /opt/svn/,
+## anotherrepos: /usr/local/svn/repos2
+##
+#svn_roots = adm: /svn/adm/
+# web: /svn/web/
+# packages: /svn/packages/
+# soft: /svn/soft/
+
+## root_parents: Specifies a list of directories in which any number of
+## repositories may reside. Rather than force you to add a new entry
+## to 'cvs_roots' or 'svn_roots' each time you create a new repository,
+## ViewVC rewards you for organising all your repositories under a few
+## parent directories by allowing you to simply specify just those
+## parent directories. ViewVC will then notice each repository in that
+## directory as a new root whose name is the subdirectory of the parent
+## path in which that repository lives.
+##
+## You can specify multiple parent paths separated by commas or new lines.
+##
+## WARNING: these names can, of course, clash with names you have
+## defined in your cvs_roots or svn_roots configuration items. If this
+## occurs, you can either rename the offending repository on disk, or
+## grant new names to the clashing item in cvs_roots or svn_roots.
+## Each parent path is processed sequentially, so repositories under
+## later parent paths may override earlier ones.
+##
+## Example:
+## root_parents = /opt/svn : svn,
+## /opt/cvs : cvs
+##
+root_parents = /svn : svn
+
+## default_root: This is the name of the default root. Valid names
+## include those explicitly listed in the cvs_roots and svn_roots
+## configuration options, as well as those implicitly indicated by
+## virtue of being the basenames of repositories found in the
+## root_parents option locations.
+##
+## NOTE: This setting is ignored when root_as_url_component is enabled.
+##
+## Example:
+## default_root = cvsroot
+##
+#default_root =
+
+## mime_types_files: This is a list of pathnames to a set of MIME type
+## mapping files to help ViewVC guess the correct MIME type of a
+## versioned file. The pathnames listed here are specified in order of
+## authoritativeness either as absolute paths or relative to this
+## configuration file.
+##
+## As a convenience, ViewVC provides a MIME type mapping file
+## (mimetypes.conf) which is, by default, the preferred provider of
+## MIME type mapping answers, but which is also empty. If you find
+## that ViewVC is unable to accurately guess MIME types based on the
+## extensions of some of your versioned files, you can add records of
+## your preferred mappings to the provided mimetypes.conf file (or to
+## your system's mapping files, if you wish).
+##
+## You might, for example, wish to have ViewVC also consult the mapping
+## files provided by your operating system and Apache.
+##
+## Example:
+## mime_types_files = mimetypes.conf,
+## /etc/mime.types,
+## /usr/local/apache2/conf/mime.types
+##
+mime_types_files = /etc/viewvc/mimetypes.conf, /etc/httpd/conf/mime.types
+
+## address: The address of the local repository maintainer. (This
+## option is provided only as a convenience for ViewVC installations
+## which are using the default template set, where the value of this
+## option will be displayed in the footer of every ViewVC page.)
+##
+## Example:
+## address = admin@server.com
+##
+#address =
+
+## kv_files: Provides a mechanism for custom key/value pairs to be
+## available to templates. These are stored in key/value (KV) files.
+##
+## The paths of the KV files are listed here, specified either as
+## absolute paths or relative to this configuration file. The files
+## use the same format as this configuration file, containing one or
+## more user-defined sections, and user-defined options in those
+## sections. ViewVC makes these options available to template authors
+## as:
+##
+## kv.SECTION.OPTION
+##
+## Note that an option name can be dotted. For example:
+##
+## [my_images]
+## logos.small = /images/small-logo.png
+## logos.big = /images/big-logo.png
+##
+## Templates can use these with a directive like: [kv.my_images.logos.small]
+##
+## Note that section names which are common to multiple KV files will
+## be merged. If two files have a [my_images] section, then the
+## options in those two like-named sections will be merged together.
+## If two files have the same option name in a section, then one will
+## overwrite the other (and which one "wins" is unspecified).
+##
+## To further categorize the KV files, and how the values are provided to
+## the templates, a KV file name may be annotated with an additional level
+## of dotted naming. For example:
+##
+## kv_files = [asf]kv/images.conf
+##
+## Assuming the same section as above, the template would refer to an image
+## using [kv.asf.my_images.logos.small]
+##
+## Lastly, it is possible to use %lang% in the filenames to specify a
+## substitution of the selected language-tag.
+##
+## Example:
+## kv_files = kv/file1.conf, kv/file2.conf, [i18n]kv/%lang%_data.conf
+##
+#kv_files =
+
+## This option is a comma-separated list of language-tag values
+## available to ViewVC. The first language-tag listed is the default
+## language, and will be used if an Accept-Language header is not
+## present in the request, or none of the user's requested languages
+## are available. If there are ties on the selection of a language,
+## then the first to appear in the list is chosen.
+##
+## Example:
+## languages = en-us, en-gb, de
+##
+#languages = en-us
+
+
+##---------------------------------------------------------------------------
+[utilities]
+
+## ViewVC uses (sometimes optionally) various third-party programs to do some
+## of the heavy lifting. Generally, it will attempt to execute those utility
+## programs in such a way that if they are found in ViewVC's executable
+## search path ($PATH, %PATH%, etc.) all is well. But sometimes these tools
+## aren't installed in the executable search path, so here's where you can
+## tell ViewVC where to find them.
+##
+## NOTE: Options with a "_dir" suffix are for configuring the
+## directories in which certain programs live. Note that this might
+## not be the same directory into which the program's installer dumped
+## the whole program package -- we want the deepest directory in which
+## the executable program itself resides ("C:\rcstools\bin\win32"
+## rather than just "C:\rcstools", for example). The values of options
+## whose names lack the "_dir" suffix should point to the actual
+## program itself (such as "C:\Program Files\cvsnt\cvs.exe").
+
+
+## rcs_dir: Directory in which the RCS utilities are installed, used
+## for viewing CVS repositories.
+##
+## Example:
+## rcs_dir = /usr/bin/
+##
+#rcs_dir =
+
+## cvsnt: Location of cvsnt program. ViewVC can use CVSNT (www.cvsnt.org)
+## instead of the RCS utilities to retrieve information from CVS
+## repositories. To enable use of CVSNT, set the "cvsnt" value to the
+## path of the CVSNT executable. (If CVSNT is on the standard path, you
+## can also set it to the name of the CVSNT executable). By default
+## "cvsnt" is set to "cvs" on Windows and is not set on other platforms.
+##
+## Examples:
+## cvsnt = K:\Program Files\cvsnt\cvs.exe
+## cvsnt = /usr/bin/cvs
+## cvsnt = cvs
+##
+#cvsnt =
+
+## svn: Location of the Subversion command-line client, used for
+## viewing Subversion repositories.
+##
+## Example:
+## svn = /usr/bin/svn
+##
+#svn =
+
+## diff: Location of the GNU diff program, used for showing file
+## version differences.
+##
+## Example:
+## diff = /usr/bin/diff
+##
+#diff =
+
+## cvsgraph: Location of the CvsGraph program, a graphical CVS version
+## graph generator (see options.use_cvsgraph).
+##
+## Example:
+## cvsgraph = /usr/local/bin/cvsgraph
+##
+#cvsgraph =
+
+
+##---------------------------------------------------------------------------
+[options]
+
+## root_as_url_component: Interpret the first path component in the URL
+## after the script location as the root to use. This is an
+## alternative to using the "root=" query key. If ViewVC is configured
+## with multiple repositories, this results in more natural looking
+## ViewVC URLs.
+##
+## NOTE: Enabling this option will break backwards compatibility with
+## any old ViewCVS URL which doesn't have an explicit "root" parameter.
+##
+#root_as_url_component = 1
+
+## checkout_magic: Use checkout links with magic /*checkout*/ prefixes so
+## checked out HTML pages can have working links to other repository files
+##
+## NOTE: This option is DEPRECATED and should not be used in new ViewVC
+## installations. Setting "default_file_view = co" achieves the same effect
+##
+#checkout_magic = 0
+
+## allowed_views: List the ViewVC views which are enabled. Views not
+## in this comma-delimited list will not be served (or, will return an
+## error on attempted access).
+## Possible values: "annotate", "co", "diff", "markup", "roots", "tar"
+##
+allowed_views = annotate, diff, markup, roots, co
+
+## authorizer: The name of the ViewVC authorizer plugin to use when
+## authorizing access to repository contents. This value must be the
+## name of a Python module addressable as vcauth.MODULENAME (most
+## easily accomplished by placing it in ViewVC's lib/vcauth/ directory)
+## and which implements a ViewVCAuthorizer class (as a subclass of
+## vcauth.GenericViewVCAuthorizer). You can provide custom parameters
+## to the authorizer module by defining configuration sections named
+## authz-MODULENAME and adding the parameter keys and values there.
+##
+## ViewVC provides the following modules:
+## svnauthz - based on Subversion authz files
+## forbidden - simple path glob matches against top-level root directories
+## forbiddenre - root and path matches against regular expressions
+##
+## NOTE: Only one authorizer may be in use for a given ViewVC request.
+## It doesn't matter if you configure the parameters of multiple
+## authorizer plugins -- only the authorizer whose name is configured
+## here (or effectively configured here via per-vhost or per-root
+## configuration) will be activated.
+##
+#authorizer =
+
+## hide_cvsroot: Don't show the CVSROOT directory
+## 1 Hide CVSROOT directory
+## 0 Show CVSROOT directory
+##
+## NOTE: Someday this option may be removed in favor of letting
+## individual authorizer plugins hide the CVSROOT.
+##
+#hide_cvsroot = 1
+
+## mangle_email_addresses: Mangle email addresses in marked-up output.
+## There are various levels of mangling available:
+## 0 - No mangling; markup un-mangled email addresses as hyperlinks
+## 1 - Obfuscation (using entity encoding); no hyperlinking
+## 2 - Data-dropping address truncation; no hyperlinking
+##
+## NOTE: this will not affect the display of versioned file contents, only
+## addresses that appear in version control metadata (e.g. log messages).
+##
+#mangle_email_addresses = 0
+
+## default_file_view: "log", "co", or "markup"
+## Controls whether the default view for file URLs is a checkout view or
+## a log view. "log" is the default for backwards compatibility with old
+## ViewCVS URLs, but "co" has the advantage that it allows ViewVC to serve
+## static HTML pages directly from a repository with working links
+## to other repository files
+##
+## NOTE: Changing this option may break compatibility with existing
+## bookmarked URLs.
+##
+## ALSO NOTE: If you choose one of the "co" or "markup" views, be sure
+## to enable it (via the allowed_views option)
+##
+#default_file_view = log
+
+## http_expiration_time: Expiration time (in seconds) for cacheable
+## pages served by ViewVC. Note that in most cases, a cache aware
+## client will only revalidate the page after it expires (using the
+## If-Modified-Since and/or If-None-Match headers) and that browsers
+## will also revalidate the page when the reload button is pressed.
+## Set to 0 to disable the transmission of these caching headers.
+##
+http_expiration_time = 600
+
+## generate_etags: Generate Etag headers for relevant pages to assist
+## in browser caching.
+## 1 Generate Etags
+## 0 Don't generate Etags
+##
+#generate_etags = 1
+
+## svn_ignore_mimetype: Don't consult the svn:mime-type property to
+## determine how to display a file in the markup view. This is
+## especially helpful when versioned images carry the default
+## Subversion-calculated MIME type of "application/octet-stream" (which
+## isn't recognized as viewable type by browsers).
+##
+#svn_ignore_mimetype = 0
+
+## svn_config_dir: Path of the Subversion runtime configuration
+## directory ViewVC should consult for various things, including cached
+## remote authentication credentials. If unset, Subversion will use
+## the default location(s) ($HOME/.subversion, etc.)
+##
+#svn_config_dir =
+
+## use_rcsparse: Use the rcsparse Python module to retrieve CVS
+## repository information instead of invoking rcs utilities [EXPERIMENTAL]
+##
+#use_rcsparse = 0
+
+## sort_by: File sort order
+## file Sort by filename
+## rev Sort by revision number
+## date Sort by commit date
+## author Sort by author
+## log Sort by log message
+##
+#sort_by = file
+
+## sort_group_dirs: Group directories when sorting
+## 1 Group directories together
+## 0 No grouping -- sort directories as any other item would be sorted
+##
+#sort_group_dirs = 1
+
+## hide_attic: Hide or show the contents of the Attic subdirectory
+## 1 Hide dead files inside Attic subdir
+## 0 Show the files which are inside the Attic subdir
+##
+#hide_attic = 1
+
+## hide_errorful_entries: Hide or show errorful directory entries
+## (perhaps due to not being readable, or some other rlog parsing
+## error, etc.)
+## 1 Hide errorful entries from the directory display
+## 0 Show errorful entries (with their errors) in the directory display
+##
+#hide_errorful_entries = 0
+
+## log_sort: Sort order for log messages
+## date Sort revisions by date
+## rev Sort revision by revision number
+## none Use the version control system's ordering
+##
+#log_sort = date
+
+## diff_format: Default diff format
+## h Human readable
+## u Unified diff
+## c Context diff
+## s Side by side
+## l Long human readable (more context)
+## f Full human readable (entire file)
+##
+#diff_format = h
+
+## hr_breakable: Diff view line breaks
+## 1 lines break at spaces
+## 0 no line breaking
+## Or, use a positive integer > 1 to cut lines after that many characters
+##
+#hr_breakable = 1
+
+## hr_funout: Give out function names in human readable diffs.
+## (Only works well for C source files, otherwise diff's heuristic falls short.)
+## ('-p' option to diff)
+##
+#hr_funout = 0
+
+## hr_ignore_white: Ignore whitespace (indentation and stuff) for human
+## readable diffs.
+## ('-w' option to diff)
+##
+#hr_ignore_white = 0
+
+## hr_ignore_keyword_subst: Ignore diffs which are caused by keyword
+## substitution (such as "$Id - Stuff").
+## ('-kk' option to rcsdiff)
+##
+#hr_ignore_keyword_subst = 1
+
+## hr_intraline: Enable highlighting of intraline changes in human
+## readable diffs. [Requires Python 2.4]
+##
+#hr_intraline = 0
+
+## allow_compress: Allow compression via gzip of output if the Browser
+## accepts it (HTTP_ACCEPT_ENCODING contains "gzip").
+##
+## NOTE: this relies on Python's gzip module, which has proven to be
+## not-so-performant. Enabling this feature should reduce the overall
+## transfer size of ViewVC's responses to the client's request, but
+## will do so with a speed penalty.
+##
+#allow_compress = 0
+
+## template_dir: The directory which contains the EZT templates used by
+## ViewVC to customize the display of the various output views. ViewVC
+## looks in this directory for files with names that match the name of
+## the view ("log", "directory", etc.) plus the ".ezt" extension. If
+## specified as a relative path, it is relative to the directory where
+## this config file resides; absolute paths may be used as well. If
+## %lang% occurs in the pathname, then the selected language will be
+## substituted.
+##
+## SEE ALSO: the [templates] configuration section, where you can
+## override templates on a per-view basis.
+##
+template_dir = /usr/share/viewvc/templates/
+
+## docroot: Web path to a directory that contains ViewVC static files
+## (stylesheets, images, etc.) If set, static files will get
+## downloaded directly from this location. If unset, static files
+## will be served by the ViewVC script (at a likely performance
+## penalty, and from the "docroot" subdirectory of the directory
+## specified by the "template_dir" option).
+##
+## NOTE: This option is evaluated outside the context of a particular
+## root. Be careful when using per-root configuration to select an
+## alternate template set as the default value for this option will
+## still be based on the global default template set per 'template_dir'
+## above, not on 'template_dir' as overridden for a given root.
+##
+docroot = /viewvc
+
+## show_subdir_lastmod: Show last changelog message for CVS subdirectories
+##
+## NOTE: The current implementation makes many assumptions and may show
+## the incorrect file at some times. The main assumption is that the
+## last modified file has the newest filedate. But some CVS operations
+## touches the file without even when a new version is not checked in,
+## and TAG based browsing essentially puts this out of order, unless
+## the last checkin was on the same tag as you are viewing. Enable
+## this if you like the feature, but don't rely on correct results.
+##
+## SECURITY WARNING: Enabling this will currently leak unauthorized
+## path names.
+##
+#show_subdir_lastmod = 0
+
+## show_logs: Show the most recent log entry in directory listings.
+##
+#show_logs = 1
+
+## show_log_in_markup: Show log when viewing file contents.
+##
+#show_log_in_markup = 1
+
+## cross_copies: Cross filesystem copies when traversing Subversion
+## file revision histories.
+##
+#cross_copies = 1
+
+## use_localtime: Display dates as UTC or in local time zone.
+##
+#use_localtime = 0
+
+## short_log_len: The length (in characters) to which the most recent
+## log entry should be truncated when shown in the directory view.
+##
+#short_log_len = 80
+
+## enable_syntax_coloration: Should we colorize known file content
+## syntaxes? [Requires Pygments Python module]
+##
+#enable_syntax_coloration = 1
+
+## tabsize: The number of spaces into which tabstops are converted
+## when viewing file contents.
+##
+#tabsize = 8
+
+## detect_encoding: Should we attempt to detect versioned file
+## character encodings? [Requires 'chardet' module, and is currently
+## used only by the syntax coloration logic -- if enabled -- for the
+## 'markup' and 'annotate' views; see 'enable_syntax_coloration'.]
+##
+#detect_encoding = 0
+
+## use_cvsgraph: Use CvsGraph to offer visual graphs of CVS revision history.
+##
+#use_cvsgraph = 0
+
+## cvsgraph_conf: Location of the customized cvsgraph configuration file.
+## May be specified as an absolute path or as a path relative to this
+## configuration file.
+##
+cvsgraph_conf = /etc/viewvc/cvsgraph.conf
+
+## use_re_search: Enable regular expression search of files in a directory.
+##
+## WARNING: Enabling this option can consume HUGE amounts of server
+## time. A "checkout" must be performed on *each* file in a directory,
+## and the result needs to be searched for a match against the regular
+## expression.
+##
+## SECURITY WARNING: Since a user can enter the regular expression, it
+## is possible for them to enter an expression with many alternatives
+## and a lot of backtracking. Executing that search over thousands of
+## lines over dozens of files can easily tie up a server for a long
+## period of time. This option should only be used on sites with
+## trusted users. It is highly inadvisable to use this on a public site.
+##
+#use_re_search = 0
+
+## dir_pagesize: Maximum number of directory entries on a given page.
+## This allows ViewVC to present discrete pages to the users instead of
+## the entire directory. Set to 0 to disable pagination.
+##
+dir_pagesize = 100
+
+## log_pagesize: Maximum number of revision log entries on a given page.
+## This allows ViewVC to present discrete pages to the users instead of
+## the entire revision log. Set to 0 to disable pagination.
+##
+log_pagesize = 100
+
+## limit_changes: Maximum number of changed paths shown per commit in
+## the Subversion revision view and in query results. This is not a
+## hard limit (the UI provides options to show all changed paths), but
+## it prevents ViewVC from generating enormous and hard to read pages
+## by default when they happen to contain import or merge commits
+## affecting hundreds or thousands of files. Set to 0 to disable the
+## limit.
+##
+#limit_changes = 100
+
+##---------------------------------------------------------------------------
+[templates]
+
+## You can override the templates used by various ViewVC views in this
+## section. By default, ViewVC will look for templates in the
+## directory specified by the "template_dir" configuration option (see
+## the documentation for that option for details). But if you want to
+## use a different template for a particular view, simply uncomment the
+## appropriate option below and specify the correct location of the EZT
+## template file you wish to use for that view.
+##
+## Templates are specified relative to the configured template
+## directory (see the "template_dir" option), but absolute paths may
+## also be used as well.
+##
+## If %lang% occurs in the pathname, then the selected language will be
+## substituted.
+##
+## NOTE: the selected language is defined by the "languages" item in the
+## [general] section, and based on the request's Accept-Language
+## header.
+##
+
+## diff: Template used for the file differences view.
+##
+#diff =
+
+## directory: Template used for the directory listing view.
+##
+#directory =
+
+## error: Template used for the ViewVC error display view.
+##
+#error =
+
+## file: Template used for the file contents/annotation view.
+##
+#file =
+
+## graph: Template used for the revision graph view.
+##
+#graph =
+
+## log: Template used for the revision log view.
+##
+#log =
+
+## query: Template used for the non-integrated query interface.
+##
+#query =
+
+## query_form: Template used for the query form view.
+##
+#query_form =
+
+## query_results: Template used for the query results view.
+##
+#query_results =
+
+## revision: Template used for the revision/changeset view.
+##
+#revision =
+
+## roots: Template used for the root listing view.
+##
+#roots =
+
+##---------------------------------------------------------------------------
+[cvsdb]
+
+## enabled: Enable database integration feature.
+##
+#enabled = 0
+
+## host: Database hostname. Leave unset to use a local Unix socket
+## connection.
+##
+#host =
+
+## port: Database listening port.
+##
+#port = 3306
+
+## database_name: ViewVC database name.
+##
+#database_name = ViewVC
+
+## user: Username of user with read/write privileges to the database
+## specified by the 'database_name' configuration option.
+##
+#user =
+
+## passwd: Password of user with read/write privileges to the database
+## specified by the 'database_name' configuration option.
+##
+#passwd =
+
+## readonly_user: Username of user with read privileges to the database
+## specified by the 'database_name' configuration option.
+##
+#readonly_user =
+
+## readonly_passwd: Password of user with read privileges to the database
+## specified by the 'database_name' configuration option.
+##
+#readonly_passwd =
+
+## row_limit: Maximum number of rows returned by a given normal query
+## to the database.
+##
+#row_limit = 1000
+
+## rss_row_limit: Maximum number of rows returned by a given query to
+## the database made as part of an RSS feed request. (Keeping in mind
+## that RSS readers tend to poll regularly for new data, you might want
+## to keep this set to a conservative number.)
+##
+#rss_row_limit = 100
+
+## check_database_for_root: Check if the repository is found in the
+## database before showing the query link and RSS feeds.
+##
+## WARNING: Enabling this check adds the cost of a database connection
+## and query to most ViewVC requests. If all your roots are represented
+## in the commits database, or if you don't care about the creation of
+## RSS and query links that might lead ultimately to error pages for
+## certain of your roots, or if you simply don't want to add this extra
+## cost to your ViewVC requests, leave this disabled.
+##
+#check_database_for_root = 0
+
+##---------------------------------------------------------------------------
+[vhosts]
+
+## Virtual hosts are individual logical servers accessible via
+## different hostnames, but which are all really the same physical
+## computer. For example, you might have your web server configured to
+## accept incoming traffic for both http://www.yourdomain.com/ and
+## http://viewvc.yourdomain.com/. Users pointing their web browsers at
+## each of those two URLs might see entirely different content via one
+## URL versus the other, but all that content actually lives on the
+## same computer, is served up via the same web server, and so
+## on. It just *looks* like it's coming from multiple servers.
+##
+## ViewVC allows you to customize its configuration options for
+## individual virtual hosts. You might, for example, wish to expose
+## all of your Subversion repositories at http://svn.yourdomain.com/viewvc/
+## and all your CVS ones at http://cvs.yourdomain.com/viewvc/, with no
+## cross-exposure. Using ViewVC's virtual host (vhost) configuration
+## support, you can do this. Simply create two vhost configurations
+## (one for each of your hostnames), then configure the cvs_roots
+## option only for the vhost associated with cvs.yourdomain.com, and
+## configure the svn_roots option only for the vhost associated with
+## svn.yourdomain.com.
+##
+## This section is a freeform configuration section, where you create
+## both the option names and their values. The names of the options
+## are then treated as canonical names of virtual hosts, and their
+## values are defined to be comma-delimited lists of hostname globs
+## against which incoming ViewVC requests will be matched to figure out
+## which vhost they apply to.
+##
+## After you've named and defined your vhosts, you may then create new
+## configuration sections whose names are of the form
+## vhost-VHOSTNAME/CONFIGSECTION. VHOSTNAME here is the canonical name
+## of one of the virtual hosts you defined under the [vhosts] section.
+## Inside those configuration sections, you override the standard
+## ViewVC options typically found in the base configuration section
+## named CONFIGSECTION ("general", "option", etc.)
+##
+## NOTE: Per-vhost overrides may only be applied to the following
+## sections:
+##
+## general
+## options
+## utilities
+## templates
+## cvsdb
+## authz-*
+##
+## Here is an example:
+##
+## [vhosts]
+## libs = libs.yourdomain.*, *.yourlibs.*
+## gui = guiproject.yourdomain.*
+##
+## [vhost-libs/general]
+## cvs_roots =
+## svn_roots = svnroot: /var/svn/libs-repos
+## default_root = svnroot
+##
+## [vhost-libs/options]
+## show_logs = 1
+##
+## [vhost-gui/general]
+## cvs_roots = cvsroot: /var/cvs/guiproject
+## svn_roots =
+## default_root = cvsroot
+##
+
+##---------------------------------------------------------------------------
+## ViewVC recognizes per-root configuration overrides, too. To
+## override the value of a configuration parameter only for a single
+## root, create a configuration section whose name is of the form
+## root-ROOTNAME/CONFIGSECTION. ROOTNAME here is the name of the root
+## as defined explicitly in cvs_roots or svn_roots or implicitly as the
+## basename of a root path in root_parents. Options found in this new
+## configuration section override for this one root the corresponding
+## options found in the base configuration section CONFIGSECTION
+## ("options", "authz-*", etc.) as interpreted after per-vhost
+## overrides (if any) have been applied.
+##
+## NOTE: Per-root overrides may only be applied to the following
+## sections:
+##
+## options
+## utilities
+## authz-*
+##
+## WARNING: Do not use per-root overrides if your ViewVC instance is
+## served via the standalone.py server option! Doing so could cause
+## ViewVC to be unable to function properly (or at all).
+##
+## Here is an example showing how to enable Subversion authz-based
+## authorization for only the single root named "svnroot":
+##
+## [root-svnroot/options]
+## authorizer = svnauthz
+##
+## [root-svnroot/authz-svnauthz]
+## authzfile = /path/to/authzfile
+##
+
+##---------------------------------------------------------------------------
+[authz-forbidden]
+
+## The "forbidden" authorizer forbids access to repository modules,
+## defined to be top-level subdirectories in a repository.
+##
+## NOTE: The options in this section apply only when the 'authorizer'
+## option (in the [options] section) is set to 'forbidden'.
+
+## forbidden: A comma-delimited list of patterns which match modules
+## that ViewVC should hide from users.
+##
+## You can use a simple list of modules, or something more complex:
+##
+## *) The "!" can be used before a module to explicitly state that it
+## is NOT forbidden. Whenever this form is seen, then all modules will
+## be forbidden unless one of the "!" modules match.
+##
+## *) Shell-style "glob" expressions may be used. "*" will match any
+## sequence of zero or more characters, "?" will match any single
+## character, "[seq]" will match any character in seq, and "[!seq]"
+## will match any character not in seq.
+##
+## *) Tests are performed in sequence. The first match will terminate the
+## testing. This allows for more complex allow/deny patterns.
+##
+## Tests are case-sensitive.
+##
+## NOTE: Again, this is for the hiding of modules within repositories, *not*
+## for the hiding of repositories (roots) themselves.
+##
+## Some examples:
+##
+## Disallow "example" but allow all others:
+## forbidden = example
+##
+## Disallow "example1" and "example2" but allow all others:
+## forbidden = example1, example2
+##
+## Allow *only* "example1" and "example2":
+## forbidden = !example1, !example2
+##
+## Forbid modules starting with "x":
+## forbidden = x*
+##
+## Allow modules starting with "x" but no others:
+## forbidden = !x*
+##
+## Allow "xml", forbid other modules starting with "x", and allow the rest:
+## forbidden = !xml, x*, !*
+##
+#forbidden =
+
+##---------------------------------------------------------------------------
+[authz-forbiddenre]
+
+## The "forbiddenre" authorizer forbids access to repositories and
+## repository paths by comparing a list of regular expressions
+## (separated by commas) against paths consisting of the repository (or
+## root) name plus the path of the versioned file or directory to be
+## tested. For example, to see if the user is authorized to see the
+## path "/trunk/www/index.html" in the repository whose root name is
+## "svnrepos", this authorizer will check the path
+## "svnrepos/trunk/www/index.html" against the list of forbidden
+## regular expressions. Directory paths will be terminated by a forward
+## slash.
+##
+## NOTE: The options in this section apply only when the 'authorizer'
+## option (in the [options] section) is set to 'forbiddenre'.
+
+## forbiddenre: A comma-delimited list of regular expressions which
+## match paths that ViewVC should hide from users.
+##
+## Like the "forbidden" authorizer...
+##
+## *) The "!" can be used before a module to explicitly state that it
+## is NOT forbidden. Whenever this form is seen, then all modules will
+## be forbidden unless one of the "!" modules match.
+##
+## *) Tests are performed in sequence. The first match will terminate the
+## testing. This allows for more complex allow/deny patterns.
+##
+## Unlike the "forbidden" authorizer, you can can use this to hide roots, too.
+##
+## Some examples:
+##
+## Disallow files named "PRIVATE", but allow all others:
+## forbiddenre = /PRIVATE$
+##
+## Disallow the "hidden" repository, allowing all others:
+## forbiddenre = ^hidden(/|$)
+##
+## Allow only the "example1" and "example2" roots and the paths inside them,
+## disallowing all others (which can be done in multiple ways):
+## forbiddenre = !^example1(/|$), !^example2(/|$)
+## forbiddenre = !^example[12](/|$)
+##
+## Only allow visibility of HTML files and the directories that hold them:
+## forbiddenre = !^([^/]+|.*(/|\.html))$
+##
+#forbiddenre =
+
+##---------------------------------------------------------------------------
+[authz-svnauthz]
+
+## The "svnauthz" authorizer uses a Subversion authz configuration file
+## to determine access to repository paths.
+##
+## NOTE: The options in this section apply only when the 'authorizer'
+## option (in the [options] section) is set to 'svnauthz'.
+
+## authzfile: Specifies the location of the authorization rules file
+## (using an absolute path).
+##
+#authzfile =
+
+## force_username_case: Like the AuthzForceUsernameCase httpd.conf
+## directive, set this to "upper" or "lower" to force the normalization
+## to upper- or lower-case, respectively, of incoming usernames prior
+## to comparison against the authorization rules files. Leave the
+## option unset to preserve the username case.
+##
+#force_username_case =
+
+##---------------------------------------------------------------------------
diff --git a/modules/viewvc/templates/webapp.conf b/modules/viewvc/templates/webapp.conf
new file mode 100644
index 00000000..de257cc0
--- /dev/null
+++ b/modules/viewvc/templates/webapp.conf
@@ -0,0 +1,2 @@
+# this file must be empty for now
+# signed $fqdn admin
diff --git a/modules/xinetd/manifests/init.pp b/modules/xinetd/manifests/init.pp
new file mode 100644
index 00000000..a86aaeee
--- /dev/null
+++ b/modules/xinetd/manifests/init.pp
@@ -0,0 +1,7 @@
+class xinetd {
+ package { 'xinetd': }
+
+ service { 'xinetd':
+ subscribe => Package['xinetd']
+ }
+}
diff --git a/modules/xinetd/manifests/port_forward.pp b/modules/xinetd/manifests/port_forward.pp
new file mode 100644
index 00000000..2717466e
--- /dev/null
+++ b/modules/xinetd/manifests/port_forward.pp
@@ -0,0 +1,8 @@
+define xinetd::port_forward($target_ip, $target_port, $port, $proto = 'tcp') {
+ include xinetd
+ file { "/etc/xinetd.d/${name}":
+ require => Package['xinetd'],
+ content => template('xinetd/port_forward'),
+ notify => Service['xinetd']
+ }
+}
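+# Hypothetical usage sketch (names and addresses are examples only):
+#   xinetd::port_forward { 'forward_gitweb':
+#       target_ip   => '192.168.122.10',
+#       target_port => '80',
+#       port        => '8080',
+#       proto       => 'tcp',
+#   }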
diff --git a/modules/xinetd/manifests/service.pp b/modules/xinetd/manifests/service.pp
new file mode 100644
index 00000000..24caafd9
--- /dev/null
+++ b/modules/xinetd/manifests/service.pp
@@ -0,0 +1,9 @@
+define xinetd::service($content) {
+ include xinetd
+ file { "/etc/xinetd.d/${name}":
+ require => Package['xinetd'],
+ content => $content,
+ notify => Service['xinetd']
+ }
+}
+
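+# Hypothetical usage sketch (the template path is an example only):
+#   xinetd::service { 'rsync':
+#       content => template('mymodule/rsync.xinetd'),
+#   }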
diff --git a/modules/xinetd/templates/port_forward b/modules/xinetd/templates/port_forward
new file mode 100644
index 00000000..99518dcd
--- /dev/null
+++ b/modules/xinetd/templates/port_forward
@@ -0,0 +1,15 @@
+service <%= @name %>
+{
+ disable = no
+ type = UNLISTED
+<%- if @proto == 'tcp' -%>
+ socket_type = stream
+<%- else -%>
+ socket_type = dgram
+<%- end -%>
+ protocol = <%= @proto %>
+ user = nobody
+ wait = no
+ redirect = <%= @target_ip %> <%= @target_port %>
+ port = <%= @port %>
+}
diff --git a/modules/xymon/manifests/client.pp b/modules/xymon/manifests/client.pp
new file mode 100644
index 00000000..cfde8134
--- /dev/null
+++ b/modules/xymon/manifests/client.pp
@@ -0,0 +1,19 @@
+class xymon::client {
+ package { 'xymon-client': }
+
+ $service = 'xymon'
+
+ service { $service:
+ hasstatus => false,
+ status => "${::lib_dir}/xymon/client/runclient.sh status",
+ require => Package['xymon-client'],
+ }
+
+ # TODO replace with an exported resource
+ $server = extlookup('hobbit_server','x')
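+ # 'hobbit_server' is resolved via extlookup from the extdata CSV files;
+ # a hypothetical entry would look like: hobbit_server,xymon.mageia.org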
+ file { '/etc/sysconfig/xymon-client':
+ content => template('xymon/xymon-client'),
+ notify => Service[$service],
+ require => Package['xymon-client'],
+ }
+}
diff --git a/modules/xymon/manifests/init.pp b/modules/xymon/manifests/init.pp
new file mode 100644
index 00000000..9b609048
--- /dev/null
+++ b/modules/xymon/manifests/init.pp
@@ -0,0 +1,2 @@
+class xymon {
+}
diff --git a/modules/xymon/manifests/server.pp b/modules/xymon/manifests/server.pp
new file mode 100644
index 00000000..b6c269cf
--- /dev/null
+++ b/modules/xymon/manifests/server.pp
@@ -0,0 +1,45 @@
+class xymon::server {
+ package { ['xymon','fping']: }
+
+ File {
+ group => 'xymon',
+ require => Package['xymon'],
+ notify => Exec['service xymon reload'],
+ }
+
+ file {
+ # Defines hosts and the web view layout, and lists the tests to be run
+ # against each host (e.g. network tests from the xymon server)
+ '/etc/xymon/hosts.cfg':
+ content => template('xymon/bb-hosts');
+
+ # Environment variables used by hobbitd, hobbitlaunch, hobbitd_rrd, CGIs
+ # and bbgen (which generates the static html pages).
+ # hobbitlaunch (started by the init script) may need to be restarted for
+ # changes here; for hobbitd_rrd (e.g. TEST2RRD) it is sufficient to
+ # kill hobbitd_rrd, and hobbitlaunch will respawn it
+ '/etc/xymon/hobbitserver.cfg':
+ content => template('xymon/hobbitserver.cfg');
+
+ # Defines thresholds for test data reported by clients (e.g. load,
+ # disk, procs, ports, memory), as well as tests which require some
+ # server-side configuration for the client: files, msgs
+ '/etc/xymon/hobbit-clients.cfg':
+ content => template('xymon/hobbit-clients.cfg');
+
+ # Configuration for the xymon clients: which log files to process, etc.
+ '/etc/xymon/client-local.cfg':
+ content => template('xymon/client-local.cfg');
+
+ # Used for alerting; changes should take effect immediately
+ '/etc/xymon/hobbit-alerts.cfg':
+ content => template('xymon/hobbit-alerts.cfg');
+ }
+
+ # Most changes should take effect immediately, but sometimes threshold
+ # changes take effect sooner if hobbit is HUPd
+ exec { 'service xymon reload':
+ refreshonly => true,
+ require => Package['xymon'],
+ }
+}
diff --git a/modules/xymon/templates/bb-hosts b/modules/xymon/templates/bb-hosts
new file mode 100644
index 00000000..140932b5
--- /dev/null
+++ b/modules/xymon/templates/bb-hosts
@@ -0,0 +1,52 @@
+#
+# Master configuration file for Xymon
+#
+# This file defines several things:
+#
+# 1) By adding hosts to this file, you define hosts that are monitored by Xymon
+# 2) By adding "page", "subpage", "group" definitions, you define the layout
+# of the Xymon webpages, and how hosts are divided among the various webpages
+# that Xymon generates.
+# 3) Several other definitions can be done for each host, see the bb-hosts(5)
+# man-page.
+#
+# You need to define at least the Xymon server itself here.
+
+page visible Visible Services
+0.0.0.0 blog.<%= domain %> # sni https://blog.<%= domain %>/en/
+0.0.0.0 identity.<%= domain %> # https://identity.<%= domain %>
+0.0.0.0 bugs.<%= domain %> # https://bugs.<%= domain %>
+0.0.0.0 ml.<%= domain %> # https://ml.<%= domain %>
+0.0.0.0 www.<%= domain %> # https://www.<%= domain %>
+0.0.0.0 svnweb.<%= domain %> # https://svnweb.<%= domain %>
+0.0.0.0 epoll.<%= domain %> # https://epoll.<%= domain %>
+0.0.0.0 planet.<%= domain %> # sni https://planet.<%= domain %>/en/
+# This checks the public reverse proxy
+0.0.0.0 forums.<%= domain %> # sni https://forums.<%= domain %>=<%= @nodes_ipaddr['sucuk']['ipv4'] %>/
+0.0.0.0 check.<%= domain %> # https://check.<%= domain %>
+0.0.0.0 madb.<%= domain %> # https://madb.mageia.org
+0.0.0.0 pkgsubmit.<%= domain %> # sni https://pkgsubmit.<%= domain %>
+#0.0.0.0 bcd.<%= domain %> # http://bcd.<%= domain %>
+0.0.0.0 hugs.<%= domain %> # http://hugs.<%= domain %>
+0.0.0.0 dashboard.<%= domain %> # http://dashboard.<%= domain %>
+0.0.0.0 meetbot.<%= domain %> # sni https://meetbot.<%= domain %>
+
+
+page servers Servers
+group-compress Marseille
+212.85.158.151 sucuk.<%= domain %> # testip bbd dns smtp ssh CLIENT:xymon.<%= domain %> http://xymon.<%= domain %>
+212.85.158.148 ecosse.<%= domain %> # testip ssh
+212.85.158.150 fiona.<%= domain %> # testip ssh
+212.85.158.152 rabbit.<%= domain %> # testip ssh
+212.85.158.153 duvel.<%= domain %> # testip ssh rsync svn git ldapssl ldap
+
+group-compress VM Sucuk
+192.168.122.131 friteuse.<%= domain %> # testip ssh http://forums.<%= domain %>=<%= @nodes_ipaddr['friteuse']['ipv4'] %>/
+
+group-compress Scaleway
+163.172.148.228 neru.mageia.org # testip ssh dns ldap ldapssl smtp
+163.172.201.211 madb.mageia.org # testip
+
+# NOTE: lines with IPv6 addresses are ignored in xymon versions before 4.4 or 5.0
+group-compress Oracle cloud
+2603:c026:c101:f00::1:1 ociaa1.<%= domain %> # testip ssh
diff --git a/modules/xymon/templates/client-local.cfg b/modules/xymon/templates/client-local.cfg
new file mode 100644
index 00000000..44428778
--- /dev/null
+++ b/modules/xymon/templates/client-local.cfg
@@ -0,0 +1,131 @@
+# The client-local.cfg file contains configuration for
+# the Xymon clients running on monitored systems. When
+# clients contact the Xymon server, they get the section
+# from this file which matches their hostname or operating
+# system.
+#
+# The following configuration items are currently possible:
+# "log:FILENAME:MAXDATA"
+# Monitor the text-based logfile FILENAME, and report
+# back at most MAXDATA bytes. The Xymon client will
+# only report back entries generated during the past
+# 30 minutes, so MAXDATA is an upper limit.
+# "ignore EXPRESSION"
+# Must follow a "log:..." entry. Lines matching the
+# regular EXPRESSION are not sent to the Xymon server.
+# "trigger EXPRESSION"
+# Must follow a "log:..." entry. Lines matching the
+# regular EXPRESSION are always sent to the Xymon server.
+# Use this for extremely critical errors that must be
+# reported.
+#
+# "linecount:FILENAME"
+# Monitor the text-based logfile FILENAME, but just
+# count the number of times certain expressions appear.
+# This processes the entire file every time. It must
+# be followed by one or more lines with
+# "KEYWORD PATTERN"
+# KEYWORD identifies this count. You can use any string
+# except whitespace. PATTERN is a regular expression
+# that you want to search for in the file.
+#
+# "file:FILENAME[:hash]"
+# Monitor the file FILENAME by reporting file metadata.
+# The Xymon client will report back all of the file
+# meta-data, e.g. size, timestamp, filetype, permissions
+# etc. The optional "hash" setting is "md5", "sha1" or
+# "rmd160", and causes the Xymon client to compute a
+# file hash using the MD5, SHA-1 or RMD160 algorithm.
+# Note: Computing the hash value may be CPU-intensive,
+# so you should use this sparingly. For large-scale
+# file integrity monitoring, use a real host-based
+# IDS (Tripwire, AIDE or similar).
+#
+# "dir:DIRECTORY"
+# Monitor the size of DIRECTORY, including sub-directories.
+# This causes the Xymon client to run a "du" on DIRECTORY
+# and send this back to the Xymon server.
+# Note: Running "du" on large/deep directory structures can
+# cause a significant system load.
+#
+# NB: If FILENAME and/or DIRECTORY are of the form `COMMAND`,
+# then COMMAND is run on the client, and the lines output
+# by the command are used as the file- or directory-names.
+# This allows you to monitor files where the names change,
+# as long as you can script some way of determining the
+# interesting filenames.
+
+[sunos]
+log:/var/adm/messages:10240
+
+[osf1]
+log:/var/adm/messages:10240
+
+[aix]
+log:/var/adm/syslog/syslog.log:10240
+
+[hp-ux]
+log:/var/adm/syslog/syslog.log:10240
+
+[win32]
+
+[freebsd]
+log:/var/log/messages:10240
+
+[netbsd]
+log:/var/log/messages:10240
+
+[openbsd]
+log:/var/log/messages:10240
+
+[linux]
+log:/var/log/messages:10240
+ignore MARK
+file:/var/lib/puppet/state/state.yaml
+
+[linux22]
+log:/var/log/messages:10240
+ignore MARK
+
+[redhat]
+log:/var/log/messages:10240
+ignore MARK
+
+[debian]
+log:/var/log/messages:10240
+ignore MARK
+
+[suse]
+log:/var/log/messages:10240
+ignore MARK
+
+[mageia]
+log:/var/log/messages:10240
+ignore MARK
+
+[mandrivalinux]
+log:/var/log/messages:10240
+#log:/var/log/secure:10240
+ignore MARK
+
+[redhatAS]
+log:/var/log/messages:10240
+ignore MARK
+
+[redhatES]
+log:/var/log/messages:10240
+ignore MARK
+
+[rhel3]
+log:/var/log/messages:10240
+ignore MARK
+
+[irix]
+log:/var/adm/SYSLOG:10240
+
+[darwin]
+log:/var/log/system.log:10240
+
+[sco_sv]
+log:/var/adm/syslog:10240
+
diff --git a/modules/xymon/templates/hobbit-alerts.cfg b/modules/xymon/templates/hobbit-alerts.cfg
new file mode 100644
index 00000000..763e253d
--- /dev/null
+++ b/modules/xymon/templates/hobbit-alerts.cfg
@@ -0,0 +1,128 @@
+#
+# The hobbit-alerts.cfg file controls who receives alerts
+# when a status in the BB system goes into a critical
+# state (usually: red, yellow or purple).
+#
+# This file is made up from RULES and RECIPIENTS.
+#
+# A RULE is a filter made from the PAGE where a host
+# is located in BB; the HOST name, the SERVICE name,
+# the COLOR of the status, the TIME of day, and the
+# DURATION of the event.
+#
+# A RECIPIENT can be a MAIL address, or a SCRIPT.
+#
+# Recipients can also have rules associated with them,
+# that modify the rules for a single recipient, e.g.
+# you can define a rule for alerting, then add an
+# extra criterion, e.g. so a single recipient does not get
+# alerted until after 20 minutes.
+#
+# A sample rule:
+#
+# HOST=www.foo.com SERVICE=http
+# MAIL webadmin@foo.com REPEAT=20 RECOVERED
+# MAIL cio@foo.com DURATION>60 COLOR=red
+# SCRIPT /usr/local/bin/sendsms 1234567890 FORMAT=SMS
+#
+# The first line sets up a rule that catches alerts
+# for the host "www.foo.com" and the "http" service.
+# There are three recipients for these alerts: The first
+# one is the "webadmin@foo.com" - they get alerted
+# immediately when the status goes into an alert state,
+# and the alert is repeated every 20 minutes until it
+# recovers. When it recovers, a message is sent about
+# the recovery.
+#
+# The second recipient is "cio@foo.com". He gets alerted
+# only when the service goes "red" for more than 60 minutes.
+#
+# The third recipient is a script, "/usr/local/bin/sendsms".
+# The real recipient is "1234567890", but it is handled
+# by the script - the script receives a set of environment
+# variables with the details about the alert, including the
+# real recipient. The alert message is preformatted for
+# an SMS recipient.
+#
+# You can use Perl-compatible "regular expressions" for
+# the PAGE, HOST and SERVICE definitions, by putting a "%"
+# in front of the regex. E.g.
+#
+# HOST=%^www.*
+# MAIL webadmin@foo.com EXHOST=www.testsite.foo.com
+#
+# This sets up a rule so that alerts from any hostname
+# beginning with "www" goes to "webadmin@foo.com", EXCEPT
+# alerts from "www.testsite.foo.com"
+#
+# The following keywords are recognized:
+# PAGE - rule matching an alert by the name of the
+# page in BB. This is the name following
+# the "page", "subpage" or "subparent" keyword
+# in the bb-hosts file.
+# EXPAGE - rule excluding an alert if the pagename matches.
+# HOST - rule matching an alert by the hostname.
+# EXHOST - rule excluding an alert by matching the hostname.
+# SERVICE - rule matching an alert by the service name.
+# EXSERVICE - rule excluding an alert by matching the service name.
+# GROUP - rule matching an alert by the group ID.
+# (Group ID's are associated with a status through the
+# hobbit-clients.cfg configuration).
+# EXGROUP - rule excluding an alert by matching the group ID.
+# COLOR - rule matching an alert by color. Can be "red",
+# "yellow", or "purple".
+# TIME - rule matching an alert by the time-of-day. This
+# is specified as the DOWNTIME timespecification
+# in the bb-hosts file (see bb-hosts(5)).
+# DURATION - Rule matching an alert if the event has lasted
+# longer/shorter than the given duration. E.g.
+# DURATION>10 (lasted longer than 10 minutes) or
+# DURATION<30 (only sends alerts the first 30 minutes).
+# RECOVERED - Rule matches if the alert has recovered from an
+# alert state.
+# NOTICE - Rule matches if the message is a "notify" message
+# (typically sent when a status is enabled or disabled).
+# MAIL - Recipient who receives an e-mail alert. This takes
+# one parameter, the e-mail address.
+# SCRIPT - Recipient that invokes a script. This takes two
+# parameters: The script filename, and the recipient
+# that gets passed to the script.
+# FORMAT - format of the text message with the alert. Default
+# is "TEXT" (suitable for e-mail alerts). "SMS" is
+# a short message with no subject for SMS alerts.
+# "SCRIPT" is a brief message template for scripts.
+# REPEAT - How often an alert gets repeated, in minutes.
+# STOP - Valid for a recipient: If this recipient gets an
+# alert, recipients further down in hobbit-alerts.cfg
+# are ignored.
+# UNMATCHED - Matches if no alerts have been sent so far.
+#
+#
+# Scripts get the following environment variables pre-defined so
+# that they can send a meaningful alert:
+#
+# BBCOLORLEVEL - The color of the alert: "red", "yellow" or "purple"
+# BBALPHAMSG - The full text of the status log triggering the alert
+# ACKCODE - The "cookie" that can be used to acknowledge the alert
+# RCPT - The recipient, from the SCRIPT entry
+# BBHOSTNAME - The name of the host that the alert is about
+# MACHIP - The IP-address of the host that has a problem
+# BBSVCNAME - The name of the service that the alert is about
+# BBSVCNUM - The numeric code for the service. From SVCCODES definition.
+# BBHOSTSVC - HOSTNAME.SERVICE that the alert is about.
+# BBHOSTSVCCOMMAS - As BBHOSTSVC, but dots in the hostname replaced with commas
+# BBNUMERIC - A 22-digit number made by BBSVCNUM, MACHIP and ACKCODE.
+# RECOVERED - Is "1" if the service has recovered.
+# DOWNSECS - Number of seconds the service has been down.
+# DOWNSECSMSG - When recovered, holds the text "Event duration : N" where
+# N is the DOWNSECS value.
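+#
+# A minimal sketch of a SCRIPT recipient tying these variables together
+# (illustrative only - the script path and argument are hypothetical and
+# not configured below):
+#
+# HOST=%.* SERVICE=disk
+#       SCRIPT /usr/local/bin/notify-irc sysadmin FORMAT=SCRIPT
+#
+# Such a script would read e.g. BBHOSTSVC, BBCOLORLEVEL and BBALPHAMSG from
+# its environment to build the notification text.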
+
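+# The ERB snippet below expands "builders" to "ecosse.<domain>,rabbit.<domain>"
+# so that the relaxed cpu rule that follows applies only to the build nodes.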
+<%
+builder = ['ecosse','rabbit']
+builders = builder.map{|x| x + "." + domain }.join(',')
+%>
+HOST=<%= builders %> SERVICE=cpu
+ MAIL=sysadmin-reports@ml.<%= domain %> DURATION>6h RECOVERED NOTICE REPEAT=3h STOP
+
+HOST=%.*.<%= domain %>
+ MAIL=sysadmin-reports@ml.<%= domain %> DURATION>5 RECOVERED NOTICE REPEAT=3h
diff --git a/modules/xymon/templates/hobbit-clients.cfg b/modules/xymon/templates/hobbit-clients.cfg
new file mode 100644
index 00000000..ff010681
--- /dev/null
+++ b/modules/xymon/templates/hobbit-clients.cfg
@@ -0,0 +1,380 @@
+# hobbit-clients.cfg - configuration file for clients reporting to Xymon
+#
+# This file is used by the hobbitd_client module, when it builds the
+# cpu, disk, files, memory, msgs and procs status messages from the
+# information reported by clients running on the monitored systems.
+#
+# This file must be installed on the Xymon server - client installations
+# do not need this file.
+#
+# The file defines a series of rules:
+# UP : Changes the "cpu" status when the system has rebooted recently,
+# or when it has been running for too long.
+# LOAD : Changes the "cpu" status according to the system load.
+# CLOCK : Changes the "cpu" status if the client system clock is
+# not synchronized with the clock of the Xymon server.
+# DISK : Changes the "disk" status, depending on the amount of space
+# used of filesystems.
+# MEMPHYS: Changes the "memory" status, based on the percentage of real
+# memory used.
+# MEMACT : Changes the "memory" status, based on the percentage of "actual"
+# memory used. Note: Not all systems report an "actual" value.
+# MEMSWAP: Changes the "memory" status, based on the percentage of swap
+# space used.
+# PROC : Changes the "procs" status according to which processes were found
+# in the "ps" listing from the client.
+# LOG : Changes the "msgs" status according to entries in text-based logfiles.
+# Note: The "client-local.cfg" file controls which logfiles the client will report.
+# FILE : Changes the "files" status according to meta-data for files.
+# Note: The "client-local.cfg" file controls which files the client will report.
+# DIR : Changes the "files" status according to the size of a directory.
+# Note: The "client-local.cfg" file controls which directories the client will report.
+# PORT : Changes the "ports" status according to which tcp ports were found
+# in the "netstat" listing from the client.
+# DEFAULT: Set the default values that apply if no other rules match.
+#
+# All rules can be qualified so they apply only to certain hosts, or on certain
+# times of the day (see below).
+#
+# Each type of rule takes a number of parameters:
+# UP bootlimit toolonglimit
+# The cpu status goes yellow if the system has been up for less than
+# "bootlimit" time, or longer than "toolonglimit". The time is in
+# minutes, or you can add h/d/w for hours/days/weeks - eg. "2h" for
+# two hours, or "4w" for 4 weeks.
+# Defaults: bootlimit=1h, toolonglimit=-1 (infinite).
+#
+# LOAD warnlevel paniclevel
+# If the system load exceeds "warnlevel" or "paniclevel", the "cpu"
+# status will go yellow or red, respectively. These are decimal
+# numbers.
+# Defaults: warnlevel=5.0, paniclevel=10.0
+#
+# CLOCK maximum-offset
+# If the system clock of the client differs from that of the Xymon
+# server by more than "maximum-offset" seconds, then the CPU status
+# column will go yellow. Note that the accuracy of this test is limited,
+# since it is affected by the time it takes a client status report to
+# go from the client to the Xymon server and be processed. You should
+# therefore allow for a few seconds (5-10) of slack when you define
+# your max. offset.
+# It is not wise to use this test, unless your servers are synchronized
+# to a common clock, e.g. through NTP.
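+#   Example (illustrative only, not enabled here): tolerate up to 30 seconds
+#   of clock drift before the "cpu" column goes yellow:
+#   CLOCK 30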
+#
+# DISK filesystem warnlevel paniclevel
+# DISK filesystem IGNORE
+# If the utilization of "filesystem" is reported to exceed "warnlevel"
+# or "paniclevel", the "disk" status will go yellow or red, respectively.
+# "warnlevel" and "paniclevel" are either the percentage used, or the
+# space available as reported by the local "df" command on the host.
+# For the latter type of check, the "warnlevel" must be followed by the
+# letter "U", e.g. "1024U".
+# The special keyword "IGNORE" causes this filesystem to be ignored
+# completely, i.e. it will not appear in the "disk" status column and
+# it will not be tracked in a graph. This is useful for e.g. removable
+# devices, backup-disks and similar hardware.
+# "filesystem" is the mount-point where the filesystem is mounted, e.g.
+# "/usr" or "/home". A filesystem-name that begins with "%" is interpreted
+# as a Perl-compatible regular expression; e.g. "%^/oracle.*/" will match
+# any filesystem whose mountpoint begins with "/oracle".
+# Defaults: warnlevel=90%, paniclevel=95%
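+#   Example (illustrative only, not enabled here): warn at 85% and go red at
+#   95% on /home, and ignore anything mounted under /media:
+#   DISK /home 85 95
+#   DISK %^/media/ IGNORE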
+#
+# MEMPHYS warnlevel paniclevel
+# MEMACT warnlevel paniclevel
+# MEMSWAP warnlevel paniclevel
+# If the memory utilization exceeds the "warnlevel" or "paniclevel", the
+# "memory" status will change to yellow or red, respectively.
+# Note: The words "PHYS", "ACT" and "SWAP" are also recognized.
+# Defaults: MEMPHYS warnlevel=100 paniclevel=101 (i.e. it will never go red)
+# MEMSWAP warnlevel=50 paniclevel=80
+# MEMACT warnlevel=90 paniclevel=97
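+#   Example (illustrative only, not enabled here): go yellow at 95% and red
+#   at 99% of physical memory use:
+#   MEMPHYS 95 99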
+#
+# PROC processname minimumcount maximumcount color [TRACK=id] [TEXT=displaytext]
+# The "ps" listing sent by the client will be scanned for how many
+# processes containing "processname" are running, and this is then
+# matched against the min/max settings defined here. If the running
+# count is outside the thresholds, the color of the "procs" status
+# changes to "color".
+# To check for a process that must NOT be running: Set minimum and
+# maximum to 0.
+#
+# "processname" can be a simple string, in which case this string must
+# show up in the "ps" listing as a command. The scanner will find
+# a ps-listing of e.g. "/usr/sbin/cron" if you only specify "processname"
+# as "cron".
+# "processname" can also be a Perl-compatible regular expression, e.g.
+# "%java.*inst[0123]" can be used to find entries in the ps-listing for
+# "java -Xmx512m inst2" and "java -Xmx256 inst3". In that case,
+# "processname" must begin with "%" followed by the reg.expression.
+# If "processname" contains whitespace (blanks or TAB), you must enclose
+# the full string in double quotes - including the "%" if you use regular
+# expression matching. E.g.
+# PROC "%hobbitd_channel --channel=data.*hobbitd_rrd" 1 1 yellow
+# or
+# PROC "java -DCLASSPATH=/opt/java/lib" 2 5
+#
+# You can have multiple "PROC" entries for the same host, all of the
+# checks are merged into the "procs" status and the most severe
+# check defines the color of the status.
+#
+# The TRACK=id option causes the number of processes found to be recorded
+# in an RRD file, with "id" as part of the filename. This graph will then
+# appear on the "procs" page as well as on the "trends" page. Note that
+# "id" must be unique among the processes tracked for each host.
+#
+# The TEXT=displaytext option affects how the process appears on the
+# "procs" status page. By default, the process is listed with the
+# "processname" as identification, but if this is a regular expression
+# it may be a bit difficult to understand. You can then use e.g.
+# "TEXT=Apache" to make these processes appear with the name "Apache"
+# instead.
+#
+# Defaults: mincount=1, maxcount=-1 (unlimited), color="red".
+# Note: No processes are checked by default.
+#
+# Example: Check that "cron" is running:
+# PROC cron
+# Example: Check that at least 5 "httpd" processes are running, but
+# not more than 20:
+# PROC httpd 5 20
+#
+# LOG filename match-pattern [COLOR=color] [IGNORE=ignore-pattern] [TEXT=displaytext]
+# In the "client-local.cfg" file, you can list any number of files
+# that the client will collect log data from. These are sent to the
+# Xymon server together with the other client data, and you can then
+# choose how to analyze the log data with LOG entries.
+#
+# ************ IMPORTANT ***************
+# To monitor a logfile, you *MUST* configure both client-local.cfg
+# and hobbit-clients.cfg. If you configure only the client-local.cfg
+# file, the client will collect the log data and you can view it in
+# the "client data" display, but it will not affect the color of the
+# "msgs" status. On the other hand, if you configure only the
+# hobbit-clients.cfg file, then there will be no log data to inspect,
+# and you will not see any updates of the "msgs" status either.
+#
+# "filename" is a filename or pattern. The set of files reported by
+# the client is matched against "filename", and if they match then
+# this LOG entry is processed against the data from a file.
+#
+# "match-pattern": The log data is matched against this pattern. If
+# there is a match, this log file causes a status change to "color".
+#
+# "ignore-pattern": The log data that matched "match-pattern" is also
+# matched against "ignore-pattern". If the data matches the "ignore-pattern",
+# this line of data does not affect the status color. In other words,
+# the "ignore-pattern" can be used to refine the strings which cause
+# a match.
+# Note: The "ignore-pattern" is optional.
+#
+# "color": The color which this match will trigger.
+# Note: "color" is optional, if omitted then "red" will be used.
+#
+# Example: Go yellow if the text "WARNING" shows up in any logfile.
+# LOG %.* WARNING COLOR=yellow
+#
+# Example: Go red if the text "I/O error" or "read error" appears.
+# LOG %/var/(adm|log)/messages %(I/O|read).error COLOR=red
+#
+# FILE filename [color] [things to check] [TRACK]
+# NB: The files you wish to monitor must be listed in a "file:..."
+# entry in the client-local.cfg file, in order for the client to
+# report any data about them.
+#
+# "filename" is a filename or pattern. The set of files reported by
+# the client is matched against "filename", and if they match then
+# this FILE entry is processed against the data from that file.
+#
+# [things to check] can be one or more of the following:
+# - "NOEXIST" triggers a warning if the file exists. By default,
+# a warning is triggered for files that have a FILE entry, but
+# which do not exist.
+# - "TYPE=type" where "type" is one of "file", "dir", "char", "block",
+# "fifo", or "socket". Triggers warning if the file is not of the
+# specified type.
+# - "OWNERID=owner" and "GROUPID=group" triggers a warning if the owner
+# or group does not match what is listed here. "owner" and "group" is
+# specified either with the numeric uid/gid, or the user/group name.
+# - "MODE=mode" triggers a warning if the file permissions are not
+# as listed. "mode" is written in the standard octal notation, e.g.
+# "644" for the rw-r--r-- permissions.
+# - "SIZE<max.size" and "SIZE>min.size" triggers a warning it the file
+# size is greater than "max.size" or less than "min.size", respectively.
+# You can append "K" (KB), "M" (MB), "G" (GB) or "T" (TB) to the size.
+# If there is no such modifier, KB is assumed.
+# E.g. to warn if a file grows larger than 1MB (1024 KB): "SIZE<1M".
+# - "SIZE=size" triggers a warning it the file size is not what is listed.
+# - "MTIME>min.mtime" and "MTIME<max.mtime" checks how long ago the file
+# was last modified (in seconds). E.g. to check if a file was updated
+# within the past 10 minutes (600 seconds): "MTIME<600". Or to check
+# that a file has NOT been updated in the past 24 hours: "MTIME>86400".
+# - "MTIME=timestamp" checks if a file was last modified at "timestamp".
+# "timestamp" is a unix epoch time (seconds since midnight Jan 1 1970 UTC).
+# - "CTIME>min.ctime", "CTIME<max.ctime", "CTIME=timestamp" acts as the
+# mtime checks, but for the ctime timestamp (when the files' directory
+# entry was last changed, eg. by chown, chgrp or chmod).
+# - "MD5=md5sum", "SHA1=sha1sum", "RMD160=rmd160sum" trigger a warning
+# if the file checksum using the MD5, SHA1 or RMD160 message digest
+# algorithms do not match the one configured here. Note: The "file"
+# entry in the client-local.cfg file must specify which algorithm to use.
+#
+# "TRACK" causes the size of this file to be tracked in an RRD file, and
+# shown on the graph on the "files" display.
+#
+# Example: Check that the /var/log/messages file is not empty and was updated
+# within the past 10 minutes, and go yellow if either fails:
+# FILE /var/log/messages SIZE>0 MTIME<600 yellow
+#
+# Example: Check the timestamp, size and SHA-1 hash of the /bin/sh program:
+# FILE /bin/sh MTIME=1128514608 SIZE=645140 SHA1=5bd81afecf0eb93849a2fd9df54e8bcbe3fefd72
+#
+# DIR directory [color] [SIZE<maxsize] [SIZE>minsize] [TRACK]
+# NB: The directories you wish to monitor must be listed in a "dir:..."
+# entry in the client-local.cfg file, in order for the client to
+# report any data about them.
+#
+# "directory" is a filename or pattern. The set of directories reported by
+# the client is matched against "directory", and if they match then
+# this DIR entry is processed against the data for that directory.
+#
+# "SIZE<maxsize" and "SIZE>minsize" defines the size limits that the
+# directory must stay within. If it goes outside these limits, a warning
+# will trigger. Note that Xymon uses the raw number reported by the
+# local "du" command on the client. This is commonly KB, but it may be
+# disk blocks, which are often 512 bytes.
+#
+# "TRACK" causes the size of this directory to be tracked in an RRD file,
+# and shown on the graph on the "files" display.
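+#
+#   Example (illustrative only, not enabled here): warn if /var/spool/mqueue
+#   grows above 1048576 (about 1 GB if the client's "du" reports KB), and
+#   graph its size:
+#   DIR /var/spool/mqueue SIZE<1048576 TRACK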
+#
+# PORT [LOCAL=addr] [EXLOCAL=addr] [REMOTE=addr] [EXREMOTE=addr] [STATE=state] [EXSTATE=state] [MIN=mincount] [MAX=maxcount] [COLOR=color] [TRACK=id] [TEXT=displaytext]
+# The "netstat" listing sent by the client will be scanned for how many
+# sockets match the criteria listed.
+# "addr" is a (partial) address specification in the format used on
+# the output from netstat. This is typically "10.0.0.1:80" for the IP
+# 10.0.0.1, port 80. Or "*:80" for any local address, port 80.
+# NB: The Xymon clients normally report only the numeric data for
+# IP-addresses and port-numbers, so you must specify the port
+# number (e.g. "80") instead of the service name ("www").
+# "state" causes only the sockets in the specified state to be included;
+# it is usually LISTEN or ESTABLISHED.
+# The socket count is then matched against the min/max settings defined
+# here. If the count is outside the thresholds, the color of the "ports"
+# status changes to "color".
+# To check for a socket that must NOT exist: Set minimum and
+# maximum to 0.
+#
+# "addr" and "state" can be a simple strings, in which case these string must
+# show up in the "netstat" at the appropriate column.
+# "addr" and "state" can also be a Perl-compatible regular expression, e.g.
+# "LOCAL=%(:80|:443)" can be used to find entries in the netstat local port for
+# both http (port 80) and https (port 443). In that case, portname or state must
+# begin with "%" followed by the reg.expression.
+#
+# The TRACK=id option causes the number of sockets found to be recorded
+# in an RRD file, with "id" as part of the filename. This graph will then
+# appear on the "ports" page as well as on the "trends" page. Note that
+# "id" must be unique among the ports tracked for each host.
+#
+# The TEXT=displaytext option affects how the port appears on the
+# "ports" status page. By default, the port is listed with the
+# local/remote/state rules as identification, but this may be somewhat
+# difficult to understand. You can then use e.g. "TEXT=Secure Shell" to make
+# these ports appear with the name "Secure Shell" instead.
+#
+# Defaults: state="LISTEN", mincount=1, maxcount=-1 (unlimited), color="red".
+# Note: No ports are checked by default.
+#
+# Example: Check that there is someone listening on the https port:
+# PORT "LOCAL=%([.:]443)$" state=LISTEN TEXT=https
+#
+# Example: Check that at least 5 "ssh" connections are established, but
+# not more than 20; warn but do not error; graph the connection count:
+# PORT "LOCAL=%([.:]22)$" state=ESTABLISHED min=5 max=20 color=yellow TRACK=ssh "TEXT=SSH logins"
+#
+# Example: Check that ONLY ports 22, 80 and 443 are open for incoming connections:
+# PORT STATE=LISTEN LOCAL=%0.0.0.0[.:].* EXLOCAL=%[.:](22|80|443)$ MAX=0 "TEXT=Bad listeners"
+#
+#
+# To apply rules to specific hosts, you can use the "HOST=", "EXHOST=", "PAGE=",
+# "EXPAGE=", "CLASS=" or "EXCLASS=" qualifiers. (These act just as in the
+# hobbit-alerts.cfg file).
+#
+# Hostnames are either a comma-separated list of hostnames (from the bb-hosts file),
+# "*" to indicate "all hosts", or a Perl-compatible regular expression.
+# E.g. "HOST=dns.foo.com,www.foo.com" identifies two specific hosts;
+# "HOST=%www.*.foo.com EXHOST=www-test.foo.com" matches all hosts with a name
+# beginning with "www", except the "www-test" host.
+# "PAGE" and "EXPAGE" match the hostnames against the page on where they are
+# located in the bb-hosts file, via the bb-hosts' page/subpage/subparent
+# directives. This can be convenient to pick out all hosts on a specific page.
+#
+# Rules can be dependent on time-of-day, using the standard Xymon syntax
+# (see bb-hosts(5) about the NKTIME parameter). E.g. "TIME=W:0800:2200"
+# applied to a rule will make this rule active only on week-days between
+# 8AM and 10PM.
+#
+# You can also associate a GROUP id with a rule. The group-id is passed to
+# the alert module, which can then use it to control who gets an alert when
+# a failure occurs. E.g. the following associates the "httpd" process check
+# with the "web" group, and the "sshd" check with the "admins" group:
+# PROC httpd 5 GROUP=web
+# PROC sshd 1 GROUP=admins
+# In the hobbit-alerts.cfg file, you could then have rules like
+# GROUP=web
+# MAIL webmaster@foo.com
+# GROUP=admins
+# MAIL root@foo.com
+#
+# Qualifiers must be placed after each rule, e.g.
+# LOAD 8.0 12.0 HOST=db.foo.com TIME=*:0800:1600
+#
+# If you have multiple rules that you want to apply the same qualifiers to,
+# you can write the qualifiers *only* on one line, followed by the rules. E.g.
+# HOST=%db.*.foo.com TIME=W:0800:1600
+# LOAD 8.0 12.0
+# DISK /db 98 100
+# PROC mysqld 1
+# will apply the three rules to all of the "db" hosts on week-days between 8AM
+# and 4PM. This can be combined with per-rule qualifiers, in which case the
+# per-rule qualifier overrides the general qualifier; e.g.
+# HOST=%.*.foo.com
+# LOAD 7.0 12.0 HOST=bax.foo.com
+# LOAD 3.0 8.0
+# will result in the load-limits being 7.0/12.0 for the "bax.foo.com" host,
+# and 3.0/8.0 for all other foo.com hosts.
+#
+# The special DEFAULT section can modify the built-in defaults - this must
+# be placed at the end of the file.
+
+HOST=rabbit.<%= domain %>
+ DISK %.*stage2$ IGNORE
+
+# ecosse has 24 cores, is a builder, and we try to use them all
+HOST=ecosse.<%= domain %>
+ LOAD 36.0 48.0
+
+# rabbit has 12 cores and mksquashfs uses all of them
+HOST=rabbit.<%= domain %>
+ LOAD 18.0 24.0
+
+# duvel has 24 cores, don't trigger alarms too soon
+HOST=duvel.<%= domain %>
+ LOAD 18.0 24.0
+ DISK /var/lib/binrepo 95 98
+ DISK /var/www 95 98
+
+DEFAULT
+ # These are the built-in defaults.
+ UP 1h
+ LOAD 5.0 10.0
+ DISK %^/mnt/cdrom 101 101
+ DISK * 90 95
+ MEMPHYS 100 101
+ MEMSWAP 50 80
+ MEMACT 90 97
+ CLOCK 60
+ FILE /var/lib/puppet/state/state.yaml yellow mtime<5400
+ PORT state=LISTEN "LOCAL=%([.:]22)$" MIN=1 TEXT=ssh
+ PROC puppetd 0 3 red
+ # 10 , just in case something goes wrong
+ PROC crond 1 10 red
diff --git a/modules/xymon/templates/hobbitserver.cfg b/modules/xymon/templates/hobbitserver.cfg
new file mode 100644
index 00000000..a5a7aacf
--- /dev/null
+++ b/modules/xymon/templates/hobbitserver.cfg
@@ -0,0 +1,230 @@
+# NB : Even though it might look like a shell-script, it is NOT.
+#
+BBSERVERROOT="<%= lib_dir %>/xymon" # Where Xymon is installed - holds the server and bbvar sub-dirs.
+BBSERVERLOGS="/var/log/xymon" # Directory for server logs. The hobbit user must have write-access here.
+HOBBITCLIENTHOME="<%= lib_dir %>/xymon/client" # BBHOME directory for the client
+
+
+BBSERVERHOSTNAME="sucuk.<%= domain %>" # The hostname of your server
+BBSERVERIP="<%= ipaddress %>" # The IP-address of your server. Use the real one, not 127.0.0.1 .
+BBSERVEROS="linux" # The operating system of your server. linux,freebsd,solaris,hpux,aix,osf
+
+BBSERVERWWWNAME="xymon.<%= domain %>" # The name used for this hosts' webserver
+BBSERVERWWWURL="/xymon" # The top URL for the Xymon webpages
+BBSERVERCGIURL="/xymon-cgi" # The URL for the Xymon CGI scripts.
+BBSERVERSECURECGIURL="/xymon-seccgi" # The URL for the secured Xymon CGI scripts.
+
+# BBLOCATION="foo" # The network location, makes bbtest-net test only hosts with NET:foo
+ # You only need to set this if you have multiple network test servers with
+ # a shared bb-hosts file.
+
+# Make sure the path includes the directories where you have fping, mail and (optionally) ntpdate installed,
+# as well as the BBHOME/bin directory where all of the Xymon programs reside.
+PATH="/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin:/usr/lib64/xymon/server/bin"
+
+# Some systems need extra settings e.g. to locate run-time libraries.
+# You can add these extra settings here:
+
+# fix error message from jonund :
+# 2011-07-17 15:32:54 Oversize status msg from
+# 212.85.158.149 for jonund.mageia.org:procs truncated (n=350049, limit=262144)
+#
+# https://en.wikibooks.org/wiki/System_Monitoring_with_Xymon/Other_Docs/FAQ#Q._How_do_I_fix_.22Oversize_status_msg_from_192.168.1.31_for_test.my.com:ports_truncated_.28n.3D508634.2C_limit.3D262144.29.22
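+# Note: MAXMSG_STATUS is given in kB; the default of 256 kB corresponds to
+# the 262144-byte limit quoted in the message above.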
+MAXMSG_STATUS="496"
+
+##### Normally you do not need to modify anything below this point #####
+
+# General settings
+BBPORT="1984" # Portnumber where hobbitd/bbd listens
+BBDISP="$BBSERVERIP" # IP of a single hobbit/bbd server
+BBDISPLAYS="" # IP of multiple hobbit/bbd servers. If used, BBDISP must be 0.0.0.0
+FQDN="TRUE" # Use fully-qualified hostnames internally. Keep it TRUE unless you know better.
+BBGHOSTS="1" # How to handle status messages from unknown hosts.
+ # 0=accept message, 1=discard message, 2=discard message and log the event
+BBLOGSTATUS="DYNAMIC" # Are HTML status logs statically or dynamically generated?
+ # Use DYNAMIC with Xymon, unless you run hobbitd_filestore --status --html
+
+PINGCOLUMN="conn" # Column where the ping-test reports results.
+INFOCOLUMN="info" # Column where the info-pages are reported.
+TRENDSCOLUMN="trends" # Column where the RRD graphs are reported.
+
+BBMAXMSGSPERCOMBO="100" # How many individual messages to combine in a combo-message. 0=unlimited.
+BBSLEEPBETWEENMSGS="0" # Delay between sending each combo message, in milliseconds.
+
+
+# Specific to this host
+BBOSTYPE="$BBSERVEROS" # Hosttype (operating system). Not used by server-side, but clients use this.
+MACHINEDOTS="$BBSERVERHOSTNAME" # This systems hostname
+MACHINEADDR="$BBSERVERIP" # This systems IP-address
+
+# URL's generated/used by bbgen
+BBWEBHOST="https://$BBSERVERWWWNAME" # Just the host part of the URL - http://www.foo.com
+BBWEBHOSTURL="$BBWEBHOST$BBSERVERWWWURL" # Prefix for all static Xymon pages - http://www.foo.com/bb
+BBWEBHTMLLOGS="$BBWEBHOSTURL/html" # Prefix for the Xymon HTML logs (only if BBLOGSTATUS=STATIC)
+BBWEB="$BBSERVERWWWURL" # Xymon URL prefix without the host part
+BBSKIN="$BBSERVERWWWURL/gifs" # Xymon URL prefix for the GIF files
+BBHELPSKIN="$BBSERVERWWWURL/help" # Xymon URL prefix for the online help files.
+BBNOTESSKIN="$BBSERVERWWWURL/notes" # Xymon URL prefix for the online notes-files.
+BBMENUSKIN="$BBSERVERWWWURL/menu" # Xymon URL prefix for the webpage menu files.
+BBREPURL="$BBSERVERWWWURL/rep" # Xymon URL prefix for the Xymon availability reports
+BBSNAPURL="$BBSERVERWWWURL/snap" # Xymon URL prefix for the Xymon snapshots
+BBWAP="$BBSERVERWWWURL/wml" # Xymon URL prefix for the WAP/WML files.
+CGIBINURL="$BBSERVERCGIURL" # URL prefix for the Xymon CGI-scripts - /cgi-bin
+SECURECGIBINURL="$BBSERVERSECURECGIURL" # URL prefix for the secured Xymon CGI-scripts - /cgi-secure
+
+# Locations of system-wide files and directories
+BBHOME="<%= lib_dir %>/xymon/server" # The Xymon server directory, where programs and configurations go.
+BBTMP="$BBHOME/tmp" # Directory used for temporary files.
+BBHOSTS="$BBHOME/etc/bb-hosts" # The bb-hosts file
+BB="$BBHOME/bin/bb" # The 'bb' client program
+BBGEN="$BBHOME/bin/bbgen" # The bbgen program
+
+# Server specific directories
+BBVAR="/var/lib/xymon" # The bbvar directory holds all monitoring data
+BBACKS="$BBVAR/acks" # Acknowledge event info stored here (hobbitd_alert)
+BBDATA="$BBVAR/data" # Data files go here (hobbitd_filestore --data)
+BBDISABLED="$BBVAR/disabled" # Enabled/disabled flags are stored here (hobbitd_filestore --enadis)
+BBHIST="$BBVAR/hist" # History logs are stored here (hobbitd_history)
+BBHISTLOGS="$BBVAR/histlogs"                 # Historical detail status-logs are stored here (hobbitd_history)
+BBLOGS="$BBVAR/logs" # Status logs go here (hobbitd_filestore --status). Not needed by Xymon.
+BBWWW="$BBHOME/www" # The directory for Xymon webpage files.
+BBHTML="$BBWWW/html" # HTML status logs go here (hobbitd_filestore --status --html)
+BBNOTES="$BBWWW/notes" # For notes-files (hobbitd_filestore --notes)
+BBREP="$BBWWW/rep" # Top-level directory for Xymon reports.
+BBSNAP="$BBWWW/snap" # Top-level directory for Xymon snapshots.
+
+# For the hobbitd_history module
+BBALLHISTLOG="TRUE" # Save a common log of all events (used for the bb2 webpage)
+BBHOSTHISTLOG="TRUE" # Save a log of all events for a host (not used by any tool currently)
+SAVESTATUSLOG="TRUE" # Save the detailed status log each time the status changes.
+
+# For the hobbitd_alert module
+FROM="root@<%= domain %>"
+MAILC="mail -r $FROM" # Command used to send an e-mail with no subject
+MAIL="$MAILC -s" # Command used to send an e-mail with a subject
+SVCCODES="disk:100,cpu:200,procs:300,svcs:350,msgs:400,conn:500,http:600,dns:800,smtp:725,telnet:723,ftp:721,pop:810,pop3:810,pop-3:810,ssh:722,imap:843,ssh1:722,ssh2:722,imap2:843,imap3:843,imap4:843,pop2:809,pop-2:809,nntp:819,test:901"
+ALERTCOLORS="red,yellow,purple" # Colors that may trigger an alert message
+OKCOLORS="green,blue,clear" # Colors that may trigger a recovery message
+ALERTREPEAT="30" # The default interval between repeated alert-messages (in minutes)
+
+# For bbtest-net
+CONNTEST="TRUE" # Should we 'ping' hosts ?
+IPTEST_2_CLEAR_ON_FAILED_CONN="TRUE" # If TRUE, then failing network tests go CLEAR if conn-test fails.
+NONETPAGE="" # Network tests that go YELLOW upon failure
+FPING="/bin/fping -Ae" # Path and options for the ping program.
+NTPDATE="ntpdate" # Path to the 'ntpdate' program
+TRACEROUTE="traceroute" # How to do traceroute on failing ping tests. Requires "trace" in bb-hosts.
+BBROUTERTEXT="router" # What to call a failing intermediate network device.
+NETFAILTEXT="not OK" # Text indicating a network test failed
+
+
+# Settings for the RRD graphs
+
+# Top level directory for the RRD files
+BBRRDS="$BBVAR/rrd"
+
+# Size of the generated graph images
+RRDHEIGHT="120"
+RRDWIDTH="576" # The RRD's contain 576 data points, so this is a good value
+
+# TEST2RRD defines the status- and data-messages you want to collect RRD data
+# about. You will normally not need to modify this, unless you have added a
+# script to pick up RRD data from custom tests (the hobbitd_larrd --extra-script
+# and --extra-tests options).
+# Note that network tests defined in the bb-services file are automatically
+# included.
+# The format here is "COLUMN=RRDSERVICE". If you leave out the "=RRDSERVICE"
+# part, it is assumed to be the same as the COLUMN value.
+#
+# This is also used by the bb-hostsvc.cgi script to determine if the detailed
+# status view of a test should include a graph.
+TEST2RRD="cpu=la,disk,inode,qtree,memory,$PINGCOLUMN=tcp,http=tcp,dns=tcp,dig=tcp,time=ntpstat,vmstat,iostat,netstat,temperature,apache,bind,sendmail,mailq,nmailq=mailq,socks,bea,iishealth,citrix,bbgen,bbtest,bbproxy,hobbitd,files,procs=processes,ports,clock,lines,ops,stats,cifs,JVM,JMS,HitCache,Session,JDBCConn,ExecQueue,JTA,TblSpace,RollBack,MemReq,InvObj,snapmirr,snaplist,snapshot,if_load=devmon,temp=devmon"
+
+# This defines which RRD files to include on the "trends" column webpage,
+# and the order in which they appear.
+GRAPHS="la,disk,inode,qtree,files,processes,memory,users,vmstat,iostat,tcp.http,tcp,ncv,netstat,ifstat,mrtg::1,ports,temperature,ntpstat,apache,bind,sendmail,mailq,socks,bea,iishealth,citrix,bbgen,bbtest,bbproxy,hobbitd,clock,lines,ops,stats,cifs,JVM,JMS,HitCache,Session,JDBCConn,ExecQueue,JTA,TblSpace,RollBack,MemReq,InvObj,snapmirr,snaplist,snapshot,devmon::1,if_load::1,temp"
+
+# These two settings can be used to restrict what filesystems are being
+# tracked (i.e. have their utilisation graphed) by Xymon.
+# NORRDDISKS="" # Filesystems that will NOT be tracked
+# RRDDISKS="" # Only track these filesystems
+
+
+############################################################
+# These determine some parts of how bbgen generates webpages
+############################################################
+BBGENOPTS="--recentgifs --subpagecolumns=2" # Standard options for bbgen.
+SUMMARY_SET_BKG="FALSE" # Do summaries affect the background color of the BB webpage ?
+BBMKBB2EXT="eventlog.sh acklog.sh" # What extensions to have on the BB2 page.
+DOTHEIGHT="16" # Height (in pixels) of the color GIF's
+DOTWIDTH="16" # Width (in pixels) of the color GIF's
+COLUMNDOCURL="$CGIBINURL/hobbitcolumn.sh?%s" # URL formatting string for column-links
+
+# HTML content
+HTMLCONTENTTYPE="text/html" # You can add charset options here.
+
+# Fonts and texts
+HOBBITLOGO="Mageia monitoring" # HTML inserted on all header pages at top-left corner.
+MKBBLOCAL="<B><I>Pages Hosted Locally</I></B>"
+MKBBREMOTE="<B><I>Remote Status Display</I></B>"
+MKBBSUBLOCAL="<B><I>Subpages Hosted Locally</I></B>"
+MKBBACKFONT="COLOR=\"#33ebf4\" SIZE=\"-1\"" # Size and color of the 'Current acknowledgement...' text in the html log.
+MKBBCOLFONT="COLOR=\"#87a9e5\" SIZE=\"-1\"" # Size and color of the column headings text
+MKBBROWFONT="SIZE=\"+1\" COLOR=\"#FFFFCC\" FACE=\"Tahoma, Arial, Helvetica\"" # Size,color,font of text in each row (hostname)
+MKBBTITLE="COLOR=\"#FFFFF0\" SIZE=\"+1\"" # Size and color of the BB titles (the old "ivory" is invalid HTML)
+BBDATEFORMAT="%a %b %d %H:%M:%S %Y" # Date format
+BBRSSTITLE="Xymon Alerts" # Title for the RSS and WML outputs.
+ACKUNTILMSG="Next update at: %H:%M %Y-%m-%d" # strftime format for the acknowledgement status display.
+
+# For WML output
+WMLMAXCHARS="1500" # Max number of bytes in a WAP message
+
+# For BB reports
+BBREPWARN="97" # Default availability causing yellow status on availability report.
+BBREPGREEN="99.995" # Default availability causing green status on availability report.
+BBGENREPOPTS="$BBGENOPTS" # bbgen(1) options used when generating availability reports.
+BBREPEXT="" # What extensions to run on report pages.
+
+# For BB snapshots
+BBGENSNAPOPTS="$BBGENOPTS" # bbgen(1) options used when generating snapshots.
+
+# For the bb-hist CGI
+BBHISTEXT="" # What extensions to run on history pages.
+
+
+# The following defines a bunch of commands that BB extensions expect to be present.
+# Hobbit does not use them, but they are provided here so if you use BB extension
+# scripts, then they will hopefully run without having to do a lot of tweaking.
+
+UPTIME="/usr/bin/uptime"
+AWK="/usr/bin/awk"
+CAT="/bin/cat"
+CP="/bin/cp"
+CUT="/usr/bin/cut"
+DATE="/bin/date"
+EGREP="/bin/egrep"
+EXPR="/usr/bin/expr"
+FIND="/usr/bin/find"
+GREP="/bin/grep"
+HEAD="/usr/bin/head"
+ID="/bin/id"
+LN="/bin/ln"
+LS="/bin/ls"
+MV="/bin/mv"
+RM="/bin/rm"
+SED="/bin/sed"
+SORT="/bin/sort"
+TAIL="/usr/bin/tail"
+TOP="/usr/bin/top"
+TOUCH="/bin/touch"
+TR="/usr/bin/tr"
+UNIQ="/usr/bin/uniq"
+WHO="/usr/bin/who"
+WC="/usr/bin/wc -l"
+WCC="/usr/bin/wc"
+# DF,DFCMD and PS are for compatibility only, NOT USED by the Hobbit client
+DF="/bin/df -Pk"
+DFCMD="/bin/df -Pk"
+PS="ps ax"
+
+MAXLINE="32768"
diff --git a/modules/xymon/templates/xymon-client b/modules/xymon/templates/xymon-client
new file mode 100644
index 00000000..e846d2a5
--- /dev/null
+++ b/modules/xymon/templates/xymon-client
@@ -0,0 +1,21 @@
+# Configure the Hobbit client settings.
+
+# You MUST set the list of Xymon servers that this
+# client reports to.
+# It is good to use IP-addresses here instead of DNS
+# names - DNS might not work if there's a problem.
+#
+# E.g. (a single Xymon server)
+# XYMONSERVERS="192.168.1.1"
+# or (multiple servers)
+# XYMONSERVERS="10.0.0.1 192.168.1.1"
+XYMONSERVERS="<%= server %>"
+
+# The defaults usually suffice for the rest of this file,
+# but you can tweak the hostname that the client reports
+# data with, and the OS name used (typically needed only on
+# RHEL or RHAS servers).
+
+# CLIENTHOSTNAME=""
+# CLIENTOS="rhel3"
+
diff --git a/modules/youri-check/manifests/init.pp b/modules/youri-check/manifests/init.pp
new file mode 100644
index 00000000..ebdaa492
--- /dev/null
+++ b/modules/youri-check/manifests/init.pp
@@ -0,0 +1,133 @@
+class youri-check {
+ class base {
+ $vhost = "check.${::domain}"
+ $user = 'youri'
+ $home = '/var/lib/youri'
+ $home_check = '/var/www/youri-check'
+ $pgsql_password = extlookup('youri_pgsql','x')
+
+ user { $user:
+ comment => 'Youri Check',
+ home => $home,
+ }
+
+ file { $home:
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ }
+
+ file { $home_check:
+ ensure => directory,
+ owner => $user,
+ group => $user,
+ }
+
+ $pgsql_server = "${vhost}"
+
+ package { ['youri-check', 'perl-DBD-Pg', 'perl-Youri-Media']: }
+
+ }
+
+
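+  # The defines below are meant to be declared once per checked release.
+  # A hypothetical example (the real declarations live in the node
+  # manifests, not in this module):
+  #   youri-check::config { 'cauldron': version => 'cauldron' }
+  #   youri-check::check  { 'cauldron': version => 'cauldron' }
+  #   youri-check::report { 'cauldron': version => 'cauldron' }
+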
+ define config($version) {
+ include stdlib
+ include youri-check::base
+
+ $config = "/etc/youri/${version}.conf"
+ $outdir = "/var/www/youri-check/${version}"
+ $pgsql_db = "youri_check_${version}"
+ $pgsql_server = $base::pgsql_server
+ $pgsql_user = "youri${version}"
+ $pgsql_password = extlookup('youri_pgsql','x')
+ # We want to alert for packages older than the cut-off for latest mass rebuild
+ # 1745539200 is 2025-04-25
+ $max_days = (time() - 1745539200)/(24*3600)
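+    # e.g. when run 60 days after that cut-off (2025-06-24 UTC, epoch
+    # 1750723200), this evaluates to (1750723200 - 1745539200) / 86400 = 60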
+
+ file { "${config}":
+ ensure => present,
+ owner => $base::user,
+ mode => '0640',
+ content => template("youri-check/${version}.conf"),
+ require => User[$base::user],
+ }
+ }
+
+
+ define createdb_user($version) {
+ $pgsql_db = "youri_check_${version}"
+ $pgsql_user = "youri${version}"
+ $pgsql_password = extlookup('youri_pgsql','x')
+
+ postgresql::remote_user { $pgsql_user:
+ password => $base::pgsql_password,
+ }
+
+ postgresql::remote_database { $pgsql_db:
+ description => "Youri Check results",
+ user => $pgsql_user,
+ }
+ }
+
+ define check($version, $hour = "*", $minute = 0) {
+ include youri-check::base
+ $config = "/etc/youri/${version}.conf"
+ $pgsql_server = $base::pgsql_server
+ $pgsql_db = "youri_check_${version}"
+ $pgsql_user = "youri${version}"
+ $pgsql_password = extlookup('youri_pgsql','x')
+
+ postgresql::remote_user { $pgsql_user:
+ password => $base::pgsql_password,
+ }
+
+ postgresql::remote_database { $pgsql_db:
+ description => "Youri Check results",
+ user => $pgsql_user,
+ }
+ cron { "check_${version}":
+ command => "youri-check -c ${config} --parallel test",
+ hour => $hour,
+ minute => $minute,
+ user => $base::user,
+ environment => "MAILTO=root",
+ require => User[$base::user],
+ }
+ }
+
+ define report_www {
+ include youri-check::base
+ $outdir = "/var/www/youri-check/"
+ apache::vhost::base { $base::vhost:
+ location => $outdir,
+ content => template('youri-check/vhost_check.conf'),
+ }
+ apache::vhost::base { "ssl_${base::vhost}":
+ vhost => $base::vhost,
+ use_ssl => true,
+ location => $outdir,
+ content => template('youri-check/vhost_check.conf'),
+ }
+ }
+
+ define report($version, $hour = "*", $minute = 20) {
+ include youri-check::base
+
+ $config = "/etc/youri/${version}.conf"
+
+ $outdir = "/var/www/youri-check/${version}"
+ file { "${outdir}":
+ ensure => directory,
+ owner => $base::user,
+ mode => '0755',
+ }
+
+ cron { "check_${version}":
+ command => "youri-check -c ${config} report",
+ hour => $hour,
+ minute => $minute,
+ user => $base::user,
+ require => User[$base::user],
+ }
+ }
+}
diff --git a/modules/youri-check/templates/9.conf b/modules/youri-check/templates/9.conf
new file mode 100644
index 00000000..28028080
--- /dev/null
+++ b/modules/youri-check/templates/9.conf
@@ -0,0 +1,241 @@
+# vim:ft=yaml:et:sw=4
+
+# helper variables
+mirror: http://repository.mageia.org/distrib/9
+mirror_i586: ${mirror}/i586/media
+mirror_x86_64: ${mirror}/x86_64/media
+
+# resultset definition
+resultset:
+ class: Youri::Check::Resultset::DBI
+ options:
+ driver: Pg
+ host: <%= pgsql_server %>;sslmode=require
+ base: <%= pgsql_db %>
+ user: <%= pgsql_user %>
+ pass: <%= pgsql_password %>
+
+resolver:
+ class: Youri::Check::Maintainer::Resolver::CGI
+ options:
+ url: https://pkgsubmit.<%= domain %>/data/maintdb.txt
+ exceptions:
+ - nobody
+
+
+# checks definitions
+tests:
+ dependencies:
+ class: Youri::Check::Test::Dependencies
+
+ missing:
+ class: Youri::Check::Test::Missing
+
+# reports definitions
+reports:
+ file:
+ class: Youri::Check::Report::File
+ options:
+ to: <%= outdir %>
+ global: 1
+ individual: 1
+ formats:
+ html:
+ class: Youri::Check::Report::Format::HTML
+ text:
+ class: Youri::Check::Report::Format::Text
+ rss:
+ class: Youri::Check::Report::Format::RSS
+
+# media definitions
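+# Each medium below carries two "options" blocks: the first describes the
+# medium itself (name, type, hdlist), the second tunes the tests for it -
+# the "allowed" lists name the media that may satisfy its dependencies and,
+# for the "missing" test, the source media where its sources are expected.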
+medias:
+ core.i586:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: binary
+ hdlist: ${mirror_i586}/media_info/hdlist_core.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i586
+ missing:
+ allowed:
+ - core.sources
+
+ core_updates.i586:
+ class: Youri::Media::URPM
+ options:
+ name: core_updates
+ type: binary
+ hdlist: ${mirror_i586}/media_info/hdlist_core_updates.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i586
+ - core_updates.i586
+ missing:
+ allowed:
+ - core.sources
+ - core_updates.sources
+
+ core_updates_testing.i586:
+ class: Youri::Media::URPM
+ options:
+ name: core_updates_testing
+ type: binary
+ hdlist: ${mirror_i586}/media_info/hdlist_core_updates_testing.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i586
+ - core_updates.i586
+ - core_updates_testing.i586
+ missing:
+ allowed:
+ - core.sources
+ - core_updates.sources
+ - core_updates_testing.sources
+
+ core.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_core.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i586
+ missing:
+ allowed:
+ - core.sources
+
+ core_updates.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: core_updates
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_core_updates.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i586
+ - core_updates.i586
+ - core.x86_64
+ - core_updates.x86_64
+ missing:
+ allowed:
+ - core.sources
+ - core_updates.sources
+
+ core_updates_testing.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: core_updates_testing
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_core_updates_testing.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core_updates.x86_64
+ - core_updates_testing.x86_64
+ - core.i586
+ - core_updates.i586
+ - core_updates_testing.i586
+ missing:
+ allowed:
+ - core.sources
+ - core_updates.sources
+ - core_updates_testing.sources
+
+ core.sources:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: source
+ hdlist: ${mirror_i586}/media_info/hdlist_core.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i586
+
+ core_updates.sources:
+ class: Youri::Media::URPM
+ options:
+ name: core_updates
+ type: source
+ hdlist: ${mirror_i586}/media_info/hdlist_core_updates.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core_updates.x86_64
+ - core.i586
+ - core_updates.i586
+
+ core_updates_testing.sources:
+ class: Youri::Media::URPM
+ options:
+ name: core_updates_testing
+ type: source
+ hdlist: ${mirror_i586}/media_info/hdlist_core_updates_testing.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core_updates.x86_64
+ - core_updates_testing.x86_64
+ - core.i586
+ - core_updates.i586
+ - core_updates_testing.i586
+
+ nonfree.i586:
+ class: Youri::Media::URPM
+ options:
+ name: nonfree
+ type: binary
+ hdlist: ${mirror_i586}/media_info/hdlist_nonfree_release.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i586
+ - nonfree.i586
+ missing:
+ allowed:
+ - nonfree.sources
+
+ nonfree.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: nonfree
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_nonfree_release.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i586
+ - nonfree.x86_64
+ - nonfree.i586
+ missing:
+ allowed:
+ - nonfree.sources
+
+
+ nonfree.sources:
+ class: Youri::Media::URPM
+ options:
+ name: nonfree
+ type: source
+ hdlist: ${mirror_i586}/media_info/hdlist_nonfree_release.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - nonfree.x86_64
+ - core.i586
+ - nonfree.i586
diff --git a/modules/youri-check/templates/cauldron.conf b/modules/youri-check/templates/cauldron.conf
new file mode 100644
index 00000000..aeace447
--- /dev/null
+++ b/modules/youri-check/templates/cauldron.conf
@@ -0,0 +1,504 @@
+# vim:ft=yaml:et:sw=4
+
+# helper variables
+mirror: http://repository.mageia.org/distrib/cauldron
+mirror_aarch64: ${mirror}/aarch64/media
+mirror_armv7hl: ${mirror}/armv7hl/media
+mirror_i686: ${mirror}/i686/media
+mirror_x86_64: ${mirror}/x86_64/media
+
+# resultset definition
+resultset:
+ class: Youri::Check::Resultset::DBI
+ options:
+ driver: Pg
+ host: <%= pgsql_server %>;sslmode=require
+ base: <%= pgsql_db %>
+ user: <%= pgsql_user %>
+ pass: <%= pgsql_password %>
+
+resolver:
+ class: Youri::Check::Maintainer::Resolver::CGI
+ options:
+ url: https://pkgsubmit.<%= domain %>/data/maintdb.txt
+ exceptions:
+ - nobody
+
+
+# checks definitions
+tests:
+ dependencies:
+ class: Youri::Check::Test::Dependencies
+
+ missing:
+ class: Youri::Check::Test::Missing
+
+ updates:
+ class: Youri::Check::Test::Updates
+ options:
+ aliases:
+ basesystem: ~
+ drakxtools: ~
+ drakx-installer-advertising: ~
+ drakx-installer-binaries: ~
+ drakx-installer-images: ~
+ drakx-installer-rescue: ~
+ drakx-installer-stage2: ~
+ horde-accounts: accounts
+ horde-chora: chora
+ horde-forwards: forwards
+ horde-imp: imp
+ horde-ingo: ingo
+ horde-kronolith: kronolith
+ horde-mnemo: mnemo
+ horde-nag: nag
+ horde-passwd: passwd
+ horde-turba: turba
+ horde-vacation: vacation
+ freetype: freetype2
+ gstreamer: ~
+ gstreamer0.10: gstreamer
+ gnupg2: gnupg
+ gnupg: ~
+ gnome-vfs2: gnome-vfs
+ gnome-vfs: ~
+ ldetect: ~
+ ldetect-lst: ~
+ libutempter: utempter
+ perl-URPM: ~
+ rpm: ~
+ rpmdrake: ~
+ rpmstats: ~
+ rpmtools: ~
+ urpmi: ~
+ vte: ~
+ vte3: vte
+ xine-lib: xine-lib1.2
+ xine-lib-1.2: xine-lib1.2
+ sources:
+ cpan:
+ order: 0
+ class: Youri::Check::Test::Updates::Source::CPAN
+ options:
+ aliases:
+ libnet: ~
+ perl-Catalyst-P-S-State-Cookie: Catalyst::Plugin::State::State::Cookie
+ perl-Catalyst-P-S-Store-FastMmap: Catalyst::Plugin::State::Store::FastMmap
+ perl-Catalyst-P-S-Store-File: Catalyst::Plugin::State::Store::File
+ gettext: ~
+ pear:
+ order: 0
+ class: Youri::Check::Test::Updates::Source::PEAR
+# pypi:
+# order: 0
+# class: Youri::Check::Test::Updates::Source::PyPI
+# apache:
+# order: 0
+# class: Youri::Check::Test::Updates::Source::Apache
+ debian:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::Debian
+ options:
+ aliases:
+ anjuta2: anjuta
+ anjuta: ~
+ perl-Jcode: libjcode-pm-perl
+ makepasswd: ~
+ sipp: ~
+ zsnes: ~
+ unclutter: ~
+ python-id3: ~
+ freetype: ~
+ openldap2.3: ~
+ git: git-core
+ nilfs-utils: nilfs-tools
+ mobile-broadband-provider-info: ~
+ cpulimit: ~
+ icecream: ~
+ colorize: ~
+ fedora:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::Fedora
+ options:
+ aliases:
+ authd: ~
+ basesystem: ~
+ bash: ~
+ freetype: ~
+ freetype2: freetype
+ gle: ~
+ gtksourceview-sharp: ~
+ modemmanager: ModemManager
+ netcat-openbsd: netcat
+ networkmanager: NetworkManager
+ networkmanager-applet: network-manager-applet
+ networkmanager-fortisslvpn: NetworkManager-fortisslvpn
+ networkmanager-l2tp: NetworkManager-l2tp
+ networkmanager-libreswan: NetworkManager-libreswan
+ networkmanager-openconnect: NetworkManager-openconnect
+ networkmanager-openvpn: NetworkManager-openvpn
+ networkmanager-pptp: NetworkManager-pptp
+ networkmanager-vpnc: NetworkManager-vpnc
+ ocaml-lablgtk: ~
+ ocaml-lablgtk2: ocaml-lablgtk
+ OpenIPMI: OpenIPMI2
+ sqlite: sqlite2
+ gentoo:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::Gentoo
+ options:
+ aliases:
+ beagle: ~
+ makepasswd: ~
+ hibernate: hibernate-script
+ leif: ~
+ sqlite3: sqlite
+ sqlite: ~
+ cfengine3: cfengine
+ cfengine: ~
+ kamikaze: ~
+ knob: ~
+ vertex: ~
+ unclutter: ~
+ pam-krb5: pam_krb5
+ pam_krb5: ~
+ akonadi: akonadi-server
+ attica: libattica
+ raptor2: raptor
+ raptor: ~
+ libevent: ~
+ wifi-radar: ~
+ tuxmathscrabble: ~
+ chromium: ~
+ cpulimit: ~
+ icecream: ~
+ nodejs: ~
+ gnome:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::GNOME
+ options:
+ url: https://download.gnome.org/sources/
+ aliases:
+ acme: ~
+ GConf: ~
+ GConf2: GConf
+ gcr: ~
+ gcr4: gcr
+ gdk-pixbuf2.0: gdk-pixbuf
+ glib: ~
+ glib2.0: glib
+ glibmm2.4: ~
+ goocanvas2: ~
+ gtkmm-documentation3.0: ~
+ gtkmm: ~
+ gtkmm2.4: ~
+ gtkmm3.0: ~
+ gtkmm4.0: gtkmm
+ gtksourceviewmm3.0: ~
+ gtk: ~
+ gtk+2.0: ~
+ gtk+3.0: ~
+ gtk4.0: gtk
+ modemmanager: ModemManager
+ networkmanager: NetworkManager
+ networkmanager-applet: network-manager-applet
+ networkmanager-fortisslvpn: NetworkManager-fortisslvpn
+ networkmanager-l2tp: NetworkManager-l2tp
+ networkmanager-libreswan: NetworkManager-libreswan
+ networkmanager-openconnect: NetworkManager-openconnect
+ networkmanager-openvpn: NetworkManager-openvpn
+ networkmanager-pptp: NetworkManager-pptp
+ networkmanager-vpnc: NetworkManager-vpnc
+ notify-sharp: ~
+ notify-sharp3: notify-sharp
+ pango: ~
+ pango2.0: pango
+ netbsd:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::NetBSD
+# sourceforge:
+# class: Youri::Check::Test::Updates::Source::Sourceforge
+# options:
+# aliases:
+# bigforth: ~
+# gtkmm: ~
+# hydrogen: ~
+# ltp: ~
+# pblogan: ~
+# console-tools: ~
+# maxima: ~
+# clisp: ~
+
+ updates_fedora:
+ class: Youri::Check::Test::Updates
+ options:
+ sources:
+ fedora:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::Fedora
+ options:
+ aliases:
+ authd: ~
+ basesystem: ~
+ bash: ~
+ freetype: ~
+ freetype2: freetype
+ gle: ~
+ gtksourceview-sharp: ~
+ modemmanager: ModemManager
+ netcat-openbsd: netcat
+ networkmanager: NetworkManager
+ networkmanager-applet: network-manager-applet
+ networkmanager-fortisslvpn: NetworkManager-fortisslvpn
+ networkmanager-l2tp: NetworkManager-l2tp
+ networkmanager-libreswan: NetworkManager-libreswan
+ networkmanager-openconnect: NetworkManager-openconnect
+ networkmanager-openvpn: NetworkManager-openvpn
+ networkmanager-pptp: NetworkManager-pptp
+ networkmanager-vpnc: NetworkManager-vpnc
+ ocaml-lablgtk: ~
+ ocaml-lablgtk2: ocaml-lablgtk
+ OpenIPMI: OpenIPMI2
+ sqlite: sqlite2
+ updates_gnome:
+ class: Youri::Check::Test::Updates
+ options:
+ sources:
+ gnome:
+ order: 1
+ class: Youri::Check::Test::Updates::Source::GNOME
+ options:
+ url: https://download.gnome.org/sources/
+ aliases:
+ acme: ~
+ GConf: ~
+ GConf2: GConf
+ gcr: ~
+ gcr4: gcr
+ gdk-pixbuf2.0: gdk-pixbuf
+ glib: ~
+ glib2.0: glib
+ glibmm2.4: ~
+ goocanvas2: ~
+ gtkmm-documentation3.0: ~
+ gtkmm: ~
+ gtkmm2.4: ~
+ gtkmm3.0: ~
+ gtkmm4.0: gtkmm
+ gtksourceviewmm3.0: ~
+ gtk: ~
+ gtk+2.0: ~
+ gtk+3.0: ~
+ gtk4.0: gtk
+ modemmanager: ModemManager
+ networkmanager: NetworkManager
+ networkmanager-applet: network-manager-applet
+ networkmanager-fortisslvpn: NetworkManager-fortisslvpn
+ networkmanager-l2tp: NetworkManager-l2tp
+ networkmanager-libreswan: NetworkManager-libreswan
+ networkmanager-openconnect: NetworkManager-openconnect
+ networkmanager-openvpn: NetworkManager-openvpn
+ networkmanager-pptp: NetworkManager-pptp
+ networkmanager-vpnc: NetworkManager-vpnc
+ notify-sharp: ~
+ notify-sharp3: notify-sharp
+ pango: ~
+ pango2.0: pango
+ build:
+ class: Youri::Check::Test::Build
+ options:
+ sources:
+ iurt:
+ class: Youri::Check::Test::Build::Source::Iurt
+ options:
+ url: https://pkgsubmit.mageia.org/autobuild/cauldron
+ arches:
+ - x86_64
+ medias:
+ - core
+ age:
+ class: Youri::Check::Test::Age
+ options:
+ max: <%= max_days %> days
+ pattern: "%d days"
+
+# reports definitions
+reports:
+ file:
+ class: Youri::Check::Report::File
+ options:
+ to: <%= outdir %>
+ global: 1
+ individual: 1
+ formats:
+ html:
+ class: Youri::Check::Report::Format::HTML
+ text:
+ class: Youri::Check::Report::Format::Text
+ rss:
+ class: Youri::Check::Report::Format::RSS
+
+# media definitions
+medias:
+ core.aarch64:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: binary
+ hdlist: ${mirror_aarch64}/core/release/media_info/hdlist.cz
+ options:
+ dependencies:
+ allowed:
+ - core.aarch64
+ missing:
+ allowed:
+ - core.sources
+
+ core.armv7hl:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: binary
+ hdlist: ${mirror_armv7hl}/core/release/media_info/hdlist.cz
+ options:
+ dependencies:
+ allowed:
+ - core.armv7hl
+ missing:
+ allowed:
+ - core.sources
+
+ core.i686:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: binary
+ hdlist: ${mirror_i686}/media_info/hdlist_core.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i686
+ missing:
+ allowed:
+ - core.sources
+
+ core.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_core.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i686
+ missing:
+ allowed:
+ - core.sources
+
+
+ core.sources:
+ class: Youri::Media::URPM
+ options:
+ name: core
+ type: source
+ hdlist: ${mirror_i686}/media_info/hdlist_core.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i686
+
+ nonfree.i686:
+ class: Youri::Media::URPM
+ options:
+ name: nonfree
+ type: binary
+ hdlist: ${mirror_i686}/media_info/hdlist_nonfree_release.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i686
+ - nonfree.i686
+ missing:
+ allowed:
+ - nonfree.sources
+
+ nonfree.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: nonfree
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_nonfree_release.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i686
+ - nonfree.x86_64
+ - nonfree.i686
+ missing:
+ allowed:
+ - nonfree.sources
+
+
+ nonfree.sources:
+ class: Youri::Media::URPM
+ options:
+ name: nonfree
+ type: source
+ hdlist: ${mirror_i686}/media_info/hdlist_nonfree_release.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - nonfree.x86_64
+ - core.i686
+ - nonfree.i686
+
+ tainted.i686:
+ class: Youri::Media::URPM
+ options:
+            name: tainted
+ type: binary
+ hdlist: ${mirror_i686}/media_info/hdlist_tainted_release.cz
+ options:
+ dependencies:
+ allowed:
+ - core.i686
+ - tainted.i686
+ missing:
+ allowed:
+ - tainted.sources
+
+ tainted.x86_64:
+ class: Youri::Media::URPM
+ options:
+ name: tainted
+ type: binary
+ hdlist: ${mirror_x86_64}/media_info/hdlist_tainted_release.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - core.i686
+ - tainted.x86_64
+ - tainted.i686
+ missing:
+ allowed:
+ - tainted.sources
+
+ tainted.sources:
+ class: Youri::Media::URPM
+ options:
+ name: tainted
+ type: source
+ hdlist: ${mirror_i686}/media_info/hdlist_tainted_release.src.cz
+ options:
+ dependencies:
+ allowed:
+ - core.x86_64
+ - tainted.x86_64
+ - core.i686
+ - tainted.i686
diff --git a/modules/youri-check/templates/vhost_check.conf b/modules/youri-check/templates/vhost_check.conf
new file mode 100644
index 00000000..2cf598b5
--- /dev/null
+++ b/modules/youri-check/templates/vhost_check.conf
@@ -0,0 +1,2 @@
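+# Let the pkgsubmit web interface fetch the check results cross-origin; the
+# second directive overrides the first for requests that arrived over HTTPS.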
+Header set Access-Control-Allow-Origin "http://pkgsubmit.<%= domain %>"
+Header set Access-Control-Allow-Origin "https://pkgsubmit.<%= domain %>" env=HTTPS