/[gli]/branches/overhaul/src/GLIArchitectureTemplate.py

Revision 1523
Tue Sep 12 01:58:29 2006 UTC by agaffney
Original Path: trunk/src/GLIArchitectureTemplate.py
File MIME type: text/x-python
File size: 91051 byte(s)
  src/GLIArchitectureTemplate.py:
  break after installing 1 mta in install_mta()

1 """
2 # Copyright 1999-2005 Gentoo Foundation
3 # This source code is distributed under the terms of version 2 of the GNU
4 # General Public License as published by the Free Software Foundation, a copy
5 # of which can be found in the main directory of this project.
6 Gentoo Linux Installer
7
8 $Id: GLIArchitectureTemplate.py,v 1.295 2006/09/12 01:58:29 agaffney Exp $
9
10 The ArchitectureTemplate is largely meant to be an abstract class and an
11 interface (yes, it is both at the same time!). It is meant to be subclassed,
12 with each subclass overriding these methods with working implementations for its
13 architecture. Only the architecture-independent definitions are filled in here.
14
15 """
16
17 import GLIUtility, GLILogger, os, string, sys, shutil, re, time
18 import GLIPortage
19 from GLIException import *
20 import parted
21 import GLIStorageDevice
22
23 MEGABYTE = 1024 * 1024
24
25 class ArchitectureTemplate:
26 ##
27 # Initialization of the ArchitectureTemplate. Called from an architecture-specific subclass.
28 # @param configuration=None A client configuration
29 # @param install_profile=None An install profile
30 # @param client_controller=None Client controller; not the same as the configuration.
31 def __init__(self,configuration=None, install_profile=None, client_controller=None):
32 self._client_configuration = configuration
33 self._install_profile = install_profile
34 self._cc = client_controller
35
36 # This will get used a lot, so it's probably
37 # better to store it in a variable than to call
38 # this method 100000 times.
39 self._chroot_dir = self._client_configuration.get_root_mount_point()
40 self._logger = GLILogger.Logger(self._client_configuration.get_log_file())
41 self._compile_logfile = "/tmp/compile_output.log"
42 self._debug = self._client_configuration.get_verbose()
43
44 self._portage = GLIPortage.GLIPortage(self._chroot_dir, self._install_profile.get_grp_install(), self._logger, self._debug, self._cc, self._compile_logfile)
45
46 # This will clean up the logfile if it's a dead link (pointing
47 # to the chroot logfile when the partitions aren't mounted);
48 # otherwise no action needs to be taken.
49
50 if os.path.islink(self._compile_logfile) and not os.path.exists(self._compile_logfile):
51 os.unlink(self._compile_logfile)
52
53 # cache the list of successfully mounted devices and swap devices here
54 self._mounted_devices = []
55 self._swap_devices = []
56
57 # These must be filled in by the subclass. _install_steps is a list of
58 # steps (function, name, and applicable modes) that will carry out the
59 # installation. They must be in order.
60 #
61 # For example, the step functions might be: [preinstall, stage1, stage2, stage3, postinstall],
62 # where each entry is a function (with no arguments) that carries out the desired actions.
63 # Of course, the steps will differ depending on the install_profile
64
65 self._architecture_name = "generic"
66 self._install_steps = [
67 { 'function': self.partition, 'name': "Partition", 'modes': ("normal", "stage4") },
68 { 'function': self.mount_local_partitions, 'name': "Mount local partitions", 'modes': ("normal", "stage4") },
69 { 'function': self.mount_network_shares, 'name': "Mount network (NFS) shares", 'modes': ("normal", "stage4") },
70 { 'function': self.unpack_stage_tarball, 'name': "Unpack stage tarball", 'modes': ("normal", "stage4", "chroot") },
71 { 'function': self.update_config_files, 'name': "Updating config files", 'modes': ("normal", "chroot") },
72 { 'function': self.configure_make_conf, 'name': "Configure /etc/make.conf", 'modes': ("normal", "chroot") },
73 { 'function': self.prepare_chroot, 'name': "Preparing chroot", 'modes': ("normal", "stage4", "chroot") },
74 { 'function': self.install_portage_tree, 'name': "Syncing the Portage tree", 'modes': ("normal", "chroot") },
75 { 'function': self.stage1, 'name': "Performing bootstrap", 'modes': ("normal", "chroot") },
76 { 'function': self.stage2, 'name': "Performing 'emerge system'", 'modes': ("normal", "chroot") },
77 { 'function': self.set_root_password, 'name': "Set the root password", 'modes': ("normal", "chroot") },
78 { 'function': self.set_timezone, 'name': "Setting timezone", 'modes': ("normal", "chroot") },
79 { 'function': self.emerge_kernel_sources, 'name': "Emerge kernel sources", 'modes': ("normal", "chroot") },
80 { 'function': self.build_kernel, 'name': "Building kernel", 'modes': ("normal", "chroot") },
81 { 'function': self.install_distcc, 'name': "Install distcc", 'modes': ("normal", "chroot") },
82 { 'function': self.install_mta, 'name': "Installing MTA", 'modes': ("normal", "chroot") },
83 { 'function': self.install_logging_daemon, 'name': "Installing system logger", 'modes': ("normal", "chroot") },
84 { 'function': self.install_cron_daemon, 'name': "Installing Cron daemon", 'modes': ("normal", "chroot") },
85 { 'function': self.install_filesystem_tools, 'name': "Installing filesystem tools", 'modes': ("normal", "chroot") },
86 { 'function': self.setup_network_post, 'name': "Configuring post-install networking", 'modes': ("normal", "chroot") },
87 { 'function': self.install_bootloader, 'name': "Configuring and installing bootloader", 'modes': ("normal", "chroot") },
88 { 'function': self.setup_and_run_bootloader, 'name': "Setting up and running bootloader", 'modes': ( "normal", "stage4") },
89 { 'function': self.update_config_files, 'name': "Re-Updating config files", 'modes': ("normal", "chroot") },
90 # { 'function': self.configure_rc_conf, 'name': "Updating /etc/rc.conf", 'modes': ("normal", "stage4", "chroot") },
91 { 'function': self.set_users, 'name': "Add additional users.", 'modes': ("normal", "chroot") },
92 { 'function': self.install_packages, 'name': "Installing additional packages.", 'modes': ("normal", "chroot") },
93 # services for startup need to come after installing extra packages
94 # otherwise some of the scripts will not exist.
95 { 'function': self.set_services, 'name': "Setting up services for startup", 'modes': ("normal", "chroot") },
96 { 'function': self.run_post_install_script, 'name': "Running custom post-install script", 'modes': ("normal", "stage4", "chroot") },
97 { 'function': self.finishing_cleanup, 'name': "Cleanup and unmounting local filesystems.", 'modes': ("normal", "stage4", "chroot") }
98 ]
99
100
101 ##
102 # Returns the steps and their comments in an array
103 def get_install_steps(self):
104 return self._install_steps
105
106 ##
107 # Tells the frontend something
108 # @param type the type of data
109 # @param data the data itself, usually a number
110 def notify_frontend(self, type, data):
111 self._cc.addNotification(type, data)
112
113 # It is possible to override these methods in each Arch Template.
114 # It might be necessary to do so, if the arch needs something 'weird'.
115
116 ##
117 # Private function to add a /etc/init.d/ script to the given runlevel in the chroot environment
118 # @param script_name the script to be added
119 # @param runlevel="default" the runlevel to add to
120 def _add_to_runlevel(self, script_name, runlevel="default"):
121 if not GLIUtility.is_file(self._chroot_dir + '/etc/init.d/' + script_name):
122 #raise GLIException("RunlevelAddError", 'fatal', '_add_to_runlevel', "Failure adding " + script_name + " to runlevel " + runlevel + "!")
123 #This is not a fatal error. If the init script is important it will exist.
124 self._logger.log("ERROR! Failure adding " + script_name + " to runlevel " + runlevel + " because it was not found!")
125 if self._debug: self._logger.log("DEBUG: running rc-update add " + script_name + " " + runlevel + " in chroot.")
126 status = GLIUtility.spawn("rc-update add " + script_name + " " + runlevel, display_on_tty8=True, chroot=self._chroot_dir, logfile=self._compile_logfile, append_log=True)
127 if not GLIUtility.exitsuccess(status):
128 #raise GLIException("RunlevelAddError", 'fatal', '_add_to_runlevel', "Failure adding " + script_name + " to runlevel " + runlevel + "!")
129 #Again, an error here will not prevent a new system from booting. But it is important to log the error.
130 self._logger.log("ERROR! Could not add " + script_name + " to runlevel " + runlevel + ". returned a bad status code.")
131 else:
132 self._logger.log("Added "+script_name+" to runlevel "+runlevel)
133
134 ##
135 # Private function. Returns the list of packages that would be emerged for a given command. The result is currently only used for logging.
136 # @param cmd full command to run ('/usr/portage/scripts/bootstrap.sh --pretend' or 'emerge -p system')
137 def _get_packages_to_emerge(self, cmd):
138 if self._debug: self._logger.log("DEBUG: _get_packages_to_emerge() called with '%s'" % cmd)
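# The pipeline below keeps only the '[ebuild ...]' lines of the pretend output and strips the ebuild status flags, leaving bare package atoms (category/package-version), one per line.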
139 return GLIUtility.spawn(cmd + r" 2>/dev/null | grep -e '\[ebuild' | sed -e 's:\[ebuild .\+ \] ::' -e 's: \[.\+\] ::' -e 's: \+$::'", chroot=self._chroot_dir, return_output=True)[1].strip().split("\n")
140
141 ##
142 # Private Function. Will emerge a given package in the chroot environment.
143 # @param package package to be emerged
144 # @param binary=True defines whether to try a binary emerge with -k (if GRP this gets ignored either way)
145 # @param binary_only=False defines whether to allow only binary emerges (-K).
146 def _emerge(self, package, binary=True, binary_only=False):
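# Note on the emerge flags used below: -k uses a prebuilt binary package when one is available (falling back to building from source), while -K uses only binary packages and fails if none exists.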
147 #Error checking of this function is to be handled by the parent function.
148 # self._logger.log("_emerge() called with: package='%s', binary='%s', binary_only='%s', grp_install='%s'" % (package, str(binary), str(binary_only), str(self._install_profile.get_grp_install())))
149 # now short-circuit for GRP
150 if self._install_profile.get_grp_install():
151 cmd="emerge -k " + package
152 # now normal installs
153 else:
154 if binary_only:
155 cmd="emerge -K " + package
156 elif binary:
157 cmd="emerge -k " + package
158 else:
159 cmd="emerge " + package
160
161 self._logger.log("Calling emerge: "+cmd)
162 return GLIUtility.spawn(cmd, display_on_tty8=True, chroot=self._chroot_dir, logfile=self._compile_logfile, append_log=True)
163
164 ##
165 # Private function. Edits a config file, inserting new values and overriding any previous ones
166 # (the old lines are actually just commented out, not removed)
167 # @param filename file to be edited
168 # @param newvalues a dictionary of VARIABLE:VALUE pairs
169 # @param delimeter='=' what is between the key and the value
170 # @param quotes_around_value=True whether there are quotes around the value or not (ex. "local" vs. localhost)
171 # @param only_value=False Ignore the keys and output only a value.
172 # @param create_file=True Create the file if it does not exist.
173 def _edit_config(self, filename, newvalues, delimeter='=', quotes_around_value=True, only_value=False,create_file=True):
174 # don't use 'file' as a normal variable as it conflicts with the __builtin__.file
175 newvalues = newvalues.copy()
176 if self._debug: self._logger.log("DEBUG: _edit_config() called with " + str(newvalues)+" and flags: "+delimeter + "quotes: "+str(quotes_around_value)+" value: "+str(only_value))
177 if GLIUtility.is_file(filename):
178 f = open(filename)
179 contents = f.readlines()
180 f.close()
181 elif create_file:
182 contents = []
183 else:
184 raise GLIException("NoSuchFileError", 'notice','_edit_config',filename + ' does not exist!')
185
186 for key in newvalues.keys():
187 newline = ""
188 if key == "SPACER":
189 newline = "\n"
190 elif key == "COMMENT":
191 newline = '# ' + newvalues[key] + "\n"
192 elif newvalues[key] == "##comment##" or newvalues[key] == "##commented##":
193 newline = '#' + key + delimeter + '""' + "\n"
194 else:
195 if quotes_around_value:
196 newvalues[key] = '"' + newvalues[key] + '"'
197 #Only the printing of values is required.
198 if only_value:
199 newline = newvalues[key] + "\n"
200 else:
201 newline = key + delimeter + newvalues[key] + "\n"
202 add_at_line = len(contents)
203 for i in range(len(contents)):
204 if newline == contents[i]:
205 break
206 if contents[i].startswith(key + delimeter):
207 contents[i] = "#" + contents[i]
208 add_at_line = i + 1
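# Note: the 'else' below belongs to the 'for' loop -- it runs only when the loop finishes without hitting 'break' (i.e. no existing line is already identical to newline), in which case the new line is inserted.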
209 else:
210 contents.insert(add_at_line, newline)
211 if self._debug: self._logger.log("DEBUG: Contents of file "+filename+": "+str(contents))
212 f = open(filename,'w')
213 f.writelines(contents)
214 f.flush()
215 f.close()
216 self._logger.log("Edited Config file "+filename)
217
218 ##
219 # Stage 1 install -- bootstrapping the system
220 # If we are doing a stage 1 install, then bootstrap
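# Note: /usr/portage/scripts/bootstrap.sh rebuilds the core toolchain (binutils, gcc, glibc and friends) inside the chroot; that is what distinguishes a stage 1 install.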
221 def stage1(self):
222 if self._install_profile.get_install_stage() == 1:
223 self._logger.mark()
224 self._logger.log("Starting bootstrap.")
225 pkgs = self._get_packages_to_emerge("/usr/portage/scripts/bootstrap.sh --pretend")
226 if self._debug: self._logger.log("DEBUG: Packages to emerge: "+str(pkgs)+". Now running bootstrap.sh")
227 exitstatus = GLIUtility.spawn("env-update && source /etc/profile && /usr/portage/scripts/bootstrap.sh", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
228 if not GLIUtility.exitsuccess(exitstatus):
229 raise GLIException("Stage1Error", 'fatal','stage1', "Bootstrapping failed!")
230 self._logger.log("Bootstrap complete.")
231
232 ##
233 # Stage 2 install -- emerge -e system
234 # If we are doing a stage 1 or 2 install, then emerge system
235 def stage2(self):
236 if self._install_profile.get_install_stage() in [ 1, 2 ]:
237 self._logger.mark()
238 self._logger.log("Starting emerge system.")
239 pkgs = self._get_packages_to_emerge("emerge -p system") # the result is currently only logged
240 if self._debug: self._logger.log("DEBUG: Packages to emerge: "+str(pkgs)+"/ Now running emerge --emptytree system")
241 # exitstatus = self._emerge("--emptytree system")
242 exitstatus = GLIUtility.spawn("env-update && source /etc/profile && emerge -e system", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
243 if not GLIUtility.exitsuccess(exitstatus):
244 raise GLIException("Stage2Error", 'fatal','stage2', "Building the system failed!")
245 self._logger.log("Emerge system complete.")
246
247 ##
248 # Unpacks the stage tarball that has been specified in the profile (it better be there!)
249 def unpack_stage_tarball(self):
250 if not os.path.isdir(self._chroot_dir):
251 if self._debug: self._logger.log("DEBUG: making the chroot dir:"+self._chroot_dir)
252 os.makedirs(self._chroot_dir)
253 if self._install_profile.get_install_stage() == 3 and self._install_profile.get_dynamic_stage3():
254 # stage3 generation code here
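# Dynamic stage3: rather than unpacking a stage tarball, copy every package listed in the LiveCD's /usr/livecd/systempkgs.txt from the running environment into the chroot.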
255 if not GLIUtility.is_file("/usr/livecd/systempkgs.txt"):
256 raise GLIException("CreateStage3Error", "fatal", "unpack_stage_tarball", "Required file /usr/livecd/systempkgs.txt does not exist")
257 try:
258 syspkgs = open("/usr/livecd/systempkgs.txt", "r")
259 systempkgs = syspkgs.readlines()
260 syspkgs.close()
261 except:
262 raise GLIException("CreateStage3Error", "fatal", "unpack_stage_tarball", "Could not open /usr/livecd/systempkgs.txt")
263
264 # Pre-create /lib (and possibly /lib32 and /lib64)
265 for libdir in ("/lib", "/usr/lib"):
266 if os.path.islink(libdir) and os.readlink(libdir).endswith("64"):
267 if self._debug: self._logger.log("DEBUG: unpack_stage_tarball(): precreating " + libdir + "64 dir and " + libdir + " -> lib64 symlink because glibc/portage sucks")
268 if not GLIUtility.exitsuccess(GLIUtility.spawn("mkdir -p " + self._chroot_dir + libdir + "64 && ln -sf lib64 " + self._chroot_dir + libdir)):
269 raise GLIException("CreateStage3Error", "fatal", "unpack_stage_tarball", "Could not precreate " + libdir + "64 dir and " + libdir + " -> lib64 symlink")
270
271 syspkglen = len(systempkgs)
272 for i, pkg in enumerate(systempkgs):
273 pkg = pkg.strip()
274 self.notify_frontend("progress", (float(i) / (syspkglen+1), "Copying " + pkg + " (" + str(i+1) + "/" + str(syspkglen) + ")"))
275 self._portage.copy_pkg_to_chroot(pkg, True, ignore_missing=True)
276 self.notify_frontend("progress", (float(syspkglen) / (syspkglen+1), "Finishing"))
277 GLIUtility.spawn("cp /etc/make.conf " + self._chroot_dir + "/etc/make.conf")
278 GLIUtility.spawn("ln -s `readlink /etc/make.profile` " + self._chroot_dir + "/etc/make.profile")
279 GLIUtility.spawn("cp -f /etc/inittab.old " + self._chroot_dir + "/etc/inittab")
280
281 # Nasty, nasty, nasty hack because vapier is a tool
282 for tmpfile in ("/etc/passwd", "/etc/group", "/etc/shadow"):
283 GLIUtility.spawn("grep -ve '^gentoo' " + tmpfile + " > " + self._chroot_dir + tmpfile)
284
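# The helper script below runs inside the chroot to re-select the gcc and binutils profiles, rebuild the linker cache and environment, and restore the saved device nodes from the LiveCD's udev state.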
285 chrootscript = r"""
286 #!/bin/bash
287
288 source /etc/make.conf
289 export LDPATH="/usr/lib/gcc-lib/${CHOST}/$(cd /usr/lib/gcc-lib/${CHOST} && ls -1 | head -n 1)"
290
291 ldconfig $LDPATH
292 gcc-config 1
293 env-update
294 source /etc/profile
295 modules-update
296 [ -f /usr/bin/binutils-config ] && binutils-config 1
297 source /etc/profile
298 #mount -t proc none /proc
299 #cd /dev
300 #/sbin/MAKEDEV generic-i386
301 #umount /proc
302 [ -f /lib/udev-state/devices.tar.bz2 ] && tar -C /dev -xjf /lib/udev-state/devices.tar.bz2
303 """
304 script = open(self._chroot_dir + "/tmp/extrastuff.sh", "w")
305 script.write(chrootscript)
306 script.close()
307 GLIUtility.spawn("chmod 755 /tmp/extrastuff.sh && /tmp/extrastuff.sh", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
308 GLIUtility.spawn("rm -rf /var/tmp/portage/* /usr/portage /tmp/*", chroot=self._chroot_dir)
309 self.notify_frontend("progress", (1, "Done"))
310 self._logger.log("Stage3 was generated successfully")
311 else:
312 self._logger.log("Fetching and unpacking tarball: "+self._install_profile.get_stage_tarball_uri())
313 GLIUtility.fetch_and_unpack_tarball(self._install_profile.get_stage_tarball_uri(), self._chroot_dir, temp_directory=self._chroot_dir, keep_permissions=True, cc=self._cc)
314 self._logger.log(self._install_profile.get_stage_tarball_uri()+" was fetched and unpacked.")
315
316 ##
317 # Prepares the Chroot environment by copying /etc/resolv.conf and mounting proc and dev
318 def prepare_chroot(self):
319 # Copy resolv.conf to new env
320 try:
321 if self._debug: self._logger.log("DEBUG: copying /etc/resolv.conf over.")
322 shutil.copy("/etc/resolv.conf", self._chroot_dir + "/etc/resolv.conf")
323 except:
324 pass
325 if self._debug: self._logger.log("DEBUG: mounting proc")
326 ret = GLIUtility.spawn("mount -t proc none "+self._chroot_dir+"/proc")
327 if not GLIUtility.exitsuccess(ret):
328 raise GLIException("MountError", 'fatal','prepare_chroot','Could not mount /proc')
329 else:
330 self._mounted_devices.append("/proc")
331 bind_mounts = [ '/dev' ]
332 uname = os.uname()
333 if uname[0] == 'Linux' and uname[2].split('.')[1] == '6':
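# sysfs was introduced with the 2.6 kernel series, so /sys is only bind-mounted when the running kernel is 2.6 (hence the check on the version's second component above).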
334 bind_mounts.append('/sys')
335 if self._debug: self._logger.log("DEBUG: bind-mounting " + ", ".join(bind_mounts))
336 for mount in bind_mounts:
337 ret = GLIUtility.spawn('mount -o bind %s %s%s' % (mount,self._chroot_dir,mount))
338 if not GLIUtility.exitsuccess(ret):
339 raise GLIException("MountError", 'fatal','prepare_chroot','Could not mount '+mount)
340 else:
341 self._mounted_devices.append(mount)
342 if self._debug: self._logger.log("DEBUG: copying logfile to new system!")
343 GLIUtility.spawn("mv " + self._compile_logfile + " " + self._chroot_dir + self._compile_logfile + " && ln -s " + self._chroot_dir + self._compile_logfile + " " + self._compile_logfile)
344 self._logger.log("Chroot environment ready.")
345
346 ##
347 # Installs a list of packages specified in the profile. Will install any extra software!
348 # In the future this function will lead to better things. It may even wipe your ass for you.
349 def install_packages(self):
350 installpackages = self._install_profile.get_install_packages()
351 if installpackages:
352 # pkglist = self._portage.get_deps(" ".join(installpackages))
353 # if self._debug: self._logger.log("install_packages(): pkglist is " + str(pkglist))
354 # for i, pkg in enumerate(pkglist):
355 # if self._debug: self._logger.log("install_packages(): processing package " + pkg)
356 # self.notify_frontend("progress", (float(i) / len(pkglist), "Emerging " + pkg + " (" + str(i) + "/" + str(len(pkglist)) + ")"))
357 # if not self._portage.get_best_version_vdb("=" + pkg):
358 # status = self._emerge("=" + pkg)
359 # if not GLIUtility.exitsuccess(status):
360 # raise GLIException("ExtraPackagesError", "fatal", "install_packages", "Could not emerge " + pkg + "!")
361 # else:
362 # try:
363 # self._portage.copy_pkg_to_chroot(pkg)
364 # except:
365 # raise GLIException("ExtraPackagesError", "fatal", "install_packages", "Could not emerge " + pkg + "!")
366 self._portage.emerge(installpackages)
367
368 if GLIUtility.is_file(self._chroot_dir + "/etc/X11"):
369 # Copy the xorg.conf from the LiveCD if they installed xorg-x11
370 exitstatus = GLIUtility.spawn("cp /etc/X11/xorg.conf " + self._chroot_dir + "/etc/X11/xorg.conf")
371 if not GLIUtility.exitsuccess(exitstatus):
372 self._logger.log("Could NOT copy the xorg configuration from the livecd to the new system!")
373 else:
374 self._logger.log("xorg.conf copied to new system. X should be ready to roll!")
375 if GLIUtility.is_file(self._chroot_dir + "/etc/X11/gdm/gdm.conf"):
376 GLIUtility.spawn("cp -f /etc/X11/gdm/gdm.conf.old " + self._chroot_dir + "/etc/X11/gdm/gdm.conf")
377 if GLIUtility.is_file(self._chroot_dir + "/etc/X11/gdm/custom.conf"):
378 GLIUtility.spawn("cp -f /etc/X11/gdm/custom.conf.old " + self._chroot_dir + "/etc/X11/gdm/custom.conf")
379
380 ##
381 # Adds the listed services to the default runlevel. This is a temporary solution!
382 def set_services(self):
383 services = self._install_profile.get_services()
384 for service in services:
385 if service:
386 self._add_to_runlevel(service)
387
388 ##
389 # Will grab partition info from the profile and mount all partitions with a specified mountpoint (and swap too)
390 def mount_local_partitions(self):
391 parts = self._install_profile.get_partition_tables()
392 parts_to_mount = {}
393 for device in parts:
394 tmp_partitions = parts[device] #.get_install_profile_structure()
395 tmp_minor = -1
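# Find the first real (non-'free', non-'extended') partition on this device; the code then waits for its device node to appear, so the kernel has picked up the new partition table before anything is mounted.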
396 for minor in tmp_partitions: #.get_ordered_partition_list():
397 if not tmp_partitions[minor]['type'] in ("free", "extended"):
398 tmp_minor = minor
399 break
400 time.sleep(1)
401 if tmp_minor == -1: continue
402 # now sleep until it exists
403 while not GLIUtility.is_file(tmp_partitions[minor]['devnode']):
404 if self._debug: self._logger.log("DEBUG: Waiting for device node " + tmp_partitions[minor]['devnode'] + " to exist...")
405 time.sleep(1)
406 # one bit of extra sleep is needed, as there is a blip still
407 time.sleep(1)
408 for partition in tmp_partitions: #.get_ordered_partition_list():
409 mountpoint = tmp_partitions[partition]['mountpoint']
410 mountopts = tmp_partitions[partition]['mountopts']
411 minor = str(int(tmp_partitions[partition]['minor']))
412 partition_type = tmp_partitions[partition]['type']
413 if mountpoint:
414 if mountopts:
415 mountopts = "-o " + mountopts + " "
416 if partition_type:
417 if partition_type == "fat32" or partition_type == "fat16": partition_type = "vfat"
418 partition_type = "-t " + partition_type + " "
419 parts_to_mount[mountpoint] = (mountopts, partition_type, tmp_partitions[partition]['devnode'])
420
421 if partition_type == "linux-swap":
422 ret = GLIUtility.spawn("swapon " + tmp_partitions[partition]['devnode'])
423 if not GLIUtility.exitsuccess(ret):
424 self._logger.log("ERROR! : Could not activate swap (" + tmp_partitions[partition]['devnode'] + ")!")
425 else:
426 self._swap_devices.append(tmp_partitions[partition]['devnode'])
427 sorted_list = parts_to_mount.keys()
428 sorted_list.sort()
429
430 if not GLIUtility.is_file(self._chroot_dir):
431 if self._debug: self._logger.log("DEBUG: making the chroot dir")
432 exitstatus = GLIUtility.spawn("mkdir -p " + self._chroot_dir)
433 if not GLIUtility.exitsuccess(exitstatus):
434 raise GLIException("MkdirError", 'fatal','mount_local_partitions', "Making the ROOT mount point failed!")
435 else:
436 self._logger.log("Created root mount point")
437 for mountpoint in sorted_list:
438 mountopts = parts_to_mount[mountpoint][0]
439 partition_type = parts_to_mount[mountpoint][1]
440 partition = parts_to_mount[mountpoint][2]
441 if not GLIUtility.is_file(self._chroot_dir + mountpoint):
442 if self._debug: self._logger.log("DEBUG: making mountpoint: "+mountpoint)
443 exitstatus = GLIUtility.spawn("mkdir -p " + self._chroot_dir + mountpoint)
444 if not GLIUtility.exitsuccess(exitstatus):
445 raise GLIException("MkdirError", 'fatal','mount_local_partitions', "Making the mount point failed!")
446 else:
447 self._logger.log("Created mountpoint " + mountpoint)
448 ret = GLIUtility.spawn("mount " + partition_type + mountopts + partition + " " + self._chroot_dir + mountpoint, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
449 if not GLIUtility.exitsuccess(ret):
450 raise GLIException("MountError", 'fatal','mount_local_partitions','Could not mount a partition')
451 else:
452 self._mounted_devices.append(mountpoint)
453 # double check in /proc/mounts
454 # This current code doesn't work and needs to be fixed, because there is a case that it is needed for - robbat2
455 #ret, output = GLIUtility.spawn('awk \'$2 == "%s" { print "Found" }\' /proc/mounts | head -n1' % (self._chroot_dir + mountpoint), display_on_tty8=True, return_output=True)
456 #if output.strip() != "Found":
457 # raise GLIException("MountError", 'fatal','mount_local_partitions','Could not mount a partition (failed in double-check)')
458 self._logger.log("Mounted mountpoint: " + mountpoint)
459
460 ##
461 # Mounts all network shares to the local machine
462 def mount_network_shares(self):
463 """
464 <agaffney> it'll be much easier than mount_local_partitions
465 <agaffney> make sure /etc/init.d/portmap is started
466 <agaffney> then mount each one: mount -t nfs -o <mountopts> <host>:<export> <mountpoint>
467 """
468 nfsmounts = self._install_profile.get_network_mounts()
469 for netmount in nfsmounts:
470 if netmount['type'] == "NFS" or netmount['type'] == "nfs":
471 mountopts = netmount['mountopts']
472 if mountopts:
473 mountopts = "-o " + mountopts
474 host = netmount['host']
475 export = netmount['export']
476 mountpoint = netmount['mountpoint']
477 if not GLIUtility.is_file(self._chroot_dir + mountpoint):
478 exitstatus = GLIUtility.spawn("mkdir -p " + self._chroot_dir + mountpoint)
479 if not GLIUtility.exitsuccess(exitstatus):
480 raise GLIException("MkdirError", 'fatal','mount_network_shares', "Making the mount point failed!")
481 else:
482 if self._debug: self._logger.log("DEBUG: mounting nfs mount")
483 ret = GLIUtility.spawn("mount -t nfs " + mountopts + " " + host + ":" + export + " " + self._chroot_dir + mountpoint, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
484 if not GLIUtility.exitsuccess(ret):
485 raise GLIException("MountError", 'fatal','mount_network_shares','Could not mount an NFS partition')
486 else:
487 self._logger.log("Mounted netmount at mountpoint: " + mountpoint)
488 self._mounted_devices.append(mountpoint)
489 else:
490 self._logger.log("Netmount type " + netmount['type'] + " not supported...skipping " + netmount['mountpoint'])
491
492
493 ##
494 # Configures the new /etc/make.conf
495 def configure_make_conf(self):
496 # Get make.conf options
497 make_conf = self._install_profile.get_make_conf()
498
499 # For each configuration option...
500 filename = self._chroot_dir + "/etc/make.conf"
501 # self._edit_config(filename, {"COMMENT": "GLI additions ===>"})
502 for key in make_conf.keys():
503 # Add/Edit it into make.conf
504 self._edit_config(filename, {key: make_conf[key]})
505 # self._edit_config(filename, {"COMMENT": "<=== End GLI additions"})
506
507 self._logger.log("Make.conf configured")
508 # now make any directories that emerge needs, otherwise it will fail
509 # this must take place before ANY calls to emerge.
510 # otherwise emerge will fail (for PORTAGE_TMPDIR anyway)
511 # defaults first
512 # this really should use portageq or something.
513 PKGDIR = '/usr/portage/packages'
514 PORTAGE_TMPDIR = '/var/tmp'
515 PORT_LOGDIR = None
516 PORTDIR_OVERLAY = None
517 # now other stuff
518 if 'PKGDIR' in make_conf: PKGDIR = make_conf['PKGDIR']
519 if 'PORTAGE_TMPDIR' in make_conf: PORTAGE_TMPDIR = make_conf['PORTAGE_TMPDIR']
520 if 'PORT_LOGDIR' in make_conf: PORT_LOGDIR = make_conf['PORT_LOGDIR']
521 if 'PORTDIR_OVERLAY' in make_conf: PORTDIR_OVERLAY = make_conf['PORTDIR_OVERLAY']
522 if self._debug: self._logger.log("DEBUG: making PKGDIR if necessary: "+PKGDIR)
523 GLIUtility.spawn("mkdir -p " + self._chroot_dir + PKGDIR, logfile=self._compile_logfile, append_log=True)
524 if self._debug: self._logger.log("DEBUG: making PORTAGE_TMPDIR if necessary: "+PORTAGE_TMPDIR)
525 GLIUtility.spawn("mkdir -p " + self._chroot_dir + PORTAGE_TMPDIR, logfile=self._compile_logfile, append_log=True)
526 if PORT_LOGDIR != None:
527 if self._debug: self._logger.log("DEBUG: making PORT_LOGDIR if necessary: "+PORT_LOGDIR)
528 GLIUtility.spawn("mkdir -p " + self._chroot_dir + PORT_LOGDIR, logfile=self._compile_logfile, append_log=True)
529 if PORTDIR_OVERLAY != None:
530 if self._debug: self._logger.log("DEBUG: making PORTDIR_OVERLAY if necessary "+PORTDIR_OVERLAY)
531 GLIUtility.spawn("mkdir -p " + self._chroot_dir + PORTDIR_OVERLAY, logfile=self._compile_logfile, append_log=True)
532
533 ##
534 # This will get/update the portage tree. If you want to snapshot or mount /usr/portage use "custom".
535 def install_portage_tree(self):
536 # Check the type of portage tree fetching we'll do
537 # If it is custom, follow the path to the custom tarball and unpack it
538
539 # This is a hack to copy the LiveCD's rsync into the chroot since it has the sigmask patch
540 if self._debug: self._logger.log("DEBUG: Doing the hack where we copy the LiveCD's rsync into the chroot since it has the sigmask patch")
541 GLIUtility.spawn("cp -a /usr/bin/rsync " + self._chroot_dir + "/usr/bin/rsync")
542 GLIUtility.spawn("cp -a /usr/lib/libpopt* " + self._chroot_dir + "/usr/lib")
543
544 sync_type = self._install_profile.get_portage_tree_sync_type()
545 if sync_type == "snapshot" or sync_type == "custom": # Until this is finalized
546
547 # Get portage tree info
548 portage_tree_snapshot_uri = self._install_profile.get_portage_tree_snapshot_uri()
549 if portage_tree_snapshot_uri:
550 # Fetch and unpack the tarball
551 if self._debug: self._logger.log("DEBUG: grabbing custom snapshot uri: "+portage_tree_snapshot_uri)
552 GLIUtility.fetch_and_unpack_tarball(portage_tree_snapshot_uri, self._chroot_dir + "/usr/", self._chroot_dir + "/", cc=self._cc)
553 if GLIUtility.is_file("/usr/livecd/metadata.tar.bz2"):
554 GLIUtility.fetch_and_unpack_tarball("/usr/livecd/metadata.tar.bz2", self._chroot_dir + "/", self._chroot_dir + "/", cc=self._cc)
555 self._logger.log("Portage tree install was custom.")
556 elif sync_type == "sync":
557 if self._debug: self._logger.log("DEBUG: starting emerge sync")
558 exitstatus = GLIUtility.spawn("emerge sync", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
559 if not GLIUtility.exitsuccess(exitstatus):
560 self._logger.log("ERROR! Could not sync the portage tree using emerge sync. Falling back to emerge-webrsync as a backup.")
561 sync_type = "webrsync"
562 else:
563 self._logger.log("Portage tree sync'd")
564 # If the type is webrsync, then run emerge-webrsync
565 elif sync_type == "webrsync":
566 if self._debug: self._logger.log("DEBUG: starting emerge webrsync")
567 exitstatus = GLIUtility.spawn("emerge-webrsync", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
568 if not GLIUtility.exitsuccess(exitstatus):
569 raise GLIException("EmergeWebRsyncError", 'fatal','install_portage_tree', "Failed to retrieve portage tree using webrsync!")
570 self._logger.log("Portage tree sync'd using webrsync")
571 # Otherwise, spit out a message because it's probably a bad thing.
572 else:
573 self._logger.log("NOTICE! No valid portage tree sync method was selected. This will most likely result in a failed installation unless the tree is mounted.")
574
575 ##
576 # Sets the timezone for the new environment
577 def set_timezone(self):
578
579 # Set symlink
580 if os.access(self._chroot_dir + "/etc/localtime", os.W_OK):
581 if self._debug: self._logger.log("DEBUG: /etc/localtime already exists, removing it so it can be symlinked")
582 GLIUtility.spawn("rm "+self._chroot_dir + "/etc/localtime")
583 if self._debug: self._logger.log("DEBUG: running ln -s ../usr/share/zoneinfo/" + self._install_profile.get_time_zone() + " /etc/localtime")
584 GLIUtility.spawn("ln -s ../usr/share/zoneinfo/" + self._install_profile.get_time_zone() + " /etc/localtime", chroot=self._chroot_dir)
585 if not (self._install_profile.get_time_zone() == "UTC"):
586 if self._debug: self._logger.log("DEBUG: timezone was not UTC, setting CLOCK to local. This may be overwritten later.")
587 self._edit_config(self._chroot_dir + "/etc/conf.d/clock", {"CLOCK":"local"})
588 self._logger.log("Timezone set.")
589
590 ##
591 # Configures /etc/fstab on the new environment
592 def configure_fstab(self):
593 newfstab = ""
594 parts = self._install_profile.get_partition_tables()
595 for device in parts:
596 tmp_partitions = parts[device] #.get_install_profile_structure()
597 for partition in tmp_partitions: #.get_ordered_partition_list():
598 mountpoint = tmp_partitions[partition]['mountpoint']
599 minor = str(int(tmp_partitions[partition]['minor']))
600 partition_type = tmp_partitions[partition]['type']
601 mountopts = tmp_partitions[partition]['mountopts']
602 if not mountopts.strip(): mountopts = "defaults"
603 if mountpoint:
604 if not GLIUtility.is_file(self._chroot_dir+mountpoint):
605 if self._debug: self._logger.log("DEBUG: making mountpoint: "+mountpoint)
606 exitstatus = GLIUtility.spawn("mkdir -p " + self._chroot_dir + mountpoint)
607 if not GLIUtility.exitsuccess(exitstatus):
608 raise GLIException("MkdirError", 'fatal','configure_fstab', "Making the mount point failed!")
609 newfstab += tmp_partitions[partition]['devnode']+"\t "+mountpoint+"\t "+partition_type+"\t "+mountopts+"\t\t "
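# The fifth and sixth fstab fields are the dump flag and fsck pass order: '1 2' for /boot, '0 1' for the root filesystem, and '0 0' (no check) for everything else.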
610 if mountpoint == "/boot":
611 newfstab += "1 2\n"
612 elif mountpoint == "/":
613 newfstab += "0 1\n"
614 else:
615 newfstab += "0 0\n"
616 if partition_type == "linux-swap":
617 newfstab += tmp_partitions[partition]['devnode']+"\t none swap sw 0 0\n"
618 newfstab += "none /proc proc defaults 0 0\n"
619 newfstab += "none /dev/shm tmpfs defaults 0 0\n"
620 if GLIUtility.is_device("/dev/cdroms/cdrom0"):
621 newfstab += "/dev/cdroms/cdrom0 /mnt/cdrom auto noauto,user 0 0\n"
622
623 for netmount in self._install_profile.get_network_mounts():
624 if netmount['type'] == "nfs":
625 newfstab += netmount['host'] + ":" + netmount['export'] + "\t" + netmount['mountpoint'] + "\tnfs\t" + netmount['mountopts'] + "\t0 0\n"
626
627 file_name = self._chroot_dir + "/etc/fstab"
628 try:
629 if self._debug: self._logger.log("DEBUG: backing up original fstab")
630 shutil.move(file_name, file_name + ".OLDdefault")
631 except:
632 self._logger.log("ERROR: could not backup original fstab.")
633 if self._debug: self._logger.log("DEBUG: Contents of new fstab: "+newfstab)
634 f = open(file_name, 'w')
635 f.writelines(newfstab)
636 f.close()
637 self._logger.log("fstab configured.")
638
639 ##
640 # Fetches the desired kernel sources, unless you're using livecd-kernel, in which case the LiveCD kernel is copied into the chroot instead.
641 def emerge_kernel_sources(self):
642 self._logger.log("Starting emerge_kernel")
643 kernel_pkg = self._install_profile.get_kernel_source_pkg()
644 # if kernel_pkg:
645 # Special case, no kernel installed
646 if kernel_pkg == "none":
647 return
648 # Special case, livecd kernel
649 elif kernel_pkg == "livecd-kernel":
650 if self._debug: self._logger.log("DEBUG: starting livecd-kernel setup")
651 self.notify_frontend("progress", (0, "Copying livecd-kernel to chroot"))
652 self._portage.copy_pkg_to_chroot(self._portage.get_best_version_vdb("livecd-kernel"))
653 self.notify_frontend("progress", (1, "Done copying livecd-kernel to chroot"))
654
655 exitstatus = self._portage.emerge("coldplug")
656 self._logger.log("Coldplug emerged. Now they should be added to the boot runlevel.")
657 self._add_to_runlevel("coldplug", runlevel="boot")
658
659 if self._install_profile.get_kernel_bootsplash():
660 self._logger.log("Bootsplash enabled for livecd-kernel...this is currently broken, so we're skipping the package install")
661 # self._logger.log("Bootsplash enabled...emerging necessary packages")
662 # self._portage.emerge(["splashutils", "splash-themes-livecd"])
663
664 # Extra modules from kernelpkgs.txt...disabled until I can figure out why it sucks
665 # try:
666 # kernpkgs = open("/usr/livecd/kernelpkgs.txt", "r")
667 # pkgs = ""
668 # for line in kernpkgs.readlines():
669 # pkgs += line.strip() + " "
670 # kernpkgs.close()
671 # except:
672 # raise GLIException("EmergeColdplugError", 'fatal','build_kernel', "Could not read kernelpkgs.txt")
673 # exitstatus = self._emerge(pkgs)
674 # if not GLIUtility.exitsuccess(exitstatus):
675 # raise GLIException("EmergeExtraKernelModulesError", 'fatal','build_kernel', "Could not emerge extra kernel packages")
676 # self._logger.log("Extra kernel packages emerged.")
677
678 # normal case
679 else:
680 exitstatus = self._portage.emerge(kernel_pkg)
681 # if not GLIUtility.exitsuccess(exitstatus):
682 # raise GLIException("EmergeKernelSourcesError", 'fatal','emerge_kernel_sources',"Could not retrieve kernel sources!")
683 try:
684 os.stat(self._chroot_dir + "/usr/src/linux")
685 except:
686 kernels = os.listdir(self._chroot_dir+"/usr/src")
687 if self._debug: self._logger.log("DEBUG: no /usr/src/linux found. found kernels: "+kernels)
688 found_a_kernel = False
689 counter = 0
690 while not found_a_kernel:
691 if (len(kernels[counter]) > 6) and (kernels[counter][0:6]=="linux-"):
692 if self._debug: self._logger.log("DEBUG: found one. linking it. running: ln -s /usr/src/"+kernels[counter]+ " /usr/src/linux in the chroot.")
693 exitstatus = GLIUtility.spawn("ln -s /usr/src/"+kernels[counter]+ " /usr/src/linux",chroot=self._chroot_dir)
694 if not GLIUtility.exitsuccess(exitstatus):
695 raise GLIException("EmergeKernelSourcesError", 'fatal','emerge_kernel_sources',"Could not make a /usr/src/linux symlink")
696 found_a_kernel = True
697 else:
698 counter = counter + 1
699 self._logger.log("Kernel sources:"+kernel_pkg+" emerged and /usr/src/linux symlinked.")
700
701 ##
702 # Builds the kernel using genkernel or regularly if given a custom .config file in the profile
703 def build_kernel(self):
704 self._logger.mark()
705 self._logger.log("Starting build_kernel")
706
707 build_mode = self._install_profile.get_kernel_build_method()
708
709 # No building necessary if using the LiveCD's kernel/initrd
710 # or using the 'none' kernel bypass
711 if self._install_profile.get_kernel_source_pkg() in ["livecd-kernel","none"]:
712 if self._debug: self._logger.log("DEBUG: using "+self._install_profile.get_kernel_source_pkg()+ " so skipping this function.")
713 return
714 # Get the uri to the kernel config
715 kernel_config_uri = self._install_profile.get_kernel_config_uri()
716
717 # is there an easier way to do this?
718 if self._debug: self._logger.log("DEBUG: running command: awk '/^PATCHLEVEL/{print $3}' /usr/src/linux/Makefile in chroot.")
719 ret, kernel_major = GLIUtility.spawn("awk '/^PATCHLEVEL/{print $3}' /usr/src/linux/Makefile",chroot=self._chroot_dir,return_output=True)
720 # 6 == 2.6 kernel, 4 == 2.4 kernel
721 kernel_major = int(kernel_major)
722 if self._debug: self._logger.log("DEBUG: kernel major version is: "+str(kernel_major))
723 #Copy the kernel .config to the proper location in /usr/src/linux
724 if kernel_config_uri != '':
725 try:
726 if self._debug: self._logger.log("DEBUG: grabbing kernel config from "+kernel_config_uri+" and putting it in "+self._chroot_dir + "/var/tmp/kernel_config")
727 GLIUtility.get_uri(kernel_config_uri, self._chroot_dir + "/var/tmp/kernel_config")
728 except:
729 raise GLIException("KernelBuildError", 'fatal', 'build_kernel', "Could not copy kernel config!")
730
731 # the && stuff is important so that we can catch any errors.
732 kernel_compile_script = "#!/bin/bash\n"
733 kernel_compile_script += "cp /var/tmp/kernel_config /usr/src/linux/.config && "
734 kernel_compile_script += "cd /usr/src/linux && "
735 # required for 2.[01234] etc kernels
736 if kernel_major in [0,1,2,3,4]:
737 kernel_compile_script += " yes 'n' | make oldconfig && make symlinks && make dep"
738 # not strictly needed, but recommended by upstream
739 else: #elif kernel_major in [5,6]:
740 kernel_compile_script += "make prepare"
741
742 # bypass to install a kernel, but not compile it
743 if build_mode == "none":
744 return
745 # this mode is used to install the kernel sources and have them configured,
746 # but not actually build the kernel. This is needed for netboot
747 # situations where you have packages that require kernel sources
748 # to build.
749 elif build_mode == "prepare-only":
750 if self._debug: self._logger.log("DEBUG: writing kernel script with contents: "+kernel_compile_script)
751 f = open(self._chroot_dir+"/var/tmp/kernel_script", 'w')
752 f.writelines(kernel_compile_script)
753 f.close()
754 #Build the kernel
755 if self._debug: self._logger.log("DEBUG: running: chmod u+x "+self._chroot_dir+"/var/tmp/kernel_script")
756 exitstatus1 = GLIUtility.spawn("chmod u+x "+self._chroot_dir+"/var/tmp/kernel_script")
757 if self._debug: self._logger.log("DEBUG: running: /var/tmp/kernel_script in chroot.")
758 exitstatus2 = GLIUtility.spawn("/var/tmp/kernel_script", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
759 if not GLIUtility.exitsuccess(exitstatus1):
760 raise GLIException("KernelBuildError", 'fatal', 'build_kernel', "Could not handle prepare-only build! died on chmod.")
761 if not GLIUtility.exitsuccess(exitstatus2):
762 raise GLIException("KernelBuildError", 'fatal', 'build_kernel', "Could not handle prepare-only build! died on running of kernel script.")
763 #i'm sure i'm forgetting something here.
764 #cleanup
765 exitstatus = GLIUtility.spawn("rm -f "+self._chroot_dir+"/var/tmp/kernel_script "+self._chroot_dir+"/var/tmp/kernel_config")
766 #it's not important if this fails.
767 self._logger.log("prepare-only build complete")
768 # Genkernel mode, including custom kernel_config. Initrd always on.
769 elif build_mode == "genkernel":
770 if self._debug: self._logger.log("DEBUG: build_kernel(): starting emerge genkernel")
771 exitstatus = self._portage.emerge("genkernel")
772 # if not GLIUtility.exitsuccess(exitstatus):
773 # raise GLIException("EmergeGenKernelError", 'fatal','build_kernel', "Could not emerge genkernel!")
774 self._logger.log("Genkernel emerged. Beginning kernel compile.")
775 # Null the genkernel_options
776 genkernel_options = ""
777
778 # If the uri for the kernel config is not null, then
779 if kernel_config_uri != "":
780 if self._debug: self._logger.log("DEBUG: build_kernel(): getting kernel config "+kernel_config_uri)
781 GLIUtility.get_uri(kernel_config_uri, self._chroot_dir + "/var/tmp/kernel_config")
782 genkernel_options = genkernel_options + " --kernel-config=/var/tmp/kernel_config"
783
784 # Decide whether to use bootsplash or not
785 if self._install_profile.get_kernel_bootsplash():
786 genkernel_options = genkernel_options + " --gensplash"
787 else:
788 genkernel_options = genkernel_options + " --no-gensplash"
789 # Run genkernel in chroot
790 #print "genkernel all " + genkernel_options
791 if self._debug: self._logger.log("DEBUG: build_kernel(): running: genkernel all " + genkernel_options + " in chroot.")
792 exitstatus = GLIUtility.spawn("genkernel all " + genkernel_options, chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
793 if not GLIUtility.exitsuccess(exitstatus):
794 raise GLIException("KernelBuildError", 'fatal', 'build_kernel', "Could not build kernel!")
795
796 # exitstatus = self._emerge("hotplug")
797 # if not GLIUtility.exitsuccess(exitstatus):
798 # raise GLIException("EmergeHotplugError", 'fatal','build_kernel', "Could not emerge hotplug!")
799 # self._logger.log("Hotplug emerged.")
800 exitstatus = self._portage.emerge("coldplug")
801 # if not GLIUtility.exitsuccess(exitstatus):
802 # raise GLIException("EmergeColdplugError", 'fatal','build_kernel', "Could not emerge coldplug!")
803 self._logger.log("Coldplug emerged. Now they should be added to the default runlevel.")
804
805 # self._add_to_runlevel("hotplug")
806 self._add_to_runlevel("coldplug", runlevel="boot")
807
808 if self._install_profile.get_kernel_bootsplash():
809 self._logger.log("Bootsplash enabled...emerging necessary packages")
810 self._portage.emerge(["splashutils", "splash-themes-livecd"])
811
812 self._logger.log("Genkernel complete.")
813 elif build_mode == "custom": #CUSTOM CONFIG
814
815 kernel_compile_script += " && make && make modules && make modules_install"
816
817 #Ok now that it's built, copy it to /boot/kernel-* for bootloader code to find it
818 if self._client_configuration.get_architecture_template() == "x86":
819 kernel_compile_script += " && cp /usr/src/linux/arch/i386/boot/bzImage /boot/kernel-custom\n"
820 elif self._client_configuration.get_architecture_template() == "amd64":
821 kernel_compile_script += " && cp /usr/src/linux/arch/x86_64/boot/bzImage /boot/kernel-custom\n"
822 elif self._client_configuration.get_architecture_template() == "ppc":
823 kernel_compile_script += " && cp /usr/src/linux/vmlinux /boot/kernel-custom\n"
824 if self._debug: self._logger.log("DEBUG: build_kernel(): writing custom kernel script: "+kernel_compile_script)
825 f = open(self._chroot_dir+"/var/tmp/kernel_script", 'w')
826 f.writelines(kernel_compile_script)
827 f.close()
828 #Build the kernel
829 if self._debug: self._logger.log("DEBUG: build_kernel(): running: chmod u+x "+self._chroot_dir+"/var/tmp/kernel_script")
830 exitstatus1 = GLIUtility.spawn("chmod u+x "+self._chroot_dir+"/var/tmp/kernel_script")
831 if self._debug: self._logger.log("DEBUG: build_kernel(): running: /var/tmp/kernel_script in chroot")
832 exitstatus2 = GLIUtility.spawn("/var/tmp/kernel_script", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
833 if not GLIUtility.exitsuccess(exitstatus1):
834 raise GLIException("KernelBuildError", 'fatal', 'build_kernel', "Could not build custom kernel! died on chmod.")
835 if not GLIUtility.exitsuccess(exitstatus2):
836 raise GLIException("KernelBuildError", 'fatal', 'build_kernel', "Could not build custom kernel! died on running of kernel script.")
837
838 #i'm sure i'm forgetting something here.
839 #cleanup
840 exitstatus = GLIUtility.spawn("rm -f "+self._chroot_dir+"/var/tmp/kernel_script "+self._chroot_dir+"/var/tmp/kernel_config")
841 #it's not important if this fails.
842
843 if self._install_profile.get_kernel_bootsplash():
844 self._logger.log("Bootsplash enabled...emerging necessary packages")
845 self._portage.emerge(["splashutils", "splash-themes-livecd"])
846
847 self._logger.log("Custom kernel complete")
848
849 ##
850 # Installs and starts up distccd if the user has it set, so that it will get used for the rest of the install
851 def install_distcc(self):
852 if self._install_profile.get_install_distcc():
853 if self._debug: self._logger.log("DEBUG: install_distcc(): we ARE installing distcc")
854 if self._debug: self._logger.log("DEBUG: install_distcc(): running: USE='-*' emerge --nodeps sys-devel/distcc in chroot.")
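# USE='-*' turns off all USE flags and --nodeps skips dependencies, so distcc itself installs as quickly as possible and can then be used for the rest of the install.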
855 exitstatus = GLIUtility.spawn("USE='-*' emerge --nodeps sys-devel/distcc", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
856 if not GLIUtility.exitsuccess(exitstatus):
857 self._logger.log("ERROR! : Could not emerge distcc!")
858 else:
859 self._logger.log("distcc emerged.")
860
861 ##
862 # Installs the mail MTA. It is not added to a runlevel, as this is not simple with MTAs.
863 def install_mta(self):
864 # Get MTA info
865 mta_pkg = self._install_profile.get_mta_pkg()
866 if mta_pkg:
867 # Emerge MTA
868 if self._debug: self._logger.log("DEBUG: install_mta(): installing mta: "+mta_pkg)
869 exitstatus = self._portage.emerge(mta_pkg)
870 # if not GLIUtility.exitsuccess(exitstatus):
871 # raise GLIException("MTAError", 'fatal','install_mta', "Could not emerge " + mta_pkg + "!")
872 self._logger.log("MTA installed: "+mta_pkg)
873 else:
874 installpackages = self._install_profile.get_install_packages()
875 if installpackages:
876 for pkg in installpackages:
877 if pkg in ['esmtp', 'exim', 'msmtp', 'nbsmtp', 'nullmailer', 'sendmail', 'ssmtp', 'xmail']:
878 self._logger.log("Found an mta in the package list: "+pkg+". Installing early.")
879 exitstatus = self._portage.emerge(pkg)
880 self._logger.log("MTA installed.")
881 break # We only want to install one
882
883 ##
884 # Installs and sets up the logging daemon on the new system and adds it to the default runlevel.
885 def install_logging_daemon(self):
886 # Get logging daemon info
887 logging_daemon_pkg = self._install_profile.get_logging_daemon_pkg()
888 if logging_daemon_pkg:
889 # Emerge Logging Daemon
890 if self._debug: self._logger.log("DEBUG: install_logging_daemon: emerging "+logging_daemon_pkg)
891 exitstatus = self._portage.emerge(logging_daemon_pkg)
892 # if not GLIUtility.exitsuccess(exitstatus):
893 # raise GLIException("LoggingDaemonError", 'fatal','install_logging_daemon', "Could not emerge " + logging_daemon_pkg + "!")
894
895 # Add Logging Daemon to default runlevel
896 # After we find the name of its init script
897 # This current code is a hack, and should be better.
898 initscript = logging_daemon_pkg[(logging_daemon_pkg.find('/')+1):]
899 if self._debug: self._logger.log("DEBUG: install_logging_daemon: adding "+initscript+" to runlevel")
900 self._add_to_runlevel(initscript)
901 self._logger.log("Logging daemon installed: "+logging_daemon_pkg)
902 ##
903 # Installs and sets up cron package.
904 def install_cron_daemon(self):
905 # Get cron daemon info
906 cron_daemon_pkg = self._install_profile.get_cron_daemon_pkg()
907 if cron_daemon_pkg:
908 if cron_daemon_pkg == "none":
909 self._logger.log("Skipping installation of cron daemon")
910 else:
911 # Emerge Cron Daemon
912 if self._debug: self._logger.log("DEBUG: install_cron_daemon: emerging "+cron_daemon_pkg)
913 exitstatus = self._portage.emerge(cron_daemon_pkg)
914 # if not GLIUtility.exitsuccess(exitstatus):
915 # raise GLIException("CronDaemonError", 'fatal', 'install_cron_daemon', "Could not emerge " + cron_daemon_pkg + "!")
916
917 # Add Cron Daemon to default runlevel
918 # After we find the name of its init script
919 # This current code is a hack, and should be better.
920 initscript = cron_daemon_pkg[(cron_daemon_pkg.find('/')+1):]
921 if self._debug: self._logger.log("DEBUG: install_cron_daemon: adding "+initscript+" to runlevel")
922 self._add_to_runlevel(initscript)
923
924 # If the Cron Daemon is not vixie-cron, run crontab
925 if "vixie-cron" not in cron_daemon_pkg:
926 if self._debug: self._logger.log("DEBUG: install_cron_daemon: running: crontab /etc/crontab in chroot.")
927 exitstatus = GLIUtility.spawn("crontab /etc/crontab", chroot=self._chroot_dir, display_on_tty8=True)
928 if not GLIUtility.exitsuccess(exitstatus):
929 raise GLIException("CronDaemonError", 'fatal', 'install_cron_daemon', "Failure making crontab!")
930 self._logger.log("Cron daemon installed and configured: "+cron_daemon_pkg)
931
932 ##
933 # This will parse the partitions looking for types that require fstools and emerge them if found.
934 def install_filesystem_tools(self):
935 "Installs and sets up fstools"
936 # Get the list of file system tools to be installed
937 parts = self._install_profile.get_partition_tables()
938 # don't use an array, use a set instead
939 filesystem_types = []
940 for device in parts:
941 tmp_partitions = parts[device] #.get_install_profile_structure()
942 for partition in tmp_partitions: #.get_ordered_partition_list():
943 partition_type = tmp_partitions[partition]['type'].lower()
944 if tmp_partitions[partition]['mountpoint'] and partition_type not in filesystem_types:
945 filesystem_types.append(partition_type)
946
947 package_list = []
948 for filesystem in filesystem_types:
949 if filesystem == 'xfs':
950 package_list.append('sys-fs/xfsprogs')
951 elif filesystem == 'reiserfs':
952 package_list.append('sys-fs/reiserfsprogs')
953 elif filesystem == 'jfs':
954 package_list.append('sys-fs/jfsutils')
955 elif filesystem == 'ntfs':
956 package_list.append('sys-fs/ntfsprogs')
957 elif filesystem in ['fat','vfat', 'msdos', 'umsdos']:
958 package_list.append('sys-fs/dosfstools')
959 elif filesystem == 'hfs':
960 # should check with the PPC guys on this
961 package_list.append('sys-fs/hfsutils')
962 package_list.append('sys-fs/hfsplusutils')
963 #else:
964 # should be code here for every FS type!
965 failed_list = []
966 for package in package_list:
967 if self._debug: self._logger.log("DEBUG: install_filesystem_tools(): emerging "+package)
968 exitstatus = self._portage.emerge(package)
969 # if not GLIUtility.exitsuccess(exitstatus):
970 # self._logger.log("ERROR! : Could not emerge "+package+"!")
971 # failed_list.append(package)
972 # else:
973 self._logger.log("FileSystemTool "+package+" was emerged successfully.")
974 # error checking is important!
975 if len(failed_list) > 0:
976 raise GLIException("InstallFileSystemToolsError", 'warning', 'install_filesystem_tools', "Could not emerge " + failed_list + "!")
977
978 ##
979 # Installs rp-pppoe but does not configure it. This function is quite the unknown.
980 def install_rp_pppoe(self):
981 # If user wants us to install rp-pppoe, then do so
982 if self._install_profile.get_install_rp_pppoe():
983 if self._debug: self._logger.log("DEBUG: install_rp_pppoe: emerging rp-pppoe")
984 exitstatus = self._portage.emerge("rp-pppoe")
985 # if not GLIUtility.exitsuccess(exitstatus):
986 # self._logger.log("ERROR! : Could not emerge rp-pppoe!")
987 # raise GLIException("RP_PPPOEError", 'warning', 'install_rp_pppoe', "Could not emerge rp-pppoe!")
988 # else:
989 self._logger.log("rp-pppoe emerged but not set up.")
990 # Should we add a section here to automatically configure rp-pppoe?
991 # I think it should go into the setup_network_post section
992 # What do you guys think? <-- said by unknown. samyron or npmcallum
993
994 ##
995 # Installs and sets up pcmcia-cs if selected in the profile
996 def install_pcmcia_cs(self):
997 if self._debug: self._logger.log("DEBUG: install_pcmcia_cs(): emerging pcmcia-cs")
998 exitstatus = self._portage.emerge("pcmcia-cs")
999 # if not GLIUtility.exitsuccess(exitstatus):
1000 # self._logger.log("ERROR! : Could not emerge pcmcia-cs!")
1001
1002 # Add pcmcia-cs to the default runlevel
1003 # else:
1004 self._add_to_runlevel('pcmcia')
1005 self._logger.log("PCMCIA_CS emerged and configured.")
1006
1007 ##
1008 # This runs etc-update and then re-overwrites the files by running the configure_*'s to keep our values.
1009 def update_config_files(self):
1010 "Runs etc-update (overwriting all config files), then re-configures the modified ones"
1011 # Run etc-update overwriting all config files
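# The '-5' answer makes etc-update automatically merge all pending updates (overwriting the config files, per the docstring above); configure_fstab and the etc_files edits below then restore our own settings.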
1012 if self._debug: self._logger.log("DEBUG: update_config_files(): running: "+'echo "-5" | chroot '+self._chroot_dir+' etc-update')
1013 status = GLIUtility.spawn('echo "-5" | chroot '+self._chroot_dir+' etc-update', display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
1014 if not GLIUtility.exitsuccess(status):
1015 self._logger.log("ERROR! : Could not update the config files!")
1016 else:
1017 # self.configure_make_conf()
1018 self.configure_fstab()
1019 # self.configure_rc_conf()
1020 etc_files = self._install_profile.get_etc_files()
1021 for etc_file in etc_files:
1022 # Skip entries with blank filenames
1023 if not etc_file: continue
1024 if self._debug: self._logger.log("DEBUG: update_config_files(): updating config file: "+etc_file)
1025 if isinstance(etc_files[etc_file], dict):
1026 self._edit_config(self._chroot_dir + "/etc/" + etc_file, etc_files[etc_file])
1027 else:
1028 for entry in etc_files[etc_file]:
1029 # Skip blank entries
1030 if not entry: continue
1031 self._edit_config(self._chroot_dir + "/etc/" + etc_file, { "0": entry }, only_value=True)
1032 self._logger.log("Config files updated using etc-update. make.conf/fstab/rc.conf restored.")
1033
1034 ##
1035 # Configures /etc/rc.conf (deprecated by above code)
1036 def configure_rc_conf(self):
1037
1038 # Get make.conf options
1039 options = self._install_profile.get_rc_conf()
1040
1041 # For each configuration option...
1042 filename = self._chroot_dir + "/etc/rc.conf"
1043 # self._edit_config(filename, {"COMMENT": "GLI additions ===>"})
1044 for key in options.keys():
1045 # Add/Edit it into rc.conf
1046 self._edit_config(filename, {key: options[key]})
1047 # self._edit_config(filename, {"COMMENT": "<=== End GLI additions"})
1048 self._logger.log("rc.conf configured.")
1049
1050 ##
1051 # Sets up the network for the first boot
1052 def setup_network_post(self):
1053 if self._debug: self._logger.log("DEBUG: setup_network_post(): starting network configuration")
1054 # Get hostname, domainname and nisdomainname
1055 hostname = self._install_profile.get_hostname()
1056 domainname = self._install_profile.get_domainname()
1057 nisdomainname = self._install_profile.get_nisdomainname()
1058
1059 # Write the hostname to the hostname file
1060 #open(self._chroot_dir + "/etc/hostname", "w").write(hostname + "\n")
1061 self._edit_config(self._chroot_dir + "/etc/conf.d/hostname", {"HOSTNAME": hostname})
1062
1063 # Write the domainname to the nisdomainname file
1064 if domainname:
1065 #open(self._chroot_dir + "/etc/dnsdomainname", "w").write(domainname + "\n")
1066 self._edit_config(self._chroot_dir + "/etc/conf.d/domainname", {"DNSDOMAIN": domainname})
1067 self._add_to_runlevel("domainname")
1068
1069 # Write the nisdomainname to the nisdomainname file
1070 if nisdomainname:
1071 #open(self._chroot_dir + "/etc/nisdomainname", "w").write(nisdomainname + "\n")
1072 self._edit_config(self._chroot_dir + "/etc/conf.d/domainname", {"NISDOMAIN": nisdomainname})
1073 self._add_to_runlevel("domainname")
1074
1075 #
1076 # EDIT THE /ETC/HOSTS FILE
1077 #
1078
1079 # The address we are editing is 127.0.0.1
1080 hosts_ip = "127.0.0.1"
1081
1082 # If the hostname is localhost
1083 if hostname == "localhost":
1084 # If a domainname is set
1085 if domainname:
1086 hosts_line = hostname + "." + domainname + "\t" + hostname
1087 else:
1088 hosts_line = hostname
1089 # If the hostname is not localhost
1090 else:
1091 # If a domainname is set
1092 if domainname:
1093 hosts_line = hostname + "." + domainname + "\t" + hostname + "\tlocalhost"
1094 else:
1095 hosts_line = "localhost\t" + hostname
1096
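# Illustrative example (hypothetical names): for hostname "gentoo" and domainname
# "example.com" the tab-separated entry written below would be roughly:
#   127.0.0.1   gentoo.example.com   gentoo   localhost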
1097 # Write to file
1098 self._edit_config(self._chroot_dir + "/etc/hosts", {hosts_ip: hosts_line}, delimeter='\t', quotes_around_value=False)
1099
1100 #
1101 # SET DEFAULT GATEWAY
1102 #
1103
1104 # Get default gateway
1105 default_gateway = self._install_profile.get_default_gateway()
1106
1107 # If the default gateway exists, add it
1108 if default_gateway:
1109 default_gateway_string = '( "default via ' + default_gateway[1] + '" )'
1110 if self._debug: self._logger.log("DEBUG: setup_network_post(): found gateway, adding to config: "+default_gateway_string)
1111 self._edit_config(self._chroot_dir + "/etc/conf.d/net", {"routes_"+default_gateway[0]: default_gateway_string}, quotes_around_value=False)
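# Illustrative example (hypothetical values): for default_gateway ("eth0", "192.168.1.1")
# the line written to /etc/conf.d/net above is roughly:
#   routes_eth0=( "default via 192.168.1.1" )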
1112
1113 #
1114 # SET RESOLV INFO
1115 #
1116
1117 # Get dns servers
1118 dns_servers = self._install_profile.get_dns_servers()
1119
1120 # Clear the list
1121 resolv_output = []
1122
1123 # If dns servers are set
1124 if dns_servers:
1125
1126
1127 # Parse each dns server
1128 for dns_server in dns_servers:
1129 # Add the server to the output
1130 resolv_output.append("nameserver " + dns_server +"\n")
1131
1132 # If the domainname is set, then also output it
1133 if domainname:
1134 resolv_output.append("search " + domainname + "\n")
1135
1136 # Output to file
1137 if self._debug: self._logger.log("DEBUG: setup_network_post(): writing resolv.conf with contents: " + str(resolv_output))
1138 resolve_conf = open(self._chroot_dir + "/etc/resolv.conf", "w")
1139 resolve_conf.writelines(resolv_output)
1140 resolve_conf.close()
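# Illustrative example (hypothetical values): for dns_servers ["10.0.0.1", "10.0.0.2"] and
# domainname "example.com", the resulting /etc/resolv.conf contains:
#   nameserver 10.0.0.1
#   nameserver 10.0.0.2
#   search example.com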
1141
1142 #
1143 # PARSE INTERFACES
1144 #
1145
1146 # Fetch interfaces
1147 interfaces = self._install_profile.get_network_interfaces()
1148 emerge_dhcp = False
1149 # Parse each interface
1150 for interface in interfaces.keys():
1151 if self._debug: self._logger.log("DEBUG: setup_network_post(): configuring interface: "+ interface)
1152 # Set what kind of interface it is
1153 interface_type = interface[:3]
1154
1155 # Check to see if there is a startup script for this interface, if there isn't link to the proper script
1156 try:
1157 os.stat(self._chroot_dir + "/etc/init.d/net." + interface)
1158 except:
1159 if self._debug: self._logger.log("DEBUG: setup_network_post(): /etc/init.d/net." + interface + " didn't exist, symlinking it.")
1160 os.symlink("net." + interface_type + "0", self._chroot_dir + "/etc/init.d/net." + interface)
1161
1162 # If we are going to load the network at boot...
1163 #if interfaces[interface][2]: #THIS FEATURE NO LONGER EXISTS
1164
1165 # Add it to the default runlevel
1166 if self._debug: self._logger.log("DEBUG: setup_network_post(): adding net."+interface+" to runlevel.")
1167 self._add_to_runlevel("net."+interface) # moved a bit <-- for indentation
1168
1169 #
1170 # ETHERNET
1171 #
1172 if interface_type == "eth":
1173
1174 #
1175 # STATIC IP
1176 #
1177 # If the post-install device info is not "dhcp", the interface has a static IP address
1178 if interfaces[interface][0] != "dhcp":
1179 ip = interfaces[interface][0]
1180 broadcast = interfaces[interface][1]
1181 netmask = interfaces[interface][2]
1182 # aliases = interfaces[interface][1][3]
1183 # alias_ips = []
1184 # alias_broadcasts = []
1185 # alias_netmasks = []
1186
1187 # Write the static ip config to /etc/conf.d/net
1188 self._edit_config(self._chroot_dir + "/etc/conf.d/net", {"iface_" + interface: ip + " broadcast " + broadcast + " netmask " + netmask})
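# Illustrative example (hypothetical values): for eth0 with 192.168.1.10/255.255.255.0 and
# broadcast 192.168.1.255, the entry written above is roughly:
#   iface_eth0="192.168.1.10 broadcast 192.168.1.255 netmask 255.255.255.0"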
1189
1190 # If aliases are set
1191 # if aliases:
1192
1193 # Parse aliases to format alias info
1194 # for alias in aliases:
1195 # alias_ips.append(alias[0])
1196 # alias_broadcasts.append(alias[1])
1197 # alias_netmasks.append(allias[2])
1198
1199 # Once the alias info has been gathered, then write it out
1200 # Alias ips first
1201 # self._edit_config(self._chroot_dir + "/etc/conf.d/net", "alias_" + interface, string.join(alias_ips))
1202 # Alias broadcasts next
1203 # self._edit_config(self._chroot_dir + "/etc/conf.d/net", "broadcast_" + interface, string.join(alias_broadcasts))
1204 # Alias netmasks last
1205 # self._edit_config(self._chroot_dir + "/etc/conf.d/net", "netmask_" + interface, string.join(alias_netmasks))
1206
1207 #
1208 # DHCP IP
1209 #
1210 else:
1211 dhcpcd_options = interfaces[interface][1]
1212 if not dhcpcd_options:
1213 dhcpcd_options = ""
1214 self._edit_config(self._chroot_dir + "/etc/conf.d/net", {"iface_" + interface: "dhcp", "dhcpcd_" + interface: dhcpcd_options})
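# Illustrative example: a DHCP interface ends up in /etc/conf.d/net as roughly:
#   iface_eth0="dhcp"
#   dhcpcd_eth0="<dhcpcd options from the profile, if any>"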
1215 emerge_dhcp = True
1216 if emerge_dhcp:
1217 if self._debug: self._logger.log("DEBUG: setup_network_post(): emerging dhcpcd.")
1218 exitstatus = self._portage.emerge("dhcpcd")
1219 # if not GLIUtility.exitsuccess(exitstatus):
1220 # self._logger.log("ERROR! : Could not emerge dhcpcd!")
1221 # else:
1222 self._logger.log("dhcpcd emerged.")
1223
1224 ##
1225 # Sets the root password
1226 def set_root_password(self):
1227 if self._debug: self._logger.log("DEBUG: set_root_password(): running: "+ 'echo \'root:' + self._install_profile.get_root_pass_hash() + '\' | chroot '+self._chroot_dir+' chpasswd -e')
1228 status = GLIUtility.spawn('echo \'root:' + self._install_profile.get_root_pass_hash() + '\' | chroot '+self._chroot_dir+' chpasswd -e')
1229 if not GLIUtility.exitsuccess(status):
1230 raise GLIException("SetRootPasswordError", 'fatal', 'set_root_password', "Failure to set root password!")
1231 self._logger.log("Root Password set on the new system.")
1232
1233 ##
1234 # Sets up the new users for the system
1235 def set_users(self):
1236 # Loop for each user
1237 for user in self._install_profile.get_users():
1238
1239 # Get values from the tuple
1240 username = user[0]
1241 password_hash = user[1]
1242 groups = user[2]
1243 shell = user[3]
1244 home_dir = user[4]
1245 uid = user[5]
1246 comment = user[6]
1247
1248 options = [ "-m", "-p '" + password_hash + "'" ]
1249
1250 # If the groups are specified
1251 if groups:
1252
1253 # If just one group is listed as a string, make it a list
1254 if isinstance(groups, str):
1255 groups = [ groups ]
1256
1257 # If only 1 group is listed
1258 if len(groups) == 1:
1259 options.append("-G " + groups[0])
1260
1261 # If there is more than one group
1262 elif len(groups) > 1:
1263 options.append('-G "' + string.join(groups, ",") + '"')
1264
1265 # Attempt to add the group (will return success when group exists)
1266 for group in groups:
1267 if not group: continue
1268 # Add the user
1269 if self._debug: self._logger.log("DEBUG: set_users(): adding user to groups with (in chroot): "+'groupadd -f ' + group)
1270 exitstatus = GLIUtility.spawn('groupadd -f ' + group, chroot=self._chroot_dir, logfile=self._compile_logfile, append_log=True, display_on_tty8=True)
1271 if not GLIUtility.exitsuccess(exitstatus):
1272 self._logger.log("ERROR! : Failure to add group " + group+" and it wasn't that the group already exists!")
1273
1274
1275 # If a shell is specified
1276 if shell:
1277 options.append("-s " + shell)
1278
1279 # If a home dir is specified
1280 if home_dir:
1281 options.append("-d " + home_dir)
1282
1283 # If a UID is specified
1284 if uid:
1285 options.append("-u " + str(uid))
1286
1287 # If a comment is specified
1288 if comment:
1289 options.append('-c "' + comment + '"')
1290
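# Illustrative example (hypothetical user): the command assembled below and run in the
# chroot looks roughly like:
#   useradd -m -p '<hash>' -G "wheel,users" -s /bin/bash -d /home/alice -u 1000 -c "Alice" alice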
1291 # Add the user
1292 if self._debug: self._logger.log("DEBUG: set_users(): adding user with (in chroot): "+'useradd ' + string.join(options) + ' ' + username)
1293 exitstatus = GLIUtility.spawn('useradd ' + string.join(options) + ' ' + username, chroot=self._chroot_dir, logfile=self._compile_logfile, append_log=True, display_on_tty8=True)
1294 if not GLIUtility.exitsuccess(exitstatus):
1295 self._logger.log("ERROR! : Failure to add user " + username)
1296 # raise GLIException("AddUserError", 'warning', 'set_users', "Failure to add user " + username)
1297 else:
1298 self._logger.log("User " + username + " was added.")
1299
1300 ##
1301 # This function will handle the various cleanup tasks as well as unmounting the filesystems for reboot.
1302 def finishing_cleanup(self):
1303 #These are temporary until I come up with a nicer idea.
1304 #get rid of the compile_output file so the symlink doesn't get screwed up.
1305
1306 #we copy the log over to the new system.
1307 install_logfile = self._client_configuration.get_log_file()
1308 try:
1309 if self._debug: self._logger.log("DEBUG: finishing_cleanup(): copying logfile over to new system's root.")
1310 shutil.copy(install_logfile, self._chroot_dir + install_logfile)
1311 except:
1312 if self._debug: self._logger.log("DEBUG: finishing_cleanup(): ERROR! could not copy logfile over to the new system.")
1313 #Now we're done logging as far as the new system is concerned.
1314 GLIUtility.spawn("cp /tmp/installprofile.xml " + self._chroot_dir + "/root/installprofile.xml")
1315 GLIUtility.spawn("cp /tmp/clientconfiguration.xml " + self._chroot_dir + "/root/clientconfiguration.xml")
1316
1317 #Unmount mounted filesystems in preparation for reboot
1318 #mounts = GLIUtility.spawn(r"mount | sed -e 's:^.\+ on \(.\+\) type .\+$:\1:' | grep -e '^" + self._chroot_dir + "' | sort -r", return_output=True)[1].split("\n")
1319 mounted_devices = self._mounted_devices
1320 mounted_devices.sort()
1321 mounted_devices.reverse()
1322 for mount in mounted_devices:
1323 if self._debug: self._logger.log("DEBUG: finishing_cleanup(): running: umount -l " + self._chroot_dir + mount)
1324 ret = GLIUtility.spawn("umount -l " + self._chroot_dir + mount)
1325 if not GLIUtility.exitsuccess(ret):
1326 self._logger.log("ERROR! : Could not unmount mountpoint %s" % mount)
1327
1328 # now turn off all swap as well.
1329 # we need to find the swap devices
1330 for swap_device in self._swap_devices:
1331 if self._debug: self._logger.log("DEBUG: finishing_cleanup(): running: swapoff "+swap_device)
1332 ret = GLIUtility.spawn("swapoff "+swap_device)
1333 if not GLIUtility.exitsuccess(ret):
1334 self._logger.log("ERROR! : Could not deactivate swap ("+swap_device+")!")
1335
1336 #OLD WAY: Unmount the /proc and /dev that we mounted in prepare_chroot
1337 #There really isn't a reason to log errors here.
1338 #ret = GLIUtility.spawn("umount "+self._chroot_dir+"/proc", display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
1339 #ret = GLIUtility.spawn("umount "+self._chroot_dir+"/dev", display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
1340 #temp hack to unmount the new root.
1341 #ret = GLIUtility.spawn("umount "+self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
1342 #insert code here to unmount the swap partition, if there is one.
1343
1344 GLIUtility.spawn("rm /tmp/compile_output.log && rm " + install_logfile)
1345
1346 ##
1347 # This is a stub to be implemented by the individual arch templates. It probably
1348 # isn't needed here, but it's nice to have just in case.
1349 def install_bootloader(self):
1350 "THIS FUNCTION MUST BE DONE BY THE INDIVIDUAL ARCH"
1351 pass
1352
1353 def run_post_install_script(self):
1354 if self._install_profile.get_post_install_script_uri():
1355 try:
1356 if self._debug: self._logger.log("DEBUG: run_post_install_script(): getting script: "+self._install_profile.get_post_install_script_uri())
1357 GLIUtility.get_uri(self._install_profile.get_post_install_script_uri(), self._chroot_dir + "/var/tmp/post-install")
1358 if self._debug: self._logger.log("DEBUG: run_post_install_script(): running: chmod a+x /var/tmp/post-install && /var/tmp/post-install in chroot")
1359 GLIUtility.spawn("chmod a+x /var/tmp/post-install && /var/tmp/post-install", chroot=self._chroot_dir, display_on_tty8=True, logfile=self._compile_logfile, append_log=True)
1360 except:
1361 raise GLIException("RunPostInstallScriptError", 'fatal', 'run_post_install_script', "Failed to retrieve and/or execute post-install script")
1362
1363 ##
1364 # This function should only be called in the event of an install failure. It performs
1365 # general cleanup to prepare the system for another installer run.
1366 def install_failed_cleanup(self):
1367 if self._debug: self._logger.log("DEBUG: install_failed_cleanup(): gathering mounts to unmount")
1368 #mounts = GLIUtility.spawn(r"mount | sed -e 's:^.\+ on \(.\+\) type .\+$:\1:' | grep -e '^" + self._chroot_dir + "' | sort -r", return_output=True)[1].split("\n")
1369 mounted_devices = self._mounted_devices
1370 mounted_devices.sort()
1371 mounted_devices.reverse()
1372 for mount in mounted_devices:
1373 if self._debug: self._logger.log("DEBUG: install_failed_cleanup(): running: umount -l " + self._chroot_dir + mount)
1374 ret = GLIUtility.spawn("umount -l " + self._chroot_dir + mount)
1375 if not GLIUtility.exitsuccess(ret):
1376 self._logger.log("ERROR! : Could not unmount mountpoint %s" % mount)
1377
1378 # now turn off all swap as well.
1379 # we need to find the swap devices
1380 for swap_device in self._swap_devices:
1381 if self._debug: self._logger.log("DEBUG: install_failed_cleanup(): running: swapoff "+swap_device)
1382 ret = GLIUtility.spawn("swapoff "+swap_device)
1383 if not GLIUtility.exitsuccess(ret):
1384 self._logger.log("ERROR! : Could not deactivate swap ("+swap_device+")!")
1385
1386 if self._debug: self._logger.log("DEBUG: install_failed_cleanup(): renaming " + self._compile_logfile + " to " + self._compile_logfile + ".failed")
1387 GLIUtility.spawn("mv " + self._compile_logfile + " " + self._compile_logfile + ".failed")
1388 # GLIUtility.spawn("rm /tmp/compile_output.log")
1389 GLIUtility.spawn("mv " + self._client_configuration.get_log_file() + " " + self._client_configuration.get_log_file() + ".failed")
1390 # GLIUtility.spawn("rm /var/log/installer.log")
1391
1392 def _sectors_to_megabytes(self, sectors, sector_bytes=512):
1393 return (float(sectors) * sector_bytes) / MEGABYTE
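# Illustrative example: 1048576 sectors * 512 bytes/sector = 536870912 bytes = 512.0 MB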
1394
1395 def _add_partition(self, disk, start, end, type, fs, name="", strict_start=False, strict_end=False):
1396 if self._debug: self._logger.log("_add_partition(): type=%s, fstype=%s" % (type, fs))
1397 types = { 'primary': parted.PARTITION_PRIMARY, 'extended': parted.PARTITION_EXTENDED, 'logical': parted.PARTITION_LOGICAL }
1398 fsTypes = {}
1399 fs_type = parted.file_system_type_get_next ()
1400 while fs_type:
1401 fsTypes[fs_type.name] = fs_type
1402 fs_type = parted.file_system_type_get_next (fs_type)
1403 fstype = None
1404 if fs == "apple_bootstrap":
1405 fs = "hfs"
1406 if fs: fstype = fsTypes[fs]
1407 newpart = disk.partition_new(types[type], fstype, start, end)
1408 constraint = disk.dev.constraint_any()
1409 if strict_start:
1410 constraint.start_range.set_start(start)
1411 constraint.start_range.set_end(start)
1412 constraint.end_range.set_start(end)
1413 if strict_end:
1414 constraint.start_range.set_start(start)
1415 constraint.end_range.set_start(end)
1416 constraint.end_range.set_end(end)
1417 disk.add_partition(newpart, constraint)
1418 if self._debug: self._logger.log("_add_partition(): partition added")
1419
1420 def _delete_partition(self, parted_disk, minor):
1421 try:
1422 parted_disk.delete_partition(parted_disk.get_partition(minor))
1423 except:
1424 self._logger.log("_delete_partition(): could not delete partition...ignoring (for now)")
1425
1426 def _check_table_changed(self, oldparts, newparts):
1427 for part in newparts:
1428 oldpart = oldparts[part]
1429 newpart = newparts[part]
1430 if not newparts[part]['origminor'] or not oldparts.get_partition(part):
1431 return True
1432 if oldpart['type'] == newpart['type'] and long(oldpart['mb']) == long(newpart['mb']) and not newpart['resized'] and not newpart['format']:
1433 continue
1434 else:
1435 return True
1436 return False
1437
1438 def _check_table_layout_changed(self, oldparts, newparts):
1439 # This function is similar to the above function except it will see it as un-changed even if a partition is just being reformatted
1440 for part in newparts:
1441 oldpart = oldparts[part]
1442 newpart = newparts[part]
1443 if not newparts[part]['origminor'] or not oldparts.get_partition(part):
1444 return True
1445 if oldpart['type'] == newpart['type'] and long(oldpart['mb']) == long(newpart['mb']) and not newpart['resized']:
1446 continue
1447 else:
1448 return True
1449 return False
1450
1451 def _find_existing_in_new(self, oldminor, newparts):
1452 for part in newparts:
1453 if newparts[part]['origminor'] == oldminor:
1454 return part
1455 return 0
1456
1457 def _check_keeping_any_existing(self, newparts):
1458 for part in newparts:
1459 if newparts[part]['origminor']: return True
1460 return False
1461
1462 def _find_next_partition(self, curminor, parts):
1463 foundmyself = False
1464 for part in parts:
1465 if not part == curminor and not foundmyself: continue
1466 if part == curminor:
1467 foundmyself = True
1468 continue
1469 if foundmyself:
1470 return part
1471 return 0
1472
1473 def _find_current_minor_for_part(self, device, start):
1474 tmp_oldparts = GLIStorageDevice.Device(device, arch=self._client_configuration.get_architecture_template())
1475 tmp_oldparts.set_partitions_from_disk()
1476 for tmp_oldpart in tmp_oldparts:
1477 self._logger.log("_find_current_minor_for_part(): looking at minor %s...start sector is %s...looking for %s" % (str(tmp_oldpart), str(tmp_oldparts[tmp_oldpart]['start']), str(start)))
1478 if tmp_oldparts[tmp_oldpart]['start'] == start:
1479 return tmp_oldparts[tmp_oldpart]['minor']
1480 else:
1481 raise GLIException("PartitionResizeError", 'fatal', '_find_current_minor_for_part', "Could not determine the new devnode for partition starting at sector " + str(start))
1482
1483 def _partition_delete_step(self, parted_disk, oldparts, newparts):
1484 self._logger.log("_partition_delete_step(): Deleting partitions that aren't being resized")
1485 for oldpart in list(oldparts)[::-1]:
1486 tmppart_old = oldparts[oldpart]
1487 if oldparts.get_disklabel() != "mac" and tmppart_old['type'] == "free": continue
1488 if tmppart_old['type'] == "extended":
1489 # Iterate through logicals to see if any are being resized
1490 self._logger.log("_partition_delete_step(): logicals for extended part %d: %s" % (tmppart_old['minor'], str(tmppart_old.get_logicals())))
1491 for logpart in tmppart_old.get_logicals():
1492 newminor = self._find_existing_in_new(logpart, newparts)
1493 self._logger.log("_partition_delete_step(): newminor is " + str(newminor))
1494 if newminor and newparts[newminor]['resized']:
1495 self._logger.log(" Logical partition " + str(logpart) + " to be resized...can't delete extended")
1496 break
1497 else:
1498 self._logger.log(" No logical partitions are being resized...deleting extended")
1499 self._delete_partition(parted_disk, oldpart)
1500 else:
1501 newminor = self._find_existing_in_new(oldpart, newparts)
1502 if newminor and not newparts[newminor]['format']:
1503 if newparts[newminor]['resized']:
1504 self._logger.log(" Ignoring old minor " + str(oldpart) + " to resize later")
1505 continue
1506 else:
1507 self._logger.log(" Deleting old minor " + str(oldpart) + " to be recreated later")
1508 else:
1509 self._logger.log(" No match in new layout for old minor " + str(oldpart) + "...deleting")
1510 self._delete_partition(parted_disk, oldpart)
1511 parted_disk.commit()
1512
1513 def _partition_resize_step(self, parted_disk, device, oldparts, newparts):
1514 self._logger.log("_partition_resize_step(): Resizing partitions")
1515 device_sectors = newparts.get_num_sectors()
1516 for oldpart in oldparts:
1517 tmppart_old = oldparts[oldpart]
1518 newminor = self._find_existing_in_new(oldpart, newparts)
1519 if not newminor or not newparts[newminor]['resized'] or newparts[newminor]['type'] in ("extended", "free"):
1520 continue
1521 tmppart_new = newparts[newminor]
1522 type = tmppart_new['type']
1523 start = tmppart_new['start']
1524 end = start + (long(tmppart_new['mb']) * MEGABYTE / 512) - 1
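# Illustrative example (hypothetical values): a 100 MB partition starting at sector 63
# gives end = 63 + (100 * 1048576 / 512) - 1 = 204862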
1525
1526 # Make sure calculated end sector doesn't overlap start sector of next partition
1527 nextminor = self._find_next_partition(newminor, newparts)
1528 if nextminor:
1529 if newparts[nextminor]['start'] and end >= newparts[nextminor]['start']:
1530 self._logger.log(" End sector for growing partition overlaps with start of next partition...fixing")
1531 end = newparts[nextminor]['start'] - 1
1532
1533 # cap to end of device
1534 if end >= device_sectors:
1535 end = device_sectors - 1
1536
1537 total_sectors = end - start + 1
1538 total_bytes = long(total_sectors) * 512
1539
1540 # Delete partition and recreate at same start point with new size if growing
1541 if tmppart_new['mb'] > tmppart_old['mb']:
1542 curminor = self._find_current_minor_for_part(device, start)
1543 self._delete_partition(parted_disk, curminor)
1544 if tmppart_new.is_logical():
1545 tmptype = "logical"
1546 else:
1547 tmptype = "primary"
1548 self._add_partition(parted_disk, start, end, tmptype, tmppart_new['type'], strict_start=True)
1549 parted_disk.commit()
1550
1551 curminor = self._find_current_minor_for_part(device, start)
1552 devnode = device + str(curminor)
1553
1554 # sleep a bit first
1555 time.sleep(3)
1556 # now sleep until it exists
1557 while not GLIUtility.is_file(devnode):
1558 self._logger.log("Waiting for device node " + devnode + " to exist before resizing")
1559 time.sleep(1)
1560 # one bit of extra sleep is needed, as there is a blip still
1561 time.sleep(3)
1562
1563 if type in ("ext2", "ext3"):
1564 resizecmd = "resize2fs %s %sK" % (devnode, str(int((total_bytes - (2 * MEGABYTE)) / 1024)))
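# Illustrative example (hypothetical devnode): for a ~512 MB target size this builds
# roughly "resize2fs /dev/sda3 522240K" (total_bytes minus a 2 MB safety margin, in KB)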
1565 self._logger.log("_partition_resize_step(): running: " + resizecmd)
1566 ret = GLIUtility.spawn(resizecmd, logfile=self._compile_logfile, append_log=True)
1567 if not GLIUtility.exitsuccess(ret):
1568 raise GLIException("PartitionResizeError", 'fatal', 'partition', "could not resize ext2/3 filesystem on " + devnode)
1569 elif type == "ntfs":
1570 ret = GLIUtility.spawn("yes | ntfsresize -v --size " + str(total_bytes) + " " + devnode, logfile=self._compile_logfile, append_log=True)
1571 if not GLIUtility.exitsuccess(ret):
1572 raise GLIException("PartitionResizeError", 'fatal', 'partition', "could not resize NTFS filesystem on " + devnode)
1573 elif type in ("linux-swap", "fat32", "fat16"):
1574 parted_fs = parted_disk.get_partition(curminor).geom.file_system_open()
1575 resize_constraint = parted_fs.get_resize_constraint()
1576 if total_sectors < resize_constraint.min_size or start != resize_constraint.start_range.start:
1577 raise GLIException("PartitionError", 'fatal', 'partition', "New size specified for " + devnode + " is not within allowed boundaries (blame parted)")
1578 new_geom = resize_constraint.start_range.duplicate()
1579 new_geom.set_start(start)
1580 new_geom.set_end(end)
1581 try:
1582 parted_fs.resize(new_geom)
1583 except:
1584 raise GLIException("PartitionResizeError", 'fatal', 'partition', "could not resize " + devnode)
1585 self._logger.log(" Deleting old minor " + str(oldpart) + " to be recreated in next pass")
1586 # self._delete_partition(parted_disk, oldpart)
1587 parted_disk.delete_all()
1588 parted_disk.commit()
1589
1590 def _partition_recreate_step(self, parted_disk, newparts):
1591 self._logger.log("_partition_recreate_step(): Recreating partitions")
1592 start = 0
1593 end = 0
1594 extended_start = 0
1595 extended_end = 0
1596 device_sectors = newparts.get_num_sectors()
1597 self._logger.log(" Drive has " + str(device_sectors) + " sectors")
1598 for part in newparts:
1599 strict_start = False
1600 strict_end = False
1601 newpart = newparts[part]
1602 self._logger.log(" Partition " + str(part) + " has " + str(newpart['mb']) + "MB")
1603 if newpart['start']:
1604 self._logger.log(" Old start sector " + str(newpart['start']) + " retrieved")
1605 if start != newpart['start']:
1606 self._logger.log(" Retrieved start sector is not the same as the calculated next start sector (usually not an issue)")
1607 start = newpart['start']
1608 strict_start = True
1609 else:
1610 if newpart.is_logical() and start > extended_end:
1611 start = extended_start + 1
1612 self._logger.log(" Start sector calculated to be " + str(start))
1613 if extended_end and not newpart.is_logical() and start <= extended_end:
1614 self._logger.log(" Start sector for primary is less than the end sector for previous extended")
1615 start = extended_end + 1
1616 if newpart['end']:
1617 self._logger.log(" Old end sector " + str(newpart['end']) + " retrieved")
1618 end = newpart['end']
1619 part_sectors = end - start + 1
1620 strict_end = True
1621 else:
1622 part_sectors = long(newpart['mb']) * MEGABYTE / 512
1623 end = start + part_sectors
1624 if newpart.is_logical() and end > extended_end:
1625 end = extended_end
1626 self._logger.log(" End sector calculated to be " + str(end))
1627 # Make sure end doesn't overlap next partition's existing start sector
1628 nextminor = self._find_next_partition(newpart, newparts)
1629 if nextminor:
1630 if newparts[nextminor]['start'] and end >= newparts[nextminor]['start']:
1631 self._logger.log(" End sector for partition overlaps with start of next partition...fixing")
1632 end = newparts[nextminor]['start'] - 1
1633 strict_end = True
1634 # cap to end of device
1635 if end >= device_sectors:
1636 end = device_sectors - 1
1637 # now the actual creation
1638 if newpart['type'] == "free":
1639 if newparts.get_disklabel() == "mac":
1640 # Create a dummy partition to be removed later because parted sucks
1641 self._logger.log(" Adding dummy partition to fool parted " + str(part) + " from " + str(start) + " to " + str(end))
1642 self._add_partition(parted_disk, start, end, "primary", "ext2", "free", strict_start=strict_start, strict_end=strict_end)
1643 elif newpart['type'] == "extended":
1644 self._logger.log(" Adding extended partition " + str(part) + " from " + str(start) + " to " + str(end))
1645 self._add_partition(parted_disk, start, end, "extended", "", strict_start=strict_start, strict_end=strict_end)
1646 extended_start = start
1647 extended_end = end
1648 elif not newpart.is_logical():
1649 self._logger.log(" Adding primary partition " + str(part) + " from " + str(start) + " to " + str(end))
1650 self._add_partition(parted_disk, start, end, "primary", newpart['type'], strict_start=strict_start, strict_end=strict_end)
1651 elif newpart.is_logical():
1652 if start >= extended_end:
1653 start = extended_start + 1
1654 end = start + part_sectors
1655 if nextminor and not newparts[nextminor].is_logical() and end > extended_end:
1656 end = extended_end
1657 self._logger.log(" Adding logical partition " + str(part) + " from " + str(start) + " to " + str(end))
1658 self._add_partition(parted_disk, start, end, "logical", newpart['type'], strict_start=strict_start, strict_end=strict_end)
1659 if self._debug: self._logger.log("partition(): flags: " + str(newpart['flags']))
1660 for flag in newpart['flags']:
1661 if parted_disk.get_partition(part).is_flag_available(flag):
1662 parted_disk.get_partition(part).set_flag(flag, True)
1663 if newpart['name'] and parted_disk.type.check_feature(parted.DISK_TYPE_PARTITION_NAME):
1664 parted_disk.set_name(newpart['name'])
1665 # write to disk
1666 if self._debug: self._logger.log("partition(): committing change to disk")
1667 parted_disk.commit()
1668 if self._debug: self._logger.log("partition(): committed change to disk")
1669 start = end + 1
1670
1671 def _partition_format_step(self, parted_disk, device, newparts):
1672 self._logger.log("_partition_format_step(): Formatting new partitions")
1673 for part in newparts:
1674 newpart = newparts[part]
1675 devnode = newpart['devnode']
1676 # This little hack is necessary because parted handles free space on mac disklabels poorly
1677 if newparts.get_disklabel() == "mac" and newpart['type'] == "free":
1678 self._delete_partition(parted_disk, part)
1679 continue
1680 if newpart['format'] and newpart['type'] not in ('extended', 'free'):
1681 # devnode = device + str(int(part))
1682 if self._debug: self._logger.log("_partition_format_step(): devnode is %s in formatting code" % devnode)
1683 # if you need a special command and
1684 # some base options, place it here.
1685 format_cmds = { 'linux-swap': "mkswap", 'fat16': "mkfs.vfat -F 16", 'fat32': "mkfs.vfat -F 32",
1686 'ntfs': "mkntfs", 'xfs': "mkfs.xfs -f", 'jfs': "mkfs.jfs -f",
1687 'reiserfs': "mkfs.reiserfs -f", 'ext2': "mkfs.ext2", 'ext3': "mkfs.ext3",
1688 'hfs': "hformat", 'apple_bootstrap': "hformat"
1689 }
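# Illustrative example (hypothetical devnode): an ext3 partition with no extra mkfsopts
# would be formatted below with roughly "mkfs.ext3  /dev/sda3"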
1690 if newpart['type'] in format_cmds:
1691 cmdname = format_cmds[newpart['type']]
1692 else: # this should catch everything else
1693 raise GLIException("PartitionFormatError", 'fatal', '_partition_format_step', "Unknown partition type " + newpart['type'])
1694 # sleep a bit first
1695 time.sleep(1)
1696 for tries in range(10):
1697 cmd = "%s %s %s" % (cmdname, newpart['mkfsopts'], devnode)
1698 self._logger.log(" Formatting partition %s as %s with: %s" % (str(part),newpart['type'],cmd))
1699 ret = GLIUtility.spawn(cmd, logfile=self._compile_logfile, append_log=True)
1700 if not GLIUtility.exitsuccess(ret):
1701 self._logger.log("Try %d failed formatting partition %s...waiting 5 seconds" % (tries+1, devnode))
1702 time.sleep(5)
1703 else:
1704 break
1705 else:
1706 raise GLIException("PartitionFormatError", 'fatal', '_partition_format_step', "Could not create %s filesystem on %s" % (newpart['type'], devnode))
1707
1708 def partition(self):
1709 """
1710 TODO:
1711 skip fixed partitions in all passes (in GLISD maybe?)
1712 """
1713 parts_old = {}
1714 parts_new = self._install_profile.get_partition_tables()
1715 for device in GLIStorageDevice.detect_devices():
1716 parts_old[device] = GLIStorageDevice.Device(device, arch=self._client_configuration.get_architecture_template())
1717 parts_old[device].set_partitions_from_disk()
1718
1719 self.notify_frontend("progress", (0, "Examining partitioning data"))
1720 total_steps = float(len(parts_new) * 4) # 4 for the number of passes over each device
1721 cur_progress = 0
1722 for device in parts_new:
1723 # Skip this device in parts_new if device isn't detected on current system
1724 if not device in parts_old:
1725 self._logger.log("There is no physical device " + device + " detected to match the entry in the install profile...skipping")
1726 continue
1727
1728 # This just makes things simpler in the code
1729 newparts = parts_new[device]
1730 oldparts = parts_old[device]
1731
1732 # Check to see if the old and new partition table structures are the same...skip if they are
1733 if not self._check_table_changed(oldparts, newparts):
1734 self._logger.log("Partition table for " + device + " is unchanged...skipping")
1735 continue
1736
1737 self._logger.log("partition(): Processing " + device + "...")
1738
1739 # Commit ritual seppuku if there are any mounted filesystems on this device
1740 if GLIUtility.spawn("mount | grep '^" + device + "'", return_output=True)[1].strip():
1741 raise GLIException("PartitionsMountedError", 'fatal', 'partition', "Cannot partition " + device + " due to filesystems being mounted")
1742
1743 # We also can't handle "unknown" partitions
1744 for part in newparts:
1745 if newparts[part]['type'] == "unknown":
1746 raise GLIException("UnknownPartitionTypeError", 'fatal', 'partition', "Refusing to partition this drive due to the presence of an unknown type of partition")
1747
1748 # Create pyparted objects for this device
1749 parted_dev = parted.PedDevice.get(device)
1750 try:
1751 parted_disk = parted.PedDisk.new(parted_dev)
1752 except:
1753 if self._debug: self._logger.log("partition(): could not load existing disklabel...creating new one")
1754 parted_disk = parted_dev.disk_new_fresh(parted.disk_type_get((newparts.get_disklabel() or GLIStorageDevice.archinfo[self._architecture_name])))
1755
1756 # Iterate through new partitions and check for 'origminor' and 'format' == False
1757 for part in newparts:
1758 tmppart_new = newparts[part]
1759 if not tmppart_new['origminor'] or tmppart_new['format']: continue
1760 if not tmppart_new['origminor'] in oldparts:
1761 raise GLIException("MissingPartitionsError", 'fatal', 'partition', "Cannot find the existing partition that a new one refers to. This is not a bug: it usually means an installprofile.xml from a previous install that had already started the partitioning step was reused.")
1762 tmppart_old = oldparts[tmppart_new['origminor']]
1763 if parted_disk.type.check_feature(parted.DISK_TYPE_PARTITION_NAME):
1764 tmppart_new['name'] = tmppart_old['name']
1765 tmppart_new['flags'] = tmppart_old['flags']
1766 if tmppart_new['resized']:
1767 # Partition is being resized in the new layout
1768 self._logger.log(" Partition " + str(part) + " has origminor " + str(tmppart_new['origminor']) + " and it being resized...saving start sector " + str(tmppart_old['start']))
1769 tmppart_new['start'] = tmppart_old['start']
1770 tmppart_new['end'] = 0
1771 else:
1772 # Partition is untouched in the new layout
1773 self._logger.log(" Partition " + str(part) + " has origminor " + str(tmppart_new['origminor']) + "...saving start sector " + str(tmppart_old['start']) + " and end sector " + str(tmppart_old['end']))
1774 tmppart_new['start'] = tmppart_old['start']
1775 tmppart_new['end'] = tmppart_old['end']
1776
1777 if self._check_table_layout_changed(parts_old[device], parts_new[device]):
1778 # First pass to delete old partitions that aren't resized
1779 self.notify_frontend("progress", (cur_progress / total_steps, "Deleting partitions that aren't being resized for " + device))
1780 cur_progress += 1
1781 self._partition_delete_step(parted_disk, oldparts, newparts)
1782
1783 # Second pass to resize old partitions that need to be resized
1784 self.notify_frontend("progress", (cur_progress / total_steps, "Resizing remaining partitions for " + device))
1785 cur_progress += 1
1786 self._partition_resize_step(parted_disk, device, oldparts, newparts)
1787
1788 # Wiping disk and creating blank disklabel
1789 try:
1790 parted_disk = parted_dev.disk_new_fresh(parted.disk_type_get(newparts.get_disklabel()))
1791 parted_disk.commit()
1792 except:
1793 raise GLIException("DiskLabelCreationError", 'fatal', 'partition', "Could not create a blank disklabel!")
1794
1795 # Third pass to create new partition table
1796 self.notify_frontend("progress", (cur_progress / total_steps, "Recreating partition table for " + device))
1797 cur_progress += 1
1798 self._partition_recreate_step(parted_disk, newparts)
1799 else:
1800 cur_progress += 3
1801
1802 # Fourth pass to format partitions
1803 self.notify_frontend("progress", (cur_progress / total_steps, "Formatting partitions for " + device))
1804 cur_progress += 1
1805 self._partition_format_step(parted_disk, device, newparts)
1806
1807 # All done for this device
1808 self.notify_frontend("progress", (cur_progress / total_steps, "Done with partitioning for " + device))
1809 cur_progress += 1
