/[gentoo-src]/portage/pym/portage.py
Gentoo

Contents of /portage/pym/portage.py

Parent Directory | Revision Log


Revision 1.590 - (show annotations) (download) (as text)
Sun Aug 14 16:49:41 2005 UTC (9 years, 1 month ago) by genone
Branch: MAIN
CVS Tags: HEAD
Changes since 1.589: +4 -4 lines
File MIME type: text/x-python
fix logic error in sandbox

1
2 # portage.py -- core Portage functionality
3 # Copyright 1998-2004 Gentoo Foundation
4 # Distributed under the terms of the GNU General Public License v2
5 # $Header: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.589 2005/04/29 04:43:19 vapier Exp $
6 cvs_id_string="$Id: portage.py,v 1.589 2005/04/29 04:43:19 vapier Exp $"[5:-2]
7
8 VERSION="$Revision: 1.589 $"[11:-2] + "-cvs"
9
10 # ===========================================================================
11 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
12 # ===========================================================================
13
14
15 try:
16 import sys
17 except SystemExit, e:
18 raise
19 except:
20 print "Failed to import sys! Something is _VERY_ wrong with python."
21 raise SystemExit, 127
22
23 try:
24 import os,string,types,atexit,signal,fcntl
25 import time,cPickle,traceback,copy
26 import re,pwd,grp
27 import shlex,shutil
28 import stat
29 from time import sleep
30 from random import shuffle
31 except SystemExit, e:
32 raise
33 except Exception, e:
34 sys.stderr.write("\n\n")
35 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
36 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
37 sys.stderr.write("!!! itself and thus portage is no able to continue processing.\n\n")
38
39 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
40 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
41
42 sys.stderr.write(" "+str(e)+"\n\n");
43 sys.exit(127)
44 except:
45 sys.stderr.write("\n\n")
46 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
47 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
48 sys.stderr.write("!!! itself and thus portage is no able to continue processing.\n\n")
49
50 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
51 sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
52 sys.exit(127)
53
54
55 try:
56 #XXX: This should get renamed to bsd_chflags, I think.
57 import chflags
58 bsd_chflags = chflags
59 except SystemExit, e:
60 raise
61 except:
62 # XXX: This should get renamed to bsd_chflags, I think.
63 bsd_chflags = None
64
65 try:
66 from config import config
67 import ebuild
68 import cvstree
69 import xpak
70 import getbinpkg
71 import portage_dep
72 import eclass_cache
73 import portage_versions
74
75 #assign these to portage's namespace to keep the tool monkeys happy.
76 catpkgsplit = portage_versions.catpkgsplit
77 pkgsplit = portage_versions.pkgsplit
78 pkgcmp = portage_versions.pkgcmp
79
80 # XXX: This needs to get cleaned up.
81 import output
82 from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
83 darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
84 xtermTitle, xtermTitleReset, yellow
85
86 import portage_const
87 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
88 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
89 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
90 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
91 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
92 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
93 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, SANDBOX_PIDS_FILE, CONFIG_MEMORY_FILE,\
94 INCREMENTALS, STICKIES
95
96 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
97 portage_uid, portage_gid
98
99 import portage_util
100 from portage_util import grab_multiple, grabdict, grabdict_package, grabfile, grabfile_package, \
101 grabints, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
102 unique_array, varexpand, writedict, writeints, writemsg, getconfig, movefile, flatten, \
103 abssymlink
104 from portage_file import normpath, listdir
105 import portage_exception
106 import portage_gpg
107 import portage_locks
108 import portage_exec
109 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
110 import portage_checksum
111 from portage_checksum import perform_md5,perform_checksum,prelink_capable
112
113 import transports.bundled_lib
114 import transports.fetchcommand
115 except SystemExit, e:
116 raise
117 except Exception, e:
118 sys.stderr.write("\n\n")
119 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
120 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
121 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
122 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
123 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
124 sys.stderr.write("!!! a recovery of portage.\n")
125
126 sys.stderr.write(" "+str(e)+"\n\n")
127 sys.exit(127)
128
129
130 # ===========================================================================
131 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
132 # ===========================================================================
133
134
def exithandler(signum,frame):
    """Handles ^C interrupts in a sane manner"""
    # Ignore further INT/TERM deliveries while we are shutting down.
    for sig in (signal.SIGINT, signal.SIGTERM):
        signal.signal(sig, signal.SIG_IGN)

    print("caught %i in %i" % (signum, os.getpid()))
    portageexit()
    print("Exiting due to signal")
    # pid 0 == re-send the signal to *everybody* in our process group.
    os.kill(0,signum)
    sys.exit(1)
146
# Install portage's signal handling: keep the default SIGCHLD behavior so
# child exit statuses can be collected, and route SIGINT/SIGTERM through
# exithandler for an orderly shutdown.
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)
150
def getcwd():
    """Like os.getcwd(), but when the current directory no longer
    exists, chdir to "/" and return that instead of raising."""
    try:
        return os.getcwd()
    except SystemExit:
        raise
    except:
        # cwd vanished out from under us; move somewhere guaranteed to
        # exist and report that path.
        os.chdir("/")
        return "/"
getcwd()
161
def suffix_array(array,suffix,doblanks=1):
    """Appends a given suffix to each element in an Array/List/Tuple.
    Returns a List.  When doblanks is false, falsy elements (e.g. the
    empty string) are passed through unchanged."""
    # Deliberate exact-type check (not isinstance) to match the
    # historical contract: subclasses are rejected too.
    if type(array) not in (list,tuple):
        raise TypeError("List or Tuple expected. Got %s" % type(array))
    return [(item + suffix) if (item or doblanks) else item
            for item in array]
174
def prefix_array(array,prefix,doblanks=1):
    """Prepends a given prefix to each element in an Array/List/Tuple.
    Returns a List.  When doblanks is false, falsy elements (e.g. the
    empty string) are passed through unchanged."""
    # Deliberate exact-type check (not isinstance) to match the
    # historical contract: subclasses are rejected too.
    if type(array) not in (list,tuple):
        raise TypeError("List or Tuple expected. Got %s" % type(array))
    return [(prefix + item) if (item or doblanks) else item
            for item in array]
187
# Wall-clock time (whole seconds since the epoch) at module import;
# used elsewhere as the reference point for relative timestamps.
starttime=long(time.time())
# Enabled FEATURES values; populated later once the configuration has
# been loaded.
features=[]
190
def tokenize(mystring):
    """breaks a string like 'foo? (bar) oni? (blah (blah))'
    into embedded lists; returns None on paren mismatch"""

    # This function is obsoleted.
    # Use dep_parenreduce

    result = []          # top-level token list that gets returned
    stack = []           # enclosing lists, innermost last
    current = result     # list we are currently appending to
    depth = 0            # paren nesting depth
    word = ""            # token accumulator
    for ch in mystring:
        if ch == "(":
            if word:
                current.append(word)
                word = ""
            # descend into a fresh sublist
            stack.append(current)
            current = []
            depth += 1
        elif ch == ")":
            if word:
                current.append(word)
                word = ""
            if depth == 0:
                writemsg("!!! tokenizer: Unmatched left parenthesis in:\n'"+str(mystring)+"'\n")
                return None
            # close the sublist and attach it to its parent
            finished = current
            current = stack.pop()
            current.append(finished)
            depth -= 1
        elif ch in string.whitespace:
            if word:
                current.append(word)
                word = ""
        else:
            word += ch
    if word:
        current.append(word)
    if depth != 0:
        writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
        return None
    return result
234
235
def elog_process(cpv, mysettings):
    """Collect the per-phase build log messages recorded under
    ${T}/logging by the ebuild helpers, filter them by
    PORTAGE_LOG_CLASSES, and hand them to each module named in
    PORTAGE_LOG_SYSTEM (elog_modules.mod_<name>.process).

    cpv        -- package identifier passed through to the log modules
    mysettings -- config-like mapping providing "T",
                  "PORTAGE_LOG_CLASSES" and "PORTAGE_LOG_SYSTEM"
    """
    mylogfiles = listdir(mysettings["T"]+"/logging/")
    # shortcut for packages without any messages
    if len(mylogfiles) == 0:
        return
    # exploit listdir() file order so we process log entries in chronological order
    mylogfiles.reverse()
    mylogentries = {}
    for f in mylogfiles:
        # file names encode "<ebuild phase>.<message class>"
        msgfunction, msgtype = f.split(".")
        # keep only the message classes the user asked for (either case)
        if not msgtype.upper() in mysettings["PORTAGE_LOG_CLASSES"].split() \
        and not msgtype.lower() in mysettings["PORTAGE_LOG_CLASSES"].split():
            continue
        if msgfunction not in portage_const.EBUILD_PHASES.split():
            print "!!! can't process invalid log file: %s" % f
            continue
        if not msgfunction in mylogentries:
            mylogentries[msgfunction] = []
        msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
        mylogentries[msgfunction].append((msgtype, msgcontent))

    # in case the filters matched all messages
    if len(mylogentries) == 0:
        return

    # generate a single string with all log messages, ordered by phase
    fulllog = ""
    for phase in portage_const.EBUILD_PHASES.split():
        if not phase in mylogentries:
            continue
        for msgtype,msgcontent in mylogentries[phase]:
            fulllog += "%s: %s\n" % (msgtype, phase)
            for line in msgcontent:
                fulllog += line
            fulllog += "\n"

    # pass the processing to the individual modules
    logsystems = mysettings["PORTAGE_LOG_SYSTEM"].split()
    for s in logsystems:
        try:
            # FIXME: ugly ad.hoc import code
            # TODO: implement a common portage module loader
            logmodule = __import__("elog_modules.mod_"+s)
            m = getattr(logmodule, "mod_"+s)
            m.process(mysettings, cpv, mylogentries, fulllog)
        except (ImportError, AttributeError), e:
            # a broken/missing module must not abort the merge
            print "!!! Error while importing logging modules:"
            print e
        except portage_exception.PortageException, e:
            print e
286
#parse /etc/env.d and generate /etc/profile.env

#move this to config.
def env_update(root,makelinks=1):
    """Regenerate the environment files derived from ROOT/etc/env.d:
    rewrites etc/ld.so.conf when the library path list changed, rewrites
    etc/prelink.conf when prelink is available, runs ldconfig when the
    ld path cache is stale, and emits etc/profile.env (sh syntax) and
    etc/csh.env (csh syntax).

    root      -- filesystem root, with trailing slash (paths are built
                 as root+"etc/...")
    makelinks -- passed through to ldconfig; when false, -X is used so
                 no library symlinks are (re)created
    Uses the module-level mtimedb to remember LDPATH directory mtimes
    between runs.
    """
    if not os.path.exists(root+"etc/env.d"):
        prevmask=os.umask(0)
        os.makedirs(root+"etc/env.d",0755)
        os.umask(prevmask)
    fns=listdir(root+"etc/env.d")
    fns.sort()
    # Only files whose names start with two digits (e.g. "00basic") are
    # processed; everything else is dropped from the list.
    pos=0
    while (pos<len(fns)):
        if len(fns[pos])<=2:
            del fns[pos]
            continue
        if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
            del fns[pos]
            continue
        pos=pos+1

    # Variables accumulated across all env.d files (instead of a later
    # file simply overwriting an earlier one).
    specials={
        "KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
        "INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
        "CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
        "PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
    }
    # Of the specials, these are split on ":" before being accumulated.
    # NOTE(review): "PYTHON_PATH" below does not match the "PYTHONPATH"
    # key in specials, so PYTHONPATH values are appended whole rather
    # than colon-split -- looks like a typo; confirm before changing.
    colon_separated = [
        "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
        "LDPATH", "MANPATH",
        "PATH", "PRELINK_PATH",
        "PRELINK_PATH_MASK", "PYTHON_PATH"
    ]

    # All non-special variables; later files override earlier ones.
    env={}

    for x in fns:
        # don't process backup files
        if x[-1]=='~' or x[-4:]==".bak":
            continue
        myconfig=getconfig(root+"etc/env.d/"+x)
        if myconfig==None:
            writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
            #parse error
            continue
        # process PATH, CLASSPATH, LDPATH
        for myspec in specials.keys():
            if myconfig.has_key(myspec):
                if myspec in colon_separated:
                    specials[myspec].extend(string.split(varexpand(myconfig[myspec]),":"))
                else:
                    specials[myspec].append(varexpand(myconfig[myspec]))
                del myconfig[myspec]
        # process all other variables
        for myenv in myconfig.keys():
            env[myenv]=varexpand(myconfig[myenv])

    # Read the current ld.so.conf (skipping comment lines) so we can
    # tell whether it needs rewriting.
    if os.path.exists(root+"etc/ld.so.conf"):
        myld=open(root+"etc/ld.so.conf")
        myldlines=myld.readlines()
        myld.close()
        oldld=[]
        for x in myldlines:
            #each line has at least one char (a newline)
            if x[0]=="#":
                continue
            oldld.append(x[:-1])
        # os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
        # Where is the new ld.so.conf generated? (achim)
    else:
        oldld=None

    # When invoked via the env-update tool, always refresh the ld cache.
    ld_cache_update=False
    if os.environ.has_key("PORTAGE_CALLER") and \
       os.environ["PORTAGE_CALLER"] == "env-update":
        ld_cache_update = True

    newld=specials["LDPATH"]
    if (oldld!=newld):
        #ld.so.conf needs updating and ldconfig needs to be run
        myfd=open(root+"etc/ld.so.conf","w")
        myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x+"\n")
        myfd.close()
        ld_cache_update=True

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        newprelink=open(root+"etc/prelink.conf","w")
        newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
            newprelink.write("-l "+x+"\n");
        for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
            if not x:
                continue
            if x[-1] != "/":
                x += "/"
            # Skip any path shadowed by a PRELINK_PATH_MASK prefix.
            plmasked=0
            for y in specials["PRELINK_PATH_MASK"]:
                if y[-1]!='/':
                    y=y+"/"
                if y==x[0:len(y)]:
                    plmasked=1
                    break
            if not plmasked:
                newprelink.write("-h "+x+"\n")
        for x in specials["PRELINK_PATH_MASK"]:
            newprelink.write("-b "+x+"\n")
        newprelink.close()

    # Compare every LDPATH directory's mtime with what we cached last
    # run; any change forces an ldconfig run below.
    if not mtimedb.has_key("ldpath"):
        mtimedb["ldpath"]={}

    for x in specials["LDPATH"]+['/usr/lib','/lib']:
        try:
            newldpathtime=os.stat(x)[stat.ST_MTIME]
        except SystemExit, e:
            raise
        except:
            # missing/unreadable directory: treat as mtime 0
            newldpathtime=0
        if mtimedb["ldpath"].has_key(x):
            if mtimedb["ldpath"][x]==newldpathtime:
                pass
            else:
                mtimedb["ldpath"][x]=newldpathtime
                ld_cache_update=True
        else:
            mtimedb["ldpath"][x]=newldpathtime
            ld_cache_update=True

    if (ld_cache_update or makelinks):
        # We can't update links if we haven't cleaned other versions first, as
        # an older package installed ON TOP of a newer version will cause ldconfig
        # to overwrite the symlinks we just made. -X means no links. After 'clean'
        # we can safely create links.
        writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
        cwd="/"
        try: cwd=os.getcwd()
        except (OSError, IOError): pass
        if makelinks:
            portage_exec.spawn("/sbin/ldconfig -r "+root)
        else:
            portage_exec.spawn("/sbin/ldconfig -X -r "+root)
        try: os.chdir(cwd)
        except OSError: pass

    # LDPATH is only for ld.so.conf; keep it out of profile.env/csh.env.
    del specials["LDPATH"]

    penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    #create /etc/profile.env for bash support
    outfile=open(root+"/etc/profile.env","w")
    outfile.write(penvnotice)

    # The CONFIG_PROTECT pair is space-separated; all other accumulated
    # lists are colon-separated.
    for path, values in specials.items():
        if not values:
            continue
        if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
            sep = " "
        else:
            sep = ":"
        outstring = "export %s='%s'\n" % (path,sep.join(values))
        outfile.write(outstring)

    #create /etc/profile.env
    for x in env:
        if type(env[x])!=str:
            continue
        outfile.write("export "+x+"='"+env[x]+"'\n")
    outfile.close()

    #create /etc/csh.env for (t)csh support
    outfile=open(root+"/etc/csh.env","w")
    outfile.write(cenvnotice)

    for path, values in specials.items():
        if not values:
            continue
        if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
            sep = " "
        else:
            sep = ":"
        outstring = "setenv %s '%s'\n" % (path, sep.join(values))
        outfile.write(outstring)

    #create /etc/csh.env
    for x in env:
        if type(env[x])!=str:
            continue
        outfile.write("setenv "+x+" '"+env[x]+"'\n")
    outfile.close()
485
def new_protect_filename(mydest, newmd5=None):
    """Resolves a config-protect filename for merging, optionally
    using the last filename if the md5 matches.
    (dest,md5) ==> 'string' --- path_to_target_filename
    (dest) ==> ('next', 'highest') --- next_target and most-recent_target
    """

    # config protection filename format:
    # ._cfg0000_foo
    # 0123456789012
    if len(mydest) == 0:
        raise ValueError("Empty path provided where a filename is required")
    if mydest[-1] == "/":  # XXX add better directory checking
        raise ValueError("Directory provided but this function requires a filename")
    if not os.path.exists(mydest):
        # nothing to protect against yet
        return mydest

    real_filename = os.path.basename(mydest)
    real_dirname = os.path.dirname(mydest)

    # Find the highest existing ._cfgNNNN_<file> serial for this file.
    highest = -1
    last_pfile = ""
    for candidate in listdir(real_dirname):
        if candidate[0:5] != "._cfg":
            continue
        if candidate[10:] != real_filename:
            continue
        try:
            serial = int(candidate[5:9])
            if serial > highest:
                highest = serial
                last_pfile = candidate
        except SystemExit:
            raise
        except:
            # malformed serial -- skip this entry
            continue
    next_serial = highest + 1

    new_pfile = os.path.normpath(real_dirname+"/._cfg"+str(next_serial).zfill(4)+"_"+real_filename)
    old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
    if last_pfile and newmd5:
        # Reuse the newest existing protect file when it already holds
        # exactly the content we are about to merge.
        if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
            return old_pfile
        return new_pfile
    if newmd5:
        return new_pfile
    return (new_pfile, old_pfile)
535
536 #XXX: These two are now implemented in portage_util.py but are needed here
537 #XXX: until the isvalidatom() dependency is sorted out.
538
def grabdict_package(myfilename,juststrings=0):
    """Read myfilename with grabdict() and drop any entry whose key is
    not a valid package atom, warning about each one removed."""
    pkgs = grabdict(myfilename, juststrings=juststrings, empty=1)
    for atom in list(pkgs.keys()):
        if portage_dep.isvalidatom(atom):
            continue
        del pkgs[atom]
        writemsg("--- Invalid atom in %s: %s\n" % (myfilename, atom))
    return pkgs
546
def grabfile_package(myfilename,compatlevel=0):
    """Read myfilename with grabfile() and drop any line that is not a
    valid package atom (ignoring a leading "-" and/or "*" marker),
    warning about each one removed."""
    pkgs = grabfile(myfilename, compatlevel)
    # Walk backwards so deletions don't disturb unvisited indices.
    for idx in range(len(pkgs)-1, -1, -1):
        atom = pkgs[idx]
        if atom[0] == "-":
            atom = atom[1:]
        if atom[0] == "*":
            atom = atom[1:]
        if not portage_dep.isvalidatom(atom):
            writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[idx]))
            del pkgs[idx]
    return pkgs
559
560 # returns a tuple. (version[string], error[string])
561 # They are pretty much mutually exclusive.
562 # Either version is a string and error is none, or
563 # version is None and error is a string
564 #
565 def ExtractKernelVersion(base_dir):
566 lines = []
567 pathname = os.path.join(base_dir, 'Makefile')
568 try:
569 f = open(pathname, 'r')
570 except OSError, details:
571 return (None, str(details))
572 except IOError, details:
573 return (None, str(details))
574
575 try:
576 for i in range(4):
577 lines.append(f.readline())
578 except OSError, details:
579 return (None, str(details))
580 except IOError, details:
581 return (None, str(details))
582
583 lines = [ l.strip() for l in lines ]
584
585 version = ''
586
587 #XXX: The following code relies on the ordering of vars within the Makefile
588 for line in lines:
589 # split on the '=' then remove annoying whitespace
590 items = [ i.strip() for i in line.split('=') ]
591 if items[0] == 'VERSION' or \
592 items[0] == 'PATCHLEVEL':
593 version += items[1]
594 version += "."
595 elif items[0] == 'SUBLEVEL':
596 version += items[1]
597 elif items[0] == 'EXTRAVERSION' and \
598 items[-1] != items[0]:
599 version += items[1]
600
601 # Grab a list of files named localversion* and sort them
602 localversions = os.listdir(base_dir)
603 for x in range(len(localversions)-1,-1,-1):
604 if localversions[x][:12] != "localversion":
605 del localversions[x]
606 localversions.sort()
607
608 # Append the contents of each to the version string, stripping ALL whitespace
609 for lv in localversions:
610 version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
611
612 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
613 kernelconfig = getconfig(base_dir+"/.config")
614 if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
615 version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
616
617 return (version,None)
618
619
620 # XXX This would be to replace getstatusoutput completely.
621 # XXX Issue: cannot block execution. Deadlock condition.
622 def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
623 """spawn a subprocess with optional sandbox protection,
624 depending on whether sandbox is enabled. The "free" argument,
625 when set to 1, will disable sandboxing. This allows us to
626 spawn processes that are supposed to modify files outside of the
627 sandbox. We can't use os.system anymore because it messes up
628 signal handling. Using spawn allows our Portage signal handler
629 to work."""
630
631 if type(mysettings) == types.DictType:
632 env=mysettings
633 keywords["opt_name"]="[ %s ]" % "portage"
634 else:
635 if not isinstance(mysettings, config):
636 raise TypeError, "Invalid type for config object: %s" % mysettings.__class_
637 env=mysettings.environ()
638 keywords["opt_name"]="[%s]" % mysettings["PF"]
639
640
641 # XXX: Negative RESTRICT word
642 myrestrict = mysettings["RESTRICT"].split()
643 droppriv=(droppriv and "userpriv" in mysettings.features and
644 "nouserpriv" not in myrestrict and "userpriv" not in myrestrict)
645
646 if ("sandbox" in features) and (not free):
647 keywords["opt_name"] += " sandbox"
648 if droppriv and portage_gid and portage_uid:
649 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
650 return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
651 else:
652 keywords["opt_name"] += " bash"
653 return portage_exec.spawn_bash(mystring,env=env,**keywords)
654
655 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1,verbosity=0):
656 "fetch files. Will use digest file if available."
657
658 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
659 myrestrict = mysettings["RESTRICT"].split()
660 if "mirror" in myrestrict or "nomirror" in myrestrict:
661 if ("mirror" in mysettings.features) and ("lmirror" not in mysettings.features):
662 # lmirror should allow you to bypass mirror restrictions.
663 # XXX: This is not a good thing, and is temporary at best.
664 if verbosity:
665 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
666 return 1
667
668 global thirdpartymirrors
669
670 if not isinstance(mysettings, config):
671 raise TypeError, "Invalid type for config object: %s" % mysettings.__class_
672
673 custommirrors=grabdict(CUSTOM_MIRRORS_FILE)
674
675 mymirrors=[]
676
677 if listonly or ("distlocks" not in features):
678 use_locks = 0
679
680 # local mirrors are always added
681 if custommirrors.has_key("local"):
682 mymirrors += custommirrors["local"]
683
684 if ("nomirror" in mysettings["RESTRICT"].split()) or \
685 ("mirror" in mysettings["RESTRICT"].split()):
686 # We don't add any mirrors.
687 pass
688 else:
689 if try_mirrors:
690 for x in mysettings["GENTOO_MIRRORS"].split():
691 if x:
692 if x[-1] == '/':
693 mymirrors += [x[:-1]]
694 else:
695 mymirrors += [x]
696
697 mydigests={}
698 digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
699 if os.path.exists(digestfn):
700 mydigests = digestParseFile(digestfn)
701
702 fsmirrors = []
703 for x in range(len(mymirrors)-1,-1,-1):
704 if mymirrors[x] and mymirrors[x][0]=='/':
705 fsmirrors += [mymirrors[x]]
706 del mymirrors[x]
707
708 for myuri in myuris:
709 myfile=os.path.basename(myuri)
710 try:
711 destdir = mysettings["DISTDIR"]+"/"
712 if not os.path.exists(destdir+myfile):
713 for mydir in fsmirrors:
714 if os.path.exists(mydir+"/"+myfile):
715 writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
716 shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
717 break
718 except (OSError,IOError),e:
719 # file does not exist
720 writemsg(_("!!! %(file)s not found in %(dir)s." % {"file":myfile,"dir":mysettings["DISTDIR"]}),verbosity)
721 gotit=0
722
723 if "fetch" in mysettings["RESTRICT"].split():
724 # fetch is restricted. Ensure all files have already been downloaded; otherwise,
725 # print message and exit.
726 gotit=1
727 for myuri in myuris:
728 myfile=os.path.basename(myuri)
729 try:
730 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
731 except (OSError,IOError),e:
732 # file does not exist
733 # FIXME: gettext doesn't work yet
734 # writemsg(_("!!! %(file)s not found in %(dir)s." % {"file":myfile, "dir":mysettings["DISTDIR"]}),verbosity)
735 writemsg("!!! %(file)s not found in %(dir)s." % {"file":myfile, "dir":mysettings["DISTDIR"]},verbosity)
736 gotit=0
737 if not gotit:
738 writemsg("\n!!!"+mysettings["CATEGORY"]+"/"+mysettings["PF"]+"has fetch restriction turned on.\n"+
739 "!!! This probably means that this ebuild's files must be downloaded\n"+
740 "!!! manually. See the comments in the ebuild for more information.\n\n",
741 verbosity)
742 spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
743 return 0
744 return 1
745 locations=mymirrors[:]
746 filedict={}
747 primaryuri_indexes={}
748 for myuri in myuris:
749 myfile=os.path.basename(myuri)
750 if not filedict.has_key(myfile):
751 filedict[myfile]=[]
752 for y in range(0,len(locations)):
753 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
754 if myuri[:9]=="mirror://":
755 eidx = myuri.find("/", 9)
756 if eidx != -1:
757 mirrorname = myuri[9:eidx]
758
759 # Try user-defined mirrors first
760 if custommirrors.has_key(mirrorname):
761 for cmirr in custommirrors[mirrorname]:
762 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
763 # remove the mirrors we tried from the list of official mirrors
764 if cmirr.strip() in thirdpartymirrors[mirrorname]:
765 thirdpartymirrors[mirrorname].remove(cmirr)
766 # now try the official mirrors
767 if thirdpartymirrors.has_key(mirrorname):
768 try:
769 shuffle(thirdpartymirrors[mirrorname])
770 except SystemExit, e:
771 raise
772 except:
773 writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"),verbosity)
774 writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n",verbosity)
775 writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n",verbosity)
776 writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuid python with either\n",verbosity)
777 writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n",verbosity)
778 time.sleep(10)
779
780 for locmirr in thirdpartymirrors[mirrorname]:
781 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
782
783
784 if not filedict[myfile]:
785 writemsg("No known mirror by the name: %s\n" % (mirrorname),verbosity)
786 else:
787 writemsg("Invalid mirror definition in SRC_URI:\n",verbosity)
788 writemsg(" %s\n" % (myuri),verbosity)
789 else:
790 if "primaryuri" in mysettings["RESTRICT"].split():
791 # Use the source site first.
792 if primaryuri_indexes.has_key(myfile):
793 primaryuri_indexes[myfile] += 1
794 else:
795 primaryuri_indexes[myfile] = 0
796 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
797 else:
798 filedict[myfile].append(myuri)
799
800 missingSourceHost = False
801 for myfile in filedict.keys(): # Gives a list, not just the first one
802 if not filedict[myfile]:
803 writemsg("Warning: No mirrors available for file '%s'\n" % (myfile),verbosity)
804 missingSourceHost = True
805 if missingSourceHost:
806 return 0
807 del missingSourceHost
808
809 can_fetch=True
810 if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
811 writemsg("!!! No write access to %s" % mysettings["DISTDIR"]+"/\n",verbosity)
812 can_fetch=False
813 else:
814 mystat=os.stat(mysettings["DISTDIR"]+"/")
815 if mystat.st_gid != portage_gid:
816 try:
817 os.chown(mysettings["DISTDIR"],-1,portage_gid)
818 except OSError, oe:
819 if oe.errno == 1:
820 writemsg(red("!!!")+" Unable to chgrp of %s to portage, continuing\n" %
821 mysettings["DISTDIR"],verbosity)
822 else:
823 raise oe
824
825 # writable by portage_gid? This is specific to root, adjust perms if needed automatically.
826 if not stat.S_IMODE(mystat.st_mode) & 020:
827 try:
828 os.chmod(mysettings["DISTDIR"],stat.S_IMODE(mystat.st_mode) | 020)
829 except OSError, oe:
830 if oe.errno == 1:
831 writemsg(red("!!!")+" Unable to chmod %s to perms 0755. Non-root users will experience issues.\n" % mysettings["DISTDIR"],verbosity)
832 else:
833 raise oe
834
835 if use_locks and locks_in_subdir:
836 if os.path.exists(mysettings["DISTDIR"]+"/"+locks_in_subdir):
837 if not os.access(mysettings["DISTDIR"]+"/"+locks_in_subdir,os.W_OK):
838 writemsg("!!! No write access to write to %s. Aborting.\n" % mysettings["DISTDIR"]+"/"+locks_in_subdir,verbosity)
839 return 0
840 else:
841 old_umask=os.umask(0002)
842 os.mkdir(mysettings["DISTDIR"]+"/"+locks_in_subdir,0775)
843 if os.stat(mysettings["DISTDIR"]+"/"+locks_in_subdir).st_gid != portage_gid:
844 try:
845 os.chown(mysettings["DISTDIR"]+"/"+locks_in_subdir,-1,portage_gid)
846 except SystemExit, e:
847 raise
848 except:
849 pass
850 os.umask(old_umask)
851
852
853 fetcher = get_preferred_fetcher()
854 for myfile in filedict.keys():
855 fetched=0
856 file_lock = None
857 if listonly:
858 writemsg("\n",verbosity)
859 else:
860 if use_locks and can_fetch:
861 if locks_in_subdir:
862 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1,verbosity=verbosity)
863 else:
864 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1,verbosity=verbosity)
865 try:
866 for loc in filedict[myfile]:
867 if listonly:
868 writemsg(loc+" ",verbosity)
869 continue
870
871 try:
872 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
873 if mydigests.has_key(myfile):
874 #if we have the digest file, we know the final size and can resume the download.
875 if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
876 fetched=1
877 else:
878 #we already have it downloaded, skip.
879 #if our file is bigger than the recorded size, digestcheck should catch it.
880 if not fetchonly:
881 fetched=2
882 else:
883 # Check md5sum's at each fetch for fetchonly.
884 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
885 if not verified_ok:
886 writemsg("!!! Previously fetched file: "+str(myfile)+"\n!!! Reason: "+reason+"\nRefetching...\n\n",verbosity)
887 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
888 fetched=0
889 else:
890 for x_key in mydigests[myfile].keys():
891 writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n",verbosity)
892 fetched=2
893 break #No need to keep looking for this file, we have it!
894 else:
895 #we don't have the digest file, but the file exists. Assume it is fully downloaded.
896 fetched=2
897 except (OSError,IOError),e:
898 writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),verbosity+1)
899 fetched=0
900
901 if not can_fetch:
902 if fetched != 2:
903 if fetched == 0:
904 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile,verbosity)
905 else:
906 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile,verbosity)
907 return 0
908 else:
909 continue
910
911 # check if we can actually write to the directory/existing file.
912 if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
913 os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK):
914 writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile),verbosity)
915 fetched=0
916 break
917 elif fetched!=2:
918 #we either need to resume or start the download
919 #you can't use "continue" when you're inside a "try" block
920 if fetched==1:
921 #resume mode:
922 writemsg(">>> Resuming download...\n",verbosity)
923 locfetch=fetcher.resume
924 else:
925 #normal mode:
926 locfetch=fetcher.fetch
927 writemsg(">>> Downloading "+str(loc)+"\n",verbosity)
928 try:
929 myret=locfetch(loc,file_name=mysettings["DISTDIR"]+"/"+myfile, \
930 verbose=(verbosity==0))
931 if myret==127 and \
932 isinstance(fetcher,transports.fetchcommand.CustomConnection):
933 # this is an indication of a missing libs for the binary.
934 # fex: USE="ssl" wget, missing libssl.
935 #
936 # lets try to be helpful. ;-)
937 f=transports.bundled_lib.BundledConnection()
938 if fetched==1:
939 myret=f.resume(loc, \
940 file_name=mysettings["DISTDIR"]+"/"+myfile,
941 verbose=(verbosity==0))
942 else:
943 myret=f.fetch(loc, \
944 file_name=mysettings["DISTDIR"]+"/"+myfile,
945 verbose=(verbosity==0))
946 if not myret:
947 writemsg(red("!!!")+"\n")
948 writemsg(red("!!!")+" FETCHCOMMAND/RESUMECOMMAND exited with code 127\n")
949 writemsg(red("!!!")+" This is indicative of missing libs for the fetch/resume binaries\n")
950 writemsg(red("!!!")+" Added, the independ BundledConnection succeeded\n")
951 writemsg(red("!!!")+" Please check your installation.\n")
952 writemsg(red("!!!")+" Defaulting to BundledConnection for the remainder of this fetch request\n")
953 writemsg(red("!!!")+"\n")
954 fetcher = f
955 finally:
956 #if root, -always- set the perms.
957 if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0):
958 if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
959 try:
960 os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
961 except SystemExit, e:
962 raise
963 except:
964 writemsg("chown failed on distfile: " + str(myfile),verbosity)
965 os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
966
967 if mydigests!=None and mydigests.has_key(myfile):
968 try:
969 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
970 # no exception? file exists. let digestcheck() report
971 # an appropriately for size or md5 errors
972 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
973 # Fetch failed... Try the next one... Kill 404 files though.
974 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
975 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
976 try:
977 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
978 try:
979 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
980 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n",verbosity)
981 except SystemExit, e:
982 raise
983 except:
984 pass
985 except SystemExit, e:
986 raise
987 except:
988 pass
989 continue
990 if not fetchonly:
991 fetched=2
992 break
993 else:
994 # File is the correct size--check the MD5 sum for the fetched
995 # file NOW, for those users who don't have a stable/continuous
996 # net connection. This way we have a chance to try to download
997 # from another mirror...
998 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
999 if not verified_ok:
1000 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n!!! Reason: "+reason+"\nRemoving corrupt distfile...\n",verbosity)
1001 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1002 fetched=0
1003 else:
1004 for x_key in mydigests[myfile].keys():
1005 writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n",verbosity)
1006 fetched=2
1007 break
1008 except (OSError,IOError),e:
1009 writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),verbosity+1)
1010 fetched=0
1011 else:
1012 if not myret:
1013 fetched=2
1014 break
1015 elif mydigests!=None:
1016 writemsg("No digest file available and download failed.\n\n")
1017 finally:
1018 if use_locks and file_lock:
1019 portage_locks.unlockfile(file_lock)
1020
1021 if listonly:
1022 writemsg("\n")
1023 if (fetched!=2) and not listonly:
1024 writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n",verbosity)
1025 return 0
1026 return 1
1027
1028
1029 def digestCreate(myfiles,basedir,oldDigest={}):
1030 """Takes a list of files and the directory they are in and returns the
1031 dict of dict[filename][CHECKSUM_KEY] = hash
1032 returns None on error."""
1033 mydigests={}
1034 for x in myfiles:
1035 print "<<<",x
1036 myfile=os.path.normpath(basedir+"///"+x)
1037 if os.path.exists(myfile):
1038 if not os.access(myfile, os.R_OK):
1039 print "!!! Given file does not appear to be readable. Does it exist?"
1040 print "!!! File:",myfile
1041 return None
1042 mydigests[x] = portage_checksum.perform_all(myfile)
1043 mysize = os.stat(myfile)[stat.ST_SIZE]
1044 else:
1045 if x in oldDigest:
1046 # DeepCopy because we might not have a unique reference.
1047 mydigests[x] = copy.deepcopy(oldDigest[x])
1048 mysize = oldDigest[x]["size"]
1049 else:
1050 print "!!! We have a source URI, but no file..."
1051 print "!!! File:",myfile
1052 return None
1053
1054 if "size" in mydigests[x] and (mydigests[x]["size"] != mysize):
1055 raise portage_exception.DigestException, "Size mismatch during checksums"
1056 mydigests[x]["size"] = mysize
1057
1058
1059 return mydigests
1060
1061 def digestCreateLines(filelist, mydigests):
1062 mylines = []
1063 for myarchive in filelist:
1064 mysize = mydigests[myarchive]["size"]
1065 if len(mydigests[myarchive]) == 0:
1066 raise portage_exception.DigestException, "No generate digest for '%(file)s'" % {"file":myarchive}
1067 for sumName in mydigests[myarchive].keys():
1068 if sumName not in portage_checksum.get_valid_checksum_keys():
1069 continue
1070 mysum = mydigests[myarchive][sumName]
1071
1072 myline = " ".join([sumName, mysum, myarchive, str(mysize)])
1073 if sumName != "MD5":
1074 # XXXXXXXXXXXXXXXX This cannot be used!
1075 # Older portage make very dumb assumptions about the formats.
1076 # We need a lead-in period before we break everything.
1077 continue
1078 mylines.append(myline)
1079 return mylines
1080
def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0,verbosity=0):
	"""generates digest file if missing.  Assumes all files are available.  If
	overwrite=0, the digest will only be created if it doesn't already exist.

	myarchives   -- list of distfile names to checksum into the digest file
	mysettings   -- config object; DISTDIR, FILESDIR, O and PF are read
	overwrite    -- when false, keep an already-existing digest file
	manifestonly -- skip digest generation and only rebuild the Manifest
	verbosity    -- passed through to writemsg()
	Returns 1 on success, 0 on any filesystem/generation error."""

	# archive files
	basedir=mysettings["DISTDIR"]+"/"
	digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]

	# portage files -- p(ortagefiles)basedir
	pbasedir=mysettings["O"]+"/"
	manifestfn=pbasedir+"Manifest"

	if not manifestonly:
		if not os.path.isdir(mysettings["FILESDIR"]):
			os.makedirs(mysettings["FILESDIR"])
		mycvstree=cvstree.getentries(pbasedir, recursive=1)

		# Keep files/ under CVS control (or warn) when the ebuild dir is a
		# CVS checkout and the "cvs" feature is enabled.
		if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
			if not cvstree.isadded(mycvstree,"files"):
				if "autoaddcvs" in features:
					writemsg(">>> Auto-adding files/ dir to CVS...\n",verbosity - 1)
					spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
				else:
					writemsg("--- Warning: files/ is not added to cvs.\n",verbosity)

		if (not overwrite) and os.path.exists(digestfn):
			return 1

		print green(">>> Generating digest file...")

		# Track the old digest so that we can assume checksums without requiring
		# all files be downloaded. 'Assuming'
		# XXX: <harring>- why does this seem like a way to pollute the hell out of the
		# digests?  This strikes me as lining the path between your bed and coffee machine
		# with land mines...
		myolddigest = {}
		if os.path.exists(digestfn):
			myolddigest = digestParseFile(digestfn)

		mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
		if mydigests==None: # There was a problem, exit with an errorcode.
			return 0

		try:
			outfile=open(digestfn, "w+")
		except SystemExit, e:
			raise
		except Exception, e:
			print "!!! Filesystem error skipping generation. (Read-Only?)"
			print "!!!",e
			return 0
		for x in digestCreateLines(myarchives, mydigests):
			outfile.write(x+"\n")
		outfile.close()
		# Best-effort ownership/permission fixup; failure is only reported.
		try:
			os.chown(digestfn,os.getuid(),portage_gid)
			os.chmod(digestfn,0664)
		except SystemExit, e:
			raise
		except Exception,e:
			print e

	print green(">>> Generating manifest file...")
	# The Manifest covers every file in the ebuild dir except CVS data,
	# cvsignore'd files and the Manifest itself.
	mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1)
	mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
	if "Manifest" in mypfiles:
		del mypfiles[mypfiles.index("Manifest")]

	mydigests=digestCreate(mypfiles, pbasedir)
	if mydigests==None: # There was a problem, exit with an errorcode.
		return 0

	try:
		outfile=open(manifestfn, "w+")
	except SystemExit, e:
		raise
	except Exception, e:
		print "!!! Filesystem error skipping generation. (Read-Only?)"
		print "!!!",e
		return 0
	for x in digestCreateLines(mypfiles, mydigests):
		outfile.write(x+"\n")
	outfile.close()
	try:
		os.chown(manifestfn,os.getuid(),portage_gid)
		os.chmod(manifestfn,0664)
	except SystemExit, e:
		raise
	except Exception,e:
		print e

	# Offer to add the newly written digest/Manifest to CVS as well.
	if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
		mycvstree=cvstree.getentries(pbasedir, recursive=1)
		myunaddedfiles=""
		if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
			if digestfn[:len(pbasedir)]==pbasedir:
				myunaddedfiles=digestfn[len(pbasedir):]+" "
			else:
				myunaddedfiles=digestfn+" "
		# NOTE(review): digestfn above is checked with its full path while
		# manifestfn here is checked relative to pbasedir -- confirm which
		# form cvstree.isadded() expects.
		if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
			if manifestfn[:len(pbasedir)]==pbasedir:
				myunaddedfiles+=manifestfn[len(pbasedir):]+" "
			else:
				myunaddedfiles+=manifestfn
		if myunaddedfiles:
			if "autoaddcvs" in features:
				print blue(">>> Auto-adding digest file(s) to CVS...")
				spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
			else:
				print "--- Warning: digests are not yet added into CVS."
	print darkgreen(">>> Computed message digests.")
	print
	return 1
1194
1195
1196 def digestParseFile(myfilename):
1197 """(filename) -- Parses a given file for entries matching:
1198 MD5 MD5_STRING_OF_HEX_CHARS FILE_NAME FILE_SIZE
1199 Ignores lines that do not begin with 'MD5' and returns a
1200 dict with the filenames as keys and [md5,size] as the values."""
1201
1202 if not os.path.exists(myfilename):
1203 return None
1204 mylines = portage_util.grabfile(myfilename, compat_level=1)
1205
1206 mydigests={}
1207 for x in mylines:
1208 myline=x.split()
1209 if len(myline) < 4:
1210 #invalid line
1211 continue
1212 if myline[0] not in portage_checksum.get_valid_checksum_keys():
1213 continue
1214 mykey = myline.pop(0)
1215 myhash = myline.pop(0)
1216 mysize = long(myline.pop())
1217 myfn = " ".join(myline)
1218 if myfn not in mydigests:
1219 mydigests[myfn] = {}
1220 mydigests[myfn][mykey] = myhash
1221 if "size" in mydigests[myfn]:
1222 if mydigests[myfn]["size"] != mysize:
1223 raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
1224 else:
1225 mydigests[myfn]["size"] = mysize
1226 return mydigests
1227
1228 # XXXX strict was added here to fix a missing name error.
1229 # XXXX It's used below, but we're not paying attention to how we get it?
def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0,verbosity=0):
	"""(fileslist, digestdict, basedir) -- Verify each file in myfiles
	(relative to basedir) against the checksum dict mydigests.  Returns 1
	only when every file has a digest entry and verifies; a file that is
	missing on disk fails the check only in strict mode."""
	for fname in myfiles:
		if fname not in mydigests:
			writemsg("\n",verbosity)
			writemsg(red("!!! No message digest entry found for file \""+fname+".\"")+"\n"+
				"!!! Most likely a temporary problem. Try 'emerge sync' again later.\n"+
				"!!! If you are certain of the authenticity of the file then you may type\n"+
				"!!! the following to generate a new digest:\n"+
				"!!! ebuild /usr/portage/category/package/package-version.ebuild digest\n",
				verbosity)
			return 0
		fullpath=os.path.normpath(basedir+"/"+fname)
		if not os.path.exists(fullpath):
			# Absent files are tolerated outside strict mode (e.g. when
			# only a subset of distfiles was fetched).
			if strict:
				writemsg("!!! File does not exist:"+str(fullpath)+"\n",verbosity)
				return 0
			continue

		ok,reason = portage_checksum.verify_all(fullpath,mydigests[fname])
		if ok:
			writemsg(">>> md5 "+note+" ;-) %s\n" % str(fname),verbosity)
		else:
			writemsg("\n"+red("!!! Digest verification Failed:")+"\n"+
				red("!!!")+" "+str(fullpath)+"\n"+
				red("!!! Reason: ")+reason+"\n",
				verbosity)
			return 0
	return 1
1262
1263
def digestcheck(myfiles, mysettings, strict=0,verbosity=0):
	"""Checks md5sums.  Assumes all files have been downloaded.

	myfiles    -- distfile names to verify against the package digest
	mysettings -- config object; DISTDIR, FILESDIR, O and PF are read
	strict     -- when true, missing/unparseable/mismatching data fails
	verbosity  -- passed through to writemsg()
	Returns 1 when everything verifies, 0 otherwise.  With the "digest"
	feature enabled, missing digest/Manifest files are regenerated via
	digestgen() instead of failing."""
	# archive files
	basedir=mysettings["DISTDIR"]+"/"
	digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]

	# portage files -- p(ortagefiles)basedir
	pbasedir=mysettings["O"]+"/"
	manifestfn=pbasedir+"Manifest"

	if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
		if "digest" in features:
			writemsg(">>> No package digest/Manifest file found.\n",verbosity)
			writemsg(">>> \"digest\" mode enabled; auto-generating new digest...\n",verbosity)
			return digestgen(myfiles,mysettings,verbosity=verbosity)
		else:
			# A missing Manifest is fatal only in strict mode; a missing
			# digest file is always fatal.
			if not os.path.exists(manifestfn):
				if strict:
					writemsg(red("!!! No package manifest found:")+" %s\n" % manifestfn,verbosity)
					return 0
				else:
					writemsg("--- No package manifest found: %s\n" % manifestfn,verbosity)
			if not os.path.exists(digestfn):
				writemsg("!!! No package digest file found: %s\n" % digestfn,verbosity)
				writemsg("!!! Type \"ebuild foo.ebuild digest\" to generate it.\n", verbosity)
				return 0

	mydigests=digestParseFile(digestfn)
	if mydigests==None:
		writemsg("!!! Failed to parse digest file: %s\n" % digestfn, verbosity)
		return 0
	mymdigests=digestParseFile(manifestfn)
	if "strict" not in features:
		# Manifest verification is skipped entirely without the "strict"
		# feature.
		# XXX: Remove this when manifests become mainstream.
		pass
	elif mymdigests==None:
		writemsg("!!! Failed to parse manifest file: %s\n" % manifestfn,verbosity)
		if strict:
			return 0
	else:
		# Check the portage-related files here.
		mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1)
		manifest_files = mymdigests.keys()
		# Walk backwards so entries can be deleted while iterating.
		for x in range(len(mymfiles)-1,-1,-1):
			if mymfiles[x]=='Manifest': # We don't want the manifest in out list.
				del mymfiles[x]
				continue
			if mymfiles[x] in manifest_files:
				manifest_files.remove(mymfiles[x])
			elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
				# we filter here, rather then above; manifest might have files flagged by the filter.
				# if something is returned, then it's flagged as a bad file
				# manifest doesn't know about it, so we kill it here.
				del mymfiles[x]
			else:
				# On-disk file with no Manifest entry and not cvsignore'd.
				writemsg(red("!!! Security Violation: A file exists that is not in the manifest.")+"\n",verbosity)
				writemsg("!!! File: %s\n" % mymfiles[x],verbosity)
				if strict:
					return 0

		# Allow a user-maintained exclude list to waive leftover Manifest
		# entries in strict mode.
		if manifest_files and strict:
			for x in grabfile(USER_CONFIG_PATH+"/manifest_excludes"):
				if x in manifest_files:
					#writemsg(yellow(">>>")+" md5-ignore: "+x,verbosity)
					manifest_files.remove(x)

		if manifest_files:
			writemsg(red("!!! Files listed in the manifest do not exist!")+"\n",verbosity)
			for x in manifest_files:
				writemsg(x+"\n",verbosity)
			return 0

		if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict, verbosity=verbosity):
			if strict:
				writemsg(">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and\n"+
					">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")+"\n\n",
					verbosity)
				return 0
			else:
				writemsg("--- Manifest check failed. 'strict' not enabled; ignoring.\n\n",verbosity)

	# Just return the status, as it's the last check.
	return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict,verbosity=verbosity)
1347
# note, use_info_env is a hack that lets treewalk supply the correct env to
# the setup phase. it sucks, but so does this doebuild.
def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,\
	fetchall=0,tree="porttree",allstages=True,use_info_env=True,verbosity=0):
	"""Thin compatibility wrapper: delegates the requested ebuild phase to
	ebuild.ebuild_handler().process_phase() and returns its result.  The
	dbkey argument is accepted for API compatibility but not forwarded."""
	handler = ebuild.ebuild_handler()
	return handler.process_phase(mydo, mysettings, myebuild, myroot,
		debug=debug, listonly=listonly, fetchonly=fetchonly,
		cleanup=cleanup, use_cache=use_cache, fetchall=fetchall,
		tree=tree, allstages=allstages,
		use_info_env=use_info_env, verbosity=verbosity)
1360
1361
1362 expandcache={}
1363
def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None):
	"""Merge the package cat/pkg from the image at pkgloc (with metadata
	from infloc) into myroot via a dblink instance; returns its result."""
	return dblink(mycat,mypkg,myroot,mysettings).merge(pkgloc,infloc,myroot,myebuild)
1367
def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
	"""Unmerge cat/pkg from myroot and delete its vdb entry; a no-op when
	the package is not installed."""
	mypkglink=dblink(cat,pkg,myroot,mysettings)
	if not mypkglink.exists():
		return
	mypkglink.unmerge(trimworld=mytrimworld,cleanup=1)
	mypkglink.delete()
1373
def getCPFromCPV(mycpv):
	"""Calls portage_versions.pkgsplit on a cpv and returns only the cp."""
	parts = portage_versions.pkgsplit(mycpv)
	return parts[0]
1377
1378
def dep_parenreduce(mysplit,mypos=0):
	"""Convert a flat token list into a nested one: every run of tokens
	delimited by "(" and ")" is collapsed into a sub-list.  Mutates
	mysplit in place (via slice assignment) and returns it."""
	while mypos < len(mysplit):
		if mysplit[mypos] != "(":
			# Plain token: nothing to do.
			mypos += 1
			continue
		start = mypos
		mypos += 1
		while mypos < len(mysplit):
			token = mysplit[mypos]
			if token == ")":
				# Collapse "( ... )" into a single nested list and rescan
				# from the position of the new sub-list.
				mysplit[start:mypos+1] = [mysplit[start+1:mypos]]
				mypos = start
				break
			if token == "(":
				# Nested group: reduce it first, then keep scanning.
				mysplit = dep_parenreduce(mysplit,mypos=mypos)
			mypos += 1
		mypos += 1
	return mysplit
1399
def dep_opconvert(mysplit,myuse,mysettings):
	"""
	Does dependency operator conversion

	Walks the nested token list produced by dep_parenreduce and resolves
	the operators: "||" is folded into its following list, "flag?" /
	"!flag?" conditionals are replaced by their enabled branch (with
	optional ":" alternate syntax), and sub-lists are converted
	recursively.  myuse is the list of enabled USE flags; ["*"] (with a
	valid mysettings) is repoman's enable-everything mode.  Returns the
	converted list, or None on a syntax error (stray ")" or "||" not
	followed by a list).
	"""


	mypos=0
	newsplit=[]
	while mypos<len(mysplit):
		if type(mysplit[mypos])==types.ListType:
			# Nested group: convert recursively.
			newsplit.append(dep_opconvert(mysplit[mypos],myuse,mysettings))
			mypos += 1
		elif mysplit[mypos]==")":
			#mismatched paren, error
			return None
		elif mysplit[mypos]=="||":
			if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
				# || must be followed by paren'd list
				return None
			try:
				mynew=dep_opconvert(mysplit[mypos+1],myuse,mysettings)
			except SystemExit, e:
				raise
			except Exception, e:
				print "!!! Unable to satisfy OR dependency:", " || ".join(mysplit)
				raise
			# Fold the "||" marker into the front of the converted list.
			mynew[0:0]=["||"]
			newsplit.append(mynew)
			mypos += 2
		elif mysplit[mypos][-1]=="?":
			#uses clause, i.e "gnome? ( foo bar )"
			#this is a quick and dirty hack so that repoman can enable all USE vars:
			if (len(myuse)==1) and (myuse[0]=="*") and mysettings:
				# enable it even if it's ! (for repoman) but kill it if it's
				# an arch variable that isn't for this arch. XXX Sparc64?
				k=mysplit[mypos][:-1]
				if k[0]=="!":
					k=k[1:]
				if k not in archlist and k not in mysettings.usemask:
					enabled=1
				elif k in archlist:
					# Arch flags only count for the current ARCH; an
					# inverted arch conditional flips the result.
					if k==mysettings["ARCH"]:
						if mysplit[mypos][0]=="!":
							enabled=0
						else:
							enabled=1
					elif mysplit[mypos][0]=="!":
						enabled=1
					else:
						enabled=0
				else:
					enabled=0
			else:
				# Normal mode: enabled iff the flag's presence in myuse
				# matches the (possibly negated) conditional.
				if mysplit[mypos][0]=="!":
					myusevar=mysplit[mypos][1:-1]
					if myusevar in myuse:
						enabled=0
					else:
						enabled=1
				else:
					myusevar=mysplit[mypos][:-1]
					if myusevar in myuse:
						enabled=1
					else:
						enabled=0
			if (mypos+2<len(mysplit)) and (mysplit[mypos+2]==":"):
				#colon mode: "flag? A : B" -- pick A when enabled, else B.
				if enabled:
					#choose the first option
					if type(mysplit[mypos+1])==types.ListType:
						newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
					else:
						newsplit.append(mysplit[mypos+1])
				else:
					#choose the alternate option
					if type(mysplit[mypos+1])==types.ListType:
						newsplit.append(dep_opconvert(mysplit[mypos+3],myuse,mysettings))
					else:
						newsplit.append(mysplit[mypos+3])
				# Consumed: conditional, first option, ":", alternate.
				mypos += 4
			else:
				#normal use mode: keep the guarded item only when enabled.
				if enabled:
					if type(mysplit[mypos+1])==types.ListType:
						newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
					else:
						newsplit.append(mysplit[mypos+1])
				#otherwise, continue.
				mypos += 2
		else:
			#normal item
			newsplit.append(mysplit[mypos])
			mypos += 1
	return newsplit
1494
def dep_virtual(mysplit, mysettings):
	"""
	Does virtual dependency conversion

	Replaces each atom whose key is a known virtual with its provider
	atom(s): a single provider is substituted in place, several providers
	become an "||" choice list (or a plain AND-list for "!" blockers).
	Returns a new list; sub-lists are converted recursively.
	"""
	newsplit=[]
	for token in mysplit:
		if type(token) is list:
			newsplit.append(dep_virtual(token, mysettings))
			continue
		mykey=portage_dep.dep_getkey(token)
		if not mysettings.virtuals.has_key(mykey):
			# Not a virtual; keep the atom untouched.
			newsplit.append(token)
			continue
		providers=mysettings.virtuals[mykey]
		if len(providers)==1:
			newsplit.append(token.replace(mykey, providers[0]))
			continue
		if token[0]=="!":
			# blocker needs "and" not "or(||)".
			expanded=[]
		else:
			expanded=['||']
		for provider in providers:
			expanded.append(token.replace(mykey, provider))
		newsplit.append(expanded)
	return newsplit
1520
def dep_eval(deplist):
	"""Evaluate a reduced dependency list (0/1/2 flags and nested lists)
	to 1 (satisfied) or 0 (unsatisfied).  A list starting with "||" is
	satisfied when any member is; any other list requires all members.
	The value 2 counts as unsatisfied in AND context."""
	if not deplist:
		return 1
	if deplist[0]=="||":
		# OR-list: one true member is enough.
		for member in deplist[1:]:
			if type(member) is list:
				if dep_eval(member)==1:
					return 1
			elif member==1:
				return 1
		return 0
	# AND-list: a single failing member sinks it.
	for member in deplist:
		if type(member) is list:
			if dep_eval(member)==0:
				return 0
		elif member==0 or member==2:
			return 0
	return 1
1541
def dep_zapdeps(unreduced,reduced,vardbapi=None,use_binaries=0):
	"""
	Takes an unreduced and reduced deplist and removes satisfied dependencies.
	Returned deplist contains steps that must be taken to satisfy dependencies.

	unreduced is the atom list, reduced its parallel 0/1 evaluation from
	dep_wordreduce.  For an "||" list the single best candidate branch is
	chosen (installed packages first, then binaries when use_binaries,
	then the portage tree, else the first branch).  Relies on the module
	globals db and root for tree access; vardbapi is currently unused
	(see the bug 45468 note below).
	"""
	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
	if unreduced==[] or unreduced==['||'] :
		return []
	if unreduced[0]=="||":
		if dep_eval(reduced):
			#deps satisfied, return empty list.
			return []
		else:
			#try to find an installed dep.
			### We use fakedb when --update now, so we can't use local vardbapi here.
			### This should be fixed in the feature.
			### see bug 45468.
			##if vardbapi:
			##	mydbapi=vardbapi
			##else:
			##	mydbapi=db[root]["vartree"].dbapi
			mydbapi=db[root]["vartree"].dbapi

			if db["/"].has_key("porttree"):
				myportapi=db["/"]["porttree"].dbapi
			else:
				myportapi=None

			if use_binaries and db["/"].has_key("bintree"):
				mybinapi=db["/"]["bintree"].dbapi
				writemsg("Using bintree...\n",2)
			else:
				mybinapi=None

			# Build one candidate (list of unsatisfied atoms) per branch
			# of the "||"; index 0 is the "||" marker itself, so start at 1.
			x=1
			candidate=[]
			while x<len(reduced):
				writemsg("x: %s, reduced[x]: %s\n" % (x,reduced[x]), 2)
				if (type(reduced[x])==types.ListType):
					newcand = dep_zapdeps(unreduced[x], reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
					candidate.append(newcand)
				else:
					if (reduced[x]==False):
						candidate.append([unreduced[x]])
					else:
						candidate.append([])
				x+=1

			#use installed and no-masked package(s) in portage.
			for x in candidate:
				match=1
				for pkg in x:
					if not mydbapi.match(pkg):
						match=0
						break
					if myportapi:
						if not myportapi.match(pkg):
							match=0
							break
				if match:
					writemsg("Installed match: %s\n" % (x), 2)
					return x

			# Use binary packages if available.
			if mybinapi:
				for x in candidate:
					match=1
					for pkg in x:
						if not mybinapi.match(pkg):
							match=0
							break
						else:
							writemsg("Binary match: %s\n" % (pkg), 2)
					if match:
						writemsg("Binary match final: %s\n" % (x), 2)
						return x

			#use no-masked package(s) in portage tree
			if myportapi:
				for x in candidate:
					match=1
					for pkg in x:
						if not myportapi.match(pkg):
							match=0
							break
					if match:
						writemsg("Porttree match: %s\n" % (x), 2)
						return x

			#none of the no-masked pkg, use the first one
			writemsg("Last resort candidate: %s\n" % (candidate[0]), 2)
			return candidate[0]
	else:
		if dep_eval(reduced):
			#deps satisfied, return empty list.
			return []
		else:
			# AND-list: collect every unsatisfied atom (recursing into
			# sub-lists).
			returnme=[]
			x=0
			while x<len(reduced):
				if type(reduced[x])==types.ListType:
					returnme+=dep_zapdeps(unreduced[x],reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
				else:
					if reduced[x]==False:
						returnme.append(unreduced[x])
				x += 1
			return returnme
1649
def cpv_getkey(mycpv):
	"""Return the category/package key of a cpv string, e.g.
	"sys-apps/foo-1.0" -> "sys-apps/foo".  Without a category only the
	package name is returned."""
	catsplit=mycpv.split("/")
	pkgparts=portage_versions.pkgsplit(catsplit[-1])
	if len(catsplit)==2:
		return catsplit[0]+"/"+pkgparts[0]
	if len(catsplit)==1:
		return pkgparts[0]
	# More than one "/" is malformed; hand back the raw pkgsplit result.
	return pkgparts
1660
1661 def key_expand(mykey,mydb=None,use_cache=1):
1662 mysplit=mykey.split("/")
1663 if len(mysplit)==1:
1664 if mydb and type(mydb)==types.InstanceType:
1665 for x in settings.categories:
1666 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
1667 return x+"/"+mykey
1668 if virts_p.has_key(mykey):
1669 print "VIRTS_P (Report to #gentoo-portage or bugs.g.o):",mykey
1670 return(virts_p[mykey][0])
1671 return "null/"+mykey
1672 elif mydb:
1673 if type(mydb)==types.InstanceType:
1674 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
1675 return virts[mykey][0]
1676 return mykey
1677
def cpv_expand(mycpv,mydb=None,use_cache=1):
	"""
	Given a string (packagename or virtual) expand it into a valid
	cat/package string. Virtuals use the mydb to determine which provided
	virtual is a valid choice and defaults to the first element when there
	are no installed/available candidates.

	Relies on the module globals settings, virts and virts_p for category
	and virtuals data.  Raises ValueError (with the match list) when a
	bare name is ambiguous across categories.
	"""
	myslash=mycpv.split("/")
	mysplit=portage_versions.pkgsplit(myslash[-1])
	if len(myslash)>2:
		# this is illegal case.
		mysplit=[]
		mykey=mycpv
	elif len(myslash)==2:
		# Category present; key is cat/pkg (version stripped when present).
		if mysplit:
			mykey=myslash[0]+"/"+mysplit[0]
		else:
			mykey=mycpv
		if mydb:
			writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
			if type(mydb)==types.InstanceType:
				# Key not in the db but known as a virtual: pick the first
				# provider the db actually has, else default to provider 0.
				if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
					writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
					mykey_orig = mykey
					for vkey in virts[mykey]:
						if mydb.cp_list(vkey,use_cache=use_cache):
							mykey = vkey
							writemsg("virts chosen: %s\n" % (mykey), 1)
							break
					if mykey == mykey_orig:
						mykey=virts[mykey][0]
						writemsg("virts defaulted: %s\n" % (mykey), 1)
		#we only perform virtual expansion if we are passed a dbapi
	else:
		#specific cpv, no category, ie. "foo-1.0"
		if mysplit:
			myp=mysplit[0]
		else:
			# "foo" ?
			myp=mycpv
		mykey=None
		matches=[]
		if mydb:
			# Probe every configured category for this package name.
			for x in settings.categories:
				if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
					matches.append(x+"/"+myp)
		if (len(matches)>1):
			raise ValueError, matches
		elif matches:
			mykey=matches[0]

		if not mykey and type(mydb)!=types.ListType:
			if virts_p.has_key(myp):
				print "VIRTS_P,ce (Report to #gentoo-portage or bugs.g.o):",myp
				mykey=virts_p[myp][0]
			#again, we only perform virtual expansion if we have a dbapi (not a list)
		if not mykey:
			mykey="null/"+myp
	if mysplit:
		# Re-attach the version; the "-r0" revision suffix is dropped.
		if mysplit[2]=="r0":
			return mykey+"-"+mysplit[1]
		else:
			return mykey+"-"+mysplit[1]+"-"+mysplit[2]
	else:
		return mykey
1743
def dep_transform(mydep,oldkey,newkey):
	"""Rewrite dependency atom mydep, substituting newkey for oldkey while
	keeping any version-operator prefix (>=, <=, =, <, >, ~, !) and any
	trailing "*" glob.  The atom is returned unchanged (including a
	leading "*") when its stripped key is not exactly oldkey."""
	if not mydep:
		return mydep
	origdep=mydep
	if mydep[0]=="*":
		mydep=mydep[1:]
	prefix=""
	postfix=""
	if mydep[-1]=="*":
		postfix="*"
		mydep=mydep[:-1]
	# Two-character operators first so ">=" is not mistaken for ">".
	if mydep[:2] in [ ">=", "<=" ]:
		prefix=mydep[:2]
		mydep=mydep[2:]
	elif mydep[:1] in "=<>~!":
		prefix=mydep[:1]
		mydep=mydep[1:]
	if mydep==oldkey:
		return prefix+newkey+postfix
	return origdep
1765
def dep_expand(mydep,mydb=None,use_cache=1):
	"""Expand the package key inside dependency atom mydep via
	cpv_expand(), preserving any version-operator prefix and trailing
	"*" glob.  A leading "*" is stripped; an empty atom is returned
	unchanged."""
	if not mydep:
		return mydep
	if mydep[0]=="*":
		mydep=mydep[1:]
	prefix=""
	postfix=""
	if mydep[-1]=="*":
		postfix="*"
		mydep=mydep[:-1]
	# Two-character operators first so ">=" is not mistaken for ">".
	if mydep[:2] in [ ">=", "<=" ]:
		prefix=mydep[:2]
		mydep=mydep[2:]
	elif mydep[:1] in "=<>~!":
		prefix=mydep[:1]
		mydep=mydep[1:]
	return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
1783
def get_parsed_deps(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None):
	"""Parse depstring into a nested list with USE conditionals resolved.

	use="all" enables every flag (repoman); use="yes" takes flags from
	myuse or, when that is None, from mysettings["USE"]; any other value
	means autouse() is calling and no USE data is consulted.  With a
	valid mysettings, profile-masked flags and foreign arch keywords are
	masked out of the reduction.  mydbapi and mode are accepted for API
	compatibility but unused here."""
	if use=="all":
		# Repoman mode: treat every USE flag as enabled.
		myusesplit=["*"]
	elif use=="yes":
		if myuse is None:
			#default behavior
			myusesplit = mysettings["USE"].split()
		else:
			# Caller supplied an explicit flag list.
			myusesplit = myuse
	else:
		#we are being run by autouse(), don't consult USE vars yet.
		# WE ALSO CANNOT USE SETTINGS
		myusesplit=[]

	#convert parenthesis to sublists
	mysplit = portage_dep.paren_reduce(depstring)

	if not mysettings:
		return portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))

	# Mask profile-masked flags plus every arch keyword except our own.
	# XXX: use="all" is only used by repoman. Why would repoman checks want
	# profile-masked USE flags to be enabled?
	mymasks=mysettings.usemask+archlist[:]
	while mysettings["ARCH"] in mymasks:
		mymasks.remove(mysettings["ARCH"])
	return portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
1824
def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0):
	"""Takes a depend string and parses the condition.

	Returns [1, atoms] where atoms is the (deduplicated) list of steps
	still needed to satisfy the dependencies (empty when satisfied), or
	[0, message] on a parse problem."""

	parsed=get_parsed_deps(depstring,mydbapi,mysettings,use=use,myuse=myuse)
	# Do the || conversions
	parsed=portage_dep.dep_opconvert(parsed)

	#convert virtual dependencies to normal packages.
	parsed=dep_virtual(parsed, mysettings)

	# None signals a paren mismatch or misplaced "||"; nothing up to this
	# point has needed database access.
	if parsed is None:
		return [0,"Parse Error (parentheses mismatch?)"]
	if parsed==[]:
		#dependencies were reduced to nothing
		return [1,[]]
	reduced=dep_wordreduce(parsed[:],mysettings,mydbapi,mode,use_cache=use_cache)
	if reduced is None:
		return [0,"Invalid token"]

	writemsg("\n\n\n", 1)
	writemsg("mysplit:  %s\n" % (parsed), 1)
	writemsg("mysplit2: %s\n" % (reduced), 1)
	myeval=dep_eval(reduced)
	writemsg("myeval: %s\n" % (myeval), 1)

	if myeval:
		return [1,[]]
	myzaps = dep_zapdeps(parsed,reduced,vardbapi=mydbapi,use_binaries=use_binaries)
	mylist = flatten(myzaps)
	writemsg("myzaps:   %s\n" % (myzaps), 1)
	writemsg("mylist:   %s\n" % (mylist), 1)
	#remove duplicates
	seen={}
	for atom in mylist:
		seen[atom]=1
	writemsg("mydict:   %s\n" % (seen), 1)
	return [1,seen.keys()]
1866
def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
	"Reduces the deplist to ones and zeros"
	deplist=mydeplist[:]
	for mypos in range(len(deplist)):
		token = deplist[mypos]
		if type(token)==types.ListType:
			# nested group: recurse
			deplist[mypos]=dep_wordreduce(token,mysettings,mydbapi,mode,use_cache=use_cache)
		elif token=="||":
			# operator token; dep_eval handles it later
			pass
		else:
			mykey = portage_dep.dep_getkey(token)
			pprovided = None
			if mysettings:
				pprovided = mysettings.pprovideddict.get(mykey)
			if pprovided and portage_dep.match_from_list(token, pprovided):
				# satisfied through package.provided
				deplist[mypos]=True
			else:
				if mode:
					mydep=mydbapi.xmatch(mode,token)
				else:
					mydep=mydbapi.match(token,use_cache=use_cache)
				if mydep is None:
					# encountered invalid string
					return None
				tmp=(len(mydep)>=1)
				if token[0]=="!":
					#tmp=not tmp
					# This is ad-hoc code. We should rewrite this later.. (See #52377)
					# The reason is that portage uses fakedb when --update option now.
					# So portage considers that a block package doesn't exist even if it exists.
					# Then, #52377 happens.
					# emerge checks if it's block or not, so we can always set tmp=False.
					# but it's not clean..
					tmp=False
				deplist[mypos]=tmp
	return deplist
1906
def fixdbentries(old_value, new_value, dbdir):
	"""python replacement for the fixdbentries script, replaces old_value
	with new_value for package names in files in dbdir.

	CONTENTS is skipped; files that do not mention old_value are left
	untouched.  The trailing-context patterns ($, whitespace, -<nonletter>,
	other non-name characters) keep e.g. "foo" from matching "foobar"."""
	# Bug fix: the pattern used to be built by rebinding old_value to
	# re.escape(old_value) inside the loop.  Because old_value contains
	# "/" (and usually "-"), the escaped form no longer matched the raw
	# text, so the count() pre-check failed and every matching file
	# after the first was silently skipped.  Escape exactly once, into
	# a separate name.
	old_value_re = re.escape(old_value)
	for myfile in [f for f in os.listdir(dbdir) if not f == "CONTENTS"]:
		filepath = dbdir+"/"+myfile
		f = open(filepath, "r")
		mycontent = f.read()
		f.close()
		# cheap substring test before paying for four regex passes
		if not mycontent.count(old_value):
			continue
		mycontent = re.sub(old_value_re+"$", new_value, mycontent)
		mycontent = re.sub(old_value_re+"(\\s)", new_value+"\\1", mycontent)
		mycontent = re.sub(old_value_re+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
		mycontent = re.sub(old_value_re+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
		f = open(filepath, "w")
		f.write(mycontent)
		f.close()
1924
class packagetree:
	"""Base class for the portage/binary/installed package trees.

	Holds the tree dict, the populated flag, the virtual name this tree
	serves (if any) and the backing dbapi (set by subclasses)."""
	def __init__(self,virtual,clone=None):
		if clone:
			# shallow copy: category entries are shared with the clone
			self.tree=clone.tree.copy()
			self.populated=clone.populated
			self.virtual=clone.virtual
			self.dbapi=None
		else:
			self.tree={}
			self.populated=0
			self.virtual=virtual
			self.dbapi=None

	def resolve_key(self,mykey):
		"""Expand a possibly-partial cat/pkg key using this tree's dbapi."""
		return key_expand(mykey,mydb=self.dbapi)

	def dep_nomatch(self,mypkgdep):
		"""Return the cpvs of mypkgdep's package that do NOT match the dep."""
		mykey=portage_dep.dep_getkey(mypkgdep)
		nolist=self.dbapi.cp_list(mykey)
		mymatch=self.dbapi.match(mypkgdep)
		if not mymatch:
			return nolist
		for x in mymatch:
			if x in nolist:
				nolist.remove(x)
		return nolist

	def depcheck(self,mycheck,use="yes",myusesplit=None):
		# Fixed: dep_check() takes the settings object as a required
		# third positional argument; the previous call omitted it and
		# always raised a TypeError.  Use the global config.
		return dep_check(mycheck,self.dbapi,settings,use=use,myuse=myusesplit)

	def populate(self):
		"populates the tree with values"
		# Fixed: this used to assign a *local* variable "populated",
		# leaving self.populated unchanged.
		self.populated=1
1959
def best(mymatches):
	"accepts None arguments; assumes matches are valid."
	# Returns the highest-version cpv in mymatches, or "" when the
	# argument is None/empty.  (Removed a dead "global bestcount"
	# declaration: bestcount was never read or written here.)
	if not mymatches:
		return ""
	bestmatch=mymatches[0]
	# compare on (pkg, ver, rev) -- catpkgsplit()[1:]
	p2=portage_versions.catpkgsplit(bestmatch)[1:]
	for x in mymatches[1:]:
		p1=portage_versions.catpkgsplit(x)[1:]
		if portage_versions.pkgcmp(p1,p2)>0:
			bestmatch=x
			p2=portage_versions.catpkgsplit(bestmatch)[1:]
	return bestmatch
1975
1976 class portagetree:
1977 def __init__(self,root="/",virtual=None,clone=None):
1978 global portdb
1979 if clone:
1980 self.root=clone.root
1981 self.portroot=clone.portroot
1982 self.pkglines=clone.pkglines
1983 else:
1984 self.root=root
1985 self.portroot=settings["PORTDIR"]
1986 self.virtual=virtual
1987 self.dbapi=portdb
1988
1989 def dep_bestmatch(self,mydep):
1990 "compatibility method"
1991 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
1992 if mymatch==None:
1993 return ""
1994 return mymatch
1995
1996 def dep_match(self,mydep):
1997 "compatibility method"
1998 mymatch=self.dbapi.xmatch("match-visible",mydep)
1999 if mymatch==None:
2000 return []
2001 return mymatch
2002
2003 def exists_specific(self,cpv):
2004 return self.dbapi.cpv_exists(cpv)
2005
2006 def getallnodes(self):
2007 """new behavior: these are all *unmasked* nodes. There may or may not be available
2008 masked package for nodes in this nodes list."""
2009 return self.dbapi.cp_all()
2010
2011 def getname(self,pkgname):
2012 "returns file location for this particular package (DEPRECATED)"
2013 if not pkgname:
2014 return ""
2015 mysplit=pkgname.split("/")
2016 psplit=portage_versions.pkgsplit(mysplit[1])
2017 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
2018
2019 def resolve_specific(self,myspec):
2020 cps=portage_versions.catpkgsplit(myspec)
2021 if not cps:
2022 return None
2023 mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
2024 mykey=mykey+"-"+cps[2]
2025 if cps[3]!="r0":
2026 mykey=mykey+"-"+cps[3]
2027 return mykey
2028
2029 def depcheck(self,mycheck,use="yes",myusesplit=None):
2030 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
2031
2032 def getslot(self,mycatpkg):
2033 "Get a slot for a catpkg; assume it exists."
2034 myslot = ""
2035 try:
2036 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
2037 except SystemExit, e:
2038 raise
2039 except Exception, e:
2040 pass
2041 return myslot
2042
2043
2044 class dbapi:
2045 def __init__(self):
2046 pass
2047
2048 def close_caches(self):
2049 pass
2050
2051 def cp_list(self,cp,use_cache=1):
2052 return
2053
2054 def aux_get(self,mycpv,mylist):
2055 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
2056 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
2057 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
2058 raise NotImplementedError
2059
2060 def match(self,origdep,use_cache=1):
2061 mydep=dep_expand(origdep,mydb=self)
2062 mykey=portage_dep.dep_getkey(mydep)
2063 mycat=mykey.split("/")[0]
2064 return portage_dep.match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
2065
2066 def match2(self,mydep,mykey,mylist):
2067 writemsg("DEPRECATED: dbapi.match2\n")
2068 portage_dep.match_from_list(mydep,mylist)
2069
2070 def counter_tick(self,myroot,mycpv=None):
2071 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
2072
2073 def get_counter_tick_core(self,myroot,mycpv=None):
2074 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
2075
2076 def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
2077 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
2078 cpath=myroot+"var/cache/edb/counter"
2079 changed=0
2080 min_counter = 0
2081 if mycpv:
2082 mysplit = portage_versions.pkgsplit(mycpv)
2083 for x in self.match(mysplit[0],use_cache=0):
2084 # fixed bug #41062
2085 if x==mycpv:
2086 continue
2087 try:
2088 old_counter = long(self.aux_get(x,["COUNTER"])[0])
2089 writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
2090 except SystemExit, e:
2091 raise
2092 except:
2093 old_counter = 0
2094 writemsg("!!! BAD COUNTER in '%s'\n" % (x))
2095 if old_counter > min_counter:
2096 min_counter = old_counter
2097
2098 # We write our new counter value to a new file that gets moved into
2099 # place to avoid filesystem corruption.
2100 if os.path.exists(cpath):
2101 cfile=open(cpath, "r")
2102 try:
2103 counter=long(cfile.readline())
2104 except (ValueError,OverflowError):
2105 try:
2106 counter=long(portage_exec.spawn_get_output("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'",spawn_type=portage_exec.spawn_bash)[1])
2107 writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
2108 changed=1
2109 except (ValueError,OverflowError):
2110 writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
2111 writemsg("!!! corrected/normalized so that portage can operate properly.\n")
2112 writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
2113 sys.exit(2)
2114 cfile.close()
2115 else:
2116 try:
2117 counter=long(portage_exec.spawn_get_output("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'",spawn_type=portage_exec.spawn_bash)[1])
2118 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
2119 except SystemExit, e:
2120 raise
2121 except:
2122 writemsg("!!! Initializing global counter.\n")
2123 counter=long(0)
2124 changed=1
2125
2126 if counter < min_counter:
2127 counter = min_counter+1000
2128 changed = 1
2129
2130 if incrementing or changed:
2131
2132 #increment counter
2133 counter += 1
2134 # update new global counter file
2135 newcpath=cpath+".new"
2136 newcfile=open(newcpath,"w")
2137 newcfile.write(str(counter))
2138 newcfile.close()
2139 # now move global counter file into place
2140 os.rename(newcpath,cpath)
2141 return counter
2142
2143 def invalidentry(self, mypath):
2144 if re.search("portage_lockfile$",mypath):
2145 if not os.environ.has_key("PORTAGE_MASTER_PID"):
2146 writemsg("Lockfile removed: %s\n" % mypath, 1)
2147 portage_locks.unlockfile((mypath,None,None))
2148 else:
2149 # Nothing we can do about it. We're probably sandboxed.
2150 pass
2151 elif re.search(".*/-MERGING-(.*)",mypath):
2152 if os.path.exists(mypath):
2153 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
2154 else:
2155 writemsg("!!! Invalid db entry: %s\n" % mypath)
2156
2157
2158
class fakedbapi(dbapi):
	"This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
	def __init__(self):
		# cpv -> 1 for every injected package version
		self.cpvdict={}
		# cat/pkg -> list of injected cpvs
		self.cpdict={}

	def cpv_exists(self,mycpv):
		"""True when mycpv has been injected."""
		return mycpv in self.cpvdict

	def cp_list(self,mycp,use_cache=1):
		"""All injected versions for cat/pkg mycp ([] when unknown)."""
		return self.cpdict.get(mycp,[])

	def cp_all(self):
		"""Every injected cpv across all packages."""
		allcpvs=[]
		for versions in self.cpdict.values():
			allcpvs.extend(versions)
		return allcpvs

	def cpv_inject(self,mycpv):
		"""Adds a cpv from the list of available packages."""
		mycp=cpv_getkey(mycpv)
		self.cpvdict[mycpv]=1
		if mycp not in self.cpdict:
			self.cpdict[mycp]=[]
		if mycpv not in self.cpdict[mycp]:
			self.cpdict[mycp].append(mycpv)

	def cpv_remove(self,mycpv):
		"""Removes a cpv from the list of available packages."""
		mycp=cpv_getkey(mycpv)
		self.cpvdict.pop(mycpv,None)
		versions=self.cpdict.get(mycp)
		if versions is None:
			return
		# mutate in place: callers may hold the list from cp_list()
		while mycpv in versions:
			versions.remove(mycpv)
		if not versions:
			del self.cpdict[mycp]
2207
class bindbapi(fakedbapi):
	"""fakedbapi over a binary-package tree (local and remote .tbz2s)."""
	def __init__(self,mybintree=None):
		self.bintree = mybintree
		self.cpvdict={}
		self.cpdict={}

	def aux_get(self,mycpv,wants):
		"""Return the requested metadata values for mycpv: from the
		remote-package index cache when the package is remote, else
		from the local tbz2's embedded xpak data."""
		tbz2name = mycpv.split("/")[1]+".tbz2"
		if self.bintree and not self.bintree.isremote(mycpv):
			tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
		results = []
		for key in wants:
			if self.bintree and self.bintree.isremote(mycpv):
				# We use the cache for remote packages
				remote_data = self.bintree.remotepkgs[tbz2name]
				if key in remote_data:
					results.append(remote_data[key][:]) # [:] Copy String
				else:
					results.append("")
			else:
				raw = tbz2.getfile(key)
				if raw is None:
					results.append("")
				else:
					# collapse all whitespace runs to single spaces
					results.append(" ".join(raw.split()))

		return results
2236
2237
2238 cptot=0
2239 class vardbapi(dbapi):
2240 def __init__(self,root,categories=None):
2241 self.root = root
2242 #cache for category directory mtimes
2243 self.mtdircache = {}
2244 #cache for dependency checks
2245 self.matchcache = {}
2246 #cache for cp_list results
2247 self.cpcache = {}
2248 self.blockers = None
2249 self.categories = copy.deepcopy(categories)
2250
2251 def cpv_exists(self,mykey):
2252 "Tells us whether an actual ebuild exists on disk (no masking)"
2253 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
2254
2255 def cpv_counter(self,mycpv):
2256 "This method will grab the COUNTER. Returns a counter value."
2257 cdir=self.root+VDB_PATH+"/"+mycpv
2258 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
2259
2260 # We write our new counter value to a new file that gets moved into
2261 # place to avoid filesystem corruption on XFS (unexpected reboot.)
2262 corrupted=0
2263 if os.path.exists(cpath):
2264 cfile=open(cpath, "r")
2265 try:
2266 counter=long(cfile.readline())
2267 except ValueError:
2268 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
2269 counter=long(0)
2270 corrupted=1
2271 cfile.close()
2272 elif os.path.exists(cdir):
2273 mys = portage_versions.pkgsplit(mycpv)
2274 myl = self.match(mys[0],use_cache=0)
2275 print mys,myl
2276 if len(myl) == 1:
2277 try:
2278 # Only one package... Counter doesn't matter.
2279 myf = open(cpath, "w")
2280 myf.write("1")
2281 myf.flush()
2282 myf.close()
2283 counter = 1
2284 except SystemExit, e:
2285 raise
2286 except Exception, e:
2287 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
2288 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
2289 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
2290 writemsg("!!! unmerge this exact version.\n")
2291 writemsg("!!! %s\n" % e)
2292 sys.exit(1)
2293 else:
2294 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
2295 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
2296 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
2297 writemsg("!!! remerge the package.\n")
2298 sys.exit(1)
2299 else:
2300 counter=long(0)
2301 if corrupted:
2302 newcpath=cpath+".new"
2303 # update new global counter file
2304 newcfile=open(newcpath,"w")
2305 newcfile.write(str(counter))
2306 newcfile.close()
2307 # now move global counter file into place
2308 os.rename(newcpath,cpath)
2309 return counter
2310
2311 def cpv_inject(self,mycpv):
2312 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
2313 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
2314 counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
2315 # write local package counter so that emerge clean does the right thing
2316 lcfile=open(self.root+VDB_PATH+"/"+mycpv+"/COUNTER","w")
2317 lcfile.write(str(counter))
2318 lcfile.close()
2319
2320 def isInjected(self,mycpv):
2321 if self.cpv_exists(mycpv):
2322 if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
2323 return True
2324 if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
2325 return True
2326 return False
2327
2328 def move_ent(self,mylist):
2329 origcp=mylist[1]
2330 newcp=mylist[2]
2331 origmatches=self.match(origcp,use_cache=0)
2332 if not origmatches:
2333 return
2334 for mycpv in origmatches:
2335 mycpsplit=portage_versions.catpkgsplit(mycpv)
2336 mynewcpv=newcp+"-"+mycpsplit[2]
2337 mynewcat=newcp.split("/")[0]
2338 if mycpsplit[3]!="r0":
2339 mynewcpv += "-"+mycpsplit[3]
2340 mycpsplit_new = portage_versions.catpkgsplit(mynewcpv)
2341 origpath=self.root+VDB_PATH+"/"+mycpv
2342 if not os.path.exists(origpath):
2343 continue
2344 writemsg("@")
2345 if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
2346 #create the directory
2347 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
2348 newpath=self.root+VDB_PATH+"/"+mynewcpv
2349 if os.path.exists(newpath):
2350 #dest already exists; keep this puppy where it is.
2351 continue
2352 spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
2353
2354 # We need to rename the ebuild now.
2355 old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
2356 new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
2357 if mycpsplit[3] != "r0":
2358 old_eb_path += "-"+mycpsplit[3]
2359 new_eb_path += "-"+mycpsplit[3]
2360 if os.path.exists(old_eb_path+".ebuild"):
2361 os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
2362
2363 catfile=open(newpath+"/CATEGORY", "w")
2364 catfile.write(mynewcat+"\n")
2365 catfile.close()
2366
2367 dbdir = self.root+VDB_PATH
2368 for catdir in listdir(dbdir):
2369 catdir = dbdir+"/"+catdir
2370 if os.path.isdir(catdir):
2371 for pkgdir in listdir(catdir):
2372 pkgdir = catdir+"/"+pkgdir
2373 if os.path.isdir(pkgdir):
2374 fixdbentries(origcp, newcp, pkgdir)
2375
2376 def move_slot_ent(self,mylist):
2377 pkg=mylist[1]
2378 origslot=mylist[2]
2379 newslot=mylist[3]
2380
2381 origmatches=self.match(pkg,use_cache=0)
2382 if not origmatches:
2383 return
2384 for mycpv in origmatches:
2385 origpath=self.root+VDB_PATH+"/"+mycpv
2386 if not os.path.exists(origpath):
2387 continue
2388
2389 slot=grabfile(origpath+"/SLOT");
2390 if (not slot):
2391 continue
2392
2393 if (slot[0]!=origslot):
2394 continue
2395
2396 writemsg("s")
2397 slotfile=open(origpath+"/SLOT", "w")
2398 slotfile.write(newslot+"\n")
2399 slotfile.close()
2400
2401 def cp_list(self,mycp,use_cache=1):
2402 mysplit=mycp.split("/")
2403 if mysplit[0] == '*':
2404 mysplit[0] = mysplit[0][1:]
2405 try:
2406 mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
2407 except OSError:
2408 mystat=0
2409 if use_cache and self.cpcache.has_key(mycp):
2410 cpc=self.cpcache[mycp]
2411 if cpc[0]==mystat:
2412 return cpc[1]
2413 list=listdir(self.root+VDB_PATH+"/"+mysplit[0])
2414
2415 if (list==None):
2416 return []
2417 returnme=[]
2418 for x in list:
2419 if x[0] == '-':
2420 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
2421 continue
2422 ps=portage_versions.pkgsplit(x)
2423 if not ps:
2424 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
2425 continue
2426 if len(mysplit) > 1:
2427 if ps[0]==mysplit[1]:
2428 returnme.append(mysplit[0]+"/"+x)
2429 if use_cache:
2430 self.cpcache[mycp]=[mystat,returnme]
2431 elif self.cpcache.has_key(mycp):
2432 del self.cpcache[mycp]
2433 return returnme
2434
2435 def cpv_all(self, use_cache=1):
2436 return list(self.iter_cpv_all(use_cache=use_cache))
2437
2438 def iter_cpv_all(self,use_cache=1):
2439 basepath = self.root+VDB_PATH+"/"
2440
2441 mycats = self.categories
2442 if mycats == None:
2443 # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
2444 mycats = settings.categories
2445
2446 for x in mycats:
2447 for y in listdir(basepath+x):
2448 subpath = x+"/"+y
2449 # -MERGING- should never be a cpv, nor should files.
2450 if os.path.isdir(basepath+subpath) and (portage_versions.pkgsplit(y) is not None):
2451 yield subpath
2452
2453 def cp_all(self,use_cache=1):
2454 mylist = self.cpv_all(use_cache=use_cache)
2455 d=dict()
2456 for y in self.iter_cpv_all(use_cache=use_cache):
2457 if y[0] == '*':
2458 y = y[1:]
2459 mysplit=portage_versions.catpkgsplit(y)
2460 if not mysplit:
2461 self.invalidentry(self.root+VDB_PATH+"/"+y)
2462 continue
2463 mykey=mysplit[0]+"/"+mysplit[1]
2464 d[mysplit[0]+"/"+mysplit[1]] = None
2465 return d.keys()
2466
2467 def checkblockers(self,origdep):
2468 pass
2469
2470 def match(self,origdep,use_cache=1):
2471 "caching match function"
2472 mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
2473 mykey=portage_dep.dep_getkey(mydep)
2474 mycat=mykey.split("/")[0]
2475 if not use_cache:
2476 if self.matchcache.has_key(mycat):
2477 del self.mtdircache[mycat]
2478 del self.matchcache[mycat]
2479 return portage_dep.match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
2480 try:
2481 curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
2482 except SystemExit, e:
2483 raise
2484 except:
2485 curmtime=0
2486
2487 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
2488 # clear cache entry
2489 self.mtdircache[mycat]=curmtime
2490 self.matchcache[mycat]={}
2491 if not self.matchcache[mycat].has_key(mydep):
2492 mymatch=portage_dep.match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
2493 self.matchcache[mycat][mydep]=mymatch
2494 return self.matchcache[mycat][mydep][:]
2495
2496 def aux_get(self, mycpv, wants):
2497 global auxdbkeys
2498 results = []
2499 if not self.cpv_exists(mycpv):
2500 return []
2501 for x in wants:
2502 myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
2503 if os.access(myfn,os.R_OK):
2504 myf = open(myfn, "r")
2505 myd = myf.read()
2506 myf.close()
2507 myd = " ".join( myd.split() )
2508 else:
2509 myd = ""
2510 results.append(myd)
2511 return results
2512
2513
2514 class vartree(packagetree):
2515 """
2516 this tree will scan a var/db/pkg database located at root (passed to init)
2517 """
2518 def __init__(self,root="/",virtual=None,clone=None,categories=None):
2519 if clone:
2520 self.root = clone.root
2521 self.dbapi = copy.deepcopy(clone.dbapi)
2522 self.populated = 1
2523 else:
2524 self.root = root
2525 self.dbapi = vardbapi(self.root,categories=categories)
2526 self.populated = 1
2527
2528 def zap(self,mycpv):
2529 return
2530
2531 def inject(self,mycpv):
2532 return
2533
2534 def get_provide(self,mycpv):
2535 myprovides=[]
2536 try:
2537 mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
2538 if mylines:
2539 myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
2540 myuse = " ".join(myuse).split()
2541 mylines = " ".join(mylines)
2542 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
2543 for myprovide in mylines:
2544 mys = portage_versions.catpkgsplit(myprovide)
2545 if not mys:
2546 mys = myprovide.split("/")
2547 myprovides += [mys[0] + "/" + mys[1]]
2548 return myprovides
2549 except SystemExit, e:
2550 raise
2551 except Exception, e:
2552 print
2553 print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
2554 print "Possibly Invalid: " + str(mylines)
2555 print "Exception: "+str(e)
2556 print
2557 return []
2558
2559 def get_all_provides(self):
2560 myprovides = {}
2561 for node in self.getallcpv():
2562 for mykey in self.get_provide(node):
2563 if myprovides.has_key(mykey):
2564 myprovides[mykey] += [node]
2565 else:
2566 myprovides[mykey] = [node]
2567 return myprovides
2568
2569 def dep_bestmatch(self,mydep,use_cache=1):
2570 "compatibility method -- all matches, not just visible ones"
2571 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
2572 mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
2573 if mymatch==None:
2574 return ""
2575 else:
2576 return mymatch
2577
2578 def dep_match(self,mydep,use_cache=1):
2579 "compatibility method -- we want to see all matches, not just visible ones"
2580 #mymatch=match(mydep,self.dbapi)
2581 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
2582 if mymatch==None:
2583 return []
2584 else:
2585 return mymatch
2586
2587 def exists_specific(self,cpv):
2588 return self.dbapi.cpv_exists(cpv)
2589
2590 def getallcpv(self):
2591 """temporary function, probably to be renamed --- Gets a list of all
2592 category/package-versions installed on the system."""
2593 return self.dbapi.cpv_all()
2594
2595 def getallnodes(self):
2596 """new behavior: these are all *unmasked* nodes. There may or may not be available
2597 masked package for nodes in this nodes list."""
2598 return self.dbapi.cp_all()
2599
2600 def exists_specific_cat(self,cpv,use_cache=1):
2601 cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
2602 a=portage_versions.catpkgsplit(cpv)
2603 if not a:
2604 return 0
2605 mylist=listdir(self.root+VDB_PATH+"/"+a[0])
2606 for x in mylist:
2607 b=portage_versions.pkgsplit(x)
2608 if not b:
2609 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
2610 continue
2611 if a[1]==b[0]:
2612 return 1
2613 return 0
2614
2615 def getebuildpath(self,fullpackage):
2616 cat,package=fullpackage.split("/")
2617 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
2618
2619 def getnode(self,mykey,use_cache=1):
2620 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
2621 if not mykey:
2622 return []
2623 mysplit=mykey.split("/")
2624 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0])
2625 returnme=[]
2626 for x in mydirlist:
2627 mypsplit=portage_versions.pkgsplit(x)
2628 if not mypsplit:
2629 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
2630 continue
2631 if mypsplit[0]==mysplit[1]:
2632 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
2633 returnme.append(appendme)
2634 return returnme
2635
2636
2637 def getslot(self,mycatpkg):
2638 "Get a slot for a catpkg; assume it exists."
2639 myslot = ""
2640 try:
2641 myslot=" ".join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
2642 except SystemExit, e:
2643 raise
2644 except Exception, e:
2645 pass
2646 return myslot
2647
2648 def hasnode(self,mykey,use_cache):
2649 """Does the particular node (cat/pkg key) exist?"""
2650 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
2651 mysplit=mykey.split("/")
2652 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0])
2653 for x in mydirlist:
2654 mypsplit=portage_versions.pkgsplit(x)
2655 if not mypsplit:
2656 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
2657 continue
2658 if mypsplit[0]==mysplit[1]:
2659 return 1
2660 return 0
2661
2662 def populate(self):
2663 self.populated=1
2664
2665
# Ordered list of the metadata keys kept for each ebuild in the
# auxiliary (depcache) database.  The UNUSED_* names appear to be
# reserved placeholder slots to keep the record layout stable --
# TODO(review): confirm against the cache modules.
auxdbkeys=[
  'DEPEND',    'RDEPEND',   'SLOT',      'SRC_URI',
	'RESTRICT',  'HOMEPAGE',  'LICENSE',   'DESCRIPTION',
	'KEYWORDS',  'INHERITED', 'IUSE',      'CDEPEND',
	'PDEPEND',   'PROVIDE',
	'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
	'UNUSED_05', 'UNUSED_06', 'UNUSED_07', 'UNUSED_08',
	]
auxdbkeylen=len(auxdbkeys)
2675
def close_portdbapi_caches():
	"""Ask every live portdbapi instance to release its cache resources."""
	for db in portdbapi.portdbapi_instances:
		db.close_caches()
2679 class portdbapi(dbapi):
2680 """this tree will scan a portage directory located at root (passed to init)"""
2681 portdbapi_instances = []
2682
	def __init__(self,porttree_root,mysettings=None):
		"""Set up a dbapi over the ebuild repository at porttree_root
		plus any PORTDIR_OVERLAY trees, including GPG manifest
		verification (per FEATURES) and the per-tree metadata caches."""
		# every instance is tracked so close_portdbapi_caches() can reach it
		portdbapi.portdbapi_instances.append(self)
		self.lock_held = 0;

		# fall back to a private clone of the global config
		if mysettings:
			self.mysettings = mysettings
		else:
			self.mysettings = config(clone=settings)

		self.manifestVerifyLevel = None
		self.manifestVerifier = None
		self.manifestCache = {} # {location: [stat, md5]}
		self.manifestMissingCache = []

		# GPG trust escalates: gpg -> strict (marginal) -> severe (trusted);
		# the order of these branches is significant.
		if "gpg" in self.mysettings.features:
			self.manifestVerifyLevel = portage_gpg.EXISTS
			if "strict" in self.mysettings.features:
				self.manifestVerifyLevel = portage_gpg.MARGINAL
				self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
			elif "severe" in self.mysettings.features:
				self.manifestVerifyLevel = portage_gpg.TRUSTED
				self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
			else:
				self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)

		#self.root=settings["PORTDIR"]
		self.porttree_root = porttree_root

		self.depcachedir = self.mysettings.depcachedir

		# eclass cache is shared between the main tree and overlays
		self.eclassdb = eclass_cache.cache(self.porttree_root, self.mysettings["PORTDIR_OVERLAY"].split())

		self.metadb = {}
		self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")

		#if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
		self.xcache={}
		self.frozen=0

		# main tree first, overlays after (lookup order matters elsewhere)
		self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()

		# the UNUSED_* padding keys are not stored in the cache
		filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED")]
		self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
		self.auxdb = {}
		# one metadata cache per tree (main + each overlay)
		for x in self.porttrees:
			self.auxdb[x] = self.auxdbmodule(x, filtered_auxdbkeys, basepath=portage_const.DEPCACHE_PATH,
				gid=portage_gid)
2730
2731
	def getmaskingreason(self,mycpv):
		"""Return the comment block from profiles/package.mask that
		precedes the atom masking mycpv, or None when no comment (or no
		masking atom) is found.  Raises ValueError for an unparseable
		cpv and KeyError when the cpv is not in the tree."""
		mysplit = portage_versions.catpkgsplit(mycpv)
		if not mysplit:
			raise ValueError("invalid CPV: %s" % mycpv)
		if not self.cpv_exists(mycpv):
			raise KeyError("CPV %s does not exist" % mycpv)
		mycp=mysplit[0]+"/"+mysplit[1]

		if settings.pmaskdict.has_key(mycp):
			for x in settings.pmaskdict[mycp]:
				if mycpv in self.xmatch("match-all", x):
					# re-scan package.mask to recover the comment that
					# sits directly above the masking atom
					pmaskfile = open(settings["PORTDIR"]+"/profiles/package.mask")
					comment = ""
					l = "\n"
					while len(l) > 0:
						l = pmaskfile.readline()
						if len(l) == 0:
							# EOF: atom not found in the file
							pmaskfile.close()
							return None
						if l[0] == "#":
							comment += l
						elif l == "\n":
							# blank line ends a comment block
							comment = ""
						elif l.strip() == x:
							pmaskfile.close()
							return comment
					# NOTE(review): the EOF branch above always returns,
					# so this close looks unreachable; left for safety.
					pmaskfile.close()
		return None
2760
	def getmaskingstatus(self,mycpv):
		"""Return the list of reasons mycpv is masked (empty list means
		visible).  Possible entries: "profile", "package.mask", and
		"<kmask> keyword" where kmask is "missing", "-<arch>" or
		"~<arch>".  Raises ValueError for an unparseable cpv and
		KeyError when the cpv is not in the tree."""
		mysplit = portage_versions.catpkgsplit(mycpv)
		if not mysplit:
			raise ValueError("invalid CPV: %s" % mycpv)
		if not self.cpv_exists(mycpv):
			raise KeyError("CPV %s does not exist" % mycpv)
		mycp=mysplit[0]+"/"+mysplit[1]

		rValue = []

		# profile checking
		revmaskdict=settings.prevmaskdict
		if revmaskdict.has_key(mycp):
			for x in revmaskdict[mycp]:
				# a leading "*" marks the atom; strip it before matching
				if x[0]=="*":
					myatom = x[1:]
				else:
					myatom = x
				# masked by profile when the cpv does NOT match the atom
				if not portage_dep.match_to_list(mycpv, [myatom]):
					rValue.append("profile")
					break

		# package.mask checking (package.unmask can override)
		maskdict=settings.pmaskdict
		unmaskdict=settings.punmaskdict
		if maskdict.has_key(mycp):
			for x in maskdict[mycp]:
				if mycpv in self.xmatch("match-all", x):
					unmask=0
					if unmaskdict.has_key(mycp):
						for z in unmaskdict[mycp]:
							if mycpv in self.xmatch("match-all",z):
								unmask=1
								break
					if unmask==0:
						rValue.append("package.mask")

		# keywords checking: accepted groups = global ACCEPT_KEYWORDS
		# plus any package.keywords entries matching this cpv
		mygroups = self.aux_get(mycpv, ["KEYWORDS"])[0].split()
		pgroups=groups[:]
		myarch = settings["ARCH"]
		pkgdict = settings.pkeywordsdict

		cp = portage_dep.dep_getkey(mycpv)
		if pkgdict.has_key(cp):
			matches = portage_dep.match_to_list(mycpv, pkgdict[cp].keys())
			for match in matches:
				pgroups.extend(pkgdict[cp][match])

		# default: masked unless some accepted keyword is present
		kmask = "missing"

		for keyword in pgroups:
			if keyword in mygroups:
				kmask=None

		if kmask:
			# refine the reason: "*" accepts anything, "-*"/-arch are
			# hard masks, ~arch means testing-only
			for gp in mygroups:
				if gp=="*":
					kmask=None
					break
				elif gp=="-*":
					break
				elif gp=="-"+myarch:
					kmask="-"+myarch
					break
				elif gp=="~"+myarch:
					kmask="~"+myarch
					break

		if kmask:
			rValue.append(kmask+" keyword")
		return rValue
2833
2834
	def regen_keys(self,cleanse_stale=True):
		"""walk all entries of this instance to update the cache.
		If the cache is pregenned, pass it in via src_cache, and the cache will be updated
		from that instance.
		cleanse_stale controls whether or not the cache's old/stale entries are removed.
		This is useful both for emerge metadata, and emerge regen (moreso for regen)"""

		import cache.cache_errors
		# first pass: touch every known cpv so that aux_get() (re)generates
		# its cache entry as a side effect; remember the valid keys so the
		# cleanse pass below knows what to keep
		valid_nodes = {}
		for x in self.cp_all():
			# print "processing pkg %s" % x
			for y in self.cp_list(x):
				valid_nodes[y] = None
				# the return value is ignored; aux_get() regenerating the
				# entry is the point here
				try: self.aux_get(y,["_mtime_"])
				except cache.cache_errors.CacheError, ce:
					print "Cache Exception-", ce
					del ce
		# second pass: drop cache entries whose ebuild no longer exists.
		# NOTE(review): this pass runs unconditionally -- the cleanse_stale
		# parameter is never consulted; confirm whether that is intended.
		for loc, tree in self.auxdb.items():
			print "cleansing cache for tree at %s" % loc
			for x in tree.keys():
				if x not in valid_nodes:
					try: del tree[x]
					except (KeyError, cache.cache_errors.CacheError):
						pass
2859
2860
	def close_caches(self):
		# Intentionally a no-op: explicit per-tree cache closing has been
		# disabled; the old implementation is kept below for reference.
		pass
		# for y in self.auxdb[x].keys():
		# 	self.auxdb[x][y].sync()
		# 	self.auxdb[x][y].close()
		# 	del self.auxdb[x][y]
		# del self.auxdb[x]
		# self.eclassdb.close_caches()
2869
2870 def flush_cache(self):
2871 self.metadb.clear()
2872 self.auxdb.clear()
2873 # self.eclassdb.flush_cache()
2874
2875 def finddigest(self,mycpv):
2876 try:
2877 mydig = self.findname2(mycpv)[0]
2878 mydigs = mydig.split("/")[:-1]
2879 mydig = "/".join(mydigs)
2880
2881 mysplit = mycpv.split("/")
2882 except SystemExit, e:
2883 raise
2884 except:
2885 return ""
2886 return mydig+"/files/digest-"+mysplit[-1]
2887
2888 def findname(self,mycpv):
2889 return self.findname2(mycpv)[0]
2890
2891 def findname2(self,mycpv):
2892 "returns file location for this particular package and in_overlay flag"
2893 if not mycpv:
2894 return "",0
2895 mysplit=mycpv.split("/")
2896 if mysplit[0]=="virtual":
2897 print "!!! Cannot resolve a virtual package name to an ebuild."
2898 print "!!! This is a bug, please report it. ("+mycpv+")"
2899 sys.exit(1)
2900
2901 psplit=portage_versions.pkgsplit(mysplit[1])
2902 ret=None
2903 if psplit:
2904 for x in self.porttrees:
2905 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
2906
2907 if os.access(file, os.R_OK):
2908 # when found
2909 ret=[file, x]
2910 if ret:
2911 return ret[0], ret[1]
2912
2913 # when not found
2914 return None, 0
2915
2916 def aux_get(self,mycpv,mylist,strict=0,metacachedir=None,debug=0):
2917 """
2918 stub code for returning auxilliary db information, such as SLOT, DEPEND, etc.
2919 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
2920 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
2921 """
2922 global auxdbkeys,auxdbkeylen
2923
2924 cat,pkg = mycpv.split( "/", 1)
2925
2926 if metacachedir:
2927 if cat not in self.metadb:
2928 self.metadb[cat] = self.metadbmodule(metacachedir,cat,auxdbkeys,uid,portage_gid)
2929
2930 myebuild, mylocation=self.findname2(mycpv)
2931
2932 if not myebuild:
2933 writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv})
2934 writemsg("!!! %s\n" % myebuild)
2935 raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
2936
2937 if "gpg" in self.mysettings.features:
2938 myManifestPath = os.path.join("/",os.path.dirname(myebuild),"Manifest")
2939 try:
2940 mys = portage_gpg.fileStats(myManifestPath)
2941 if (myManifestPath in self.manifestCache) and \
2942 (self.manifestCache[myManifestPath] == mys):
2943 pass
2944 elif self.manifestVerifier:
2945 if not self.manifestVerifier.verify(myManifestPath):
2946 # Verification failed the desired level.
2947 raise portage_exception.UntrustedSignature, "Untrusted/Missing signature on Manifest: %(manifest)s" % {"manifest":myManifestPath}
2948
2949 if ("severe" in self.mysettings.features) and \
2950 (mys != portage_gpg.fileStats(myManifestPath)):
2951 raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
2952
2953 except portage_exception.InvalidSignature, e:
2954 if ("strict" in self.mysettings.features) or \
2955 ("severe" in self.mysettings.features):
2956 raise
2957 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
2958 except portage_exception.MissingSignature, e:
2959 if ("severe" in self.mysettings.features):
2960 raise
2961 if ("strict" in self.mysettings.features):
2962 if myManifestPath not in self.manifestMissingCache:
2963 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
2964 self.manifestMissingCache.insert(0,myManifestPath)
2965 except (OSError,portage_exception.FileNotFound), e:
2966 if ("strict" in self.mysettings.features) or \
2967 ("severe" in self.mysettings.features):
2968 raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
2969 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath})
2970
2971 # XXX no no.
2972 #
2973 # if mylocation not in self.auxdb:
2974 # self.auxdb[mylocation] = {}
2975
2976
2977 try:
2978 emtime = os.stat(myebuild).st_mtime
2979 except (OSError, IOError):
2980 writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv})
2981 writemsg("!!! %s\n" % myebuild)
2982 raise KeyError
2983
2984 mydata={}
2985
2986 # fix the indenting, lazy ass.
2987 if True:
2988 doregen=False
2989 try:
2990 mydata = self.auxdb[mylocation][mycpv]
2991 if emtime != long(mydata.get("_mtime_",0)):
2992 doregen=True
2993 elif len(mydata.get("_eclasses_",())) > 0:
2994 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
2995
2996 except SystemExit,e:
2997 raise
2998 except Exception, e:
2999 # print "exception 1=", e
3000 doregen = True
3001 if not isinstance(e, KeyError):
3002 # CorruptionError is the likely candidate
3003 writemsg("auxdb exception: (%s): %s\n" % (mylocation+"::"+cat+"/"+pkg,str(e)))
3004 # if self.auxdb[mylocation][cat].has_key(pkg):
3005 # self.auxdb[mylocation][cat].del_key(pkg)
3006 # self.auxdb[mylocation][cat].sync()
3007
3008 try: del self.auxdb[mycpv]
3009 except KeyError: pass
3010
3011 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
3012 if doregen:
3013 # print "doregen for %s, " % mycpv,doregen
3014 writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
3015 writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
3016
3017 # XXX: Part of the gvisible hack/fix to prevent deadlock
3018 # XXX: through doebuild. Need to isolate this somehow...
3019 self.mysettings.reset()
3020
3021 # XXX: is this truly required?
3022 # ~harring.
3023 if self.lock_held:
3024 raise "Lock is already held by me?"
3025
3026 self.lock_held = 1
3027
3028 mydata=ebuild.ebuild_handler().get_keys(myebuild,self.mysettings)
3029 self.lock_held = 0
3030 if mydata == None:
3031 raise Exception("Failed sourcing %s" % mycpv)
3032
3033 mydata["_mtime_"] = emtime
3034 if mydata.get("INHERITED", False):
3035 mydata["_eclasses_"] = self.eclassdb.get_eclass_data( \
3036 mydata["INHERITED"].split() )
3037 del mydata["INHERITED"]
3038 else:
3039 mydata["_eclasses_"] = {}
3040
3041 self.auxdb[mylocation][mycpv] = mydata
3042 # if not self.eclassdb.update_package(mylocation, cat, pkg, mydata.get("INHERITED","").split()):
3043 # print "failed updating eclass cache"
3044 # sys.exit(1)
3045
3046 #finally, we look at our internal cache entry and return the requested data.
3047 returnme = []
3048 for x in mylist:
3049 if x == "INHERITED":
3050 returnme.append(' '.join(mydata.get("_eclasses_",{}).keys()))
3051 else:
3052 returnme.append(mydata.get(x,""))
3053 return returnme
3054
3055 def getfetchlist(self,mypkg,useflags=None,mysettings=None,all=0):
3056 if mysettings == None:
3057 mysettings = self.mysettings
3058 try:
3059 myuris = self.aux_get(mypkg,["SRC_URI"])[0]
3060 except (IOError,KeyError):
3061 print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
3062 sys.exit(1)
3063
3064 useflags = mysettings["USE"].split()
3065
3066 myurilist = portage_dep.paren_reduce(myuris)
3067 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
3068 newuris = flatten(myurilist)
3069
3070 myfiles = []
3071 for x in newuris:
3072 mya = os.path.basename(x)
3073 if not mya in myfiles:
3074 myfiles.append(mya)
3075 return [newuris, myfiles]
3076
	def getfetchsizes(self,mypkg,useflags=None,debug=0):
		# returns a filename:size dictionnary of remaining downloads
		# (None when the digest file is empty/missing/unparsable)
		mydigest=self.finddigest(mypkg)
		mymd5s=digestParseFile(mydigest)
		if not mymd5s:
			if debug: print "[empty/missing/bad digest]: "+mypkg
			return None
		filesdict={}
		# with no explicit USE flags, consider every possible distfile
		if useflags == None:
			myuris, myfiles = self.getfetchlist(mypkg,all=1)
		else:
			myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
		#XXX: maybe this should be improved: take partial downloads
		# into account? check md5sums?
		for myfile in myfiles:
			if debug and myfile not in mymd5s.keys():
				print "[bad digest]: missing",myfile,"for",mypkg
			elif myfile in mymd5s.keys():
				distfile=settings["DISTDIR"]+"/"+myfile
				# only count files not already present in DISTDIR
				if not os.access(distfile, os.R_OK):
					filesdict[myfile]=int(mymd5s[myfile]["size"])
		return filesdict
3099
3100 def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
3101 if not useflags:
3102 if mysettings:
3103 useflags = mysettings["USE"].split()
3104 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
3105 mydigest = self.finddigest(mypkg)
3106 mysums = digestParseFile(mydigest)
3107
3108 failures = {}
3109 for x in myfiles:
3110 if not mysums or x not in mysums:
3111 ok = False
3112 reason = "digest missing"
3113 else:
3114 ok,reason = portage_checksum.verify_all(self.mysettings["DISTDIR"]+"/"+x, mysums[x])
3115 if not ok:
3116 failures[x] = reason
3117 if failures:
3118 return False
3119 return True
3120
3121 def getsize(self,mypkg,useflags=None,debug=0):
3122 # returns the total size of remaining downloads
3123 #
3124 # we use getfetchsizes() now, so this function would be obsoleted
3125 #
3126 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
3127 if filesdict==None:
3128 return "[empty/missing/bad digest]"
3129 mysize=0
3130 for myfile in filesdict.keys():
3131 mysum+=filesdict[myfile]
3132 return mysum
3133
3134 def cpv_exists(self,mykey):
3135 "Tells us whether an actual ebuild exists on disk (no masking)"
3136 cps2=mykey.split("/")
3137 cps=portage_versions.catpkgsplit(mykey,silent=0)
3138 if not cps:
3139 #invalid cat/pkg-v
3140 return 0
3141 if self.findname(cps[0]+"/"+cps2[1]):
3142 return 1
3143 else:
3144 return 0
3145
3146 def cp_all(self):
3147 "returns a list of all keys in our tree"
3148 d={}
3149 for oroot in self.porttrees:
3150 for x in self.mysettings.categories:
3151 for y in listdir(os.path.join(oroot, x),ignorecvs=1):
3152 mykey=x+"/"+y
3153 d[x+"/"+y] = None
3154 return d.keys()
3155
3156 def p_list(self,mycp):
3157 d={}
3158 for oroot in self.porttrees:
3159 for x in listdir(oroot+"/"+mycp,ignorecvs=1):
3160 if x[-7:]==".ebuild":
3161 mye=x[:-7]
3162 d[mye] = None
3163 return d.keys()
3164
3165 def cp_list(self,mycp,use_cache=1):
3166 mysplit=mycp.split("/")
3167 returnme=[]
3168 d={}
3169 for oroot in self.porttrees:
3170 for x in listdir(oroot+"/"+mycp,ignorecvs=1):
3171 if x[-7:]==".ebuild":
3172 d[mysplit[0]+"/"+x[:-7]] = None
3173 return d.keys()
3174
3175 def freeze(self):
3176 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
3177 self.xcache[x]={}
3178 self.frozen=1
3179
3180 def melt(self):
3181 self.xcache={}
3182 self.frozen=0
3183
	def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
		"caching match function; very trick stuff"
		#if no updates are being made to the tree, we can consult our xcache...
		if self.frozen:
			try:
				return self.xcache[level][origdep]
			except KeyError:
				pass

		if not mydep:
			#this stuff only runs on first call of xmatch()
			#create mydep, mykey from origdep
			mydep=dep_expand(origdep,mydb=self)
			mykey=portage_dep.dep_getkey(mydep)

		if level=="list-visible":
			#a list of all visible packages, not called directly (just by xmatch())
			#myval=self.visible(self.cp_list(mykey))
			myval=self.gvisible(self.visible(self.cp_list(mykey)))
		elif level=="bestmatch-visible":
			#dep match -- best match of all visible packages
			myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
			#get all visible matches (from xmatch()), then choose the best one
		elif level=="bestmatch-list":
			#dep match -- find best match but restrict search to sublist
			myval=best(portage_dep.match_from_list(mydep,mylist))
			#no point is calling xmatch again since we're not caching list deps
		elif level=="match-list":
			#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
			myval=portage_dep.match_from_list(mydep,mylist)
		elif level=="match-visible":
			#dep match -- find all visible matches
			myval=portage_dep.match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
			#get all visible packages, then get the matching ones
		elif level=="match-all":
			#match *all* visible *and* masked packages
			myval=portage_dep.match_from_list(mydep,self.cp_list(mykey))
		else:
			print "ERROR: xmatch doesn't handle",level,"query!"
			raise KeyError
		# NOTE(review): results are stored keyed by the expanded mydep but
		# probed above by origdep -- cache hits only happen when the two
		# coincide; confirm whether that asymmetry is intentional.
		if self.frozen and (level not in ["match-list","bestmatch-list"]):
			self.xcache[level][mydep]=myval
		return myval
3227
3228 def match(self,mydep,use_cache=1):
3229 return self.xmatch("match-visible",mydep)
3230
	def visible(self,mylist):
		"""two functions in one. Accepts a list of cpv values and uses the package.mask *and*
		packages file to remove invisible entries, returning remaining items. This function assumes
		that all entries in mylist have the same category and package name."""
		if (mylist==None) or (len(mylist)==0):
			return []
		newlist=mylist[:]
		#first, we mask out packages in the package.mask file
		mykey=newlist[0]
		cpv=portage_versions.catpkgsplit(mykey)
		if not cpv:
			#invalid cat/pkg-v
			print "visible(): invalid cat/pkg-v:",mykey
			return []
		mycp=cpv[0]+"/"+cpv[1]
		maskdict=self.mysettings.pmaskdict
		unmaskdict=self.mysettings.punmaskdict
		if maskdict.has_key(mycp):
			for x in maskdict[mycp]:
				mymatches=self.xmatch("match-all",x)
				if mymatches==None:
					#error in package.mask file; print warning and continue:
					print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
					continue
				for y in mymatches:
					# a matching package.unmask entry overrides the mask
					unmask=0
					if unmaskdict.has_key(mycp):
						for z in unmaskdict[mycp]:
							mymatches_unmask=self.xmatch("match-all",z)
							if y in mymatches_unmask:
								unmask=1
								break
					if unmask==0:
						try:
							newlist.remove(y)
						except ValueError:
							pass

		# second: the profile "packages" file can restrict versions further
		revmaskdict=self.mysettings.prevmaskdict
		if revmaskdict.has_key(mycp):
			for x in revmaskdict[mycp]:
				#important: only match against the still-unmasked entries...
				#notice how we pass "newlist" to the xmatch() call below....
				#Without this, ~ deps in the packages files are broken.
				mymatches=self.xmatch("match-list",x,mylist=newlist)
				if mymatches==None:
					#error in packages file; print warning and continue:
					print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
					continue
				pos=0
				while pos<len(newlist):
					if newlist[pos] not in mymatches:
						del newlist[pos]
					else:
						pos += 1
		return newlist
3287
	def gvisible(self,mylist):
		"strip out group-masked (not in current group) entries"
		global groups
		if mylist==None:
			return []
		newlist=[]

		# per-package keyword additions (package.keywords)
		pkgdict = self.mysettings.pkeywordsdict
		for mycpv in mylist:
			#we need to update this next line when we have fully integrated the new db api
			auxerr=0
			try:
				myaux=db["/"]["porttree"].dbapi.aux_get(mycpv, ["KEYWORDS"])
			except (KeyError,IOError,TypeError):
				continue
			if not myaux[0]:
				# KEYWORDS=""
				#print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
				continue
			mygroups=myaux[0].split()
			pgroups=groups[:]
			cp = portage_dep.dep_getkey(mycpv)
			if cp in pkgdict:
				matches = portage_dep.match_to_list(mycpv, pkgdict[cp].keys())
				for atom in matches:
					pgroups.extend(pkgdict[cp][atom])
			# first matching rule wins: "*" keyword, explicit -keyword in
			# the accepted groups, or an accepted keyword
			match=0
			for gp in mygroups:
				if gp=="*":
					writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv)
					match=1
					break
				elif "-"+gp in pgroups:
					match=0
					break
				elif gp in pgroups:
					match=1
					break
			else:
				# loop fell through with no decision: honor the "*" and
				# "~*" wildcards from the accepted keyword groups
				if "*" in pgroups:
					for gp in mygroups:
						if not gp[0] in "~-":
							match=1
							break
				if "~*" in pgroups:
					for gp in mygroups:
						if gp[0] != "-":
							match=1
							break
			if match:
				newlist.append(mycpv)
		return newlist
3340
class binarytree(packagetree):
	"this tree scans for a list of all packages available in PKGDIR"
	def __init__(self,root,pkgdir,virtual=None,clone=None):
		# "clone" shares state with the original instance rather than
		# copying it (see the XXX below)

		if clone:
			# XXX This isn't cloning. It's an instance of the same thing.
			self.root=clone.root
			self.pkgdir=clone.pkgdir
			self.dbapi=clone.dbapi
			self.populated=clone.populated
			self.tree=clone.tree
			self.remotepkgs=clone.remotepkgs
			self.invalids=clone.invalids
		else:
			self.root=root
			#self.pkgdir=settings["PKGDIR"]
			self.pkgdir=pkgdir
			self.dbapi=bindbapi(self)
			# populated: set to 1 once populate() has scanned PKGDIR
			self.populated=0
			self.tree={}
			# remotepkgs: metadata fetched from PORTAGE_BINHOST
			self.remotepkgs={}
			# invalids: names of old-style/corrupt tbz2s found locally
			self.invalids=[]

	def move_ent(self,mylist):
		# Apply a "move" update entry ("move oldcp newcp"): rewrite the
		# metadata inside each matching tbz2 and re-register it under the
		# new name. Returns 1, or None when nothing matched.
		if not self.populated:
			self.populate()
		origcp=mylist[1]
		newcp=mylist[2]
		origmatches=self.dbapi.cp_list(origcp)
		if not origmatches:
			return
		for mycpv in origmatches:
			mycpsplit=portage_versions.catpkgsplit(mycpv)
			mynewcpv=newcp+"-"+mycpsplit[2]
			mynewcat=newcp.split("/")[0]
			mynewpkg=mynewcpv.split("/")[1]
			myoldpkg=mycpv.split("/")[1]
			# only append the revision when it is not -r0
			if mycpsplit[3]!="r0":
				mynewcpv += "-"+mycpsplit[3]
			if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
				writemsg("!!! Cannot update binary: Destination exists.\n")
				writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n")
				continue
			tbz2path=self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
				continue

			#print ">>> Updating data in:",mycpv
			sys.stdout.write("%")
			sys.stdout.flush()
			mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
			# unpack the xpak metadata, rewrite it, and repack
			mytbz2=xpak.tbz2(tbz2path)
			mytbz2.decompose(mytmpdir, cleanup=1)

			fixdbentries(origcp, newcp, mytmpdir)

			catfile=open(mytmpdir+"/CATEGORY", "w")
			catfile.write(mynewcat+"\n")
			catfile.close()
			try:
				os.rename(mytmpdir+"/"+mycpv.split("/")[1]+".ebuild", mytmpdir+"/"+mynewcpv.split("/")[1]+".ebuild")
			except SystemExit, e:
				raise
			except Exception, e:
				pass

			mytbz2.recompose(mytmpdir, cleanup=1)

			self.dbapi.cpv_remove(mycpv)
			if (mynewpkg != myoldpkg):
				os.rename(tbz2path,self.getname(mynewcpv))
			self.dbapi.cpv_inject(mynewcpv)
		return 1

	def move_slot_ent(self,mylist,mytmpdir):
		# Apply a "slotmove" update ("slotmove dep origslot newslot"):
		# rewrite the SLOT file inside each matching tbz2.
		#mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
		mytmpdir=mytmpdir+"/tbz2"
		if not self.populated:
			self.populate()
		pkg=mylist[1]
		origslot=mylist[2]
		newslot=mylist[3]
		origmatches=self.dbapi.match(pkg)
		if not origmatches:
			return
		for mycpv in origmatches:
			mycpsplit=portage_versions.catpkgsplit(mycpv)
			myoldpkg=mycpv.split("/")[1]
			tbz2path=self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
				continue

			#print ">>> Updating data in:",mycpv
			mytbz2=xpak.tbz2(tbz2path)
			mytbz2.decompose(mytmpdir, cleanup=1)

			slot=grabfile(mytmpdir+"/SLOT");
			if (not slot):
				continue

			# only packages currently in origslot are rewritten
			if (slot[0]!=origslot):
				continue

			sys.stdout.write("S")
			sys.stdout.flush()

			slotfile=open(mytmpdir+"/SLOT", "w")
			slotfile.write(newslot+"\n")
			slotfile.close()
			mytbz2.recompose(mytmpdir, cleanup=1)
		return 1

	def update_ents(self,mybiglist,mytmpdir):
		# Apply every "move" update line in mybiglist to every binary
		# package in the tree (non-"move" lines are skipped).
		#XXX mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
		if not self.populated:
			self.populate()
		for mycpv in self.dbapi.cp_all():
			tbz2path=self.getname(mycpv)
			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
				writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
				continue
			#print ">>> Updating binary data:",mycpv
			writemsg("*")
			mytbz2=xpak.tbz2(tbz2path)
			mytbz2.decompose(mytmpdir,cleanup=1)
			for mylist in mybiglist:
				mylist=mylist.split()
				if mylist[0] != "move":
					continue
				fixdbentries(mylist[1], mylist[2], mytmpdir)
			mytbz2.recompose(mytmpdir,cleanup=1)
		return 1

	def populate(self, getbinpkgs=0,getbinpkgsonly=0):
		"populates the binarytree"
		if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
			return 0
		if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
			return 0

		# local packages: scan PKGDIR/All for tbz2s and register each one
		if (not getbinpkgsonly) and os.path.exists(self.pkgdir+"/All"):
			for mypkg in listdir(self.pkgdir+"/All"):
				if mypkg[-5:]!=".tbz2":
					continue
				mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
				mycat=mytbz2.getfile("CATEGORY")
				if not mycat:
					#old-style or corrupt package
					writemsg("!!! Invalid binary package: "+mypkg+"\n")
					self.invalids.append(mypkg)
					continue
				mycat=mycat.strip()
				fullpkg=mycat+"/"+mypkg[:-5]
				mykey=portage_dep.dep_getkey(fullpkg)
				try:
					# invalid tbz2's can hurt things.
					self.dbapi.cpv_inject(fullpkg)
				except SystemExit, e:
					raise
				except:
					continue

		if getbinpkgs and not settings["PORTAGE_BINHOST"]:
			writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"))

		# remote packages: fetch the metadata index from PORTAGE_BINHOST
		# once (self.remotepkgs acts as the already-fetched flag)
		if getbinpkgs and settings["PORTAGE_BINHOST"] and not self.remotepkgs:
			try:
				chunk_size = long(settings["PORTAGE_BINHOST_CHUNKSIZE"])
				if chunk_size < 8:
					chunk_size = 8
			except SystemExit, e:
				raise
			except:
				chunk_size = 3000

			writemsg(green("Fetching binary packages info...\n"))
			self.remotepkgs = getbinpkg.dir_get_metadata(settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
			writemsg(green(" -- DONE!\n\n"))

			for mypkg in self.remotepkgs.keys():
				if not self.remotepkgs[mypkg].has_key("CATEGORY"):
					#old-style or corrupt package
					writemsg("!!! Invalid remote binary package: "+mypkg+"\n")
					del self.remotepkgs[mypkg]
					continue
				mycat=self.remotepkgs[mypkg]["CATEGORY"].strip()
				fullpkg=mycat+"/"+mypkg[:-5]
				mykey=portage_dep.dep_getkey(fullpkg)
				try:
					# invalid tbz2's can hurt things.
					#print "cpv_inject("+str(fullpkg)+")"
					self.dbapi.cpv_inject(fullpkg)
					#print " -- Injected"
				except SystemExit, e:
					raise
				except:
					writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n")
					del self.remotepkgs[mypkg]
					continue
		self.populated=1

	def inject(self,cpv):
		# register cpv with the underlying bindbapi
		return self.dbapi.cpv_inject(cpv)

	def exists_specific(self,cpv):
		# match an exact =cpv dep against the populated tree
		if not self.populated:
			self.populate()
		return self.dbapi.match(dep_expand("="+cpv,mydb=self.dbapi))

	def dep_bestmatch(self,mydep):
		"compatibility method -- all matches, not just visible ones"
		if not self.populated:
			self.populate()
		writemsg("\n\n", 1)
		writemsg("mydep: %s\n" % mydep, 1)
		mydep=dep_expand(mydep,mydb=self.dbapi)
		writemsg("mydep: %s\n" % mydep, 1)
		mykey=portage_dep.dep_getkey(mydep)
		writemsg("mykey: %s\n" % mykey, 1)
		mymatch=best(portage_dep.match_from_list(mydep,self.dbapi.cp_list(mykey)))
		writemsg("mymatch: %s\n" % mymatch, 1)
		if mymatch==None:
			return ""
		return mymatch

	def getname(self,pkgname):
		"returns file location for this particular package"
		# all binaries live flat under PKGDIR/All, keyed by pkg-version
		mysplit=pkgname.split("/")
		if len(mysplit)==1:
			return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
		else:
			return self.pkgdir+"/All/"+mysplit[1]+".tbz2"

	def isremote(self,pkgname):
		"Returns true if the package is kept remotely."
		# remote means: not present locally but listed by the binhost index
		mysplit=pkgname.split("/")
		remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
		return remote

	def get_use(self,pkgname):
		# USE flags recorded in the binary package's metadata
		mysplit=pkgname.split("/")
		if self.isremote(pkgname):
			return self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:].split()
		tbz2=xpak.tbz2(self.getname(pkgname))
		return tbz2.getfile("USE").split()

	def gettbz2(self,pkgname):
		"fetches the package from a remote site, if necessary."
		print "Fetching '"+str(pkgname)+"'"
		mysplit = pkgname.split("/")
		tbz2name = mysplit[1]+".tbz2"
		if not self.isremote(pkgname):
			# local packages only need a fetch when a previous download
			# left an invalid/partial tbz2 behind
			if (tbz2name not in self.invalids):
				return
			else:
				writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n")
		mydest = self.pkgdir+"/All/"
		try:
			os.makedirs(mydest, 0775)
		except SystemExit, e:
			raise
		except:
			pass
		getbinpkg.file_get(settings["PORTAGE_BINHOST"]+"/"+tbz2name, mydest, fcmd=settings["RESUMECOMMAND"])
		return

	def getslot(self,mycatpkg):
		"Get a slot for a catpkg; assume it exists."
		# best-effort: any failure yields the empty string
		myslot = ""
		try:
			myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
		except SystemExit, e:
			raise
		except Exception, e:
			pass
		return myslot
3619
3620 class dblink:
3621 "this class provides an interface to the standard text package database"
	def __init__(self,cat,pkg,myroot,mysettings,treetype="porttree"):
		"create a dblink object for cat/pkg. This dblink entry may or may not exist"
		self.cat = cat
		self.pkg = pkg
		self.mycpv = self.cat+"/"+self.pkg
		self.mysplit = portage_versions.pkgsplit(self.mycpv)
		self.treetype = treetype

		# vdb layout: <myroot>/<VDB_PATH>/<cat>/<pkg>; a sibling
		# "-MERGING-<pkg>" directory is used while a merge is in progress
		self.dbroot = os.path.normpath(myroot+VDB_PATH)
		self.dbcatdir = self.dbroot+"/"+cat
		self.dbpkgdir = self.dbcatdir+"/"+pkg
		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
		self.dbdir = self.dbpkgdir

		self.lock_pkg = None
		self.lock_tmp = None
		self.lock_num = 0	# Count of the held locks on the db.

		self.settings = mysettings
		# guard against a legacy caller passing the integer 1 here
		if self.settings==1:
			raise ValueError

		self.myroot=myroot
		self.updateprotect()
		# memoised result of getcontents(); [] means "not parsed yet"
		self.contentscache=[]
3647
3648 def lockdb(self):
3649 if self.lock_num == 0:
3650 self.lock_pkg = portage_locks.lockdir(self.dbpkgdir)
3651 self.lock_tmp = portage_locks.lockdir(self.dbtmpdir)
3652 self.lock_num += 1
3653
3654 def unlockdb(self):
3655 self.lock_num -= 1
3656 if self.lock_num == 0:
3657 portage_locks.unlockdir(self.lock_tmp)
3658 portage_locks.unlockdir(self.lock_pkg)
3659
3660 def getpath(self):
3661 "return path to location of db information (for >>> informational display)"
3662 return self.dbdir
3663
3664 def exists(self):
3665 "does the db entry exist? boolean."
3666 return os.path.exists(self.dbdir)
3667
3668 def create(self):
3669 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
3670 # XXXXX Delete this eventually
3671 raise Exception, "This is bad. Don't use it."
3672 if not os.path.exists(self.dbdir):
3673 os.makedirs(self.dbdir)
3674
	def delete(self):
		"erase this db entry completely"
		if not os.path.exists(self.dbdir):
			return
		try:
			# the entry is expected to be a flat directory of files:
			# unlink each one, then remove the directory itself
			for x in listdir(self.dbdir):
				os.unlink(self.dbdir+"/"+x)
			os.rmdir(self.dbdir)
		except OSError, e:
			# a nested directory (or permission problem) prevents removal;
			# tell the user how to finish the job manually and bail out
			print "!!! Unable to remove db entry for this package."
			print "!!! It is possible that a directory is in this one. Portage will still"
			print "!!! register this package as installed as long as this directory exists."
			print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
			print "!!! "+str(e)
			print
			sys.exit(1)
3691
3692 def clearcontents(self):
3693 if os.path.exists(self.dbdir+"/CONTENTS"):
3694 os.unlink(self.dbdir+"/CONTENTS")
3695
	def getcontents(self):
		"""Parse this package's CONTENTS file into {path: [type, ...]} and
		memoise the result in self.contentscache. Returns None when the
		file is absent or a line is of an unknown/broken type."""
		if not os.path.exists(self.dbdir+"/CONTENTS"):
			return None
		if self.contentscache != []:
			return self.contentscache
		pkgfiles={}
		myc=open(self.dbdir+"/CONTENTS","r")
		mylines=myc.readlines()
		myc.close()
		pos=1
		for line in mylines:
			mydat = line.split()
			# we do this so we can remove from non-root filesystems
			# (use the ROOT var to allow maintenance on other partitions)
			try:
				# rebase the recorded absolute path onto the current ROOT
				mydat[1]=os.path.normpath(root+mydat[1][1:])
				if mydat[0]=="obj":
					#format: type, mtime, md5sum
					pkgfiles[" ".join(mydat[1:-2])]=[mydat[0], mydat[-1], mydat[-2]]
				elif mydat[0]=="dir":
					#format: type
					pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
				elif mydat[0]=="sym":
					#format: type, mtime, dest
					x=len(mydat)-1
					if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
						mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
						writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
						x=len(mydat)-1
					# scan backwards for the "->" separator between the
					# link name and its target
					splitter=-1
					while(x>=0):
						if mydat[x]=="->":
							splitter=x
							break
						x=x-1
					if splitter==-1:
						return None
					pkgfiles[" ".join(mydat[1:splitter])]=[mydat[0], mydat[-1], " ".join(mydat[(splitter+1):-1])]
				elif mydat[0]=="dev":
					#format: type
					pkgfiles[" ".join(mydat[1:])]=[mydat[0] ]
				elif mydat[0]=="fif":
					#format: type
					pkgfiles[" ".join(mydat[1:])]=[mydat[0]]
				else:
					# unknown entry type: treat the whole file as corrupt
					return None
			except (KeyError,IndexError):
				print "portage: CONTENTS line",pos,"corrupt!"
			pos += 1
		self.contentscache=pkgfiles
		return pkgfiles
3747
3748 def updateprotect(self):
3749 #do some config file management prep
3750 self.protect=[]
3751 for x in self.settings["CONFIG_PROTECT"].split():
3752 ppath=normpath(self.myroot+x)+"/"
3753 if os.path.isdir(ppath):
3754 self.protect.append(ppath)
3755
3756 self.protectmask=[]
3757 for x in self.settings["CONFIG_PROTECT_MASK"].split():
3758 ppath=normpath(self.myroot+x)+"/"
3759 if os.path.isdir(ppath):
3760 self.protectmask.append(ppath)
3761 #if it doesn't exist, silently skip it
3762
3763 def isprotected(self,obj):
3764 """Checks if obj is in the current protect/mask directories. Returns
3765 0 on unprotected/masked, and 1 on protected."""
3766 masked=0
3767 protected=0
3768 for ppath in self.protect:
3769 if (len(ppath) > masked) and (obj[0:len(ppath)]==ppath):
3770 protected=len(ppath)
3771 #config file management
3772 for pmpath in self.protectmask:
3773 if (len(pmpath) >= protected) and (obj[0:len(pmpath)]==pmpath):
3774 #skip, it's in the mask
3775 masked=len(pmpath)
3776 return (protected > masked)
3777
	def unmerge(self,pkgfiles=None,trimworld=1,cleanup=0):
		"""Remove this package's files from the live filesystem.

		pkgfiles  -- dict mapping path -> CONTENTS entry ("obj"/"sym"/"dir"/
		             "fif"/"dev" plus mtime/md5 fields); defaults to this
		             package's recorded contents via getcontents().
		trimworld -- when true, also drop the package's entry from the
		             world file if no other matching version remains.
		cleanup   -- passed through to doebuild() for the prerm phase.

		Runs prerm, removes objects/fifos (verified by mtime/md5 so user
		modified files are kept), then repeatedly removes dead symlinks and
		empty directories until no progress is made, updates the world file,
		and finally runs postrm.  Exits the process (123) on prerm/postrm
		failure.  Holds the vardb lock for the whole operation.
		"""
		# getcontents() results are cached in the module-level dircache;
		# reset it so we do not operate on stale directory listings.
		global dircache
		dircache={}

		self.lockdb()

		self.settings.load_infodir(self.dbdir)

		if not pkgfiles:
			print "No package files given... Grabbing a set."
			pkgfiles=self.getcontents()

		# Now, don't assume that the name of the ebuild is the same as the
		# name of the dir; the package may have been moved.
		myebuildpath=None

		# We should use the environement file if possible,
		# as it has all sourced files already included.
		# XXX: Need to ensure it doesn't overwrite any important vars though.
		if os.access(self.dbdir+"/environment.bz2", os.R_OK):
			portage_exec.spawn("bzip2 -d "+self.dbdir+"/environment.bz2")

		# NOTE(review): myebuildpath is always None at this point (set just
		# above), so this guard is trivially true; kept for clarity/history.
		if not myebuildpath:
			mystuff=listdir(self.dbdir)
			for x in mystuff:
				if x[-7:]==".ebuild":
					myebuildpath=self.dbdir+"/"+x
					break

		#do prerm script
		if myebuildpath and os.path.exists(myebuildpath):
			a=doebuild(myebuildpath,"prerm",self.myroot,self.settings,cleanup=cleanup,use_cache=0, \
				tree=self.treetype)
			# XXX: Decide how to handle failures here.
			if a != 0:
				writemsg("!!! FAILED prerm: "+str(a)+"\n")
				sys.exit(123)

		if pkgfiles:
			# Reverse-sorted so children are visited before their parents.
			mykeys=pkgfiles.keys()
			mykeys.sort()
			mykeys.reverse()

			self.updateprotect()

			#process symlinks second-to-last, directories last.
			mydirs=[]
			mysyms=[]
			modprotect="/lib/modules/"
			for obj in mykeys:
				obj=os.path.normpath(obj)
				if obj[:2]=="//":
					obj=obj[1:]
				if not os.path.exists(obj):
					if not os.path.islink(obj):
						#we skip this if we're dealing with a symlink
						#because os.path.exists() will operate on the
						#link target rather than the link itself.
						print "--- !found "+str(pkgfiles[obj][0]), obj
						continue
				# next line includes a tweak to protect modules from being unmerged,
				# but we don't protect modules from being overwritten if they are
				# upgraded. We effectively only want one half of the config protection
				# functionality for /lib/modules. For portage-ng both capabilities
				# should be able to be independently specified.
				if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)):
					print "--- cfgpro "+str(pkgfiles[obj][0]), obj
					continue

				# stat once; mtime check below skips files the user modified
				# after the merge (their entry types carry no meaningful mtime).
				lstatobj=os.lstat(obj)
				lmtime=str(lstatobj[stat.ST_MTIME])
				if (pkgfiles[obj][0] not in ("dir","fif","dev","sym")) and (lmtime != pkgfiles[obj][1]):
					print "--- !mtime", pkgfiles[obj][0], obj
					continue

				if pkgfiles[obj][0]=="dir":
					if not os.path.isdir(obj):
						print "--- !dir ","dir", obj
						continue
					mydirs.append(obj)
				elif pkgfiles[obj][0]=="sym":
					if not os.path.islink(obj):
						print "--- !sym ","sym", obj
						continue
					mysyms.append(obj)
				elif pkgfiles[obj][0]=="obj":
					if not os.path.isfile(obj):
						print "--- !obj ","obj", obj
						continue
					mymd5=portage_checksum.perform_md5(obj, calc_prelink=1)

					# string.lower is needed because db entries used to be in upper-case.  The
					# string.lower allows for backwards compatibility.
					if mymd5 != pkgfiles[obj][2].lower():
						print "--- !md5 ","obj", obj
						continue
					try:
						os.unlink(obj)
					except (OSError,IOError),e:
						# best-effort: immutable flag / permission problems
						# are tolerated, the file is simply left behind
						pass
					print "<<< ","obj",obj
				elif pkgfiles[obj][0]=="fif":
					if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
						print "--- !fif ","fif", obj
						continue
					try:
						os.unlink(obj)
					except (OSError,IOError),e:
						pass
					print "<<< ","fif",obj
				elif pkgfiles[obj][0]=="dev":
					# device nodes are never removed automatically
					print "--- ","dev",obj

			#Now, we need to remove symlinks and directories.  We'll repeatedly
			#remove dead symlinks, then directories until we stop making progress.
			#This is how we'll clean up directories containing symlinks pointing to
			#directories that are now empty.  These cases will require several
			#iterations through our two-stage symlink/directory cleaning loop.

			#main symlink and directory removal loop:

			#progress -- are we making progress?  Initialized to 1 so loop will start
			progress=1
			while progress:
				#let's see if we're able to make progress this iteration...
				progress=0

				#step 1: remove all the dead symlinks we can...

				pos = 0
				while pos<len(mysyms):
					obj=mysyms[pos]
					if os.path.exists(obj):
						pos += 1
					else:
						#we have a dead symlink; remove it from our list, then from existence
						del mysyms[pos]
						#we've made progress!
						progress = 1
						try:
							os.unlink(obj)
							print "<<< ","sym",obj
						except (OSError,IOError),e:
							print "!!! ","sym",obj
							#immutable?
							pass

				#step 2: remove all the empty directories we can...

				pos = 0
				while pos<len(mydirs):
					obj=mydirs[pos]
					objld=listdir(obj)

					if objld == None:
						print "mydirs["+str(pos)+"]",mydirs[pos]
						print "obj",obj
						print "objld",objld
						# the directory doesn't exist yet, continue
						pos += 1
						continue

					if len(objld)>0:
						#we won't remove this directory (yet), continue
						pos += 1
						continue
					# NOTE(review): objld can no longer be None here (handled
					# above), so this elif is effectively an unconditional else.
					elif (objld != None):
						#zappo time
						del mydirs[pos]
						#we've made progress!
						progress = 1
						try:
							os.rmdir(obj)
							print "<<< ","dir",obj
						except (OSError,IOError),e:
							#immutable?
							pass
					#else:
					#	print "--- !empty","dir", obj
					#	continue

				#step 3: if we've made progress, we'll give this another go...

			#step 4: otherwise, we'll print out the remaining stuff that we didn't unmerge (and rightly so!)

			#directories that aren't empty:
			for x in mydirs:
				print "--- !empty dir", x

			#symlinks whose target still exists:
			for x in mysyms:
				print "--- !targe sym", x

		#step 5: well, removal of package objects is complete, now for package *meta*-objects....

		#remove self from vartree database so that our own virtual gets zapped if we're the last node
		db[self.myroot]["vartree"].zap(self.mycpv)

		# New code to remove stuff from the world and virtuals files when unmerged.
		if trimworld:
			worldlist=grabfile(self.myroot+WORLD_FILE)
			mykey=cpv_getkey(self.mycpv)
			newworldlist=[]
			for x in worldlist:
				if portage_dep.dep_getkey(x)==mykey:
					matches=db[self.myroot]["vartree"].dbapi.match(x,use_cache=0)
					if not matches:
						#zap our world entry
						pass
					elif (len(matches)==1) and (matches[0]==self.mycpv):
						#zap our world entry
						pass
					else:
						#others are around; keep it.
						newworldlist.append(x)
				else:
					#this doesn't match the package we're unmerging; keep it.
					newworldlist.append(x)

			# if the base dir doesn't exist, create it.
			# (spanky noticed bug)
			# XXX: dumb question, but abstracting the root uid might be wise/useful for
			# 2nd pkg manager installation setups.
			if not os.path.exists(os.path.dirname(self.myroot+WORLD_FILE)):
				pdir = os.path.dirname(self.myroot + WORLD_FILE)
				os.makedirs(pdir, mode=0755)
				os.chown(pdir, 0, portage_gid)
				os.chmod(pdir, 02770)

			myworld=open(self.myroot+WORLD_FILE,"w")
			for x in newworldlist:
				myworld.write(x+"\n")
			myworld.close()

		#do original postrm
		if myebuildpath and os.path.exists(myebuildpath):
			# XXX: This should be the old config, not the current one.
			# XXX: Use vardbapi to load up env vars.
			a=doebuild(myebuildpath,"postrm",self.myroot,self.settings,use_cache=0,tree=self.treetype)
			# XXX: Decide how to handle failures here.
			if a != 0:
				writemsg("!!! FAILED postrm: "+str(a)+"\n")
				sys.exit(123)

		self.unlockdb()
4024 def isowner(self,filename,destroot):
4025 """ check if filename is a new file or belongs to this package
4026 (for this or a previous version)"""
4027 destfile = os.path.normpath(destroot+"/"+filename)
4028 if not os.path.exists(destfile):
4029 return True
4030 if self.getcontents() and filename in self.getcontents().keys():
4031 return True
4032
4033 return False
4034
	def treewalk(self,srcroot,destroot,inforoot,myebuild,cleanup=0):
		"""Merge the image in srcroot into destroot and record it in the vardb.

		srcroot  -- image directory (${D}) produced by the build.
		destroot -- live root to merge into (${ROOT}).
		inforoot -- directory holding the package's db metadata files.
		myebuild -- path to the ebuild whose preinst/postinst should run;
		            falls back to the copy in inforoot when None/empty.
		cleanup  -- accepted for interface compatibility; the preinst
		            doebuild() calls below pass cleanup=0 explicitly.

		Optionally performs collision-protect, RDEPEND verification and
		prelinking (driven by FEATURES), then runs preinst, merges files
		via mergeme(), safely unmerges any previously installed instance,
		commits the db entry, runs postinst and env_update.  Returns 0 on
		success, 1 if mergeme() failed; exits the process on preinst/
		postinst failure or a collision.  Holds the vardb lock throughout.
		"""
		global db
		# srcroot = ${D};
		# destroot = where to merge, ie. ${ROOT},
		# inforoot = root of db entry,
		# secondhand = list of symlinks that have been skipped due to
		# their target not existing (will merge later),

		if not os.path.exists(self.dbcatdir):
			os.makedirs(self.dbcatdir)

		# This blocks until we can get the dirs to ourselves.
		self.lockdb()

		stopmerge=False
		import dcache
		dc=dcache.dcache()
		do_prelink = ("prelink" in features and portage_checksum.prelink_capable)
		if "collision-protect" in features or "verify-rdepend" in features or do_prelink:
			myfilelist = listdir(srcroot, recursive=1, filesonly=1,followSymlinks=False,cacheObject=dc)
			# the linkcheck only works if we are in srcroot
			try:
				mycwd = os.getcwd()
			except OSError:
				# cwd may have been deleted under us
				mycwd="/"
			os.chdir(srcroot)
			mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1,
				filesonly=0,followSymlinks=False, cacheObject=dc))
			os.chdir(mycwd)

		# check for package collisions
		otherversions=[]
		for v in db[self.myroot]["vartree"].dbapi.cp_list(self.mysplit[0]):
			otherversions.append(v.split("/")[1])

		if self.pkg in otherversions:
			otherversions.remove(self.pkg) # we already checked this package

		if "collision-protect" in features:
			starttime=time.time()
			i=0

			otherpkg=[]
			mypkglist=[]

			for v in otherversions:
				# should we check for same SLOT here ?
				mypkglist.append(dblink(self.cat,v,destroot,self.settings))

			print
			print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
			for f in myfilelist:
				nocheck = False
				# listdir isn't intelligent enough to exclude symlinked dirs,
				# so we have to do it ourself
				for s in mysymlinks:
					# the length comparison makes sure that the symlink itself is checked
					if f[:len(s)] == s and len(f) > len(s):
						nocheck = True
				if nocheck:
					continue
				i=i+1
				if i % 1000 == 0:
					print "%6d files checked ..." % i
				if f[0] != "/":
					f="/"+f
				# a file collides unless this package or some other installed
				# version owns it, or it falls under CONFIG_PROTECT
				isowned = False
				for ver in [self]+mypkglist:
					if (ver.isowner(f, destroot) or ver.isprotected(f)):
						isowned = True
						break
				if not isowned:
					print "existing file "+f+" is not owned by this package"
					stopmerge=True
			print green("*")+" spent %.2f seconds checking for file collisions" % (time.time()-starttime)
			if stopmerge:
				print red("*")+" This package is blocked because it wants to overwrite"
				print red("*")+" files belonging to other packages (see messages above)."
				print red("*")+" If you have no clue what this is all about report it "
				print red("*")+" as a bug for this package on http://bugs.gentoo.org"
				print
				print red("package "+self.cat+"/"+self.pkg+" NOT merged")
				print
				# Why is the package already merged here db-wise? Shouldn't be the case
				# only unmerge if it ia new package and has no contents
				if not self.getcontents():
					self.unmerge()
					self.delete()
				self.unlockdb()
				sys.exit(1)

		prelink_bins = []
		if "verify-rdepend" in features or do_prelink:
			checklist=[]
			print
			print green("*")+ " grabbing %s/%s's binaries/libs" % (self.cat,self.pkg)

			try:
				mycwd = os.getcwd()
			except OSError:
				mycwd="/"
			os.chdir(srcroot)

			for f in myfilelist:
				nocheck = False
				# listdir isn't intelligent enough to exclude symlinked dirs,
				# so we have to do it ourself
				for s in mysymlinks:
					# the length comparison makes sure that the symlink itself is checked
					# NOTE(review): this "continue" only skips to the next
					# symlink s, not the next file f (compare the nocheck
					# flag used in the collision loop above) -- looks like a
					# bug; files under symlinked dirs are NOT skipped here.
					if f[:len(s)] == s and len(f) > len(s):
						continue

				retval, bins=portage_exec.spawn_get_output("ldd -r %s" % f,collect_fds=[1],emulate_gso=False)
				if retval:
					# non-zero ldd exit: not a dynamic executable/library
					continue
				for x in bins:
					y=x.split()
					if y[0][0:13] != "linux-gate.so" and y[0] not in checklist:
						checklist.append(y[0])

				prelink_bins.append(f)
			os.chdir(mycwd)

		if "verify-rdepend" in features:
			starttime=time.time()
			print green("*")+ " checking %s/%s RDEPEND" % (self.cat, self.pkg)

			# Step1: filter package's provided libs first.
			candidates_checked=["%s/%s" % (self.cat,self.pkg)]
			if len(checklist):
				l=[]
				for x in myfilelist:
					l.append(x.split("/")[-1])
				#mysymlinks is pairs of src, trg. add srcs in.
				for x in range(0,len(mysymlinks),2):
					l.append(mysymlinks[x].split("/")[-1])
				y=0
				while y < len(checklist):
					if checklist[y] in l or checklist[y] in myfilelist:
						checklist.pop(y)
					else:
						y+=1

			# Step2: filter out libs from the packages states RDEPEND
			if len(checklist):
				rdep=portage_dep.paren_reduce(db[self.myroot][self.treetype].dbapi.aux_get( \
					self.mycpv,["RDEPEND"])[0])
				rdep=portage_util.unique_array(flatten(portage_dep.use_reduce(rdep, \
					uselist=self.settings["USE"],matchall=False)))

				r=[]
				for x in rdep:
					r.extend(db[self.myroot]["vartree"].dbapi.match(x))

				rdep=r

				# filter first package rdeps, then virtual/.?libc, then gcc

				lm = db[self.myroot]["vartree"].dbapi.match("virtual/glibc")
				lm.extend(db[self.myroot]["vartree"].dbapi.match("virtual/libc"))
				lm = portage_util.unique_array(lm)

				for rd,msg in [(r,"%s/%s's RDEPEND" % (self.cat,self.pkg)), \
					(lm, "virtual/glibc, virtual/libc"), \
					(db[self.myroot]["vartree"].dbapi.match("gcc"), "gcc")]:

					print green("*")+" Parsing %s contents" % msg

					candidates_checked.extend(rd)

					for r in rd:
						s=portage_versions.catsplit(r)
						# print "%s=" % r, s
						c=dblink(s[0],s[1],self.myroot,self.settings).getcontents()
						if c == None:
							print yellow("---")+" Installed package %s seems to lack a contents file" % r
						else:
							y=0
							l=[]

							# build a list of obj files minus their directory.
							for x in c.keys():
								if c[x][0] in ["obj","sym"]:
									l.append(x.split("/")[-1])

							while y < len(checklist):
								if c.has_key(checklist[y]) or checklist[y] in l:
									# print "%s satisfied by %s" % (checklist[y], r)
									checklist.pop(y)
								else:
									y+=1
						if len(checklist) == 0:
							break
					if len(checklist) == 0:
						break

			# Step3: breadth then depth walk of package's RDEPEND's, RDEPEND's.
			# not much for this, since it's entirely possible invalid deps could be filtered out.
			# probably worth doing uselist=[], since at this point, a depend atom can't specify use flag.
			if len(checklist):
				# so now we recursive walk the RDEPEND's of cpv's RDEPENDS. yay.
				print green("*")+" Parsing breadth then depth of %s/%s's RDEPEND's now (libs remain)" % (self.cat,self.pkg)
				candidate=rdep

				while len(checklist) and len(candidate):
					r=candidate.pop(0)

					candidates_checked.append(r)

					s=portage_versions.catsplit(r)
					c=dblink(s[0],s[1],self.myroot,self.settings).getcontents()
					if c == None:
						print yellow("---")+" Installed package %s seems to lack a contents file" % r
					else:
						l=[]

						# build a list of obj files minus their directory.
						for x in c.keys():
							if c[x][0] in ["obj","sym"]:
								l.append(x.split("/")[-1])

						y=0
						while y < len(checklist):
							if c.has_key(checklist[y]) or checklist[y] in l:
								checklist.pop(y)
							else:
								y+=1

					if len(checklist):
						# append this nodes rdepend.
						rdep,u=db[self.myroot]["vartree"].dbapi.aux_get(r,["RDEPEND","USE"])
						pd=flatten(portage_dep.use_reduce(portage_dep.paren_reduce(rdep),\
							uselist=u,matchall=False))
						pd=portage_util.unique_array(pd)
						for x in pd:
							for y in db["/"]["vartree"].dbapi.match(x):
								if y not in candidates_checked:
									candidate.append(y)
						candidate=portage_util.unique_array(flatten(candidate))

			# Step4: Complain. Loudly.
			if len(checklist):
				print
				print red("!!!")+" %s/%s has an incomplete RDEPEND: Unmatched libs:" % (self.cat, self.pkg)
				print red("!!! ")+string.join(checklist,", ")
				print
				for x in range(0,10):
					sys.stdout.write("\a")
				if "severe" in features:
					if not self.getcontents():
						self.unmerge()
						self.delete()
					self.unlockdb()
					sys.exit(1)

			print green("*")+" spent %.2f seconds verifying RDEPEND" % (time.time()-starttime)


		if do_prelink:
			starttime=time.time()
			print
			print green("*")+ " prelinking %d binaries" % len(prelink_bins)
			c = [PRELINK_BINARY]
			if settings.has_key("PRELINK_OPTS"):
				c.extend(settings["PRELINK_OPTS"].split())

			# prelink in batches of 10 files per spawned process
			for x in range(0,len(prelink_bins),10):
				c2=c[:]
				if x + 10 > len(prelink_bins):
					c2.extend(prelink_bins[x:])
				else:
					c2.extend(prelink_bins[x:x+10])
				try:
					portage_exec.spawn(c2)
				except SystemExit:
					raise
				except Exception,e:
					print "caught exception while prelinking",e

			print green("*")+" spent %.2f seconds prelinking" % (time.time()-starttime)


		# get old contents info for later unmerging
		oldcontents = self.getcontents()

		self.dbdir = self.dbtmpdir
		self.delete()
		if not os.path.exists(self.dbtmpdir):
			os.makedirs(self.dbtmpdir)

		print ">>> Merging",self.mycpv,"to",destroot

		# run preinst script
		if myebuild:
			# if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
			# (if any).
			a=doebuild(myebuild,"preinst",root,self.settings,cleanup=0,use_cache=0, \
				use_info_env=False,tree=self.treetype)
		else:
			a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root,self.settings,cleanup=0, \
				use_cache=0,tree=self.treetype)

		# XXX: Decide how to handle failures here.
		if a != 0:
			writemsg("!!! FAILED preinst: "+str(a)+"\n")
			sys.exit(123)

		# copy "info" files (like SLOT, CFLAGS, etc.) into the database
		for x in listdir(inforoot):
			self.copyfile(inforoot+"/"+x)

		# get current counter value (counter_tick also takes care of incrementing it)
		# XXX Need to make this destroot, but it needs to be initialized first. XXX
		# XXX bis: leads to some invalidentry() call through cp_all().
		counter = db["/"]["vartree"].dbapi.counter_tick(self.myroot,mycpv=self.mycpv)
		# write local package counter for recording
		lcfile = open(self.dbtmpdir+"/COUNTER","w")
		lcfile.write(str(counter))
		lcfile.close()

		# open CONTENTS file (possibly overwriting old one) for recording
		outfile=open(self.dbtmpdir+"/CONTENTS","w")

		self.updateprotect()

		#if we have a file containing previously-merged config file md5sums, grab it.
		if os.path.exists(destroot+CONFIG_MEMORY_FILE):
			cfgfiledict=grabdict(destroot+CONFIG_MEMORY_FILE)
		else:
			cfgfiledict={}
		if self.settings.has_key("NOCONFMEM"):
			cfgfiledict["IGNORE"]=1
		else:
			cfgfiledict["IGNORE"]=0

		# set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
		mymtime = long(time.time())
		prevmask = os.umask(0)
		secondhand = []

		# we do a first merge; this will recurse through all files in our srcroot but also build up a
		# "second hand" of symlinks to merge later
		# NOTE(review): on this early-return the umask is not restored and
		# outfile is left open -- pre-existing behavior, not changed here.
		if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
			return 1

		# now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
		# broken symlinks. We'll merge them too.
		lastlen=0
		while len(secondhand) and len(secondhand)!=lastlen:
			# clear the thirdhand. Anything from our second hand that
			# couldn't get merged will be added to thirdhand.

			thirdhand=[]
			self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)

			#swap hands
			lastlen=len(secondhand)

			# our thirdhand now becomes our secondhand. It's ok to throw
			# away secondhand since thirdhand contains all the stuff that
			# couldn't be merged.
			secondhand = thirdhand

		if len(secondhand):
			# force merge of remaining symlinks (broken or circular; oh well)
			self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)

		#restore umask
		os.umask(prevmask)

		#if we opened it, close it
		outfile.flush()
		outfile.close()

		if (oldcontents):
			print ">>> Safely unmerging already-installed instance..."
			self.dbdir = self.dbpkgdir
			self.unmerge(oldcontents,trimworld=0)
			self.dbdir = self.dbtmpdir
			print ">>> original instance of package unmerged safely."

		# We hold both directory locks.
		self.dbdir = self.dbpkgdir
		self.delete()
		movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)

		self.unlockdb()

		#write out our collection of md5sums
		if cfgfiledict.has_key("IGNORE"):
			del cfgfiledict["IGNORE"]

		# XXXX: HACK! PathSpec is very necessary here.
		if not os.path.exists(destroot+PRIVATE_PATH):
			os.makedirs(destroot+PRIVATE_PATH)
			os.chown(destroot+PRIVATE_PATH,os.getuid(),portage_gid)
			os.chmod(destroot+PRIVATE_PATH,02770)
		# depth-first cleanup of leftovers under PRIVATE_PATH
		# (sort+reverse yields files before their base directories)
		dirlist = prefix_array(listdir(destroot+PRIVATE_PATH),destroot+PRIVATE_PATH+"/")
		while dirlist:
			dirlist.sort()
			dirlist.reverse() # Gets them in file-before basedir order
			x = dirlist[0]
			if os.path.isdir(x):
				dirlist += prefix_array(listdir(x),x+"/")
				continue
			os.unlink(destroot+PRIVATE_PATH+"/"+x)

		mylock = portage_locks.lockfile(destroot+CONFIG_MEMORY_FILE)
		writedict(cfgfiledict,destroot+CONFIG_MEMORY_FILE)
		portage_locks.unlockfile(mylock)

		#do postinst script
		if myebuild:
			# if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
			# (if any).
			a=doebuild(myebuild,"postinst",root,self.settings,use_cache=0,use_info_env=False,cleanup=0,tree=self.treetype)
		else:
			a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root,self.settings,use_cache=0,cleanup=0,tree=self.treetype)

		# XXX: Decide how to handle failures here.
		if a != 0:
			writemsg("!!! FAILED postinst: "+str(a)+"\n")
			sys.exit(123)

		downgrade = False
		for v in otherversions:
			if portage_versions.pkgcmp(portage_versions.catpkgsplit(self.pkg)[1:], portage_versions.catpkgsplit(v)[1:]) < 0:
				downgrade = True

		#update environment settings, library paths. DO NOT change symlinks.
		env_update(self.myroot,makelinks=(not downgrade))
		#dircache may break autoclean because it remembers the -MERGING-pkg file
		global dircache
		if dircache.has_key(self.dbcatdir):
			del dircache[self.dbcatdir]
		print ">>>",self.mycpv,"merged."

		# Process ebuild logfiles
		elog_process(self.mycpv, self.settings)

		return 0
4476
	def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
		"""Merge one directory (or an explicit file list) from srcroot to destroot.

		srcroot      -- image root (${D}); normalized with trailing "/".
		destroot     -- live root; normalized with trailing "/".
		outfile      -- open CONTENTS file; one record written per merged object.
		secondhand   -- list collecting symlinks whose targets do not exist
		                yet (merged on a later pass); None means "force"
		                mode: merge symlinks even if broken.
		stufftomerge -- a str (subdirectory relative to srcroot, recursed
		                into) or a list of paths (second-hand pass).
		cfgfiledict  -- persistent map of protected-file paths to previously
		                merged md5sums ("IGNORE" key holds the NOCONFMEM flag).
		thismtime    -- mtime stamped onto every merged object.

		Returns 1 on a fatal permission problem, None otherwise; exits the
		process on filesystem corruption or a failed movefile().
		"""
		srcroot=os.path.normpath("///"+srcroot)+"/"
		destroot=os.path.normpath("///"+destroot)+"/"
		# this is supposed to merge a list of files. There will be 2 forms of argument passing.
		if type(stufftomerge)==str:
			#A directory is specified. Figure out protection paths, listdir() it and process it.
			mergelist=listdir(srcroot+stufftomerge)
			offset=stufftomerge
			# We need mydest defined up here to calc. protection paths. This is now done once per
			# directory rather than once per file merge. This should really help merge performance.
			# Trailing / ensures that protects/masks with trailing /'s match.
			mytruncpath="/"+offset+"/"
			myppath=self.isprotected(mytruncpath)
		else:
			mergelist=stufftomerge
			offset=""
		for x in mergelist:
			mysrc=os.path.normpath("///"+srcroot+offset+x)
			mydest=os.path.normpath("///"+destroot+offset+x)
			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
			myrealdest="/"+offset+x
			# stat file once, test using S_* macros many times (faster that way)
			try:
				mystat=os.lstat(mysrc)
			except SystemExit, e:
				raise
			except OSError, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
				writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
				writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
				writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
				writemsg(red("!!! File: ")+str(mysrc)+"\n")
				writemsg(red("!!! Error: ")+str(e)+"\n")
				sys.exit(1)
			except Exception, e:
				writemsg("\n")
				writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
				writemsg(red("!!! A stat call returned the following error for the following file:"))
				writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
				writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
				writemsg( "!!! File: "+str(mysrc)+"\n")
				writemsg( "!!! Error: "+str(e)+"\n")
				sys.exit(1)


			mymode=mystat[stat.ST_MODE]
			# handy variables; mydest is the target object on the live filesystems;
			# mysrc is the source object in the temporary install dir
			try:
				mydmode=os.lstat(mydest)[stat.ST_MODE]
			except SystemExit, e:
				raise
			except:
				#dest file doesn't exist
				mydmode=None

			if stat.S_ISLNK(mymode):
				# we are merging a symbolic link
				myabsto=abssymlink(mysrc)
				if myabsto[0:len(srcroot)]==srcroot:
					myabsto=myabsto[len(srcroot):]
					if myabsto[0]!="/":
						myabsto="/"+myabsto
				myto=os.readlink(mysrc)
				# strip the ${D} prefix if the link target points into the image
				if self.settings and self.settings["D"]:
					if myto.find(self.settings["D"])==0:
						myto=myto[len(self.settings["D"]):]
				# myrealto contains the path of the real file to which this symlink points.
				# we can simply test for existence of this file to see if the target has been merged yet
				myrealto=os.path.normpath(os.path.join(destroot,myabsto))
				if mydmode!=None:
					#destination exists
					if not stat.S_ISLNK(mydmode):
						if stat.S_ISDIR(mydmode):
							# directory in the way: we can't merge a symlink over a directory
							# we won't merge this, continue with next file...
							continue
						if self.isprotected(mydest):
							# Use md5 of the target in ${D} if it exists...
							if os.path.exists(os.path.normpath(srcroot+myabsto)):
								mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(srcroot+myabsto))
							else:
								mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(myabsto))

				# if secondhand==None it means we're operating in "force" mode and should not create a second hand.
				if (secondhand!=None) and (not os.path.exists(myrealto)):
					# either the target directory doesn't exist yet or the target file doesn't exist -- or
					# the target is a broken symlink. We will add this file to our "second hand" and merge
					# it later.
					secondhand.append(mysrc[len(srcroot):])
					continue
				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
				mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
				if mymtime!=None:
					print ">>>",mydest,"->",myto
					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
				else:
					print "!!! Failed to move file."
					print "!!!",mydest,"->",myto
					sys.exit(1)
			elif stat.S_ISDIR(mymode):
				# we are merging a directory
				if mydmode!=None:
					# destination exists

					if bsd_chflags:
						# Save then clear flags on dest.
						dflags=bsd_chflags.lgetflags(mydest)
						if(bsd_chflags.lchflags(mydest, 0)<0):
							writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n")

					if not os.access(mydest, os.W_OK):
						pkgstuff = portage_versions.pkgsplit(self.pkg)
						writemsg("\n!!! Cannot write to '"+mydest+"'.\n")
						writemsg("!!! Please check permissions and directories for broken symlinks.\n")
						writemsg("!!! You may start the merge process again by using ebuild:\n")
						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
						writemsg("!!! And finish by running this: env-update\n\n")
						return 1

					if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
						# a symlink to an existing directory will work for us; keep it:
						print "---",mydest+"/"
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
					else:
						# a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
						if movefile(mydest,mydest+".backup", mysettings=self.settings) == None:
							print "failed move"
							sys.exit(1)
						print "bak",mydest,mydest+".backup"
						#now create our directory
						if selinux_enabled:
							sid = selinux.get_sid(mysrc)
							selinux.secure_mkdir(mydest,sid)
						else:
							os.mkdir(mydest)
						if bsd_chflags:
							bsd_chflags.lchflags(mydest, dflags)
						os.chmod(mydest,mystat[0])
						lchown(mydest,mystat[4],mystat[5])
						print ">>>",mydest+"/"
				else:
					#destination doesn't exist
					if selinux_enabled:
						sid = selinux.get_sid(mysrc)
						selinux.secure_mkdir(mydest,sid)
					else:
						os.mkdir(mydest)
					os.chmod(mydest,mystat[0])
					if bsd_chflags:
						bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc))
					lchown(mydest,mystat[4],mystat[5])
					print ">>>",mydest+"/"
				outfile.write("dir "+myrealdest+"\n")
				# recurse and merge this directory
				if self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime):
					return 1
			elif stat.S_ISREG(mymode):
				# we are merging a regular file
				mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
				# calculate config file protection stuff
				mydestdir=os.path.dirname(mydest)
				moveme=1
				zing="!!!"
				if mydmode!=None:
					# destination file exists
					if stat.S_ISDIR(mydmode):
						# install of destination is blocked by an existing directory with the same name
						moveme=0
						print "!!!",mydest
					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
						cfgprot=0
						# install of destination is blocked by an existing regular file,
						# or by a symlink to an existing regular file;
						# now, config file management may come into play.
						# we only need to tweak mydest if cfg file management is in play.
						if myppath:
							# we have a protection path; enable config file management.
							destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
							cycled=0
							if cfgfiledict.has_key(myrealdest):
								if destmd5 in cfgfiledict[myrealdest]:
									#cycle
									print "cycle"
									del cfgfiledict[myrealdest]
									cycled=1
							if mymd5==destmd5:
								#file already in place; simply update mtimes of destination
								os.utime(mydest,(thismtime,thismtime))
								zing="---"
								moveme=0
							elif cycled:
								#mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
								moveme=1
								cfgfiledict[myrealdest]=[mymd5]
								cfgprot=1
							elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
								#myd5!=destmd5, we haven't cycled, and the file we're merging has been already merged previously
								zing="-o-"
								moveme=cfgfiledict["IGNORE"]
								cfgprot=cfgfiledict["IGNORE"]
							else:
								#mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
								moveme=1
								cfgprot=1
							if not cfgfiledict.has_key(myrealdest):
								cfgfiledict[myrealdest]=[]
							if mymd5 not in cfgfiledict[myrealdest]:
								cfgfiledict[myrealdest].append(mymd5)
							#don't record more than 16 md5sums
							if len(cfgfiledict[myrealdest])>16:
								del cfgfiledict[myrealdest][0]

						if cfgprot:
							mydest = new_protect_filename(myrealdest, newmd5=mymd5)

				# whether config protection or not, we merge the new file the
				# same way. Unless moveme=0 (blocking directory)
				if moveme:
					mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
					if mymtime == None:
						print "failed move"
						sys.exit(1)
					zing=">>>"
				else:
					mymtime=thismtime
					# We need to touch the destination so that on --update the
					# old package won't yank the file with it. (non-cfgprot related)
					os.utime(myrealdest,(thismtime,thismtime))
					zing="---"
					if self.settings["ARCH"] == "ppc-macos" and myrealdest[-2:] == ".a":

						# XXX kludge, bug #58848; can be killed when portage stops relying on
						# md5+mtime, and uses refcounts
						# alright, we've fooled w/ mtime on the file; this pisses off static archives
						# basically internal mtime != file's mtime, so the linker (falsely) thinks
						# the archive is stale, and needs to have it's toc rebuilt.

						myf=open(myrealdest,"r+")

						# ar mtime field is digits padded with spaces, 12 bytes.
						lms=str(thismtime+5).ljust(12)
						myf.seek(0)
						magic=myf.read(8)
						if magic != "!<arch>\n":
							# not an archive (dolib.a from portage.py makes it here fex)
							myf.close()
						else:
							st=os.stat(myrealdest)
							while myf.tell() < st.st_size - 12:
								# skip object name
								myf.seek(16,1)

								# update mtime
								myf.write(lms)

								# skip uid/gid/mperm
								myf.seek(20,1)

								# read the archive member's size
								x=long(myf.read(10))

								# skip the trailing newlines, and add the potential
								# extra padding byte if it's not an even size
								myf.seek(x + 2 + (x % 2),1)

							# and now we're at the end. yay.
							myf.close()
							mymd5=portage_checksum.perform_md5(myrealdest,calc_prelink=1)
						os.utime(myrealdest,(thismtime,thismtime))

				if mymtime!=None:
					zing=">>>"
					outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
				print zing,mydest
			else:
				# we are merging a fifo or device node
				zing="!!!"
				if mydmode==None:
					# destination doesn't exist
					if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
						zing=">>>"
						if stat.S_ISFIFO(mymode):
							# we don't record device nodes in CONTENTS,
							# although we do merge them.
							outfile.write("fif "+myrealdest+"\n")
					else:
						sys.exit(1)
				print zing+" "+mydest
4768
	def merge(self,mergeroot,inforoot,myroot,myebuild=None,cleanup=0):
		# Thin wrapper around treewalk().  NOTE: the argument order differs --
		# treewalk takes (srcroot, destroot, inforoot, ...), so inforoot and
		# myroot are deliberately swapped here relative to this signature.
		return self.treewalk(mergeroot,myroot,inforoot,myebuild,cleanup=cleanup)
4771
4772 def getstring(self,name):
4773 """
4774 returns contents of a file with whitespace
4775 (including newlines) converted to spaces
4776 """
4777 if not os.path.exists(self.dbdir+"/"+name):
4778 return ""
4779 myfile=open(self.dbdir+"/"+name,"r")
4780 mydata=myfile.read().split()
4781 return " ".join(mydata)
4782
4783 def copyfile(self,fname):
4784 shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
4785
4786 def getfile(self,fname):
4787 if not os.path.exists(self.dbdir+"/"+fname):
4788 return ""
4789 myfile=open(self.dbdir+"/"+fname,"r")
4790 mydata=myfile.read()
4791 myfile.close()
4792 return mydata
4793
4794 def setfile(self,fname,data):
4795 myfile=open(self.dbdir+"/"+fname,"w")
4796 myfile.write(data)
4797 myfile.close()
4798
4799 def getelements(self,ename):
4800 if not os.path.exists(self.dbdir+"/"+ename):
4801 return []
4802 myelement=open(self.dbdir+"/"+ename,"r")
4803 mylines=myelement.readlines()
4804 myreturn=[]
4805 for x in mylines:
4806 for y in x[:-1].split():
4807 myreturn.append(y)
4808 myelement.close()
4809 return myreturn
4810
4811 def setelements(self,mylist,ename):
4812 myelement=open(self.dbdir+"/"+ename,"w")
4813 for x in mylist:
4814 myelement.write(x+"\n")
4815 myelement.close()
4816
4817 def isregular(self):
4818 "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
4819 return os.path.exists(self.dbdir+"/CATEGORY")
4820
def cleanup_pkgmerge(mypkg,origdir=None):
	"""Remove the temporary directories pkgmerge created for *mypkg* and,
	when *origdir* is given, chdir back to it."""
	shutil.rmtree(settings["PORTAGE_TMPDIR"]+"/binpkgs/"+mypkg)
	envfile = settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment"
	if os.path.exists(envfile):
		os.unlink(envfile)
	if origdir:
		os.chdir(origdir)
4827
def pkgmerge(mytbz2,myroot,mysettings):
	"""will merge a .tbz2 file, returning a list of runtime dependencies
	that must be satisfied, or None if there was a merge error. This
	code assumes the package exists."""
	if mytbz2[-5:]!=".tbz2":
		print "!!! Not a .tbz2 file"
		return None
	mypkg=os.path.basename(mytbz2)[:-5]
	xptbz2=xpak.tbz2(mytbz2)
	pkginfo={}
	mycat=xptbz2.getfile("CATEGORY")
	if not mycat:
		print "!!! CATEGORY info missing from info chunk, aborting..."
		return None
	mycat=mycat.strip()
	mycatpkg=mycat+"/"+mypkg
	tmploc=mysettings["PORTAGE_TMPDIR"]+"/binpkgs/"
	pkgloc=tmploc+"/"+mypkg+"/bin/"
	infloc=tmploc+"/"+mypkg+"/inf/"
	# [:-4] strips "tbz2" but keeps the dot, so the suffix becomes ".ebuild"
	myebuild=tmploc+"/"+mypkg+"/inf/"+os.path.basename(mytbz2)[:-4]+"ebuild"
	if os.path.exists(tmploc+"/"+mypkg):
		# stale leftovers from a previous run; ignore_errors=1
		shutil.rmtree(tmploc+"/"+mypkg,1)
	os.makedirs(pkgloc)
	os.makedirs(infloc)
	print ">>> extracting info"
	xptbz2.unpackinfo(infloc)
	# run pkg_setup early, so we can bail out early
	# (before extracting binaries) if there's a problem
	origdir=getcwd()
	os.chdir(pkgloc)

	mysettings.configdict["pkg"]["CATEGORY"] = mycat
	# NOTE(review): the return value of doebuild() is ignored here, so a
	# failing pkg_setup does not abort the merge -- confirm intended.
	a=doebuild(myebuild,"setup",myroot,mysettings,tree="bintree")
	print ">>> extracting",mypkg
	notok=spawn("bzip2 -dqc -- '"+mytbz2+"' | tar xpf -",mysettings,free=1)
	if notok:
		print "!!! Error extracting",mytbz2
		cleanup_pkgmerge(mypkg,origdir)
		return None

	# the merge takes care of pre/postinst and old instance
	# auto-unmerge, virtual/provides updates, etc.
	mysettings.load_infodir(infloc)
	mylink=dblink(mycat,mypkg,myroot,mysettings,treetype="bintree")
	mylink.merge(pkgloc,infloc,myroot,myebuild,cleanup=1)

	if not os.path.exists(infloc+"/RDEPEND"):
		returnme=""
	else:
		#get runtime dependencies
		a=open(infloc+"/RDEPEND","r")
		returnme=" ".join( a.read().split())
		a.close()
	cleanup_pkgmerge(mypkg,origdir)
	return returnme
4883
4884
# Determine the target root filesystem.  $ROOT from the environment wins
# and is normalized to always carry a trailing "/" so later string
# concatenation (root+"tmp", etc.) works.
if os.environ.has_key("ROOT"):
	root=os.environ["ROOT"]
	if not len(root):
		root="/"
	elif root[-1]!="/":
		root=root+"/"
else:
	root="/"
if root != "/":
	# root[:-1] strips the trailing slash added above for the stat calls
	if not os.path.exists(root[:-1]):
		writemsg("!!! Error: ROOT "+root+" does not exist. Please correct this.\n")
		writemsg("!!! Exiting.\n\n")
		sys.exit(1)
	elif not os.path.isdir(root[:-1]):
		writemsg("!!! Error: ROOT "+root[:-1]+" is not a directory. Please correct this.\n")
		writemsg("!!! Exiting.\n\n")
		sys.exit(1)

#create tmp and var/tmp if they don't exist; read config
# umask 0 so the explicit modes below take effect verbatim
os.umask(0)
if not os.path.exists(root+"tmp"):
	writemsg(">>> "+root+"tmp doesn't exist, creating it...\n")
	# 01777: world-writable with the sticky bit, standard for tmp dirs
	os.mkdir(root+"tmp",01777)
if not os.path.exists(root+"var/tmp"):
	writemsg(">>> "+root+"var/tmp doesn't exist, creating it...\n")
	try:
		os.mkdir(root+"var",0755)
	except (OSError,IOError):
		# /var probably exists already; only var/tmp was missing
		pass
	try:
		os.mkdir(root+"var/tmp",01777)
	except SystemExit, e:
		raise
	except:
		writemsg("portage: couldn't create /var/tmp; exiting.\n")
		sys.exit(1)
4921
4922
#####################################
# Deprecation Checks

os.umask(022)
profiledir=None
if os.path.isdir(PROFILE_PATH):
	profiledir = PROFILE_PATH
	if os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
		# the first line of the "deprecated" marker file names the
		# replacement profile; any further lines are upgrade instructions
		deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
		dcontent = deprecatedfile.readlines()
		deprecatedfile.close()
		newprofile = dcontent[0]
		writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"))
		writemsg(red("!!! Please upgrade to the following profile if possible:\n"))
		writemsg(8*" "+green(newprofile)+"\n")
		if len(dcontent) > 1:
			writemsg("To upgrade do the following steps:\n")
			for myline in dcontent[1:]:
				writemsg(myline)
			writemsg("\n\n")

if os.path.exists(USER_VIRTUALS_FILE):
	writemsg(red("\n!!! /etc/portage/virtuals is deprecated in favor of\n"))
	writemsg(red("!!! /etc/portage/profile/virtuals. Please move it to\n"))
	writemsg(red("!!! this new location.\n\n"))

#
#####################################
4951
# db maps a root path ("/" or $ROOT) to its trees (vartree/porttree/bintree)
# and virtuals; populated by do_vartree() and the tree setup further below.
db={}

# =============================================================================
# =============================================================================
# -----------------------------------------------------------------------------
# We're going to lock the global config to prevent changes, but we need
# to ensure the global settings are right.
settings=config(config_profile_path=PROFILE_PATH,config_incrementals=portage_const.INCREMENTALS)

# useful info
settings["PORTAGE_MASTER_PID"]=str(os.getpid())
settings.backup_changes("PORTAGE_MASTER_PID")
4964
def do_vartree(mysettings):
	"""(Re)create the installed-package tree (vartree) and virtuals
	mappings for "/" and, when ROOT differs, for the target root too.
	Mutates the module-level globals `virts`, `virts_p` and `db`."""
	global virts, virts_p, db
	virts=mysettings.getvirtuals("/")
	virts_p={}

	if virts:
		myvkeys=virts.keys()
		for x in myvkeys:
			vkeysplit=x.split("/")
			# index virtuals by bare package name (category stripped);
			# the first provider seen wins
			if not virts_p.has_key(vkeysplit[1]):
				virts_p[vkeysplit[1]]=virts[x]
	db["/"]={
		"virtuals":virts,
		"vartree":vartree("/",virts),
	}
	if root!="/":
		# note: `virts` is rebound to the target root's virtuals here
		virts=mysettings.getvirtuals(root)
		db[root]={
			"virtuals":virts,
			"vartree":vartree(root,virts)}
#We need to create the vartree first, then load our settings, and then set up our other trees

# XXX: This is a circular fix.
#do_vartree(settings)
#settings.loadVirtuals('/')
do_vartree(settings)
#settings.loadVirtuals('/')

settings.reset() # XXX: Regenerate use after we get a vartree -- GLOBAL


# XXX: Might cause problems with root="/" assumptions
portdb=portdbapi(settings["PORTDIR"])

# from here on the global config is read-only
settings.lock()
# -----------------------------------------------------------------------------
# =============================================================================
# =============================================================================
5004
5005 if 'selinux' in settings["USE"].split(" "):
5006 try:
5007 import selinux
5008 selinux_enabled=1
5009 portage_exec.selinux_capable = True
5010 except OSError, e:
5011 writemsg(red("!!! SELinux not loaded: ")+str(e)+"\n")
5012 selinux_enabled=0
5013 except ImportError:
5014 writemsg(red("!!! SELinux module not found.")+" Please verify that it was installed.\n")
5015 selinux_enabled=0
5016 else:
5017 selinux_enabled=0
5018
5019 cachedirs=[CACHE_PATH]
5020 if root!="/":
5021 cachedirs.append(root+CACHE_PATH)
5022 if not os.environ.has_key("SANDBOX_ACTIVE"):
5023 for cachedir in cachedirs:
5024 if not os.path.exists(cachedir):
5025 os.makedirs(cachedir,0755)
5026 writemsg(">>> "+cachedir+" doesn't exist, creating it...\n")
5027 if not os.path.exists(cachedir+"/dep"):
5028 os.makedirs(cachedir+"/dep",2755)
5029 writemsg(">>> "+cachedir+"/dep doesn't exist, creating it...\n")
5030 try:
5031 os.chown(cachedir,uid,portage_gid)
5032 os.chmod(cachedir,0775)
5033 except OSError:
5034 pass
5035 try:
5036 mystat=os.lstat(cachedir+"/dep")
5037 os.chown(cachedir+"/dep",uid,portage_gid)
5038 os.chmod(cachedir+"/dep",0775)
5039 if mystat[stat.ST_GID]!=portage_gid:
5040 spawn("chown -R "+str(uid)+":"+str(portage_gid)+" "+cachedir+"/dep",settings,free=1)
5041 spawn("chmod -R u+rw,g+rw "+cachedir+"/dep",settings,free=1)
5042 except OSError:
5043 pass
5044
def flushmtimedb(record):
	"""Drop *record* from the global mtimedb cache; warn when the record
	is not present.  No-op when mtimedb itself is empty/unset."""
	if mtimedb:
		# direct membership test instead of building a throwaway keys() list
		if record in mtimedb:
			del mtimedb[record]
			#print "mtimedb["+record+"] is cleared."
		else:
			writemsg("Invalid or unset record '"+record+"' in mtimedb.\n")
5052
#grab mtimes for eclasses and upgrades
mtimedb={}
# only these keys are considered valid; anything else is purged below
mtimedbkeys=[
"updates", "info",
"version", "starttime",
"resume", "ldpath"
]
mtimedbfile=root+"var/cache/edb/mtimedb"
try:
	mypickle=cPickle.Unpickler(open(mtimedbfile))
	# disabling find_global keeps the unpickler from instantiating
	# arbitrary classes out of the (root-owned, but still) cache file
	mypickle.find_global=None
	mtimedb=mypickle.load()
	# migrate legacy key names from older portage versions
	if mtimedb.has_key("old"):
		mtimedb["updates"]=mtimedb["old"]
		del mtimedb["old"]
	if mtimedb.has_key("cur"):
		del mtimedb["cur"]
except SystemExit, e:
	raise
except:
	# missing/corrupt cache: start from a fresh, minimal db
	#print "!!!",e
	mtimedb={"updates":{},"version":"","starttime":0}

for x in mtimedb.keys():
	if x not in mtimedbkeys:
		writemsg("Deleting invalid mtimedb key: "+str(x)+"\n")
		del mtimedb[x]

#,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
features=settings["FEATURES"].split()
5083
do_upgrade_packagesmessage=0
def do_upgrade(mykey):
	"""Apply one profiles/updates file (*mykey*): package moves and slot
	moves are replayed against the installed db, the binary tree, the
	world file and /etc/portage/package.* files.  Sets the global
	do_upgrade_packagesmessage when binary packages were skipped."""
	global do_upgrade_packagesmessage
	writemsg("\n\n")
	writemsg(green("Performing Global Updates: ")+bold(mykey)+"\n")
	writemsg("(Could take a couple of minutes if you have a lot of binary packages.)\n")
	writemsg(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
	# processed stays 1 only if every directive in the file was understood;
	# the file's mtime is recorded below only in that case
	processed=1
	#remove stale virtual entries (mappings for packages that no longer exist)

	update_files={}
	file_contents={}
	myxfiles = ["package.mask","package.unmask","package.keywords","package.use"]
	myxfiles = myxfiles + prefix_array(myxfiles, "profile/")
	for x in myxfiles:
		try:
			myfile = open("/etc/portage/"+x,"r")
			file_contents[x] = myfile.readlines()
			myfile.close()
		except IOError:
			# file absent/unreadable: make sure no stale entry remains
			if file_contents.has_key(x):
				del file_contents[x]
			continue

	worldlist=grabfile("/"+WORLD_FILE)
	myupd=grabfile(mykey)
	db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
	for myline in myupd:
		mysplit=myline.split()
		if not len(mysplit):
			continue
		if mysplit[0]!="move" and mysplit[0]!="slotmove":
			writemsg("portage: Update type \""+mysplit[0]+"\" not recognized.\n")
			processed=0
			continue
		if mysplit[0]=="move" and len(mysplit)!=3:
			writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
			processed=0
			continue
		if mysplit[0]=="slotmove" and len(mysplit)!=4:
			writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
			processed=0
			continue
		# progress indicator: one dot per directive
		sys.stdout.write(".")
		sys.stdout.flush()

		if mysplit[0]=="move":
			db["/"]["vartree"].dbapi.move_ent(mysplit)
			db["/"]["bintree"].move_ent(mysplit)
			#update world entries:
			for x in range(0,len(worldlist)):
				#update world entries, if any.
				worldlist[x]=dep_transform(worldlist[x],mysplit[1],mysplit[2])

			#update /etc/portage/packages.*
			for x in file_contents:
				for mypos in range(0,len(file_contents[x])):
					line=file_contents[x][mypos]
					# skip blanks and comments
					if line.strip()=="" or line.strip()[0]=="#":
						continue
					key=portage_dep.dep_getkey(line.split()[0])
					if key==mysplit[1]:
						file_contents[x][mypos]=line.replace(mysplit[1],mysplit[2])
						update_files[x]=1
						sys.stdout.write("p")
						sys.stdout.flush()

		elif mysplit[0]=="slotmove":
			db["/"]["vartree"].dbapi.move_slot_ent(mysplit)
			db["/"]["bintree"].move_slot_ent(mysplit,settings["PORTAGE_TMPDIR"]+"/tbz2")

	# write back any /etc/portage/package.* files we touched, honoring
	# CONFIG_PROTECT via new_protect_filename
	for x in update_files:
		mydblink = dblink('','','/',settings)
		if mydblink.isprotected("/etc/portage/"+x):
			updating_file=new_protect_filename("/etc/portage/"+x)[0]
		else:
			updating_file="/etc/portage/"+x
		try:
			myfile=open(updating_file,"w")
			myfile.writelines(file_contents[x])
			myfile.close()
		except IOError:
			continue

	# We gotta do the brute force updates for these now.
	if (settings["PORTAGE_CALLER"] in ["fixpackages"]) or \
	("fixpackages" in features):
		db["/"]["bintree"].update_ents(myupd,settings["PORTAGE_TMPDIR"]+"/tbz2")
	else:
		# binary packages were NOT rewritten; remind the user later
		do_upgrade_packagesmessage = 1

	if processed:
		#update our internal mtime since we processed all our directives.
		mtimedb["updates"][mykey]=os.stat(mykey)[stat.ST_MTIME]
		myworld=open("/"+WORLD_FILE,"w")
		for x in worldlist:
			myworld.write(x+"\n")
		myworld.close()
	print ""
5183
# list of (func, args, kwargs) triples run by portageexit()
exit_callbacks = []

def append_exit_callback(func,args=None,kwargs=None):
	"""append a callback to the exit callback list
	args is positionally expanded and must be a list/tuple
	kwargs is a optional param, and expanded- must be a dict."""
	global exit_callbacks
	# None sentinels instead of mutable defaults: the old args=[]/kwargs={}
	# defaults were shared across every call that omitted them
	if args is None:
		args = []
	if kwargs is None:
		kwargs = {}
	exit_callbacks.append((func,args,kwargs))
5192
def kill_spawned_pids():
	"""since exit_callbacks takes an array, and kwargs upon initializing, it's possible that
	faulty code in portage_exec.spawn might replace spawned_pids, instead of modifying it.
	In doing so, that would make portage not have an up to date list of pids
	so, we call this function instead which pulls (at exit time) from portage_exec.spawned_pids"""
	# read spawned_pids at call time (not bound early) in case the module
	# rebound the list since this callback was registered
	portage_exec.cleanup(portage_exec.spawned_pids)
5199
#order here is important. we want to attempt to cleanly shutdown the daemons prior to
#resorting to wax'ing them w/ a sigint/sigkill
append_exit_callback(ebuild.shutdown_all_processors)
append_exit_callback(kill_spawned_pids)
5204
def portageexit():
	"""atexit hook: run the registered exit callbacks, reap children and
	(when running with privileges outside the sandbox) persist mtimedb."""
	global uid,portage_gid,portdb,db
	global exit_callbacks
	for x in exit_callbacks:
		try:
			x[0](*x[1],**x[2])
		except SystemExit:
			raise
		except Exception, e:
			# a broken callback must not prevent the remaining
			# callbacks (or the mtimedb save) from running
			print "caught exception for exit_callback func",x[0]
			print e
			pass

	if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
		# wait child process death
		try:
			while True:
				os.wait()
		except OSError:
			# ECHILD: no children left to reap
			#writemsg(">>> All child process are now dead.")
			pass

		close_portdbapi_caches()

		if mtimedb:
			# Store mtimedb
			mymfn=mtimedbfile
			try:
				mtimedb["version"]=VERSION
				cPickle.dump(mtimedb, open(mymfn,"w"), cPickle.HIGHEST_PROTOCOL)
				#print "*** Wrote out mtimedb data successfully."
				os.chown(mymfn,uid,portage_gid)
				os.chmod(mymfn,0664)
			except SystemExit, e:
				raise
			except Exception, e:
				# best-effort: a failed save must not break shutdown
				pass
			try:
				# second pass: fix group/perms even if the chown above
				# failed (e.g. not running as root)
				os.chown(mymfn,-1,portage_gid)
				m=os.umask(0)
				os.chmod(mymfn,0664)
				os.umask(m)
			except (IOError, OSError):
				pass
5249
atexit.register(portageexit)

# Replay profiles/updates files (package moves / slot moves) when running
# as root via emerge or fixpackages, outside the sandbox.
if (secpass==2) and (not os.environ.has_key("SANDBOX_ACTIVE")):
	if settings["PORTAGE_CALLER"] in ["emerge","fixpackages"]:
		#only do this if we're root and not running repoman/ebuild digest
		updpath=os.path.normpath(settings["PORTDIR"]+"///profiles/updates")
		didupdate=0
		if not mtimedb.has_key("updates"):
			mtimedb["updates"]={}
		try:
			mylist=listdir(updpath)
			# resort the list
			# filenames are "<quarter>Q-<year>"; swap the parts twice so the
			# sort is chronological (year first), then swap back
			mylist=[myfile[3:]+"-"+myfile[:2] for myfile in mylist]
			mylist.sort()
			mylist=[myfile[5:]+"-"+myfile[:4] for myfile in mylist]
			for myfile in mylist:
				mykey=updpath+"/"+myfile
				if not os.path.isfile(mykey):
					continue
				# re-run a file when it is new, its mtime changed, or the
				# caller is fixpackages (which always reprocesses)
				if (not mtimedb["updates"].has_key(mykey)) or \
				   (mtimedb["updates"][mykey] != os.stat(mykey)[stat.ST_MTIME]) or \
				   (settings["PORTAGE_CALLER"] == "fixpackages"):
					didupdate=1
					do_upgrade(mykey)
					portageexit() # This lets us save state for C-c.
		except OSError:
			#directory doesn't exist
			pass
		if didupdate:
			#make sure our internal databases are consistent; recreate our virts and vartree
			do_vartree(settings)
			if do_upgrade_packagesmessage and \
			   listdir(settings["PKGDIR"]+"/All/"):
				writemsg("\n\n\n ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
				writemsg("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
				writemsg("\n")
5286
5287
5288
5289
5290
#continue setting up other trees
db["/"]["porttree"]=portagetree("/",virts)
db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
if root!="/":
	db[root]["porttree"]=portagetree(root,virts)
	db[root]["bintree"]=binarytree(root,settings["PKGDIR"],virts)
thirdpartymirrors=grabdict(settings["PORTDIR"]+"/profiles/thirdpartymirrors")

# PORTAGE_TMPDIR must exist and be a directory; nothing works without it
if not os.path.exists(settings["PORTAGE_TMPDIR"]):
	writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
	writemsg("does not exist. Please create this directory or correct your PORTAGE_TMPDIR setting.\n")
	sys.exit(1)
if not os.path.isdir(settings["PORTAGE_TMPDIR"]):
	writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
	writemsg("is not a directory. Please correct your PORTAGE_TMPDIR setting.\n")
	sys.exit(1)

# COMPATABILITY -- This shouldn't be used.
pkglines = settings.packages

# warn about ACCEPT_KEYWORDS entries that name no known arch
groups=settings["ACCEPT_KEYWORDS"].split()
archlist=[]
for myarch in grabfile(settings["PORTDIR"]+"/profiles/arch.list"):
	archlist += [myarch,"~"+myarch]
for group in groups:
	if not archlist:
		writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
		break
	elif (group not in archlist) and group[0]!='-':
		writemsg("\n"+red("!!! INVALID ACCEPT_KEYWORDS: ")+str(group)+"\n")

# Clear the cache
dircache={}

# without either sandbox feature enabled, never attempt sandboxed spawns
if not "sandbox" in features and not "usersandbox" in features:
	portage_exec.sandbox_capable = False
5326 portage_exec.sandbox_capable = False
5327
5328 fetcher=None
5329 def get_preferred_fetcher():
5330 """get the preferred fetcher. basically an initial check to verify FETCHCOMMAND/RESUMECOMMAND
5331 are actually usable.
5332
5333 If they aren't, it defaults to complaining for every request for a fetcher, and returning
5334 transports.bundled_lib.BundledConnection.
5335 This only checks the command's bin is available- it won't catch wget w/ missing libssl issues.
5336 That's reserved for fetch at the moment"""
5337
5338 global fetcher,settings
5339 usable=True
5340 if fetcher == None:
5341 if not (settings.has_key("FETCHCOMMAND") and settings.has_key("RESUMECOMMAND")):
5342 print red("!!!")+" warning, either FETCHCOMMAND or RESUMECOMMAND aren't defined."
5343 print red("!!!")+" falling back to the bundled libs. Please rectify this."
5344 usable=False
5345 else:
5346 f=settings["FETCHCOMMAND"].split()[0]
5347 r=settings["RESUMECOMMAND"].split()[0]
5348 usable=((os.path.exists(f) and os.access(f,os.X_OK)) or portage_exec.find_binary(f))
5349 if usable and r != f:
5350 usable=((os.path.exists(f) and os.access(f,os.X_OK)) or portage_exec.find_binary(r))
5351
5352 # note this doesn't check for wget/libssl type issues. fetch manages that.
5353
5354 if usable:
5355 if selinux_enabled:
5356 selinux_context=selinux.getcontext()
5357 selinux_context=selinux_context.replace(settings["PORTAGE_T"], \
5358 settings["PORTAGE_FETCH_T"])
5359 else:
5360 selinux_context = None
5361
5362 fetcher=transports.fetchcommand.CustomConnection(settings,selinux_context=selinux_context)
5363 if usable:
5364 return fetcher
5365 return transports.bundled_lib.BundledConnection()
5366
# A profile that is a real directory (not a symlink into $PORTDIR/profiles)
# breaks most merges; warn loudly but continue, since this state is normal
# mid-sync.
if not os.path.islink(PROFILE_PATH) and os.path.exists(settings["PORTDIR"]+"/profiles"):
	writemsg(red("\a\n\n!!! "+PROFILE_PATH+" is not a symlink and will probably prevent most merges.\n"))
	writemsg(red("!!! It should point into a profile within %s/profiles/\n" % settings["PORTDIR"]))
	writemsg(red("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
	time.sleep(3)

# ============================================================================
# ============================================================================

  ViewVC Help
Powered by ViewVC 1.1.20