/[gentoo-src]/portage/pym/portage.py

Contents of /portage/pym/portage.py



Revision 1.590
Sun Aug 14 16:49:41 2005 UTC by genone
Branch: MAIN
CVS Tags: HEAD
Changes since 1.589: +4 -4 lines
File MIME type: text/x-python
fix logic error in sandbox

1
2 # portage.py -- core Portage functionality
3 # Copyright 1998-2004 Gentoo Foundation
4 # Distributed under the terms of the GNU General Public License v2
5 # $Header: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.589 2005/04/29 04:43:19 vapier Exp $
6 cvs_id_string="$Id: portage.py,v 1.589 2005/04/29 04:43:19 vapier Exp $"[5:-2]
7
8 VERSION="$Revision: 1.589 $"[11:-2] + "-cvs"
9
10 # ===========================================================================
11 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
12 # ===========================================================================
13
14
15 try:
16 import sys
17 except SystemExit, e:
18 raise
19 except:
20 print "Failed to import sys! Something is _VERY_ wrong with python."
21 raise SystemExit, 127
22
23 try:
24 import os,string,types,atexit,signal,fcntl
25 import time,cPickle,traceback,copy
26 import re,pwd,grp
27 import shlex,shutil
28 import stat
29 from time import sleep
30 from random import shuffle
31 except SystemExit, e:
32 raise
33 except Exception, e:
34 sys.stderr.write("\n\n")
35 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
36 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
37 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
38
39 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
40 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
41
42 sys.stderr.write(" "+str(e)+"\n\n");
43 sys.exit(127)
44 except:
45 sys.stderr.write("\n\n")
46 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
47 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
48 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
49
50 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
51 sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
52 sys.exit(127)
53
54
55 try:
56 #XXX: This should get renamed to bsd_chflags, I think.
57 import chflags
58 bsd_chflags = chflags
59 except SystemExit, e:
60 raise
61 except:
62 # XXX: This should get renamed to bsd_chflags, I think.
63 bsd_chflags = None
64
65 try:
66 from config import config
67 import ebuild
68 import cvstree
69 import xpak
70 import getbinpkg
71 import portage_dep
72 import eclass_cache
73 import portage_versions
74
75 #assign these to portage's namespace to keep the tool monkeys happy.
76 catpkgsplit = portage_versions.catpkgsplit
77 pkgsplit = portage_versions.pkgsplit
78 pkgcmp = portage_versions.pkgcmp
79
80 # XXX: This needs to get cleaned up.
81 import output
82 from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
83 darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
84 xtermTitle, xtermTitleReset, yellow
85
86 import portage_const
87 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
88 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
89 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
90 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
91 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
92 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
93 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, SANDBOX_PIDS_FILE, CONFIG_MEMORY_FILE,\
94 INCREMENTALS, STICKIES
95
96 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
97 portage_uid, portage_gid
98
99 import portage_util
100 from portage_util import grab_multiple, grabdict, grabdict_package, grabfile, grabfile_package, \
101 grabints, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
102 unique_array, varexpand, writedict, writeints, writemsg, getconfig, movefile, flatten, \
103 abssymlink
104 from portage_file import normpath, listdir
105 import portage_exception
106 import portage_gpg
107 import portage_locks
108 import portage_exec
109 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
110 import portage_checksum
111 from portage_checksum import perform_md5,perform_checksum,prelink_capable
112
113 import transports.bundled_lib
114 import transports.fetchcommand
115 except SystemExit, e:
116 raise
117 except Exception, e:
118 sys.stderr.write("\n\n")
119 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
120 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
121 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
122 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
123 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
124 sys.stderr.write("!!! a recovery of portage.\n")
125
126 sys.stderr.write(" "+str(e)+"\n\n")
127 sys.exit(127)
128
129
130 # ===========================================================================
131 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
132 # ===========================================================================
133
134
135 def exithandler(signum,frame):
136 """Handles ^C interrupts in a sane manner"""
137 signal.signal(signal.SIGINT, signal.SIG_IGN)
138 signal.signal(signal.SIGTERM, signal.SIG_IGN)
139
140 # 0=send to *everybody* in process group
141 print "caught %i in %i" % (signum, os.getpid())
142 portageexit()
143 print "Exiting due to signal"
144 os.kill(0,signum)
145 sys.exit(1)
146
147 signal.signal(signal.SIGCHLD, signal.SIG_DFL)
148 signal.signal(signal.SIGINT, exithandler)
149 signal.signal(signal.SIGTERM, exithandler)
150
151 def getcwd():
152 "this fixes situations where the current directory doesn't exist"
153 try:
154 return os.getcwd()
155 except SystemExit, e:
156 raise
157 except:
158 os.chdir("/")
159 return "/"
160 getcwd()
161
162 def suffix_array(array,suffix,doblanks=1):
163 """Appends a given suffix to each element in an Array/List/Tuple.
164 Returns a List."""
165 if type(array) not in (list,tuple):
166 raise TypeError, "List or Tuple expected. Got %s" % type(array)
167 newarray=[]
168 for x in array:
169 if x or doblanks:
170 newarray.append(x + suffix)
171 else:
172 newarray.append(x)
173 return newarray
174
175 def prefix_array(array,prefix,doblanks=1):
176 """Prepends a given prefix to each element in an Array/List/Tuple.
177 Returns a List."""
178 if type(array) not in (list,tuple):
179 raise TypeError, "List or Tuple expected. Got %s" % type(array)
180 newarray=[]
181 for x in array:
182 if x or doblanks:
183 newarray.append(prefix + x)
184 else:
185 newarray.append(x)
186 return newarray
187
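# Illustrative sketch (doctest-style, assuming the behaviour documented above):
#   >>> suffix_array(["usr", "var", ""], "/lib")
#   ['usr/lib', 'var/lib', '/lib']
#   >>> prefix_array(["usr", "var", ""], "/", doblanks=0)
#   ['/usr', '/var', '']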
188 starttime=long(time.time())
189 features=[]
190
191 def tokenize(mystring):
192 """breaks a string like 'foo? (bar) oni? (blah (blah))'
193 into embedded lists; returns None on paren mismatch"""
194
195 # This function is obsoleted.
196 # Use dep_parenreduce
197
198 newtokens=[]
199 curlist=newtokens
200 prevlists=[]
201 level=0
202 accum=""
203 for x in mystring:
204 if x=="(":
205 if accum:
206 curlist.append(accum)
207 accum=""
208 prevlists.append(curlist)
209 curlist=[]
210 level=level+1
211 elif x==")":
212 if accum:
213 curlist.append(accum)
214 accum=""
215 if level==0:
216 writemsg("!!! tokenizer: Unmatched left parenthesis in:\n'"+str(mystring)+"'\n")
217 return None
218 newlist=curlist
219 curlist=prevlists.pop()
220 curlist.append(newlist)
221 level=level-1
222 elif x in string.whitespace:
223 if accum:
224 curlist.append(accum)
225 accum=""
226 else:
227 accum=accum+x
228 if accum:
229 curlist.append(accum)
230 if (level!=0):
231 writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
232 return None
233 return newtokens
234
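# Illustrative sketch of the docstring example above (doctest-style):
#   >>> tokenize("foo? (bar) oni? (blah (blah))")
#   ['foo?', ['bar'], 'oni?', ['blah', ['blah']]]
# A mismatched parenthesis makes tokenize() return None instead.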
235
236 def elog_process(cpv, mysettings):
237 mylogfiles = listdir(mysettings["T"]+"/logging/")
238 # shortcut for packages without any messages
239 if len(mylogfiles) == 0:
240 return
241 # exploit listdir() file order so we process log entries in chronological order
242 mylogfiles.reverse()
243 mylogentries = {}
244 for f in mylogfiles:
245 msgfunction, msgtype = f.split(".")
246 if not msgtype.upper() in mysettings["PORTAGE_LOG_CLASSES"].split() \
247 and not msgtype.lower() in mysettings["PORTAGE_LOG_CLASSES"].split():
248 continue
249 if msgfunction not in portage_const.EBUILD_PHASES.split():
250 print "!!! can't process invalid log file: %s" % f
251 continue
252 if not msgfunction in mylogentries:
253 mylogentries[msgfunction] = []
254 msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
255 mylogentries[msgfunction].append((msgtype, msgcontent))
256
257 # in case the filters matched all messages
258 if len(mylogentries) == 0:
259 return
260
261 # generate a single string with all log messages
262 fulllog = ""
263 for phase in portage_const.EBUILD_PHASES.split():
264 if not phase in mylogentries:
265 continue
266 for msgtype,msgcontent in mylogentries[phase]:
267 fulllog += "%s: %s\n" % (msgtype, phase)
268 for line in msgcontent:
269 fulllog += line
270 fulllog += "\n"
271
272 # pass the processing to the individual modules
273 logsystems = mysettings["PORTAGE_LOG_SYSTEM"].split()
274 for s in logsystems:
275 try:
276 # FIXME: ugly ad-hoc import code
277 # TODO: implement a common portage module loader
278 logmodule = __import__("elog_modules.mod_"+s)
279 m = getattr(logmodule, "mod_"+s)
280 m.process(mysettings, cpv, mylogentries, fulllog)
281 except (ImportError, AttributeError), e:
282 print "!!! Error while importing logging modules:"
283 print e
284 except portage_exception.PortageException, e:
285 print e
286
287 #parse /etc/env.d and generate /etc/profile.env
288
289 #move this to config.
290 def env_update(root,makelinks=1):
291 if not os.path.exists(root+"etc/env.d"):
292 prevmask=os.umask(0)
293 os.makedirs(root+"etc/env.d",0755)
294 os.umask(prevmask)
295 fns=listdir(root+"etc/env.d")
296 fns.sort()
297 pos=0
298 while (pos<len(fns)):
299 if len(fns[pos])<=2:
300 del fns[pos]
301 continue
302 if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
303 del fns[pos]
304 continue
305 pos=pos+1
306
307 specials={
308 "KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
309 "INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
310 "CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
311 "PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
312 }
313 colon_separated = [
314 "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
315 "LDPATH", "MANPATH",
316 "PATH", "PRELINK_PATH",
317 "PRELINK_PATH_MASK", "PYTHONPATH"
318 ]
319
320 env={}
321
322 for x in fns:
323 # don't process backup files
324 if x[-1]=='~' or x[-4:]==".bak":
325 continue
326 myconfig=getconfig(root+"etc/env.d/"+x)
327 if myconfig==None:
328 writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
329 #parse error
330 continue
331 # process PATH, CLASSPATH, LDPATH
332 for myspec in specials.keys():
333 if myconfig.has_key(myspec):
334 if myspec in colon_separated:
335 specials[myspec].extend(string.split(varexpand(myconfig[myspec]),":"))
336 else:
337 specials[myspec].append(varexpand(myconfig[myspec]))
338 del myconfig[myspec]
339 # process all other variables
340 for myenv in myconfig.keys():
341 env[myenv]=varexpand(myconfig[myenv])
342
343 if os.path.exists(root+"etc/ld.so.conf"):
344 myld=open(root+"etc/ld.so.conf")
345 myldlines=myld.readlines()
346 myld.close()
347 oldld=[]
348 for x in myldlines:
349 #each line has at least one char (a newline)
350 if x[0]=="#":
351 continue
352 oldld.append(x[:-1])
353 # os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
354 # Where is the new ld.so.conf generated? (achim)
355 else:
356 oldld=None
357
358 ld_cache_update=False
359 if os.environ.has_key("PORTAGE_CALLER") and \
360 os.environ["PORTAGE_CALLER"] == "env-update":
361 ld_cache_update = True
362
363 newld=specials["LDPATH"]
364 if (oldld!=newld):
365 #ld.so.conf needs updating and ldconfig needs to be run
366 myfd=open(root+"etc/ld.so.conf","w")
367 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
368 myfd.write("# contents of /etc/env.d directory\n")
369 for x in specials["LDPATH"]:
370 myfd.write(x+"\n")
371 myfd.close()
372 ld_cache_update=True
373
374 # Update prelink.conf if we are prelink-enabled
375 if prelink_capable:
376 newprelink=open(root+"etc/prelink.conf","w")
377 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
378 newprelink.write("# contents of /etc/env.d directory\n")
379
380 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
381 newprelink.write("-l "+x+"\n");
382 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
383 if not x:
384 continue
385 if x[-1] != "/":
386 x += "/"
387 plmasked=0
388 for y in specials["PRELINK_PATH_MASK"]:
389 if y[-1]!='/':
390 y=y+"/"
391 if y==x[0:len(y)]:
392 plmasked=1
393 break
394 if not plmasked:
395 newprelink.write("-h "+x+"\n")
396 for x in specials["PRELINK_PATH_MASK"]:
397 newprelink.write("-b "+x+"\n")
398 newprelink.close()
399
400 if not mtimedb.has_key("ldpath"):
401 mtimedb["ldpath"]={}
402
403 for x in specials["LDPATH"]+['/usr/lib','/lib']:
404 try:
405 newldpathtime=os.stat(x)[stat.ST_MTIME]
406 except SystemExit, e:
407 raise
408 except:
409 newldpathtime=0
410 if mtimedb["ldpath"].has_key(x):
411 if mtimedb["ldpath"][x]==newldpathtime:
412 pass
413 else:
414 mtimedb["ldpath"][x]=newldpathtime
415 ld_cache_update=True
416 else:
417 mtimedb["ldpath"][x]=newldpathtime
418 ld_cache_update=True
419
420 if (ld_cache_update or makelinks):
421 # We can't update links if we haven't cleaned other versions first, as
422 # an older package installed ON TOP of a newer version will cause ldconfig
423 # to overwrite the symlinks we just made. -X means no links. After 'clean'
424 # we can safely create links.
425 writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
426 cwd="/"
427 try: cwd=os.getcwd()
428 except (OSError, IOError): pass
429 if makelinks:
430 portage_exec.spawn("/sbin/ldconfig -r "+root)
431 else:
432 portage_exec.spawn("/sbin/ldconfig -X -r "+root)
433 try: os.chdir(cwd)
434 except OSError: pass
435
436 del specials["LDPATH"]
437
438 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
439 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
440 cenvnotice = penvnotice
441 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
442 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
443
444 #create /etc/profile.env for bash support
445 outfile=open(root+"/etc/profile.env","w")
446 outfile.write(penvnotice)
447
448 for path, values in specials.items():
449 if not values:
450 continue
451 if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
452 sep = " "
453 else:
454 sep = ":"
455 outstring = "export %s='%s'\n" % (path,sep.join(values))
456 outfile.write(outstring)
457
458 #create /etc/profile.env
459 for x in env:
460 if type(env[x])!=str:
461 continue
462 outfile.write("export "+x+"='"+env[x]+"'\n")
463 outfile.close()
464
465 #create /etc/csh.env for (t)csh support
466 outfile=open(root+"/etc/csh.env","w")
467 outfile.write(cenvnotice)
468
469 for path, values in specials.items():
470 if not values:
471 continue
472 if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
473 sep = " "
474 else:
475 sep = ":"
476 outstring = "setenv %s '%s'\n" % (path, sep.join(values))
477 outfile.write(outstring)
478
479 #create /etc/csh.env
480 for x in env:
481 if type(env[x])!=str:
482 continue
483 outfile.write("setenv "+x+" '"+env[x]+"'\n")
484 outfile.close()
485
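# Illustrative sketch of env_update()'s output (hypothetical /etc/env.d/20foo
# containing LDPATH="/usr/lib/foo" and CONFIG_PROTECT="/etc/foo"):
# the LDPATH value is written to the generated /etc/ld.so.conf (and triggers
# ldconfig), while /etc/profile.env gains a line such as
#   export CONFIG_PROTECT='/etc/foo'
# and /etc/csh.env the matching "setenv CONFIG_PROTECT '/etc/foo'" form.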
486 def new_protect_filename(mydest, newmd5=None):
487 """Resolves a config-protect filename for merging, optionally
488 using the last filename if the md5 matches.
489 (dest,md5) ==> 'string' --- path_to_target_filename
490 (dest) ==> ('next', 'highest') --- next_target and most-recent_target
491 """
492
493 # config protection filename format:
494 # ._cfg0000_foo
495 # 0123456789012
496 prot_num=-1
497 last_pfile=""
498
499 if (len(mydest) == 0):
500 raise ValueError, "Empty path provided where a filename is required"
501 if (mydest[-1]=="/"): # XXX add better directory checking
502 raise ValueError, "Directory provided but this function requires a filename"
503 if not os.path.exists(mydest):
504 return mydest
505
506 real_filename = os.path.basename(mydest)
507 real_dirname = os.path.dirname(mydest)
508 for pfile in listdir(real_dirname):
509 if pfile[0:5] != "._cfg":
510 continue
511 if pfile[10:] != real_filename:
512 continue
513 try:
514 new_prot_num = int(pfile[5:9])
515 if new_prot_num > prot_num:
516 prot_num = new_prot_num
517 last_pfile = pfile
518 except SystemExit, e:
519 raise
520 except:
521 continue
522 prot_num = prot_num + 1
523
524 new_pfile = os.path.normpath(real_dirname+"/._cfg"+str(prot_num).zfill(4)+"_"+real_filename)
525 old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
526 if last_pfile and newmd5:
527 if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
528 return old_pfile
529 else:
530 return new_pfile
531 elif newmd5:
532 return new_pfile
533 else:
534 return (new_pfile, old_pfile)
535
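# Illustrative sketch (assuming the ._cfgXXXX_ scheme described above):
# new_protect_filename("/etc/foo") returns "/etc/foo" if that file does not
# exist yet; if /etc/foo and /etc/._cfg0000_foo are present it returns the
# tuple ("/etc/._cfg0001_foo", "/etc/._cfg0000_foo") when no md5 is given,
# or just "/etc/._cfg0000_foo" when newmd5 matches that existing candidate.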
536 #XXX: These two are now implemented in portage_util.py but are needed here
537 #XXX: until the isvalidatom() dependency is sorted out.
538
539 def grabdict_package(myfilename,juststrings=0):
540 pkgs=grabdict(myfilename, juststrings=juststrings, empty=1)
541 for x in pkgs.keys():
542 if not portage_dep.isvalidatom(x):
543 del(pkgs[x])
544 writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
545 return pkgs
546
547 def grabfile_package(myfilename,compatlevel=0):
548 pkgs=grabfile(myfilename,compatlevel)
549 for x in range(len(pkgs)-1,-1,-1):
550 pkg = pkgs[x]
551 if pkg[0] == "-":
552 pkg = pkg[1:]
553 if pkg[0] == "*":
554 pkg = pkg[1:]
555 if not portage_dep.isvalidatom(pkg):
556 writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
557 del(pkgs[x])
558 return pkgs
559
560 # returns a tuple. (version[string], error[string])
561 # They are pretty much mutually exclusive.
562 # Either version is a string and error is none, or
563 # version is None and error is a string
564 #
565 def ExtractKernelVersion(base_dir):
566 lines = []
567 pathname = os.path.join(base_dir, 'Makefile')
568 try:
569 f = open(pathname, 'r')
570 except OSError, details:
571 return (None, str(details))
572 except IOError, details:
573 return (None, str(details))
574
575 try:
576 for i in range(4):
577 lines.append(f.readline())
578 except OSError, details:
579 return (None, str(details))
580 except IOError, details:
581 return (None, str(details))
582
583 lines = [ l.strip() for l in lines ]
584
585 version = ''
586
587 #XXX: The following code relies on the ordering of vars within the Makefile
588 for line in lines:
589 # split on the '=' then remove annoying whitespace
590 items = [ i.strip() for i in line.split('=') ]
591 if items[0] == 'VERSION' or \
592 items[0] == 'PATCHLEVEL':
593 version += items[1]
594 version += "."
595 elif items[0] == 'SUBLEVEL':
596 version += items[1]
597 elif items[0] == 'EXTRAVERSION' and \
598 items[-1] != items[0]:
599 version += items[1]
600
601 # Grab a list of files named localversion* and sort them
602 localversions = os.listdir(base_dir)
603 for x in range(len(localversions)-1,-1,-1):
604 if localversions[x][:12] != "localversion":
605 del localversions[x]
606 localversions.sort()
607
608 # Append the contents of each to the version string, stripping ALL whitespace
609 for lv in localversions:
610 version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
611
612 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
613 kernelconfig = getconfig(base_dir+"/.config")
614 if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
615 version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
616
617 return (version,None)
618
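# Illustrative sketch (assuming a kernel Makefile with VERSION=2, PATCHLEVEL=6,
# SUBLEVEL=12 and EXTRAVERSION=-gentoo-r9, and no localversion* files or
# CONFIG_LOCALVERSION entries):
#   ExtractKernelVersion("/usr/src/linux")  ->  ("2.6.12-gentoo-r9", None)
# On an unreadable or missing Makefile it returns (None, error_string) instead.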
619
620 # XXX This would be to replace getstatusoutput completely.
621 # XXX Issue: cannot block execution. Deadlock condition.
622 def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
623 """spawn a subprocess with optional sandbox protection,
624 depending on whether sandbox is enabled. The "free" argument,
625 when set to 1, will disable sandboxing. This allows us to
626 spawn processes that are supposed to modify files outside of the
627 sandbox. We can't use os.system anymore because it messes up
628 signal handling. Using spawn allows our Portage signal handler
629 to work."""
630
631 if type(mysettings) == types.DictType:
632 env=mysettings
633 keywords["opt_name"]="[ %s ]" % "portage"
634 else:
635 if not isinstance(mysettings, config):
636 raise TypeError, "Invalid type for config object: %s" % mysettings.__class__
637 env=mysettings.environ()
638 keywords["opt_name"]="[%s]" % mysettings["PF"]
639
640
641 # XXX: Negative RESTRICT word
642 myrestrict = mysettings["RESTRICT"].split()
643 droppriv=(droppriv and "userpriv" in mysettings.features and
644 "nouserpriv" not in myrestrict and "userpriv" not in myrestrict)
645
646 if ("sandbox" in features) and (not free):
647 keywords["opt_name"] += " sandbox"
648 if droppriv and portage_gid and portage_uid:
649 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
650 return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
651 else:
652 keywords["opt_name"] += " bash"
653 return portage_exec.spawn_bash(mystring,env=env,**keywords)
654
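# Illustrative calls, mirroring uses of spawn() elsewhere in this file:
#   spawn(EBUILD_SH_BINARY+" nofetch", mysettings)    # sandboxed when "sandbox" is in features
#   spawn("cd "+pbasedir+"; cvs add files", mysettings, free=1)   # free=1 bypasses the sandbox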
655 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1,verbosity=0):
656 "fetch files. Will use digest file if available."
657
658 # 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
659 myrestrict = mysettings["RESTRICT"].split()
660 if "mirror" in myrestrict or "nomirror" in myrestrict:
661 if ("mirror" in mysettings.features) and ("lmirror" not in mysettings.features):
662 # lmirror should allow you to bypass mirror restrictions.
663 # XXX: This is not a good thing, and is temporary at best.
664 if verbosity:
665 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
666 return 1
667
668 global thirdpartymirrors
669
670 if not isinstance(mysettings, config):
671 raise TypeError, "Invalid type for config object: %s" % mysettings.__class__
672
673 custommirrors=grabdict(CUSTOM_MIRRORS_FILE)
674
675 mymirrors=[]
676
677 if listonly or ("distlocks" not in features):
678 use_locks = 0
679
680 # local mirrors are always added
681 if custommirrors.has_key("local"):
682 mymirrors += custommirrors["local"]
683
684 if ("nomirror" in mysettings["RESTRICT"].split()) or \
685 ("mirror" in mysettings["RESTRICT"].split()):
686 # We don't add any mirrors.
687 pass
688 else:
689 if try_mirrors:
690 for x in mysettings["GENTOO_MIRRORS"].split():
691 if x:
692 if x[-1] == '/':
693 mymirrors += [x[:-1]]
694 else:
695 mymirrors += [x]
696
697 mydigests={}
698 digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
699 if os.path.exists(digestfn):
700 mydigests = digestParseFile(digestfn)
701
702 fsmirrors = []
703 for x in range(len(mymirrors)-1,-1,-1):
704 if mymirrors[x] and mymirrors[x][0]=='/':
705 fsmirrors += [mymirrors[x]]
706 del mymirrors[x]
707
708 for myuri in myuris:
709 myfile=os.path.basename(myuri)
710 try:
711 destdir = mysettings["DISTDIR"]+"/"
712 if not os.path.exists(destdir+myfile):
713 for mydir in fsmirrors:
714 if os.path.exists(mydir+"/"+myfile):
715 writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
716 shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
717 break
718 except (OSError,IOError),e:
719 # file does not exist
720 writemsg(_("!!! %(file)s not found in %(dir)s." % {"file":myfile,"dir":mysettings["DISTDIR"]}),verbosity)
721 gotit=0
722
723 if "fetch" in mysettings["RESTRICT"].split():
724 # fetch is restricted. Ensure all files have already been downloaded; otherwise,
725 # print message and exit.
726 gotit=1
727 for myuri in myuris:
728 myfile=os.path.basename(myuri)
729 try:
730 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
731 except (OSError,IOError),e:
732 # file does not exist
733 # FIXME: gettext doesn't work yet
734 # writemsg(_("!!! %(file)s not found in %(dir)s." % {"file":myfile, "dir":mysettings["DISTDIR"]}),verbosity)
735 writemsg("!!! %(file)s not found in %(dir)s." % {"file":myfile, "dir":mysettings["DISTDIR"]},verbosity)
736 gotit=0
737 if not gotit:
738 writemsg("\n!!! "+mysettings["CATEGORY"]+"/"+mysettings["PF"]+" has fetch restriction turned on.\n"+
739 "!!! This probably means that this ebuild's files must be downloaded\n"+
740 "!!! manually. See the comments in the ebuild for more information.\n\n",
741 verbosity)
742 spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
743 return 0
744 return 1
745 locations=mymirrors[:]
746 filedict={}
747 primaryuri_indexes={}
748 for myuri in myuris:
749 myfile=os.path.basename(myuri)
750 if not filedict.has_key(myfile):
751 filedict[myfile]=[]
752 for y in range(0,len(locations)):
753 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
754 if myuri[:9]=="mirror://":
755 eidx = myuri.find("/", 9)
756 if eidx != -1:
757 mirrorname = myuri[9:eidx]
758
759 # Try user-defined mirrors first
760 if custommirrors.has_key(mirrorname):
761 for cmirr in custommirrors[mirrorname]:
762 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
763 # remove the mirrors we tried from the list of official mirrors
764 if cmirr.strip() in thirdpartymirrors[mirrorname]:
765 thirdpartymirrors[mirrorname].remove(cmirr)
766 # now try the official mirrors
767 if thirdpartymirrors.has_key(mirrorname):
768 try:
769 shuffle(thirdpartymirrors[mirrorname])
770 except SystemExit, e:
771 raise
772 except:
773 writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"),verbosity)
774 writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n",verbosity)
775 writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n",verbosity)
776 writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuild python with either\n",verbosity)
777 writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n",verbosity)
778 time.sleep(10)
779
780 for locmirr in thirdpartymirrors[mirrorname]:
781 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
782
783
784 if not filedict[myfile]:
785 writemsg("No known mirror by the name: %s\n" % (mirrorname),verbosity)
786 else:
787 writemsg("Invalid mirror definition in SRC_URI:\n",verbosity)
788 writemsg(" %s\n" % (myuri),verbosity)
789 else:
790 if "primaryuri" in mysettings["RESTRICT"].split():
791 # Use the source site first.
792 if primaryuri_indexes.has_key(myfile):
793 primaryuri_indexes[myfile] += 1
794 else:
795 primaryuri_indexes[myfile] = 0
796 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
797 else:
798 filedict[myfile].append(myuri)
799
800 missingSourceHost = False
801 for myfile in filedict.keys(): # Gives a list, not just the first one
802 if not filedict[myfile]:
803 writemsg("Warning: No mirrors available for file '%s'\n" % (myfile),verbosity)
804 missingSourceHost = True
805 if missingSourceHost:
806 return 0
807 del missingSourceHost
808
809 can_fetch=True
810 if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
811 writemsg("!!! No write access to %s" % mysettings["DISTDIR"]+"/\n",verbosity)
812 can_fetch=False
813 else:
814 mystat=os.stat(mysettings["DISTDIR"]+"/")
815 if mystat.st_gid != portage_gid:
816 try:
817 os.chown(mysettings["DISTDIR"],-1,portage_gid)
818 except OSError, oe:
819 if oe.errno == 1:
820 writemsg(red("!!!")+" Unable to chgrp of %s to portage, continuing\n" %
821 mysettings["DISTDIR"],verbosity)
822 else:
823 raise oe
824
825 # writable by portage_gid? This is specific to root, adjust perms if needed automatically.
826 if not stat.S_IMODE(mystat.st_mode) & 020:
827 try:
828 os.chmod(mysettings["DISTDIR"],stat.S_IMODE(mystat.st_mode) | 020)
829 except OSError, oe:
830 if oe.errno == 1:
831 writemsg(red("!!!")+" Unable to chmod %s to perms 0755. Non-root users will experience issues.\n" % mysettings["DISTDIR"],verbosity)
832 else:
833 raise oe
834
835 if use_locks and locks_in_subdir:
836 if os.path.exists(mysettings["DISTDIR"]+"/"+locks_in_subdir):
837 if not os.access(mysettings["DISTDIR"]+"/"+locks_in_subdir,os.W_OK):
838 writemsg("!!! No write access to %s. Aborting.\n" % (mysettings["DISTDIR"]+"/"+locks_in_subdir),verbosity)
839 return 0
840 else:
841 old_umask=os.umask(0002)
842 os.mkdir(mysettings["DISTDIR"]+"/"+locks_in_subdir,0775)
843 if os.stat(mysettings["DISTDIR"]+"/"+locks_in_subdir).st_gid != portage_gid:
844 try:
845 os.chown(mysettings["DISTDIR"]+"/"+locks_in_subdir,-1,portage_gid)
846 except SystemExit, e:
847 raise
848 except:
849 pass
850 os.umask(old_umask)
851
852
853 fetcher = get_preferred_fetcher()
854 for myfile in filedict.keys():
855 fetched=0
856 file_lock = None
857 if listonly:
858 writemsg("\n",verbosity)
859 else:
860 if use_locks and can_fetch:
861 if locks_in_subdir:
862 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1,verbosity=verbosity)
863 else:
864 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1,verbosity=verbosity)
865 try:
866 for loc in filedict[myfile]:
867 if listonly:
868 writemsg(loc+" ",verbosity)
869 continue
870
871 try:
872 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
873 if mydigests.has_key(myfile):
874 #if we have the digest file, we know the final size and can resume the download.
875 if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
876 fetched=1
877 else:
878 #we already have it downloaded, skip.
879 #if our file is bigger than the recorded size, digestcheck should catch it.
880 if not fetchonly:
881 fetched=2
882 else:
883 # Check md5sums at each fetch for fetchonly.
884 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
885 if not verified_ok:
886 writemsg("!!! Previously fetched file: "+str(myfile)+"\n!!! Reason: "+reason+"\nRefetching...\n\n",verbosity)
887 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
888 fetched=0
889 else:
890 for x_key in mydigests[myfile].keys():
891 writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n",verbosity)
892 fetched=2
893 break #No need to keep looking for this file, we have it!
894 else:
895 #we don't have the digest file, but the file exists. Assume it is fully downloaded.
896 fetched=2
897 except (OSError,IOError),e:
898 writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),verbosity+1)
899 fetched=0
900
901 if not can_fetch:
902 if fetched != 2:
903 if fetched == 0:
904 writemsg("!!! File %s isn't fetched, but we are unable to get it.\n" % myfile,verbosity)
905 else:
906 writemsg("!!! File %s isn't fully fetched, but we are unable to complete it.\n" % myfile,verbosity)
907 return 0
908 else:
909 continue
910
911 # check if we can actually write to the directory/existing file.
912 if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
913 os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK):
914 writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile),verbosity)
915 fetched=0
916 break
917 elif fetched!=2:
918 #we either need to resume or start the download
919 #you can't use "continue" when you're inside a "try" block
920 if fetched==1:
921 #resume mode:
922 writemsg(">>> Resuming download...\n",verbosity)
923 locfetch=fetcher.resume
924 else:
925 #normal mode:
926 locfetch=fetcher.fetch
927 writemsg(">>> Downloading "+str(loc)+"\n",verbosity)
928 try:
929 myret=locfetch(loc,file_name=mysettings["DISTDIR"]+"/"+myfile, \
930 verbose=(verbosity==0))
931 if myret==127 and \
932 isinstance(fetcher,transports.fetchcommand.CustomConnection):
933 # this is an indication of missing libs for the binary.
934 # fex: USE="ssl" wget, missing libssl.
935 #
936 # let's try to be helpful. ;-)
937 f=transports.bundled_lib.BundledConnection()
938 if fetched==1:
939 myret=f.resume(loc, \
940 file_name=mysettings["DISTDIR"]+"/"+myfile,
941 verbose=(verbosity==0))
942 else:
943 myret=f.fetch(loc, \
944 file_name=mysettings["DISTDIR"]+"/"+myfile,
945 verbose=(verbosity==0))
946 if not myret:
947 writemsg(red("!!!")+"\n")
948 writemsg(red("!!!")+" FETCHCOMMAND/RESUMECOMMAND exited with code 127\n")
949 writemsg(red("!!!")+" This is indicative of missing libs for the fetch/resume binaries\n")
950 writemsg(red("!!!")+" In addition, the independent BundledConnection succeeded\n")
951 writemsg(red("!!!")+" Please check your installation.\n")
952 writemsg(red("!!!")+" Defaulting to BundledConnection for the remainder of this fetch request\n")
953 writemsg(red("!!!")+"\n")
954 fetcher = f
955 finally:
956 #if root, -always- set the perms.
957 if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0):
958 if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
959 try:
960 os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
961 except SystemExit, e:
962 raise
963 except:
964 writemsg("chown failed on distfile: " + str(myfile),verbosity)
965 os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
966
967 if mydigests!=None and mydigests.has_key(myfile):
968 try:
969 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
970 # no exception? file exists. let digestcheck() report
971 # appropriately for size or md5 errors
972 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
973 # Fetch failed... Try the next one... Kill 404 files though.
974 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
975 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
976 try:
977 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
978 try:
979 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
980 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n",verbosity)
981 except SystemExit, e:
982 raise
983 except:
984 pass
985 except SystemExit, e:
986 raise
987 except:
988 pass
989 continue
990 if not fetchonly:
991 fetched=2
992 break
993 else:
994 # File is the correct size--check the MD5 sum for the fetched
995 # file NOW, for those users who don't have a stable/continuous
996 # net connection. This way we have a chance to try to download
997 # from another mirror...
998 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
999 if not verified_ok:
1000 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n!!! Reason: "+reason+"\nRemoving corrupt distfile...\n",verbosity)
1001 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1002 fetched=0
1003 else:
1004 for x_key in mydigests[myfile].keys():
1005 writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n",verbosity)
1006 fetched=2
1007 break
1008 except (OSError,IOError),e:
1009 writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),verbosity+1)
1010 fetched=0
1011 else:
1012 if not myret:
1013 fetched=2
1014 break
1015 elif mydigests!=None:
1016 writemsg("No digest file available and download failed.\n\n")
1017 finally:
1018 if use_locks and file_lock:
1019 portage_locks.unlockfile(file_lock)
1020
1021 if listonly:
1022 writemsg("\n")
1023 if (fetched!=2) and not listonly:
1024 writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n",verbosity)
1025 return 0
1026 return 1
1027
1028
1029 def digestCreate(myfiles,basedir,oldDigest={}):
1030 """Takes a list of files and the directory they are in and returns the
1031 dict of dict[filename][CHECKSUM_KEY] = hash
1032 returns None on error."""
1033 mydigests={}
1034 for x in myfiles:
1035 print "<<<",x
1036 myfile=os.path.normpath(basedir+"///"+x)
1037 if os.path.exists(myfile):
1038 if not os.access(myfile, os.R_OK):
1039 print "!!! Given file does not appear to be readable. Does it exist?"
1040 print "!!! File:",myfile
1041 return None
1042 mydigests[x] = portage_checksum.perform_all(myfile)
1043 mysize = os.stat(myfile)[stat.ST_SIZE]
1044 else:
1045 if x in oldDigest:
1046 # DeepCopy because we might not have a unique reference.
1047 mydigests[x] = copy.deepcopy(oldDigest[x])
1048 mysize = oldDigest[x]["size"]
1049 else:
1050 print "!!! We have a source URI, but no file..."
1051 print "!!! File:",myfile
1052 return None
1053
1054 if "size" in mydigests[x] and (mydigests[x]["size"] != mysize):
1055 raise portage_exception.DigestException, "Size mismatch during checksums"
1056 mydigests[x]["size"] = mysize
1057
1058
1059 return mydigests
1060
1061 def digestCreateLines(filelist, mydigests):
1062 mylines = []
1063 for myarchive in filelist:
1064 mysize = mydigests[myarchive]["size"]
1065 if len(mydigests[myarchive]) == 0:
1066 raise portage_exception.DigestException, "No generated digest for '%(file)s'" % {"file":myarchive}
1067 for sumName in mydigests[myarchive].keys():
1068 if sumName not in portage_checksum.get_valid_checksum_keys():
1069 continue
1070 mysum = mydigests[myarchive][sumName]
1071
1072 myline = " ".join([sumName, mysum, myarchive, str(mysize)])
1073 if sumName != "MD5":
1074 # XXXXXXXXXXXXXXXX This cannot be used!
1075 # Older portage makes very dumb assumptions about the formats.
1076 # We need a lead-in period before we break everything.
1077 continue
1078 mylines.append(myline)
1079 return mylines
1080
1081 def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0,verbosity=0):
1082 """generates digest file if missing. Assumes all files are available. If
1083 overwrite=0, the digest will only be created if it doesn't already exist."""
1084
1085 # archive files
1086 basedir=mysettings["DISTDIR"]+"/"
1087 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
1088
1089 # portage files -- p(ortagefiles)basedir
1090 pbasedir=mysettings["O"]+"/"
1091 manifestfn=pbasedir+"Manifest"
1092
1093 if not manifestonly:
1094 if not os.path.isdir(mysettings["FILESDIR"]):
1095 os.makedirs(mysettings["FILESDIR"])
1096 mycvstree=cvstree.getentries(pbasedir, recursive=1)
1097
1098 if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
1099 if not cvstree.isadded(mycvstree,"files"):
1100 if "autoaddcvs" in features:
1101 writemsg(">>> Auto-adding files/ dir to CVS...\n",verbosity - 1)
1102 spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
1103 else:
1104 writemsg("--- Warning: files/ is not added to cvs.\n",verbosity)
1105
1106 if (not overwrite) and os.path.exists(digestfn):
1107 return 1
1108
1109 print green(">>> Generating digest file...")
1110
1111 # Track the old digest so that we can assume checksums without requiring
1112 # all files to be downloaded.
1113 # XXX: <harring>- why does this seem like a way to pollute the hell out of the
1114 # digests? This strikes me as lining the path between your bed and coffee machine
1115 # with land mines...
1116 myolddigest = {}
1117 if os.path.exists(digestfn):
1118 myolddigest = digestParseFile(digestfn)
1119
1120 mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
1121 if mydigests==None: # There was a problem, exit with an errorcode.
1122 return 0
1123
1124 try:
1125 outfile=open(digestfn, "w+")
1126 except SystemExit, e:
1127 raise
1128 except Exception, e:
1129 print "!!! Filesystem error, skipping generation. (Read-Only?)"
1130 print "!!!",e
1131 return 0
1132 for x in digestCreateLines(myarchives, mydigests):
1133 outfile.write(x+"\n")
1134 outfile.close()
1135 try:
1136 os.chown(digestfn,os.getuid(),portage_gid)
1137 os.chmod(digestfn,0664)
1138 except SystemExit, e:
1139 raise
1140 except Exception,e:
1141 print e
1142
1143 print green(">>> Generating manifest file...")
1144 mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1)
1145 mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
1146 if "Manifest" in mypfiles:
1147 del mypfiles[mypfiles.index("Manifest")]
1148
1149 mydigests=digestCreate(mypfiles, pbasedir)
1150 if mydigests==None: # There was a problem, exit with an errorcode.
1151 return 0
1152
1153 try:
1154 outfile=open(manifestfn, "w+")
1155 except SystemExit, e:
1156 raise
1157 except Exception, e:
1158 print "!!! Filesystem error, skipping generation. (Read-Only?)"
1159 print "!!!",e
1160 return 0
1161 for x in digestCreateLines(mypfiles, mydigests):
1162 outfile.write(x+"\n")
1163 outfile.close()
1164 try:
1165 os.chown(manifestfn,os.getuid(),portage_gid)
1166 os.chmod(manifestfn,0664)
1167 except SystemExit, e:
1168 raise
1169 except Exception,e:
1170 print e
1171
1172 if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
1173 mycvstree=cvstree.getentries(pbasedir, recursive=1)
1174 myunaddedfiles=""
1175 if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
1176 if digestfn[:len(pbasedir)]==pbasedir:
1177 myunaddedfiles=digestfn[len(pbasedir):]+" "
1178 else:
1179 myunaddedfiles=digestfn+" "
1180 if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
1181 if manifestfn[:len(pbasedir)]==pbasedir:
1182 myunaddedfiles+=manifestfn[len(pbasedir):]+" "
1183 else:
1184 myunaddedfiles+=manifestfn
1185 if myunaddedfiles:
1186 if "autoaddcvs" in features:
1187 print blue(">>> Auto-adding digest file(s) to CVS...")
1188 spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
1189 else:
1190 print "--- Warning: digests are not yet added into CVS."
1191 print darkgreen(">>> Computed message digests.")
1192 print
1193 return 1
1194
1195
1196 def digestParseFile(myfilename):
1197 """(filename) -- Parses a given file for entries matching:
1198 MD5 MD5_STRING_OF_HEX_CHARS FILE_NAME FILE_SIZE
1199 Ignores lines that do not begin with a valid checksum key and returns a
1200 dict of dicts: {filename: {CHECKSUM_KEY: hash, "size": size}}."""
1201
1202 if not os.path.exists(myfilename):
1203 return None
1204 mylines = portage_util.grabfile(myfilename, compat_level=1)
1205
1206 mydigests={}
1207 for x in mylines:
1208 myline=x.split()
1209 if len(myline) < 4:
1210 #invalid line
1211 continue
1212 if myline[0] not in portage_checksum.get_valid_checksum_keys():
1213 continue
1214 mykey = myline.pop(0)
1215 myhash = myline.pop(0)
1216 mysize = long(myline.pop())
1217 myfn = " ".join(myline)
1218 if myfn not in mydigests:
1219 mydigests[myfn] = {}
1220 mydigests[myfn][mykey] = myhash
1221 if "size" in mydigests[myfn]:
1222 if mydigests[myfn]["size"] != mysize:
1223 raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
1224 else:
1225 mydigests[myfn]["size"] = mysize
1226 return mydigests
1227
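# Illustrative sketch of the returned structure (hypothetical digest file with
# the single line "MD5 5d41402abc4b2a76b9719d911017c592 foo-1.0.tar.gz 12345"):
#   digestParseFile("digest-foo-1.0")
#   ->  {"foo-1.0.tar.gz": {"MD5": "5d41402abc4b2a76b9719d911017c592", "size": 12345L}}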
1228 # XXXX strict was added here to fix a missing name error.
1229 # XXXX It's used below, but we're not paying attention to how we get it?
1230 def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0,verbosity=0):
1231 """(fileslist, digestdict, basedir) -- Takes a list of files and a dict
1232 of their digests and checks the digests against the indicated files in
1233 the basedir given. Returns 1 only if all files exist and match the md5s.
1234 """
1235 for x in myfiles:
1236 if not mydigests.has_key(x):
1237 writemsg("\n",verbosity)
1238 writemsg(red("!!! No message digest entry found for file \""+x+".\"")+"\n"+
1239 "!!! Most likely a temporary problem. Try 'emerge sync' again later.\n"+
1240 "!!! If you are certain of the authenticity of the file then you may type\n"+
1241 "!!! the following to generate a new digest:\n"+
1242 "!!! ebuild /usr/portage/category/package/package-version.ebuild digest\n",
1243 verbosity)
1244 return 0
1245 myfile=os.path.normpath(basedir+"/"+x)
1246 if not os.path.exists(myfile):
1247 if strict:
1248 writemsg("!!! File does not exist:"+str(myfile)+"\n",verbosity)
1249 return 0
1250 continue
1251
1252 ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
1253 if not ok:
1254 writemsg("\n"+red("!!! Digest verification Failed:")+"\n"+
1255 red("!!!")+" "+str(myfile)+"\n"+
1256 red("!!! Reason: ")+reason+"\n",
1257 verbosity)
1258 return 0
1259 else:
1260 writemsg(">>> md5 "+note+" ;-) %s\n" % str(x),verbosity)
1261 return 1
1262
1263
1264 def digestcheck(myfiles, mysettings, strict=0,verbosity=0):
1265 """Checks md5sums. Assumes all files have been downloaded."""
1266 # archive files
1267 basedir=mysettings["DISTDIR"]+"/"
1268 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
1269
1270 # portage files -- p(ortagefiles)basedir
1271 pbasedir=mysettings["O"]+"/"
1272 manifestfn=pbasedir+"Manifest"
1273
1274 if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
1275 if "digest" in features:
1276 writemsg(">>> No package digest/Manifest file found.\n",verbosity)
1277 writemsg(">>> \"digest\" mode enabled; auto-generating new digest...\n",verbosity)
1278 return digestgen(myfiles,mysettings,verbosity=verbosity)
1279 else:
1280 if not os.path.exists(manifestfn):
1281 if strict:
1282 writemsg(red("!!! No package manifest found:")+" %s\n" % manifestfn,verbosity)
1283 return 0
1284 else:
1285 writemsg("--- No package manifest found: %s\n" % manifestfn,verbosity)
1286 if not os.path.exists(digestfn):
1287 writemsg("!!! No package digest file found: %s\n" % digestfn,verbosity)
1288 writemsg("!!! Type \"ebuild foo.ebuild digest\" to generate it.\n", verbosity)
1289 return 0
1290
1291 mydigests=digestParseFile(digestfn)
1292 if mydigests==None:
1293 writemsg("!!! Failed to parse digest file: %s\n" % digestfn, verbosity)
1294 return 0
1295 mymdigests=digestParseFile(manifestfn)
1296 if "strict" not in features:
1297 # XXX: Remove this when manifests become mainstream.
1298 pass
1299 elif mymdigests==None:
1300 writemsg("!!! Failed to parse manifest file: %s\n" % manifestfn,verbosity)
1301 if strict:
1302 return 0
1303 else:
1304 # Check the portage-related files here.
1305 mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1)
1306 manifest_files = mymdigests.keys()
1307 for x in range(len(mymfiles)-1,-1,-1):
1308 if mymfiles[x]=='Manifest': # We don't want the manifest in our list.
1309 del mymfiles[x]
1310 continue
1311 if mymfiles[x] in manifest_files:
1312 manifest_files.remove(mymfiles[x])
1313 elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
1314 # we filter here, rather than above; manifest might have files flagged by the filter.
1315 # if something is returned, then it's flagged as a bad file
1316 # manifest doesn't know about it, so we kill it here.
1317 del mymfiles[x]
1318 else:
1319 writemsg(red("!!! Security Violation: A file exists that is not in the manifest.")+"\n",verbosity)
1320 writemsg("!!! File: %s\n" % mymfiles[x],verbosity)
1321 if strict:
1322 return 0
1323
1324 if manifest_files and strict:
1325 for x in grabfile(USER_CONFIG_PATH+"/manifest_excludes"):
1326 if x in manifest_files:
1327 #writemsg(yellow(">>>")+" md5-ignore: "+x,verbosity)
1328 manifest_files.remove(x)
1329
1330 if manifest_files:
1331 writemsg(red("!!! Files listed in the manifest do not exist!")+"\n",verbosity)
1332 for x in manifest_files:
1333 writemsg(x+"\n",verbosity)
1334 return 0
1335
1336 if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict, verbosity=verbosity):
1337 if strict:
1338 writemsg(">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and\n"+
1339 ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")+"\n\n",
1340 verbosity)
1341 return 0
1342 else:
1343 writemsg("--- Manifest check failed. 'strict' not enabled; ignoring.\n\n",verbosity)
1344
1345 # Just return the status, as it's the last check.
1346 return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict,verbosity=verbosity)
1347
1348 # note, use_info_env is a hack to allow treewalk to specify the correct env. it sucks, but so does this doebuild
1349 # setup
1350 def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,\
1351 fetchall=0,tree="porttree",allstages=True,use_info_env=True,verbosity=0):
1352
1353 retval = ebuild.ebuild_handler().process_phase(mydo,mysettings,myebuild,myroot, debug=debug, listonly=listonly, \
1354 fetchonly=fetchonly, cleanup=cleanup, use_cache=use_cache, fetchall=fetchall, tree=tree, allstages=allstages, \
1355 use_info_env=use_info_env,verbosity=verbosity)
1356
1357 #def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree="porttree",allstages=True,use_info_env=True):
1358 # retval=ebuild.ebuild_handler().process_phase(mydo, mysettings,myebuild,myroot,debug=debug,listonly=listonly,fetchonly=fetchonly,cleanup=cleanup,dbkey=None,use_cache=1,fetchall=0,tree="porttree",allstages=allstages,use_info_env=use_info_env)
1359 return retval
1360
1361
1362 expandcache={}
1363
1364 def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None):
1365 mylink=dblink(mycat,mypkg,myroot,mysettings)
1366 return mylink.merge(pkgloc,infloc,myroot,myebuild)
1367
1368 def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
1369 mylink=dblink(cat,pkg,myroot,mysettings)
1370 if mylink.exists():
1371 mylink.unmerge(trimworld=mytrimworld,cleanup=1)
1372 mylink.delete()
1373
1374 def getCPFromCPV(mycpv):
1375 """Calls portage_versions.pkgsplit on a cpv and returns only the cp."""
1376 return portage_versions.pkgsplit(mycpv)[0]
1377
1378
1379 def dep_parenreduce(mysplit,mypos=0):
1380 """
1381 Accepts a list of strings, and converts
1382 '(' and ')' surrounded items to sub-lists
1383 """
1384 while (mypos<len(mysplit)):
1385 if (mysplit[mypos]=="("):
1386 firstpos=mypos
1387 mypos=mypos+1
1388 while (mypos<len(mysplit)):
1389 if mysplit[mypos]==")":
1390 mysplit[firstpos:mypos+1]=[mysplit[firstpos+1:mypos]]
1391 mypos=firstpos
1392 break
1393 elif mysplit[mypos]=="(":
1394 #recurse
1395 mysplit=dep_parenreduce(mysplit,mypos=mypos)
1396 mypos=mypos+1
1397 mypos=mypos+1
1398 return mysplit
1399
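# Illustrative sketch (doctest-style, assuming the behaviour documented above;
# the atoms are hypothetical):
#   >>> dep_parenreduce(["gnome?", "(", "x11-libs/gtk+", "media-libs/foo", ")", "sys-libs/glibc"])
#   ['gnome?', ['x11-libs/gtk+', 'media-libs/foo'], 'sys-libs/glibc']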
1400 def dep_opconvert(mysplit,myuse,mysettings):
1401 """
1402 Does dependency operator conversion
1403 """
1404
1405
1406 mypos=0
1407 newsplit=[]
1408 while mypos<len(mysplit):
1409 if type(mysplit[mypos])==types.ListType:
1410 newsplit.append(dep_opconvert(mysplit[mypos],myuse,mysettings))
1411 mypos += 1
1412 elif mysplit[mypos]==")":
1413 #mismatched paren, error
1414 return None
1415 elif mysplit[mypos]=="||":
1416 if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
1417 # || must be followed by paren'd list
1418 return None
1419 try:
1420 mynew=dep_opconvert(mysplit[mypos+1],myuse,mysettings)
1421 except SystemExit, e:
1422 raise
1423 except Exception, e:
1424 print "!!! Unable to satisfy OR dependency:", " || ".join(mysplit)
1425 raise
1426 mynew[0:0]=["||"]
1427 newsplit.append(mynew)
1428 mypos += 2
1429 elif mysplit[mypos][-1]=="?":
1430 #USE conditional clause, e.g. "gnome? ( foo bar )"
1431 #this is a quick and dirty hack so that repoman can enable all USE vars:
1432 if (len(myuse)==1) and (myuse[0]=="*") and mysettings:
1433 # enable it even if it's ! (for repoman) but kill it if it's
1434 # an arch variable that isn't for this arch. XXX Sparc64?
1435 k=mysplit[mypos][:-1]
1436 if k[0]=="!":
1437 k=k[1:]
1438 if k not in archlist and k not in mysettings.usemask:
1439 enabled=1
1440 elif k in archlist:
1441 if k==mysettings["ARCH"]:
1442 if mysplit[mypos][0]=="!":
1443 enabled=0
1444 else:
1445 enabled=1
1446 elif mysplit[mypos][0]=="!":
1447 enabled=1
1448 else:
1449 enabled=0
1450 else:
1451 enabled=0
1452 else:
1453 if mysplit[mypos][0]=="!":
1454 myusevar=mysplit[mypos][1:-1]
1455 if myusevar in myuse:
1456 enabled=0
1457 else:
1458 enabled=1
1459 else:
1460 myusevar=mysplit[mypos][:-1]
1461 if myusevar in myuse:
1462 enabled=1
1463 else:
1464 enabled=0
1465 if (mypos+2<len(mysplit)) and (mysplit[mypos+2]==":"):
1466 #colon mode
1467 if enabled:
1468 #choose the first option
1469 if type(mysplit[mypos+1])==types.ListType:
1470 newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
1471 else:
1472 newsplit.append(mysplit[mypos+1])
1473 else:
1474 #choose the alternate option
1475 if type(mysplit[mypos+1])==types.ListType:
1476 newsplit.append(dep_opconvert(mysplit[mypos+3],myuse,mysettings))
1477 else:
1478 newsplit.append(mysplit[mypos+3])
1479 mypos += 4
1480 else:
1481 #normal use mode
1482 if enabled:
1483 if type(mysplit[mypos+1])==types.ListType:
1484 newsplit.append(dep_opconvert(mysplit[mypos+1],myuse,mysettings))
1485 else:
1486 newsplit.append(mysplit[mypos+1])
1487 #otherwise, continue.
1488 mypos += 2
1489 else:
1490 #normal item
1491 newsplit.append(mysplit[mypos])
1492 mypos += 1
1493 return newsplit
1494
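# Illustrative sketch (hypothetical atoms): with myuse=["gnome"],
#   dep_opconvert(["gnome?", ["x11-libs/gtk+"], "sys-libs/glibc"], ["gnome"], mysettings)
# keeps the conditional block and yields [['x11-libs/gtk+'], 'sys-libs/glibc'],
# while with myuse=[] the "gnome?" block is dropped, yielding ['sys-libs/glibc'].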
1495 def dep_virtual(mysplit, mysettings):
1496 """
1497 Does virtual dependency conversion
1498 """
1499 newsplit=[]
1500 for x in mysplit:
1501 if type(x)==list:
1502 newsplit.append(dep_virtual(x, mysettings))
1503 else:
1504 mykey=portage_dep.dep_getkey(x)
1505 if mysettings.virtuals.has_key(mykey):
1506 if len(mysettings.virtuals[mykey])==1:
1507 a=x.replace( mykey, mysettings.virtuals[mykey][0])
1508 else:
1509 if x[0]=="!":
1510 # blocker needs "and" not "or(||)".
1511 a=[]
1512 else:
1513 a=['||']
1514 for y in mysettings.virtuals[mykey]:
1515 a.append( x.replace( mykey, y) )
1516 newsplit.append(a)
1517 else:
1518 newsplit.append(x)
1519 return newsplit
1520
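# Illustrative sketch (assuming mysettings.virtuals maps "virtual/jdk" to two
# providers, dev-java/sun-jdk and dev-java/blackdown-jdk):
#   dep_virtual(["virtual/jdk"], mysettings)
#   ->  [['||', 'dev-java/sun-jdk', 'dev-java/blackdown-jdk']]
# A single-provider virtual is simply substituted in place, and a blocker
# ("!virtual/...") expands to a plain list rather than an || group.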
1521 def dep_eval(deplist):
1522 if len(deplist)==0:
1523 return 1
1524 if deplist[0]=="||":
1525 #or list; we just need one "1"
1526 for x in deplist[1:]:
1527 if type(x)==types.ListType:
1528 if dep_eval(x)==1:
1529 return 1
1530 elif x==1:
1531 return 1
1532 return 0
1533 else:
1534 for x in deplist:
1535 if type(x)==types.ListType:
1536 if dep_eval(x)==0:
1537 return 0
1538 elif x==0 or x==2:
1539 return 0
1540 return 1
1541
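# Illustrative sketch (doctest-style): dep_eval() expects a "reduced" list in
# which each atom has already been replaced by a truth value:
#   >>> dep_eval(["||", 0, 1])        # OR-group: one satisfied entry suffices
#   1
#   >>> dep_eval([1, ["||", 0, 0]])   # AND of an atom and an unsatisfied OR-group
#   0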
1542 def dep_zapdeps(unreduced,reduced,vardbapi=None,use_binaries=0):
1543 """
1544 Takes an unreduced and reduced deplist and removes satisfied dependencies.
1545 Returned deplist contains steps that must be taken to satisfy dependencies.
1546 """
1547 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
1548 if unreduced==[] or unreduced==['||'] :
1549 return []
1550 if unreduced[0]=="||":
1551 if dep_eval(reduced):
1552 #deps satisfied, return empty list.
1553 return []
1554 else:
1555 #try to find an installed dep.
1556 ### We use fakedb when --update now, so we can't use local vardbapi here.
1557 ### This should be fixed in the future.
1558 ### see bug 45468.
1559 ##if vardbapi:
1560 ## mydbapi=vardbapi
1561 ##else:
1562 ## mydbapi=db[root]["vartree"].dbapi
1563 mydbapi=db[root]["vartree"].dbapi
1564
1565 if db["/"].has_key("porttree"):
1566 myportapi=db["/"]["porttree"].dbapi
1567 else:
1568 myportapi=None
1569
1570 if use_binaries and db["/"].has_key("bintree"):
1571 mybinapi=db["/"]["bintree"].dbapi
1572 writemsg("Using bintree...\n",2)
1573 else:
1574 mybinapi=None
1575
1576 x=1
1577 candidate=[]
1578 while x<len(reduced):
1579 writemsg("x: %s, reduced[x]: %s\n" % (x,reduced[x]), 2)
1580 if (type(reduced[x])==types.ListType):
1581 newcand = dep_zapdeps(unreduced[x], reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
1582 candidate.append(newcand)
1583 else:
1584 if (reduced[x]==False):
1585 candidate.append([unreduced[x]])
1586 else:
1587 candidate.append([])
1588 x+=1
1589
1590 #use installed and no-masked package(s) in portage.
1591 for x in candidate:
1592 match=1
1593 for pkg in x:
1594 if not mydbapi.match(pkg):
1595 match=0
1596 break
1597 if myportapi:
1598 if not myportapi.match(pkg):
1599 match=0
1600 break
1601 if match:
1602 writemsg("Installed match: %s\n" % (x), 2)
1603 return x
1604
1605 # Use binary packages if available.
1606 if mybinapi:
1607 for x in candidate:
1608 match=1
1609 for pkg in x:
1610 if not mybinapi.match(pkg):
1611 match=0
1612 break
1613 else:
1614 writemsg("Binary match: %s\n" % (pkg), 2)
1615 if match:
1616 writemsg("Binary match final: %s\n" % (x), 2)
1617 return x
1618
1619 #use no-masked package(s) in portage tree
1620 if myportapi:
1621 for x in candidate:
1622 match=1
1623 for pkg in x:
1624 if not myportapi.match(pkg):
1625 match=0
1626 break
1627 if match:
1628 writemsg("Porttree match: %s\n" % (x), 2)
1629 return x
1630
1631 #none of the no-masked pkg, use the first one
1632 writemsg("Last resort candidate: %s\n" % (candidate[0]), 2)
1633 return candidate[0]
1634 else:
1635 if dep_eval(reduced):
1636 #deps satisfied, return empty list.
1637 return []
1638 else:
1639 returnme=[]
1640 x=0
1641 while x<len(reduced):
1642 if type(reduced[x])==types.ListType:
1643 returnme+=dep_zapdeps(unreduced[x],reduced[x], vardbapi=vardbapi, use_binaries=use_binaries)
1644 else:
1645 if reduced[x]==False:
1646 returnme.append(unreduced[x])
1647 x += 1
1648 return returnme
1649
1650 def cpv_getkey(mycpv):
1651 myslash=mycpv.split("/")
1652 mysplit=portage_versions.pkgsplit(myslash[-1])
1653 mylen=len(myslash)
1654 if mylen==2:
1655 return myslash[0]+"/"+mysplit[0]
1656 elif mylen==1:
1657 return mysplit[0]
1658 else:
1659 return mysplit
1660
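# Illustrative sketch: cpv_getkey() strips the version part from a cpv
# string, with or without a category:
#
#   >>> cpv_getkey("sys-apps/portage-2.0.51-r2")
#   'sys-apps/portage'
#   >>> cpv_getkey("portage-2.0.51")
#   'portage'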
1661 def key_expand(mykey,mydb=None,use_cache=1):
1662 mysplit=mykey.split("/")
1663 if len(mysplit)==1:
1664 if mydb and type(mydb)==types.InstanceType:
1665 for x in settings.categories:
1666 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
1667 return x+"/"+mykey
1668 if virts_p.has_key(mykey):
1669 print "VIRTS_P (Report to #gentoo-portage or bugs.g.o):",mykey
1670 return(virts_p[mykey][0])
1671 return "null/"+mykey
1672 elif mydb:
1673 if type(mydb)==types.InstanceType:
1674 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
1675 return virts[mykey][0]
1676 return mykey
1677
1678 def cpv_expand(mycpv,mydb=None,use_cache=1):
1679 """
1680 Given a string (packagename or virtual) expand it into a valid
1681 cat/package string. For virtuals, mydb is used to determine which provider
1682 is a valid choice, defaulting to the first element when there are no
1683 installed/available candidates.
1684 """
1685 myslash=mycpv.split("/")
1686 mysplit=portage_versions.pkgsplit(myslash[-1])
1687 if len(myslash)>2:
1688 # this is an illegal case (more than one '/').
1689 mysplit=[]
1690 mykey=mycpv
1691 elif len(myslash)==2:
1692 if mysplit:
1693 mykey=myslash[0]+"/"+mysplit[0]
1694 else:
1695 mykey=mycpv
1696 if mydb:
1697 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
1698 if type(mydb)==types.InstanceType:
1699 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
1700 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
1701 mykey_orig = mykey
1702 for vkey in virts[mykey]:
1703 if mydb.cp_list(vkey,use_cache=use_cache):
1704 mykey = vkey
1705 writemsg("virts chosen: %s\n" % (mykey), 1)
1706 break
1707 if mykey == mykey_orig:
1708 mykey=virts[mykey][0]
1709 writemsg("virts defaulted: %s\n" % (mykey), 1)
1710 #we only perform virtual expansion if we are passed a dbapi
1711 else:
1712 #specific cpv, no category, ie. "foo-1.0"
1713 if mysplit:
1714 myp=mysplit[0]
1715 else:
1716 # "foo" ?
1717 myp=mycpv
1718 mykey=None
1719 matches=[]
1720 if mydb:
1721 for x in settings.categories:
1722 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
1723 matches.append(x+"/"+myp)
1724 if (len(matches)>1):
1725 raise ValueError, matches
1726 elif matches:
1727 mykey=matches[0]
1728
1729 if not mykey and type(mydb)!=types.ListType:
1730 if virts_p.has_key(myp):
1731 print "VIRTS_P,ce (Report to #gentoo-portage or bugs.g.o):",myp
1732 mykey=virts_p[myp][0]
1733 #again, we only perform virtual expansion if we have a dbapi (not a list)
1734 if not mykey:
1735 mykey="null/"+myp
1736 if mysplit:
1737 if mysplit[2]=="r0":
1738 return mykey+"-"+mysplit[1]
1739 else:
1740 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
1741 else:
1742 return mykey
1743
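# Illustrative sketch of cpv_expand(), assuming "portage" exists only in the
# sys-apps category of the dbapi passed in (results depend on the tree):
#
#   >>> cpv_expand("portage-2.0.51", mydb=portdb)
#   'sys-apps/portage-2.0.51'
#   >>> cpv_expand("dev-lang/python-2.3.4-r1", mydb=portdb)
#   'dev-lang/python-2.3.4-r1'
#
# When no category matches and no virtual applies, the key is prefixed with
# "null/".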
1744 def dep_transform(mydep,oldkey,newkey):
1745 origdep=mydep
1746 if not len(mydep):
1747 return mydep
1748 if mydep[0]=="*":
1749 mydep=mydep[1:]
1750 prefix=""
1751 postfix=""
1752 if mydep[-1]=="*":
1753 mydep=mydep[:-1]
1754 postfix="*"
1755 if mydep[:2] in [ ">=", "<=" ]:
1756 prefix=mydep[:2]
1757 mydep=mydep[2:]
1758 elif mydep[:1] in "=<>~!":
1759 prefix=mydep[:1]
1760 mydep=mydep[1:]
1761 if mydep==oldkey:
1762 return prefix+newkey+postfix
1763 else:
1764 return origdep
1765
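# Illustrative sketch: dep_transform() only rewrites atoms whose key (minus
# any operator prefix and "*" postfix) is exactly oldkey; anything else is
# returned untouched:
#
#   >>> dep_transform(">=sys-apps/foo", "sys-apps/foo", "sys-apps/bar")
#   '>=sys-apps/bar'
#   >>> dep_transform(">=sys-apps/foo-1.0", "sys-apps/foo", "sys-apps/bar")
#   '>=sys-apps/foo-1.0'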
1766 def dep_expand(mydep,mydb=None,use_cache=1):
1767 if not len(mydep):
1768 return mydep
1769 if mydep[0]=="*":
1770 mydep=mydep[1:]
1771 prefix=""
1772 postfix=""
1773 if mydep[-1]=="*":
1774 mydep=mydep[:-1]
1775 postfix="*"
1776 if mydep[:2] in [ ">=", "<=" ]:
1777 prefix=mydep[:2]
1778 mydep=mydep[2:]
1779 elif mydep[:1] in "=<>~!":
1780 prefix=mydep[:1]
1781 mydep=mydep[1:]
1782 return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
1783
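# Illustrative sketch of dep_expand(): the operator is peeled off, the key is
# expanded through cpv_expand() against the given db, and the operator is put
# back (again assuming "portage" resolves to the sys-apps category):
#
#   >>> dep_expand(">=portage-2.0.51", mydb=portdb)
#   '>=sys-apps/portage-2.0.51'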
1784 def get_parsed_deps(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None):
1785
1786 if use=="all":
1787 #enable everything (for repoman)
1788 myusesplit=["*"]
1789 elif use=="yes":
1790 if myuse==None:
1791 #default behavior
1792 myusesplit = mysettings["USE"].split()
1793 else:
1794 myusesplit = myuse
1795 # We've been given useflags to use.
1796 #print "USE FLAGS PASSED IN."
1797 #print myuse
1798 #if "bindist" in myusesplit:
1799 # print "BINDIST is set!"
1800 #else:
1801 # print "BINDIST NOT set."
1802 else:
1803 #we are being run by autouse(), don't consult USE vars yet.
1804 # WE ALSO CANNOT USE SETTINGS
1805 myusesplit=[]
1806
1807 #convert parenthesis to sublists
1808 mysplit = portage_dep.paren_reduce(depstring)
1809
1810 if mysettings:
1811 # XXX: use="all" is only used by repoman. Why would repoman checks want
1812 # profile-masked USE flags to be enabled?
1813 #if use=="all":
1814 # mymasks=archlist[:]
1815 #else:
1816 mymasks=mysettings.usemask+archlist[:]
1817
1818 while mysettings["ARCH"] in mymasks:
1819 del mymasks[mymasks.index(mysettings["ARCH"])]
1820 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
1821 else:
1822 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))
1823 return mysplit
1824
1825 def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0):
1826 """Takes a depend string, parses it, and reports whether it is satisfied; if not, the atoms still needed are returned."""
1827
1828 mysplit=get_parsed_deps(depstring,mydbapi,mysettings,use=use,myuse=myuse)
1829 # Do the || conversions
1830 mysplit=portage_dep.dep_opconvert(mysplit)
1831
1832 #convert virtual dependencies to normal packages.
1833 mysplit=dep_virtual(mysplit, mysettings)
1834 #if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
1835 #up until here, we haven't needed to look at the database tree
1836
1837 if mysplit==None:
1838 return [0,"Parse Error (parentheses mismatch?)"]
1839 elif mysplit==[]:
1840 #dependencies were reduced to nothing
1841 return [1,[]]
1842 mysplit2=mysplit[:]
1843 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
1844 if mysplit2==None:
1845 return [0,"Invalid token"]
1846
1847 writemsg("\n\n\n", 1)
1848 writemsg("mysplit: %s\n" % (mysplit), 1)
1849 writemsg("mysplit2: %s\n" % (mysplit2), 1)
1850 myeval=dep_eval(mysplit2)
1851 writemsg("myeval: %s\n" % (myeval), 1)
1852
1853 if myeval:
1854 return [1,[]]
1855 else:
1856 myzaps = dep_zapdeps(mysplit,mysplit2,vardbapi=mydbapi,use_binaries=use_binaries)
1857 mylist = flatten(myzaps)
1858 writemsg("myzaps: %s\n" % (myzaps), 1)
1859 writemsg("mylist: %s\n" % (mylist), 1)
1860 #remove duplicates
1861 mydict={}
1862 for x in mylist:
1863 mydict[x]=1
1864 writemsg("mydict: %s\n" % (mydict), 1)
1865 return [1,mydict.keys()]
1866
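# Illustrative sketch of the dep_check() return convention: on success the
# first element is 1 and the second is the list of atoms still needed (empty
# when everything is already satisfied); on a parse or token error the first
# element is 0 and the second is an error string. The results below are
# hypothetical and depend on what the given dbapi can match:
#
#   >>> dep_check(">=sys-apps/portage-2.0", portdb, settings)
#   [1, []]
#   >>> dep_check(">=sys-apps/portage-99.0", portdb, settings)
#   [1, ['>=sys-apps/portage-99.0']]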
1867 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
1868 "Reduces the deplist to ones and zeros"
1869 mypos=0
1870 deplist=mydeplist[:]
1871 while mypos<len(deplist):
1872 if type(deplist[mypos])==types.ListType:
1873 #recurse
1874 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
1875 elif deplist[mypos]=="||":
1876 pass
1877 else:
1878 mykey = portage_dep.dep_getkey(deplist[mypos])
1879 if mysettings and mysettings.pprovideddict.has_key(mykey) and \
1880 portage_dep.match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
1881 deplist[mypos]=True
1882 else:
1883 if mode:
1884 mydep=mydbapi.xmatch(mode,deplist[mypos])
1885 else:
1886 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
1887 if mydep!=None:
1888 tmp=(len(mydep)>=1)
1889 if deplist[mypos][0]=="!":
1890 #tmp=not tmp
1891 # This is ad-hoc code; we should rewrite it later (see bug #52377).
1892 # The reason is that portage now uses a fakedb with the --update option,
1893 # so portage may consider a blocked package absent even when it is installed.
1894 # That is how bug #52377 happens.
1895 # ==== start
1896 # emerge checks whether the atom is a blocker anyway, so we can always set
1897 # tmp=False here, even though it is not clean.
1898 tmp=False
1899 # ==== end
1900 deplist[mypos]=tmp
1901 else:
1902 #encountered invalid string
1903 return None
1904 mypos=mypos+1
1905 return deplist
1906
1907 def fixdbentries(old_value, new_value, dbdir):
1908 """python replacement for the fixdbentries script, replaces old_value
1909 with new_value for package names in files in dbdir."""
1910 for myfile in [f for f in os.listdir(dbdir) if not f == "CONTENTS"]:
1911 f = open(dbdir+"/"+myfile, "r")
1912 mycontent = f.read()
1913 f.close()
1914 if not mycontent.count(old_value):
1915 continue
1916 old_value_re = re.escape(old_value) # keep old_value intact for later iterations
1917 mycontent = re.sub(old_value_re+"$", new_value, mycontent)
1918 mycontent = re.sub(old_value_re+"(\\s)", new_value+"\\1", mycontent)
1919 mycontent = re.sub(old_value_re+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
1920 mycontent = re.sub(old_value_re+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
1921 f = open(dbdir+"/"+myfile, "w")
1922 f.write(mycontent)
1923 f.close()
1924
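# Illustrative sketch: after a package move, fixdbentries() rewrites the old
# name inside one installed package's vdb directory (the path and names here
# are hypothetical):
#
#   >>> fixdbentries("app-editor/vim", "app-editors/vim",
#   ...              "/var/db/pkg/app-editors/vim-6.3")
#
# Every file except CONTENTS that mentions "app-editor/vim" (RDEPEND,
# DEPEND, PROVIDE, ...) is rewritten to reference "app-editors/vim".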
1925 class packagetree:
1926 def __init__(self,virtual,clone=None):
1927 if clone:
1928 self.tree=clone.tree.copy()
1929 self.populated=clone.populated
1930 self.virtual=clone.virtual
1931 self.dbapi=None
1932 else:
1933 self.tree={}
1934 self.populated=0
1935 self.virtual=virtual
1936 self.dbapi=None
1937
1938 def resolve_key(self,mykey):
1939 return key_expand(mykey,mydb=self.dbapi)
1940
1941 def dep_nomatch(self,mypkgdep):
1942 mykey=portage_dep.dep_getkey(mypkgdep)
1943 nolist=self.dbapi.cp_list(mykey)
1944 mymatch=self.dbapi.match(mypkgdep)
1945 if not mymatch:
1946 return nolist
1947 for x in mymatch:
1948 if x in nolist:
1949 nolist.remove(x)
1950 return nolist
1951
1952 def depcheck(self,mycheck,use="yes",myusesplit=None):
1953 return dep_check(mycheck,self.dbapi,settings,use=use,myuse=myusesplit) # dep_check() needs a config; use the global settings
1954
1955 def populate(self):
1956 "populates the tree with values"
1957 # stub; subclasses override this and set the flag on the instance
1958 self.populated=1
1959
1960 def best(mymatches):
1961 "accepts None arguments; assumes matches are valid."
1962 global bestcount
1963 if mymatches==None:
1964 return ""
1965 if not len(mymatches):
1966 return ""
1967 bestmatch=mymatches[0]
1968 p2=portage_versions.catpkgsplit(bestmatch)[1:]
1969 for x in mymatches[1:]:
1970 p1=portage_versions.catpkgsplit(x)[1:]
1971 if portage_versions.pkgcmp(p1,p2)>0:
1972 bestmatch=x
1973 p2=portage_versions.catpkgsplit(bestmatch)[1:]
1974 return bestmatch
1975
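# Illustrative sketch: best() picks the highest version from a list of cpv
# strings and tolerates None or empty input:
#
#   >>> best(["sys-apps/portage-2.0.50", "sys-apps/portage-2.0.51-r2"])
#   'sys-apps/portage-2.0.51-r2'
#   >>> best([])
#   ''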
1976 class portagetree:
1977 def __init__(self,root="/",virtual=None,clone=None):
1978 global portdb
1979 if clone:
1980 self.root=clone.root
1981 self.portroot=clone.portroot
1982 self.pkglines=clone.pkglines
1983 else:
1984 self.root=root
1985 self.portroot=settings["PORTDIR"]
1986 self.virtual=virtual
1987 self.dbapi=portdb
1988
1989 def dep_bestmatch(self,mydep):
1990 "compatibility method"
1991 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
1992 if mymatch==None:
1993 return ""
1994 return mymatch
1995
1996 def dep_match(self,mydep):
1997 "compatibility method"
1998 mymatch=self.dbapi.xmatch("match-visible",mydep)
1999 if mymatch==None:
2000 return []
2001 return mymatch
2002
2003 def exists_specific(self,cpv):
2004 return self.dbapi.cpv_exists(cpv)
2005
2006 def getallnodes(self):
2007 """new behavior: these are all *unmasked* nodes. There may or may not be
2008 masked packages available for the nodes in this list."""
2009 return self.dbapi.cp_all()
2010
2011 def getname(self,pkgname):
2012 "returns file location for this particular package (DEPRECATED)"
2013 if not pkgname:
2014 return ""
2015 mysplit=pkgname.split("/")
2016 psplit=portage_versions.pkgsplit(mysplit[1])
2017 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
2018
2019 def resolve_specific(self,myspec):
2020 cps=portage_versions.catpkgsplit(myspec)
2021 if not cps:
2022 return None
2023 mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
2024 mykey=mykey+"-"+cps[2]
2025 if cps[3]!="r0":
2026 mykey=mykey+"-"+cps[3]
2027 return mykey
2028
2029 def depcheck(self,mycheck,use="yes",myusesplit=None):
2030 return dep_check(mycheck,self.dbapi,settings,use=use,myuse=myusesplit) # dep_check() needs a config; use the global settings
2031
2032 def getslot(self,mycatpkg):
2033 "Get a slot for a catpkg; assume it exists."
2034 myslot = ""
2035 try:
2036 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
2037 except SystemExit, e:
2038 raise
2039 except Exception, e:
2040 pass
2041 return myslot
2042
2043
2044 class dbapi:
2045 def __init__(self):
2046 pass
2047
2048 def close_caches(self):
2049 pass
2050
2051 def cp_list(self,cp,use_cache=1):
2052 return
2053
2054 def aux_get(self,mycpv,mylist):
2055 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
2056 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
2057 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
2058 raise NotImplementedError
2059
2060 def match(self,origdep,use_cache=1):
2061 mydep=dep_expand(origdep,mydb=self)
2062 mykey=portage_dep.dep_getkey(mydep)
2063 mycat=mykey.split("/")[0]
2064 return portage_dep.match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
2065
2066 def match2(self,mydep,mykey,mylist):
2067 writemsg("DEPRECATED: dbapi.match2\n")
2068 return portage_dep.match_from_list(mydep,mylist)
2069
2070 def counter_tick(self,myroot,mycpv=None):
2071 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
2072
2073 def get_counter_tick_core(self,myroot,mycpv=None):
2074 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
2075
2076 def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
2077 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
2078 cpath=myroot+"var/cache/edb/counter"
2079 changed=0
2080 min_counter = 0
2081 if mycpv:
2082 mysplit = portage_versions.pkgsplit(mycpv)
2083 for x in self.match(mysplit[0],use_cache=0):
2084 # fixed bug #41062
2085 if x==mycpv:
2086 continue
2087 try:
2088 old_counter = long(self.aux_get(x,["COUNTER"])[0])
2089 writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
2090 except SystemExit, e:
2091 raise
2092 except:
2093 old_counter = 0
2094 writemsg("!!! BAD COUNTER in '%s'\n" % (x))
2095 if old_counter > min_counter:
2096 min_counter = old_counter
2097
2098 # We write our new counter value to a new file that gets moved into
2099 # place to avoid filesystem corruption.
2100 if os.path.exists(cpath):
2101 cfile=open(cpath, "r")
2102 try:
2103 counter=long(cfile.readline())
2104 except (ValueError,OverflowError):
2105 try:
2106 counter=long(portage_exec.spawn_get_output("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'",spawn_type=portage_exec.spawn_bash)[1])
2107 writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
2108 changed=1
2109 except (ValueError,OverflowError):
2110 writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
2111 writemsg("!!! corrected/normalized so that portage can operate properly.\n")
2112 writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
2113 sys.exit(2)
2114 cfile.close()
2115 else:
2116 try:
2117 counter=long(portage_exec.spawn_get_output("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'",spawn_type=portage_exec.spawn_bash)[1])
2118 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
2119 except SystemExit, e:
2120 raise
2121 except:
2122 writemsg("!!! Initializing global counter.\n")
2123 counter=long(0)
2124 changed=1
2125
2126 if counter < min_counter:
2127 counter = min_counter+1000
2128 changed = 1
2129
2130 if incrementing or changed:
2131
2132 #increment counter
2133 counter += 1
2134 # update new global counter file
2135 newcpath=cpath+".new"
2136 newcfile=open(newcpath,"w")
2137 newcfile.write(str(counter))
2138 newcfile.close()
2139 # now move global counter file into place
2140 os.rename(newcpath,cpath)
2141 return counter
2142
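# Illustrative sketch of the counter_tick()/get_counter_tick_core() wrappers
# above (the instance lookup and values are hypothetical):
#
#   >>> vdb = db["/"]["vartree"].dbapi
#   >>> vdb.get_counter_tick_core("/")                    # peek only, no write
#   1001
#   >>> vdb.counter_tick("/", mycpv="sys-apps/foo-1.0")   # advance and persist
#   1001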
2143 def invalidentry(self, mypath):
2144 if re.search("portage_lockfile$",mypath):
2145 if not os.environ.has_key("PORTAGE_MASTER_PID"):
2146 writemsg("Lockfile removed: %s\n" % mypath, 1)
2147 portage_locks.unlockfile((mypath,None,None))
2148 else:
2149 # Nothing we can do about it. We're probably sandboxed.
2150 pass
2151 elif re.search(".*/-MERGING-(.*)",mypath):
2152 if os.path.exists(mypath):
2153 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
2154 else:
2155 writemsg("!!! Invalid db entry: %s\n" % mypath)
2156
2157
2158
2159 class fakedbapi(dbapi):
2160 "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
2161 def __init__(self):
2162 self.cpvdict={}
2163 self.cpdict={}
2164
2165 def cpv_exists(self,mycpv):
2166 return self.cpvdict.has_key(mycpv)
2167
2168 def cp_list(self,mycp,use_cache=1):
2169 return self.cpdict.get(mycp,[])
2170
2171 def cp_all(self):
2172 returnme=[]
2173 for x in self.cpdict.keys():
2174 returnme.extend(self.cpdict[x])
2175 return returnme
2176
2177 def cpv_inject(self,mycpv):
2178 """Adds a cpv to the list of available packages."""
2179 mycp=cpv_getkey(mycpv)
2180 self.cpvdict[mycpv]=1
2181 cplist = self.cpdict.setdefault(mycp,[])
2182 if mycpv not in cplist:
2183 cplist.append(mycpv)
2184
2185 #def cpv_virtual(self,oldcpv,newcpv):
2186 # """Maps a cpv to the list of available packages."""
2187 # mycp=cpv_getkey(newcpv)
2188 # self.cpvdict[newcpv]=1
2189 # if not self.virtdict.has_key(mycp):
2190 # self.virtdict[mycp]=[]
2191 # if not mycpv in self.virtdict[mycp]:
2192 # self.virtdict[mycp].append(oldcpv)
2193 # cpv_remove(oldcpv)
2194
2195 def cpv_remove(self,mycpv):
2196 """Removes a cpv from the list of available packages."""
2197 mycp=cpv_getkey(mycpv)
2198 if mycpv in self.cpvdict:
2199 del self.cpvdict[mycpv]
2200 cpvlist = self.cpdict.get(mycp)
2201 if cpvlist is None:
2202 return
2203 while mycpv in cpvlist:
2204 cpvlist.remove( mycpv )
2205 if not cpvlist:
2206 del self.cpdict[mycp]
2207
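# Illustrative sketch: fakedbapi keeps a purely in-memory package list, which
# is how the --update code builds its fake view of the system:
#
#   >>> fdb = fakedbapi()
#   >>> fdb.cpv_inject("sys-apps/foo-1.0")
#   >>> fdb.cp_list("sys-apps/foo")
#   ['sys-apps/foo-1.0']
#   >>> fdb.cpv_remove("sys-apps/foo-1.0")
#   >>> fdb.cp_all()
#   []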
2208 class bindbapi(fakedbapi):
2209 def __init__(self,mybintree=None):
2210 self.bintree = mybintree
2211 self.cpvdict={}
2212 self.cpdict={}
2213
2214 def aux_get(self,mycpv,wants):
2215 mysplit = mycpv.split("/")
2216 mylist = []
2217 tbz2name = mysplit[1]+".tbz2"
2218 if self.bintree and not self.bintree.isremote(mycpv):
2219 tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
2220 for x in wants:
2221 if self.bintree and self.bintree.isremote(mycpv):
2222 # We use the cache for remote packages
2223 if self.bintree.remotepkgs[tbz2name].has_key(x):
2224 mylist.append(self.bintree.remotepkgs[tbz2name][x][:]) # [:] Copy String
2225 else:
2226 mylist.append("")
2227 else:
2228 myval = tbz2.getfile(x)
2229 if myval == None:
2230 myval = ""
2231 else:
2232 myval = ' '.join(myval.split())
2233 mylist.append(myval)
2234
2235 return mylist
2236
2237
2238 cptot=0
2239 class vardbapi(dbapi):
2240 def __init__(self,root,categories=None):
2241 self.root = root
2242 #cache for category directory mtimes
2243 self.mtdircache = {}
2244 #cache for dependency checks
2245 self.matchcache = {}
2246 #cache for cp_list results
2247 self.cpcache = {}
2248 self.blockers = None
2249 self.categories = copy.deepcopy(categories)
2250
2251 def cpv_exists(self,mykey):
2252 "Tells us whether an entry for this cpv exists in the installed-packages db on disk (no masking)"
2253 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
2254
2255 def cpv_counter(self,mycpv):
2256 "This method will grab the COUNTER. Returns a counter value."
2257 cdir=self.root+VDB_PATH+"/"+mycpv
2258 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
2259
2260 # We write our new counter value to a new file that gets moved into
2261 # place to avoid filesystem corruption on XFS (unexpected reboot.)
2262 corrupted=0
2263 if os.path.exists(cpath):
2264 cfile=open(cpath, "r")
2265 try:
2266 counter=long(cfile.readline())
2267 except ValueError:
2268 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
2269 counter=long(0)
2270 corrupted=1
2271 cfile.close()
2272 elif os.path.exists(cdir):
2273 mys = portage_versions.pkgsplit(mycpv)
2274 myl = self.match(mys[0],use_cache=0)
2275 print mys,myl
2276 if len(myl) == 1:
2277 try:
2278 # Only one package... Counter doesn't matter.
2279 myf = open(cpath, "w")
2280 myf.write("1")
2281 myf.flush()
2282 myf.close()
2283 counter = 1
2284 except SystemExit, e:
2285 raise
2286 except Exception, e:
2287 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
2288 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
2289 writemsg("!!! /usr/lib/portage/bin/fix-db.py, or\n")
2290 writemsg("!!! unmerge this exact version.\n")
2291 writemsg("!!! %s\n" % e)
2292 sys.exit(1)
2293 else:
2294 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
2295 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
2296 writemsg("!!! /usr/lib/portage/bin/fix-db.py, or\n")
2297 writemsg("!!! remerge the package.\n")
2298 sys.exit(1)
2299 else:
2300 counter=long(0)
2301 if corrupted:
2302 newcpath=cpath+".new"
2303 # update new global counter file
2304 newcfile=open(newcpath,"w")
2305 newcfile.write(str(counter))
2306 newcfile.close()
2307 # now move global counter file into place
2308 os.rename(newcpath,cpath)
2309 return counter
2310
2311 def cpv_inject(self,mycpv):
2312 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
2313 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
2314 counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
2315 # write local package counter so that emerge clean does the right thing
2316 lcfile=open(self.root+VDB_PATH+"/"+mycpv+"/COUNTER","w")
2317 lcfile.write(str(counter))
2318 lcfile.close()
2319
2320 def isInjected(self,mycpv):
2321 if self.cpv_exists(mycpv):
2322 if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
2323 return True
2324 if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
2325 return True
2326 return False
2327
2328 def move_ent(self,mylist):
2329 origcp=mylist[1]
2330 newcp=mylist[2]
2331 origmatches=self.match(origcp,use_cache=0)
2332 if not origmatches:
2333 return
2334 for mycpv in origmatches:
2335 mycpsplit=portage_versions.catpkgsplit(mycpv)
2336 mynewcpv=newcp+"-"+mycpsplit[2]
2337 mynewcat=newcp.split("/")[0]
2338 if mycpsplit[3]!="r0":
2339 mynewcpv += "-"+mycpsplit[3]
2340 mycpsplit_new = portage_versions.catpkgsplit(mynewcpv)
2341 origpath=self.root+VDB_PATH+"/"+mycpv
2342 if not os.path.exists(origpath):
2343 continue
2344 writemsg("@")
2345 if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
2346 #create the directory
2347 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
2348 newpath=self.root+VDB_PATH+"/"+mynewcpv
2349 if os.path.exists(newpath):
2350 #dest already exists; keep this puppy where it is.
2351 continue
2352 spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
2353
2354 # We need to rename the ebuild now.
2355 old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
2356 new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
2357 if mycpsplit[3] != "r0":
2358 old_eb_path += "-"+mycpsplit[3]
2359 new_eb_path += "-"+mycpsplit[3]
2360 if os.path.exists(old_eb_path+".ebuild"):
2361 os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
2362
2363 catfile=open(newpath+"/CATEGORY", "w")
2364 catfile.write(mynewcat+"\n")
2365 catfile.close()
2366
2367 dbdir = self.root+VDB_PATH
2368 for catdir in listdir(dbdir):
2369 catdir = dbdir+"/"+catdir
2370 if os.path.isdir(catdir):
2371 for pkgdir in listdir(catdir):
2372 pkgdir = catdir+"/"+pkgdir
2373 if os.path.isdir(pkgdir):
2374 fixdbentries(origcp, newcp, pkgdir)
2375
2376 def move_slot_ent(self,mylist):
2377 pkg=mylist[1]
2378 origslot=mylist[2]
2379 newslot=mylist[3]
2380
2381 origmatches=self.match(pkg,use_cache=0)
2382 if not origmatches:
2383 return
2384 for mycpv in origmatches:
2385 origpath=self.root+VDB_PATH+"/"+mycpv
2386 if not os.path.exists(origpath):
2387 continue
2388
2389 slot=grabfile(origpath+"/SLOT")
2390 if (not slot):
2391 continue
2392
2393 if (slot[0]!=origslot):
2394 continue
2395
2396 writemsg("s")
2397 slotfile=open(origpath+"/SLOT", "w")
2398 slotfile.write(newslot+"\n")
2399 slotfile.close()
2400
2401 def cp_list(self,mycp,use_cache=1):
2402 mysplit=mycp.split("/")
2403 if mysplit[0] == '*':
2404 mysplit[0] = mysplit[0][1:]
2405 try:
2406 mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
2407 except OSError:
2408 mystat=0
2409 if use_cache and self.cpcache.has_key(mycp):
2410 cpc=self.cpcache[mycp]
2411 if cpc[0]==mystat:
2412 return cpc[1]
2413 mylist=listdir(self.root+VDB_PATH+"/"+mysplit[0])
2414
2415 if (mylist==None):
2416 return []
2417 returnme=[]
2418 for x in mylist:
2419 if x[0] == '-':
2420 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
2421 continue
2422 ps=portage_versions.pkgsplit(x)
2423 if not ps:
2424 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
2425 continue
2426 if len(mysplit) > 1:
2427 if ps[0]==mysplit[1]:
2428 returnme.append(mysplit[0]+"/"+x)
2429 if use_cache:
2430 self.cpcache[mycp]=[mystat,returnme]
2431 elif self.cpcache.has_key(mycp):
2432 del self.cpcache[mycp]
2433 return returnme
2434
2435 def cpv_all(self, use_cache=1):
2436 return list(self.iter_cpv_all(use_cache=use_cache))
2437
2438 def iter_cpv_all(self,use_cache=1):
2439 basepath = self.root+VDB_PATH+"/"
2440
2441 mycats = self.categories
2442 if mycats == None:
2443 # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
2444 mycats = settings.categories
2445
2446 for x in mycats:
2447 for y in listdir(basepath+x):
2448 subpath = x+"/"+y
2449 # -MERGING- should never be a cpv, nor should files.
2450 if os.path.isdir(basepath+subpath) and (portage_versions.pkgsplit(y) is not None):
2451 yield subpath
2452
2453 def cp_all(self,use_cache=1):
2454 # iter_cpv_all() below walks the vdb directly; a separate cpv_all() pass is not needed
2455 d=dict()
2456 for y in self.iter_cpv_all(use_cache=use_cache):
2457 if y[0] == '*':
2458 y = y[1:]
2459 mysplit=portage_versions.catpkgsplit(y)
2460 if not mysplit:
2461 self.invalidentry(self.root+VDB_PATH+"/"+y)
2462 continue
2463 mykey=mysplit[0]+"/"+mysplit[1]
2464 d[mykey] = None
2465 return d.keys()
2466
2467 def checkblockers(self,origdep):
2468 pass
2469
2470 def match(self,origdep,use_cache=1):
2471 "caching match function"
2472 mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
2473 mykey=portage_dep.dep_getkey(mydep)
2474 mycat=mykey.split("/")[0]
2475 if not use_cache:
2476 if self.matchcache.has_key(mycat):
2477 del self.mtdircache[mycat]
2478 del self.matchcache[mycat]
2479 return portage_dep.match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
2480 try:
2481 curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
2482 except SystemExit, e:
2483 raise
2484 except:
2485 curmtime=0
2486
2487 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
2488 # clear cache entry
2489 self.mtdircache[mycat]=curmtime
2490 self.matchcache[mycat]={}
2491 if not self.matchcache[mycat].has_key(mydep):
2492 mymatch=portage_dep.match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
2493 self.matchcache[mycat][mydep]=mymatch
2494 return self.matchcache[mycat][mydep][:]
2495
2496 def aux_get(self, mycpv, wants):
2497 global auxdbkeys
2498 results = []
2499 if not self.cpv_exists(mycpv):
2500 return []
2501 for x in wants:
2502 myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
2503 if os.access(myfn,os.R_OK):
2504 myf = open(myfn, "r")
2505 myd = myf.read()
2506 myf.close()
2507 myd = " ".join( myd.split() )
2508 else:
2509 myd = ""
2510 results.append(myd)
2511 return results
2512
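# Illustrative sketch: vardbapi.aux_get() reads the per-package metadata
# files under <root>/var/db/pkg, returning one value per requested key
# (the values shown are hypothetical):
#
#   >>> vdb = vardbapi("/")
#   >>> vdb.aux_get("sys-apps/portage-2.0.51", ["SLOT", "COUNTER"])
#   ['0', '1234']
#   >>> vdb.aux_get("not/installed-1.0", ["SLOT"])
#   []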
2513
2514 class vartree(packagetree):
2515 """
2516 this tree will scan a var/db/pkg database located at root (passed to init)
2517 """
2518 def __init__(self,root="/",virtual=None,clone=None,categories=None):
2519 if clone:
2520 self.root = clone.root
2521 self.dbapi = copy.deepcopy(clone.dbapi)
2522 self.populated = 1
2523 else:
2524 self.root = root
2525 self.dbapi = vardbapi(self.root,categories=categories)
2526 self.populated = 1
2527
2528 def zap(self,mycpv):
2529 return
2530
2531 def inject(self,mycpv):
2532 return
2533
2534 def get_provide(self,mycpv):
2535 myprovides=[]
2536 try:
2537 mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
2538 if mylines:
2539 myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
2540 myuse = " ".join(myuse).split()
2541 mylines = " ".join(mylines)
2542 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
2543 for myprovide in mylines:
2544 mys = portage_versions.catpkgsplit(myprovide)
2545 if not mys:
2546 mys = myprovide.split("/")
2547 myprovides += [mys[0] + "/" + mys[1]]
2548 return myprovides
2549 except SystemExit, e:
2550 raise
2551 except Exception, e:
2552 print
2553 print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
2554 print "Possibly Invalid: " + str(mylines)
2555 print "Exception: "+str(e)
2556 print
2557 return []
2558
2559 def get_all_provides(self):
2560 myprovides = {}
2561 for node in self.getallcpv():
2562 for mykey in self.get_provide(node):
2563 if myprovides.has_key(mykey):
2564 myprovides[mykey] += [node]
2565 else:
2566 myprovides[mykey] = [node]
2567 return myprovides
2568
2569 def dep_bestmatch(self,mydep,use_cache=1):
2570 "compatibility method -- all matches, not just visible ones"
2571 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
2572 mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
2573 if mymatch==None:
2574 return ""
2575 else:
2576 return mymatch
2577
2578 def dep_match(self,mydep,use_cache=1):
2579 "compatibility method -- we want to see all matches, not just visible ones"
2580 #mymatch=match(mydep,self.dbapi)
2581 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
2582 if mymatch==None:
2583 return []
2584 else:
2585 return mymatch
2586
2587 def exists_specific(self,cpv):
2588 return self.dbapi.cpv_exists(cpv)
2589
2590 def getallcpv(self):
2591 """temporary function, probably to be renamed --- Gets a list of all
2592 category/package-versions installed on the system."""
2593 return self.dbapi.cpv_all()
2594
2595 def getallnodes(self):
2596 """new behavior: these are all *unmasked* nodes. There may or may not be
2597 masked packages available for the nodes in this list."""
2598 return self.dbapi.cp_all()
2599
2600 def exists_specific_cat(self,cpv,use_cache=1):
2601 cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
2602 a=portage_versions.catpkgsplit(cpv)
2603 if not a:
2604 return 0
2605 mylist=listdir(self.root+VDB_PATH+"/"+a[0])
2606 for x in mylist:
2607 b=portage_versions.pkgsplit(x)
2608 if not b:
2609 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
2610 continue
2611 if a[1]==b[0]:
2612 return 1
2613 return 0
2614
2615 def getebuildpath(self,fullpackage):
2616 cat,package=fullpackage.split("/")
2617 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
2618
2619 def getnode(self,mykey,use_cache=1):
2620 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
2621 if not mykey:
2622 return []
2623 mysplit=mykey.split("/")
2624 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0])
2625 returnme=[]
2626 for x in mydirlist:
2627 mypsplit=portage_versions.pkgsplit(x)
2628 if not mypsplit:
2629 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
2630 continue
2631 if mypsplit[0]==mysplit[1]:
2632 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
2633 returnme.append(appendme)
2634 return returnme
2635
2636
2637 def getslot(self,mycatpkg):
2638 "Get a slot for a catpkg; assume it exists."
2639 myslot = ""
2640 try:
2641 myslot=" ".join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
2642 except SystemExit, e:
2643 raise
2644 except Exception, e:
2645 pass
2646 return myslot
2647
2648 def hasnode(self,mykey,use_cache):
2649 """Does the particular node (cat/pkg key) exist?"""
2650 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
2651 mysplit=mykey.split("/")
2652 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0])
2653 for x in mydirlist:
2654 mypsplit=portage_versions.pkgsplit(x)
2655 if not mypsplit:
2656 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
2657 continue
2658 if mypsplit[0]==mysplit[1]:
2659 return 1
2660 return 0
2661
2662 def populate(self):
2663 self.populated=1
2664
2665
2666 auxdbkeys=[
2667 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
2668 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
2669 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
2670 'PDEPEND', 'PROVIDE',
2671 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
2672 'UNUSED_05', 'UNUSED_06', 'UNUSED_07', 'UNUSED_08',
2673 ]
2674 auxdbkeylen=len(auxdbkeys)
2675
2676 def close_portdbapi_caches():
2677 for i in portdbapi.portdbapi_instances:
2678 i.close_caches()
2679 class portdbapi(dbapi):
2680 """this tree will scan a portage directory located at root (passed to init)"""
2681 portdbapi_instances = []
2682
2683 def __init__(self,porttree_root,mysettings=None):
2684 portdbapi.portdbapi_instances.append(self)
2685 self.lock_held = 0
2686
2687 if mysettings:
2688 self.mysettings = mysettings
2689 else:
2690 self.mysettings = config(clone=settings)
2691
2692 self.manifestVerifyLevel = None
2693 self.manifestVerifier = None
2694 self.manifestCache = {} # {location: [stat, md5]}
2695 self.manifestMissingCache = []
2696
2697 if "gpg" in self.mysettings.features:
2698 self.manifestVerifyLevel = portage_gpg.EXISTS
2699 if "strict" in self.mysettings.features:
2700 self.manifestVerifyLevel = portage_gpg.MARGINAL
2701 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
2702 elif "severe" in self.mysettings.features:
2703 self.manifestVerifyLevel = portage_gpg.TRUSTED
2704 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
2705 else:
2706 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
2707
2708 #self.root=settings["PORTDIR"]
2709 self.porttree_root = porttree_root
2710
2711 self.depcachedir = self.mysettings.depcachedir
2712
2713 self.eclassdb = eclass_cache.cache(self.porttree_root, self.mysettings["PORTDIR_OVERLAY"].split())
2714
2715 self.metadb = {}
2716 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
2717
2718 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
2719 self.xcache={}
2720 self.frozen=0
2721
2722 self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()
2723
2724 filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED")]
2725 self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
2726 self.auxdb = {}
2727 for x in self.porttrees:
2728 self.auxdb[x] = self.auxdbmodule(x, filtered_auxdbkeys, basepath=portage_const.DEPCACHE_PATH,
2729 gid=portage_gid)
2730
2731
2732 def getmaskingreason(self,mycpv):
2733 mysplit = portage_versions.catpkgsplit(mycpv)
2734 if not mysplit:
2735 raise ValueError("invalid CPV: %s" % mycpv)
2736 if not self.cpv_exists(mycpv):
2737 raise KeyError("CPV %s does not exist" % mycpv)
2738 mycp=mysplit[0]+"/"+mysplit[1]
2739
2740 if settings.pmaskdict.has_key(mycp):
2741 for x in settings.pmaskdict[mycp]:
2742 if mycpv in self.xmatch("match-all", x):
2743 pmaskfile = open(settings["PORTDIR"]+"/profiles/package.mask")
2744 comment = ""
2745 l = "\n"
2746 while len(l) > 0:
2747 l = pmaskfile.readline()
2748 if len(l) == 0:
2749 pmaskfile.close()
2750 return None
2751 if l[0] == "#":
2752 comment += l
2753 elif l == "\n":
2754 comment = ""
2755 elif l.strip() == x:
2756 pmaskfile.close()
2757 return comment
2758 pmaskfile.close()
2759 return None
2760
2761 def getmaskingstatus(self,mycpv):
2762 mysplit = portage_versions.catpkgsplit(mycpv)
2763 if not mysplit:
2764 raise ValueError("invalid CPV: %s" % mycpv)
2765 if not self.cpv_exists(mycpv):
2766 raise KeyError("CPV %s does not exist" % mycpv)
2767 mycp=mysplit[0]+"/"+mysplit[1]
2768
2769 rValue = []
2770
2771 # profile checking
2772 revmaskdict=settings.prevmaskdict
2773 if revmaskdict.has_key(mycp):
2774 for x in revmaskdict[mycp]:
2775 if x[0]=="*":
2776 myatom = x[1:]
2777 else:
2778 myatom = x
2779 if not portage_dep.match_to_list(mycpv, [myatom]):
2780 rValue.append("profile")
2781 break
2782
2783 # package.mask checking
2784 maskdict=settings.pmaskdict
2785 unmaskdict=settings.punmaskdict
2786 if maskdict.has_key(mycp):
2787 for x in maskdict[mycp]:
2788 if mycpv in self.xmatch("match-all", x):
2789 unmask=0
2790 if unmaskdict.has_key(mycp):
2791 for z in unmaskdict[mycp]:
2792 if mycpv in self.xmatch("match-all",z):
2793 unmask=1
2794 break
2795 if unmask==0:
2796 rValue.append("package.mask")
2797
2798 # keywords checking
2799 mygroups = self.aux_get(mycpv, ["KEYWORDS"])[0].split()
2800 pgroups=groups[:]
2801 myarch = settings["ARCH"]
2802 pkgdict = settings.pkeywordsdict
2803
2804 cp = portage_dep.dep_getkey(mycpv)
2805 if pkgdict.has_key(cp):
2806 matches = portage_dep.match_to_list(mycpv, pkgdict[cp].keys())
2807 for match in matches:
2808 pgroups.extend(pkgdict[cp][match])
2809
2810 kmask = "missing"
2811
2812 for keyword in pgroups:
2813 if keyword in mygroups:
2814 kmask=None
2815
2816 if kmask:
2817 for gp in mygroups:
2818 if gp=="*":
2819 kmask=None
2820 break
2821 elif gp=="-*":
2822 break
2823 elif gp=="-"+myarch:
2824 kmask="-"+myarch
2825 break
2826 elif gp=="~"+myarch:
2827 kmask="~"+myarch
2828 break
2829
2830 if kmask:
2831 rValue.append(kmask+" keyword")
2832 return rValue
2833
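# Illustrative sketch: getmaskingstatus() returns one reason per mask source
# ("profile", "package.mask", or a keyword entry), or an empty list for a
# visible package (the results below are hypothetical):
#
#   >>> portdb.getmaskingstatus("sys-apps/portage-2.0.51_pre1")
#   ['~x86 keyword']
#   >>> portdb.getmaskingstatus("sys-apps/portage-2.0.51")
#   []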
2834
2835 def regen_keys(self,cleanse_stale=True):
2836 """walk all entries of this instance to update the cache; entries are
2837 regenerated directly from the ebuilds visible to this portdbapi instance.
2838 cleanse_stale controls whether or not the cache's old/stale entries are
2839 removed.
2840 This is useful both for emerge metadata and emerge regen (more so for regen)."""
2841
2842 import cache.cache_errors
2843 valid_nodes = {}
2844 for x in self.cp_all():
2845 # print "processing pkg %s" % x
2846 for y in self.cp_list(x):
2847 valid_nodes[y] = None
2848 try: self.aux_get(y,["_mtime_"])
2849 except cache.cache_errors.CacheError, ce:
2850 print "Cache Exception-", ce
2851 del ce
2852 for loc, tree in self.auxdb.items():
2853 print "cleansing cache for tree at %s" % loc
2854 for x in tree.keys():
2855 if x not in valid_nodes:
2856 try: del tree[x]
2857 except (KeyError, cache.cache_errors.CacheError):
2858 pass
2859
2860
2861 def close_caches(self):
2862 pass
2863 # for y in self.auxdb[x].keys():
2864 # self.auxdb[x][y].sync()
2865 # self.auxdb[x][y].close()
2866 # del self.auxdb[x][y]
2867 # del self.auxdb[x]
2868 # self.eclassdb.close_caches()
2869
2870 def flush_cache(self):
2871 self.metadb.clear()
2872 self.auxdb.clear()
2873 # self.eclassdb.flush_cache()
2874
2875 def finddigest(self,mycpv):
2876 try:
2877 mydig = self.findname2(mycpv)[0]
2878 mydigs = mydig.split("/")[:-1]
2879 mydig = "/".join(mydigs)
2880
2881 mysplit = mycpv.split("/")
2882 except SystemExit, e:
2883 raise
2884 except:
2885 return ""
2886 return mydig+"/files/digest-"+mysplit[-1]
2887
2888 def findname(self,mycpv):
2889 return self.findname2(mycpv)[0]
2890
2891 def findname2(self,mycpv):
2892 "returns the file location for this particular package and the tree it was found in (None, 0 when not found)"
2893 if not mycpv:
2894 return "",0
2895 mysplit=mycpv.split("/")
2896 if mysplit[0]=="virtual":
2897 print "!!! Cannot resolve a virtual package name to an ebuild."
2898 print "!!! This is a bug, please report it. ("+mycpv+")"
2899 sys.exit(1)
2900
2901 psplit=portage_versions.pkgsplit(mysplit[1])
2902 ret=None
2903 if psplit:
2904 for x in self.porttrees:
2905 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
2906
2907 if os.access(file, os.R_OK):
2908 # when found
2909 ret=[file, x]
2910 if ret:
2911 return ret[0], ret[1]
2912
2913 # when not found
2914 return None, 0
2915
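# Illustrative sketch: findname2() returns the ebuild path plus the tree it
# was found in, or (None, 0) when no readable ebuild exists (paths below are
# hypothetical):
#
#   >>> portdb.findname2("app-editors/vim-6.3")
#   ('/usr/portage/app-editors/vim/vim-6.3.ebuild', '/usr/portage')
#   >>> portdb.findname2("app-editors/vim-0.0")
#   (None, 0)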
2916 def aux_get(self,mycpv,mylist,strict=0,metacachedir=None,debug=0):
2917 """
2918 returns auxiliary db information, such as SLOT, DEPEND, etc.
2919 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
2920 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
2921 """
2922 global auxdbkeys,auxdbkeylen
2923
2924 cat,pkg = mycpv.split( "/", 1)
2925
2926 if metacachedir:
2927 if cat not in self.metadb:
2928 self.metadb[cat] = self.metadbmodule(metacachedir,cat,auxdbkeys,uid,portage_gid)
2929
2930 myebuild, mylocation=self.findname2(mycpv)
2931
2932 if not myebuild:
2933 writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not found:\n" % {"cpv":mycpv})
2934 writemsg("!!! %s\n" % myebuild)
2935 raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
2936
2937 if "gpg" in self.mysettings.features:
2938 myManifestPath = os.path.join("/",os.path.dirname(myebuild),"Manifest")
2939 try:
2940 mys = portage_gpg.fileStats(