# Copyright 1999-2004 Gentoo Technologies, Inc.
# Distributed under the terms of the GNU General Public License v2
# $Header: /var/cvsroot/gentoo-x86/eclass/eutils.eclass,v 1.78 2004/02/09 17:08:44 brad_mssw Exp $
#
# Author: Martin Schlemmer <azarah@gentoo.org>
#
# This eclass is for general purpose functions that most ebuilds
# have to implement themselves.
…
			jobs="$((`grep "^cpus active" /proc/cpuinfo | sed -e "s/^.*: //"` * 2))"
		else
			jobs=2
		fi

	elif [ "${ARCH}" = "ppc" -o "${ARCH}" = "ppc64" ]
	then
		# ppc has "processor", but only when compiled with SMP
		if [ "`grep -c "^processor" /proc/cpuinfo`" -eq 1 ]
		then
			jobs="$((`grep -c ^processor /proc/cpuinfo` * 2))"
…
	fi

	return 0
}

# for internal use only (unpack_pdv and unpack_makeself)
find_unpackable_file() {
	local src="$1"
	if [ -z "${src}" ]
	then
		src="${DISTDIR}/${A}"
	else
		if [ -e "${DISTDIR}/${src}" ]
		then
			src="${DISTDIR}/${src}"
		elif [ -e "${PWD}/${src}" ]
		then
			src="${PWD}/${src}"
		elif [ -e "${src}" ]
		then
			src="${src}"
		fi
	fi
	[ ! -e "${src}" ] && die "Could not find requested archive ${src}"
	echo "${src}"
}
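
# Example (illustrative only; the filename below is made up): with no
# argument find_unpackable_file resolves to ${DISTDIR}/${A}; with an
# argument it tries ${DISTDIR}, then ${PWD}, then the path as given,
# and dies if none of them exist.
#
#   src="`find_unpackable_file`"             # -> ${DISTDIR}/${A}
#   src="`find_unpackable_file foo-1.0.run`" # -> ${DISTDIR}/foo-1.0.run (if it exists there)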
|
|
# Unpack those pesky pdv generated files ...
# They're self-unpacking programs with the binary package stuffed in
# the middle of the archive. Valve seems to use it a lot ... too bad
# it seems to like to segfault a lot :(. So let's take it apart ourselves.
#
# Usage: unpack_pdv [file to unpack] [size of off_t]
# - you have to specify the off_t size ... I have no idea how to extract that
#   information out of the binary executable myself. basically you pass in
#   the size of the off_t type (in bytes) on the machine that built the pdv
#   archive. one way to determine this is by running the following commands:
#       strings <pdv archive> | grep lseek
#       strace -elseek <pdv archive>
#   basically look for the first lseek command (we do the strings/grep because
#   sometimes the function call is _llseek or something) and steal the 2nd
#   parameter. here is an example:
#       root@vapier 0 pdv_unpack # strings hldsupdatetool.bin | grep lseek
#       lseek
#       root@vapier 0 pdv_unpack # strace -elseek ./hldsupdatetool.bin
#       lseek(3, -4, SEEK_END)          = 2981250
#   thus we would pass in the value of '4' as the second parameter.
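#
# Continuing the hldsupdatetool.bin example above, a hypothetical ebuild
# (illustration only, not a real package) would then unpack it with:
#
#   src_unpack() {
#       unpack_pdv hldsupdatetool.bin 4
#   }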
|
|
unpack_pdv() {
	local src="`find_unpackable_file $1`"
	local sizeoff_t="$2"

	[ -z "${sizeoff_t}" ] && die "No idea what off_t size was used for this pdv :("

	local shrtsrc="`basename ${src}`"
	echo ">>> Unpacking ${shrtsrc} to ${PWD}"
	local metaskip=`tail -c ${sizeoff_t} ${src} | hexdump -e \"%i\"`
	local tailskip=`tail -c $((${sizeoff_t}*2)) ${src} | head -c ${sizeoff_t} | hexdump -e \"%i\"`

	# grab metadata for debug reasons
	local metafile="`mymktemp ${T}`"
	tail -c +$((${metaskip}+1)) ${src} > ${metafile}

	# rip out the final file name from the metadata
	local datafile="`tail -c +$((${metaskip}+1)) ${src} | strings | head -n 1`"
	datafile="`basename ${datafile}`"

	# now let's uncompress/untar the file if need be
	local tmpfile="`mymktemp ${T}`"
	tail -c +$((${tailskip}+1)) ${src} 2>/dev/null | head -c 512 > ${tmpfile}

	local iscompressed="`file -b ${tmpfile}`"
	if [ "${iscompressed:0:8}" == "compress" ] ; then
		iscompressed=1
		mv ${tmpfile}{,.Z}
		gunzip ${tmpfile}
	else
		iscompressed=0
	fi
	local istar="`file -b ${tmpfile}`"
	if [ "${istar:0:9}" == "POSIX tar" ] ; then
		istar=1
	else
		istar=0
	fi

	#for some reason gzip dies with this ... dd can't provide buffer fast enough ?
	#dd if=${src} ibs=${metaskip} count=1 \
	#	| dd ibs=${tailskip} skip=1 \
	#	| gzip -dc \
	#	> ${datafile}
	if [ ${iscompressed} -eq 1 ] ; then
		if [ ${istar} -eq 1 ] ; then
			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
				| head -c $((${metaskip}-${tailskip})) \
				| tar -xzf -
		else
			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
				| head -c $((${metaskip}-${tailskip})) \
				| gzip -dc \
				> ${datafile}
		fi
	else
		if [ ${istar} -eq 1 ] ; then
			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
				| head -c $((${metaskip}-${tailskip})) \
				| tar --no-same-owner -xf -
		else
			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
				| head -c $((${metaskip}-${tailskip})) \
				> ${datafile}
		fi
	fi
	true
	#[ -s "${datafile}" ] || die "failure unpacking pdv ('${metaskip}' '${tailskip}' '${datafile}')"
	#assert "failure unpacking pdv ('${metaskip}' '${tailskip}' '${datafile}')"
}

# new convenience patch wrapper function to eventually replace epatch(),
# $PATCHES, $PATCHES1, src_unpack:patch, src_unpack:autopatch and
# /usr/bin/patch
# Features:
# - bulk patch handling similar to epatch()'s
# - automatic patch level detection like epatch()'s
# - automatic patch uncompression like epatch()'s
# - doesn't have the --dry-run overhead of epatch() - inspects patchfiles
#   manually instead
# - once I decide it's production-ready, it'll be called from base_src_unpack
#   to handle $PATCHES to avoid defining src_unpack just to use xpatch
#
# accepts zero or more parameters specifying patchfiles and/or patchdirs
#
# known issues:
# - only supports unified style patches (does anyone _really_ use anything
#   else?)
# - because it doesn't use --dry-run there is a risk of it failing
#   to find the files to patch, i.e. to detect the patchlevel, properly. It doesn't use
#   any of the backup heuristics that patch employs to discover a filename.
#   however, this isn't dangerous because if it works for the developer who's
#   writing the ebuild, it'll always work for the users, and if it doesn't,
#   then we'll fix it :-)
# - no support as yet for patches applying outside $S (and not directly in $WORKDIR).
xpatch() {

	debug-print-function ${FUNCNAME} $*

	local list=
	local list2=
	declare -i plevel

	# parse patch sources
	for x in $*
	do
		debug-print "${FUNCNAME}: parsing parameter ${x}"
		if [ -f "${x}" ]
		then
			list="${list} ${x}"
		elif [ -d "${x}" ]
		then
			# handles patchdirs like epatch() for now: no recursion.
			# patches are sorted by filename, so with an xy_foo naming scheme you'll get the right order.
			# only patches with _$ARCH_ or _all_ in their filenames are applied.
			for file in `ls -A ${x}`
			do
				debug-print "${FUNCNAME}: parsing in subdir: file ${file}"
				if [ -f "${x}/${file}" -a "${file}" != "${file/_all_}" -o \
					"${file}" != "${file/_$ARCH_}" ]
				then
					list2="${list2} ${x}/${file}"
				fi
			done
			list="`echo ${list2} | sort` ${list}"
		else
			die "Couldn't find ${x}"
		fi
	done

	debug-print "${FUNCNAME}: final list of patches: ${list}"

	for x in ${list};
	do
		debug-print "${FUNCNAME}: processing ${x}"
		# deal with compressed files. /usr/bin/file is in the system profile, or should be.
		case "`/usr/bin/file -b ${x}`" in
			*gzip*)
				patchfile="${T}/current.patch"
				ungzip -c "${x}" > "${patchfile}"
				;;
			*bzip2*)
				patchfile="${T}/current.patch"
				bunzip2 -c "${x}" > "${patchfile}"
				;;
			*text*)
				patchfile="${x}"
				;;
			*)
				die "Could not determine filetype of patch ${x}"
				;;
		esac
		debug-print "${FUNCNAME}: patchfile=${patchfile}"

		# determine patchlevel. supports p0 and higher with either $S or $WORKDIR as base.
		target="`/bin/grep -m 1 '^+++ ' ${patchfile}`"
		debug-print "${FUNCNAME}: raw target=${target}"
		# strip target down to the path/filename, remove leading +++
		target="${target/+++ }"; target="${target%% *}"
		# duplicate slashes are discarded by patch wrt the patchlevel. therefore we need
		# to discard them as well to calculate the correct patchlevel.
		target="${target//\/\//\/}"
		debug-print "${FUNCNAME}: stripped target=${target}"

		# look for target
		for basedir in "${S}" "${WORKDIR}" "${PWD}"; do
			debug-print "${FUNCNAME}: looking in basedir=${basedir}"
			cd "${basedir}"

			# try stripping leading directories
			target2="${target}"
			plevel=0
			debug-print "${FUNCNAME}: trying target2=${target2}, plevel=${plevel}"
			while [ ! -f "${target2}" ]
			do
				target2="${target2#*/}"	# removes piece of target2 up to the first occurrence of /
				plevel=$((plevel+1))
				debug-print "${FUNCNAME}: trying target2=${target2}, plevel=${plevel}"
				[ "${target2}" == "${target2/\/}" ] && break
			done
			test -f "${target2}" && break

			# try stripping filename - needed to support patches creating new files
			target2="${target%/*}"
			plevel=0
			debug-print "${FUNCNAME}: trying target2=${target2}, plevel=${plevel}"
			while [ ! -d "${target2}" ]
			do
				target2="${target2#*/}"	# removes piece of target2 up to the first occurrence of /
				plevel=$((plevel+1))
				debug-print "${FUNCNAME}: trying target2=${target2}, plevel=${plevel}"
				[ "${target2}" == "${target2/\/}" ] && break
			done
			test -d "${target2}" && break

		done

		test -f "${basedir}/${target2}" || test -d "${basedir}/${target2}" \
			|| die "Could not determine patchlevel for ${x}"
		debug-print "${FUNCNAME}: determined plevel=${plevel}"
		# do the patching
		ebegin "Applying patch ${x##*/}..."
		/usr/bin/patch -p${plevel} < "${patchfile}" > /dev/null \
			|| die "Failed to apply patch ${x}"
		eend $?

	done

}
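
# Example usage of xpatch() (the paths below are hypothetical and only
# illustrate the parameter handling described above): files are applied
# directly, while for a directory only patches whose names contain _all_
# or _${ARCH}_ are picked up.
#
#   src_unpack() {
#       unpack ${A}
#       cd "${S}"
#       xpatch "${FILESDIR}/${P}-gcc34.patch" "${FILESDIR}/patches"
#   }
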
# Unpack those pesky makeself generated files ...
# They're shell scripts with the binary package tagged onto
# the end of the archive. Loki utilized the format as does
…
# Usage: unpack_makeself [file to unpack] [offset]
# - If the file is not specified then unpack will utilize ${A}.
# - If the offset is not specified then we will attempt to extract
#   the proper offset from the script itself.
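# - Example (the archive name and offset below are made up for illustration):
#       unpack_makeself                  # unpacks ${DISTDIR}/${A}
#       unpack_makeself foo-1.0.run      # name the makeself archive explicitly
#       unpack_makeself foo-1.0.run 714  # ... and give the offset (in lines) yourself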
unpack_makeself() {
	local src="`find_unpackable_file $1`"
	local skip="$2"

	local shrtsrc="`basename ${src}`"
	echo ">>> Unpacking ${shrtsrc} to ${PWD}"
	if [ -z "${skip}" ]
	then
…
	local tmpfile="`mymktemp ${T}`"
	tail -n +${skip} ${src} 2>/dev/null | head -c 512 > ${tmpfile}
	local filetype="`file -b ${tmpfile}`"
	case ${filetype} in
		*tar\ archive)
			tail -n +${skip} ${src} | tar --no-same-owner -xf -
			;;
		bzip2*)
			tail -n +${skip} ${src} | bzip2 -dc | tar --no-same-owner -xf -
			;;
		gzip*)
			tail -n +${skip} ${src} | tar --no-same-owner -xzf -
			;;
		*)
			false
			;;
	esac
…
	local l="`basename ${lic}`"

	# here is where we check for the licenses the user already
	# accepted ... if we don't find a match, we make the user accept
	local alic
	for alic in "${ACCEPT_LICENSE}" ; do
		[ "${alic}" == "*" ] && return 0
		[ "${alic}" == "${l}" ] && return 0
	done

	local licmsg="`mymktemp ${T}`"
…
			eerror "You MUST accept the license to continue! Exiting!"
			die "Failed to accept license"
			;;
	esac
}
|
|
# Acquire cd(s) for those lovely cd-based emerges. Yes, this violates
# the whole 'non-interactive' policy, but damnit I want CD support !
#
# with these cdrom functions we handle all the user interaction and
# standardize everything. all you have to do is call cdrom_get_cds()
# and when the function returns, you can assume that the cd has been
# found at CDROM_ROOT.
#
# normally the cdrom functions will refer to the cds as 'cd #1', 'cd #2',
# etc... if you want to give the cds better names, then just export
# the CDROM_NAME_X variables before calling cdrom_get_cds().
#
# for those multi cd ebuilds, see the cdrom_load_next_cd() below.
#
# Usage: cdrom_get_cds <file on cd1> [file on cd2] [file on cd3] [...]
# - this will attempt to locate a cd based upon a file that is on
#   the cd ... the more files you give this function, the more cds
#   the cdrom functions will handle
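# - Example (the file names and CD names below are made up for illustration):
#       export CDROM_NAME_1="Install CD"
#       export CDROM_NAME_2="Data CD"
#       cdrom_get_cds setup/data1.cab data2/data2.cab
#       einfo "CD #1 found at ${CDROM_ROOT}"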
|
|
cdrom_get_cds() {
	# first we figure out how many cds we're dealing with by
	# the # of files they gave us
	local cdcnt=0
	local f=
	for f in "$@" ; do
		cdcnt=$((cdcnt + 1))
		export CDROM_CHECK_${cdcnt}="$f"
	done
	export CDROM_TOTAL_CDS=${cdcnt}
	export CDROM_CURRENT_CD=1

	# now we see if the user gave us CD_ROOT ...
	# if they did, let's just believe them that it's correct
	if [ ! -z "${CD_ROOT}" ] ; then
		export CDROM_ROOT="${CD_ROOT}"
		einfo "Found CD #${CDROM_CURRENT_CD} root at ${CDROM_ROOT}"
		return
	fi
	# do the same for CD_ROOT_X
	if [ ! -z "${CD_ROOT_1}" ] ; then
		local var=
		cdcnt=0
		while [ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ] ; do
			cdcnt=$((cdcnt + 1))
			var="CD_ROOT_${cdcnt}"
			if [ -z "${!var}" ] ; then
				eerror "You must either use just the CD_ROOT"
				eerror "or specify ALL the CD_ROOT_X variables."
				eerror "In this case, you will need ${CDROM_TOTAL_CDS} CD_ROOT_X variables."
				die "could not locate CD_ROOT_${cdcnt}"
			fi
			export CDROM_ROOTS_${cdcnt}="${!var}"
		done
		export CDROM_ROOT=${CDROM_ROOTS_1}
		einfo "Found CD #${CDROM_CURRENT_CD} root at ${CDROM_ROOT}"
		return
	fi

	if [ ${CDROM_TOTAL_CDS} -eq 1 ] ; then
		einfon "This ebuild will need the "
		if [ -z "${CDROM_NAME}" ] ; then
			echo "cdrom for ${PN}."
		else
			echo "${CDROM_NAME}."
		fi
		echo
		einfo "If you do not have the CD, but have the data files"
		einfo "mounted somewhere on your filesystem, just export"
		einfo "the variable CD_ROOT so that it points to the"
		einfo "directory containing the files."
		echo
	else
		einfo "This package will need access to ${CDROM_TOTAL_CDS} cds."
		cdcnt=0
		while [ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ] ; do
			cdcnt=$((cdcnt + 1))
			var="CDROM_NAME_${cdcnt}"
			[ ! -z "${!var}" ] && einfo " CD ${cdcnt}: ${!var}"
		done
		echo
		einfo "If you do not have the CDs, but have the data files"
		einfo "mounted somewhere on your filesystem, just export"
		einfo "the following variables so they point to the right place:"
		einfon ""
		cdcnt=0
		while [ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ] ; do
			cdcnt=$((cdcnt + 1))
			echo -n " CD_ROOT_${cdcnt}"
		done
		echo
		einfo "Or, if you have all the files in the same place, or"
		einfo "you only have one cdrom, you can export CD_ROOT"
		einfo "and that place will be used as the same data source"
		einfo "for all the CDs."
		echo
	fi
	export CDROM_CURRENT_CD=0
	cdrom_load_next_cd
}
|
|
# this is only used when you need access to more than one cd.
# when you have finished using the first cd, just call this function.
# when it returns, CDROM_ROOT will be pointing to the second cd.
# remember, you can only go forward in the cd chain, you can't go back.
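# Example (continuing the hypothetical two-cd setup shown above): once you
# are done copying things out of ${CDROM_ROOT} for cd #1, switch to cd #2:
#       cp -r "${CDROM_ROOT}"/setup "${WORKDIR}"/
#       cdrom_load_next_cd
#       cp -r "${CDROM_ROOT}"/data2 "${WORKDIR}"/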
|
|
cdrom_load_next_cd() {
	export CDROM_CURRENT_CD=$((CDROM_CURRENT_CD + 1))
	local var=

	unset CDROM_ROOT
	var=CDROM_ROOTS_${CDROM_CURRENT_CD}
	if [ -z "${!var}" ] ; then
		var="CDROM_CHECK_${CDROM_CURRENT_CD}"
		cdrom_locate_file_on_cd ${!var}
	else
		export CDROM_ROOT="${!var}"
	fi

	einfo "Found CD #${CDROM_CURRENT_CD} root at ${CDROM_ROOT}"
}
|
|
# this is used internally by the cdrom_get_cds() and cdrom_load_next_cd()
# functions. this should *never* be called from an ebuild.
# all it does is try to locate a given file on a cd ... if the cd isn't
# found, then a message asking the user to insert the cdrom will be
# displayed and we'll hang out here until:
# (1) the file is found on a mounted cdrom
# (2) the user hits CTRL+C
cdrom_locate_file_on_cd() {
	while [ -z "${CDROM_ROOT}" ] ; do
		local dir="$(dirname ${@})"
		local file="$(basename ${@})"
		local mline=""
		local showedmsg=0

		for mline in `mount | egrep -e '(iso|cdrom)' | awk '{print $3}'` ; do
			[ -d "${mline}/${dir}" ] || continue
			[ ! -z "$(find ${mline}/${dir} -iname ${file} -maxdepth 1)" ] \
				&& export CDROM_ROOT=${mline}
		done

		if [ -z "${CDROM_ROOT}" ] ; then
			echo
			if [ ${showedmsg} -eq 0 ] ; then
				if [ ${CDROM_TOTAL_CDS} -eq 1 ] ; then
					if [ -z "${CDROM_NAME}" ] ; then
						einfo "Please insert the cdrom for ${PN} now !"
					else
						einfo "Please insert the ${CDROM_NAME} cdrom now !"
					fi
				else
					if [ -z "${CDROM_NAME_1}" ] ; then
						einfo "Please insert cd #${CDROM_CURRENT_CD} for ${PN} now !"
					else
						local var="CDROM_NAME_${CDROM_CURRENT_CD}"
						einfo "Please insert+mount the ${!var} cdrom now !"
					fi
				fi
				showedmsg=1
			fi
			einfo "Press return to scan for the cd again"
			einfo "or hit CTRL+C to abort the emerge."
			read
		fi
	done
}